}
-// -------------------------------------------------------------------------
-// Deferred code objects
-//
-// These subclasses of DeferredCode add pieces of code to the end of generated
-// code. They are branched to from the generated code, and
-// keep some slower code out of the main body of the generated code.
-// Many of them call a code stub or a runtime function.
-
-class DeferredInlineSmiAdd: public DeferredCode {
- public:
- DeferredInlineSmiAdd(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiAdd");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-// The result of value + src is in dst. It either overflowed or was not
-// smi tagged. Undo the speculative addition and call the appropriate
-// specialized stub for add. The result is left in dst.
-class DeferredInlineSmiAddReversed: public DeferredCode {
- public:
- DeferredInlineSmiAddReversed(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiAddReversed");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-class DeferredInlineSmiSub: public DeferredCode {
- public:
- DeferredInlineSmiSub(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiSub");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-// Call the appropriate binary operation stub to compute src op value
-// and leave the result in dst.
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
- DeferredInlineSmiOperation(Token::Value op,
- Register dst,
- Register src,
- Smi* value,
- OverwriteMode overwrite_mode)
- : op_(op),
- dst_(dst),
- src_(src),
- value_(value),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiOperation");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- Register src_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-// Call the appropriate binary operation stub to compute value op src
-// and leave the result in dst.
-class DeferredInlineSmiOperationReversed: public DeferredCode {
- public:
- DeferredInlineSmiOperationReversed(Token::Value op,
- Register dst,
- Smi* value,
- Register src,
- OverwriteMode overwrite_mode)
- : op_(op),
- dst_(dst),
- value_(value),
- src_(src),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiOperationReversed");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- Smi* value_;
- Register src_;
- OverwriteMode overwrite_mode_;
-};
-
-
-class FloatingPointHelper : public AllStatic {
- public:
- // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
- // If the operands are not both numbers, jump to not_numbers.
- // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
- // NumberOperands assumes both are smis or heap numbers.
- static void LoadSSE2SmiOperands(MacroAssembler* masm);
- static void LoadSSE2NumberOperands(MacroAssembler* masm);
- static void LoadSSE2UnknownOperands(MacroAssembler* masm,
- Label* not_numbers);
-
- // Takes the operands in rdx and rax and loads them as integers in rax
- // and rcx.
- static void LoadAsIntegers(MacroAssembler* masm,
- Label* operand_conversion_failure,
- Register heap_number_map);
- // As above, but we know the operands to be numbers. In that case,
- // conversion can't fail.
- static void LoadNumbersAsIntegers(MacroAssembler* masm);
-};
-
-
// -----------------------------------------------------------------------------
// CodeGenerator implementation.
}
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals. The inevitable call
- // will sync frame elements to memory anyway, so we do it eagerly to
- // allow us to push the arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(rsi); // The context is the first argument.
- frame_->EmitPush(kScratchRegister);
- frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
- Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
- // Return value is ignored.
-}
-
-
void CodeGenerator::Generate(CompilationInfo* info) {
// Record the position for debugging purposes.
CodeForFunctionPosition(info->function());
allocator_ = NULL;
}
-void CodeGenerator::GenerateReturnSequence(Result* return_value) {
- // The return value is a live (but not currently reference counted)
- // reference to rax. This is safe because the current frame does not
- // contain a reference to rax (it is prepared for the return by spilling
- // all registers).
- if (FLAG_trace) {
- frame_->Push(return_value);
- *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
- }
- return_value->ToRegister(rax);
-
- // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
-
- // Leave the frame and return popping the arguments and the
- // receiver.
- frame_->Exit();
- masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Add padding that will be overwritten by a debugger breakpoint.
- // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
- // with length 7 (3 + 1 + 3).
- const int kPadding = Assembler::kJSReturnSequenceLength - 7;
- for (int i = 0; i < kPadding; ++i) {
- masm_->int3();
- }
- // Check that the size of the code used for returning matches what is
- // expected by the debugger.
- ASSERT_EQ(Assembler::kJSReturnSequenceLength,
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
- DeleteFrame();
-}
+Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
+ // Currently, this assertion will fail if we try to assign to
+ // a constant variable that is constant because it is read-only
+ // (such as the variable referring to a named function expression).
+ // We need to implement assignments to read-only variables.
+ // Ideally, we should do this during AST generation (by converting
+ // such assignments into expression statements); however, in general
+ // we may not be able to make the decision until past AST generation,
+ // that is when the entire program is known.
+ ASSERT(slot != NULL);
+ int index = slot->index();
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ return frame_->ParameterAt(index);
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() {
- return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
- && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
- && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
- && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
- && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
- && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
- && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
- && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
- && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
- && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
-}
-#endif
+ case Slot::LOCAL:
+ return frame_->LocalAt(index);
+ case Slot::CONTEXT: {
+ // Follow the context chain if necessary.
+ ASSERT(!tmp.is(rsi)); // do not overwrite context register
+ Register context = rsi;
+ int chain_length = scope()->ContextChainLength(slot->var()->scope());
+ for (int i = 0; i < chain_length; i++) {
+ // Load the closure.
+ // (All contexts, even 'with' contexts, have a closure,
+ // and it is the same for all contexts inside a function.
+ // There is no need to go to the function context first.)
+ __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
+ // Load the function context (which is the incoming, outer context).
+ __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
+ context = tmp;
+ }
+ // We may have a 'with' context now. Get the function context.
+ // (In fact this mov may never be needed, since the scope analysis
+ // may not permit a direct context access in this case and thus we are
+ // always at a function context. However it is safe to dereference be-
+ // cause the function context of a function context is itself. Before
+ // deleting this mov we should try to create a counter-example first,
+ // though...)
+ __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
+ return ContextOperand(tmp, index);
+ }
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
- explicit DeferredReferenceGetKeyedValue(Register dst,
- Register receiver,
- Register key)
- : dst_(dst), receiver_(receiver), key_(key) {
- set_comment("[ DeferredReferenceGetKeyedValue");
+ default:
+ UNREACHABLE();
+ return Operand(rsp, 0);
}
+}
- virtual void Generate();
- Label* patch_site() { return &patch_site_; }
+Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
+ Result tmp,
+ JumpTarget* slow) {
+ ASSERT(slot->type() == Slot::CONTEXT);
+ ASSERT(tmp.is_register());
+ Register context = rsi;
- private:
- Label patch_site_;
- Register dst_;
- Register receiver_;
- Register key_;
-};
+ for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_eval()) {
+ // Check that extension is NULL.
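+ // (A call to eval may have introduced new bindings through a
+ // context extension object; if one is present we cannot use the
+ // statically resolved slot and must take the slow path.)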
+ __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ Immediate(0));
+ slow->Branch(not_equal, not_taken);
+ }
+ __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
+ __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+ context = tmp.reg();
+ }
+ }
+ // Check that last extension is NULL.
+ __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
+ slow->Branch(not_equal, not_taken);
+ __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
+ return ContextOperand(tmp.reg(), slot->index());
+}
-void DeferredReferenceGetKeyedValue::Generate() {
- if (receiver_.is(rdx)) {
- if (!key_.is(rax)) {
- __ movq(rax, key_);
- } // else do nothing.
- } else if (receiver_.is(rax)) {
- if (key_.is(rdx)) {
- __ xchg(rax, rdx);
- } else if (key_.is(rax)) {
- __ movq(rdx, receiver_);
- } else {
- __ movq(rdx, receiver_);
- __ movq(rax, key_);
+// Emit code to load the value of an expression to the top of the
+// frame. If the expression is boolean-valued it may be compiled (or
+// partially compiled) into control flow to the control destination.
+// If force_control is true, the loaded value is always converted into
+// control flow (a branch to the control destination).
+void CodeGenerator::LoadCondition(Expression* x,
+ ControlDestination* dest,
+ bool force_control) {
+ ASSERT(!in_spilled_code());
+ int original_height = frame_->height();
+
+ { CodeGenState new_state(this, dest);
+ Visit(x);
+
+ // If we hit a stack overflow, we may not have actually visited
+ // the expression. In that case, we ensure that we have a
+ // valid-looking frame state because we will continue to generate
+ // code as we unwind the C++ stack.
+ //
+ // It's possible to have both a stack overflow and a valid frame
+ // state (e.g., a subexpression overflowed, visiting it returned
+ // with a dummied frame state, and visiting this expression
+ // returned with a normal-looking state).
+ if (HasStackOverflow() &&
+ !dest->is_used() &&
+ frame_->height() == original_height) {
+ dest->Goto(true);
}
- } else if (key_.is(rax)) {
- __ movq(rdx, receiver_);
- } else {
- __ movq(rax, key_);
- __ movq(rdx, receiver_);
}
- // Calculate the delta from the IC call instruction to the map check
- // movq instruction in the inlined version. This delta is stored in
- // a test(rax, delta) instruction after the call so that we can find
- // it in the IC initialization code and patch the movq instruction.
- // This means that we cannot allow test instructions after calls to
- // KeyedLoadIC stubs in other places.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The delta from the start of the map-compare instruction to the
- // test instruction. We use masm_-> directly here instead of the __
- // macro because the macro sometimes uses macro expansion to turn
- // into something that can't return a value. This is encountered
- // when doing generated code coverage tests.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- // TODO(X64): Consider whether it's worth switching the test to a
- // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
- // be generated normally.
- masm_->testl(rax, Immediate(-delta_to_patch_site));
- __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
- if (!dst_.is(rax)) __ movq(dst_, rax);
+ if (force_control && !dest->is_used()) {
+ // Convert the TOS value into flow to the control destination.
+ // TODO(X64): Make control flow to control destinations work.
+ ToBoolean(dest);
+ }
+
+ ASSERT(!(force_control && !dest->is_used()));
+ ASSERT(dest->is_used() || frame_->height() == original_height + 1);
}
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
- DeferredReferenceSetKeyedValue(Register value,
- Register key,
- Register receiver)
- : value_(value), key_(key), receiver_(receiver) {
- set_comment("[ DeferredReferenceSetKeyedValue");
- }
+void CodeGenerator::LoadAndSpill(Expression* expression) {
+ // TODO(x64): No architecture specific code. Move to shared location.
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ Load(expression);
+ frame_->SpillAll();
+ set_in_spilled_code(true);
+}
- virtual void Generate();
- Label* patch_site() { return &patch_site_; }
+void CodeGenerator::Load(Expression* expr) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ ASSERT(!in_spilled_code());
+ JumpTarget true_target;
+ JumpTarget false_target;
+ ControlDestination dest(&true_target, &false_target, true);
+ LoadCondition(expr, &dest, false);
- private:
- Register value_;
- Register key_;
- Register receiver_;
- Label patch_site_;
-};
+ if (dest.false_was_fall_through()) {
+ // The false target was just bound.
+ JumpTarget loaded;
+ frame_->Push(Factory::false_value());
+ // There may be dangling jumps to the true target.
+ if (true_target.is_linked()) {
+ loaded.Jump();
+ true_target.Bind();
+ frame_->Push(Factory::true_value());
+ loaded.Bind();
+ }
+ } else if (dest.is_used()) {
+ // There is true, and possibly false, control flow (with true as
+ // the fall through).
+ JumpTarget loaded;
+ frame_->Push(Factory::true_value());
+ if (false_target.is_linked()) {
+ loaded.Jump();
+ false_target.Bind();
+ frame_->Push(Factory::false_value());
+ loaded.Bind();
+ }
-void DeferredReferenceSetKeyedValue::Generate() {
- __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
- // Move value, receiver, and key to registers rax, rdx, and rcx, as
- // the IC stub expects.
- // Move value to rax, using xchg if the receiver or key is in rax.
- if (!value_.is(rax)) {
- if (!receiver_.is(rax) && !key_.is(rax)) {
- __ movq(rax, value_);
- } else {
- __ xchg(rax, value_);
- // Update receiver_ and key_ if they are affected by the swap.
- if (receiver_.is(rax)) {
- receiver_ = value_;
- } else if (receiver_.is(value_)) {
- receiver_ = rax;
+ } else {
+ // We have a valid value on top of the frame, but we still may
+ // have dangling jumps to the true and false targets from nested
+ // subexpressions (e.g., the left subexpressions of the
+ // short-circuited boolean operators).
+ ASSERT(has_valid_frame());
+ if (true_target.is_linked() || false_target.is_linked()) {
+ JumpTarget loaded;
+ loaded.Jump(); // Don't lose the current TOS.
+ if (true_target.is_linked()) {
+ true_target.Bind();
+ frame_->Push(Factory::true_value());
+ if (false_target.is_linked()) {
+ loaded.Jump();
+ }
}
- if (key_.is(rax)) {
- key_ = value_;
- } else if (key_.is(value_)) {
- key_ = rax;
+ if (false_target.is_linked()) {
+ false_target.Bind();
+ frame_->Push(Factory::false_value());
}
+ loaded.Bind();
}
}
- // Value is now in rax. Its original location is remembered in value_,
- // and the value is restored to value_ before returning.
- // The variables receiver_ and key_ are not preserved.
- // Move receiver and key to rdx and rcx, swapping if necessary.
- if (receiver_.is(rdx)) {
- if (!key_.is(rcx)) {
- __ movq(rcx, key_);
- } // Else everything is already in the right place.
- } else if (receiver_.is(rcx)) {
- if (key_.is(rdx)) {
- __ xchg(rcx, rdx);
- } else if (key_.is(rcx)) {
- __ movq(rdx, receiver_);
- } else {
- __ movq(rdx, receiver_);
- __ movq(rcx, key_);
- }
- } else if (key_.is(rcx)) {
- __ movq(rdx, receiver_);
+
+ ASSERT(has_valid_frame());
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::LoadGlobal() {
+ if (in_spilled_code()) {
+ frame_->EmitPush(GlobalObject());
} else {
- __ movq(rcx, key_);
- __ movq(rdx, receiver_);
+ Result temp = allocator_->Allocate();
+ __ movq(temp.reg(), GlobalObject());
+ frame_->Push(&temp);
}
+}
- // Call the IC stub.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The delta from the start of the map-compare instructions (initial movq)
- // to the test instruction. We use masm_-> directly here instead of the
- // __ macro because the macro sometimes uses macro expansion to turn
- // into something that can't return a value. This is encountered
- // when doing generated code coverage tests.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- masm_->testl(rax, Immediate(-delta_to_patch_site));
- // Restore value (returned from store IC).
- if (!value_.is(rax)) __ movq(value_, rax);
+
+void CodeGenerator::LoadGlobalReceiver() {
+ Result temp = allocator_->Allocate();
+ Register reg = temp.reg();
+ __ movq(reg, GlobalObject());
+ __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
+ frame_->Push(&temp);
}
-void CodeGenerator::CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position) {
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments).
- // If the arguments object of the scope has not been allocated,
- // and x.apply is Function.prototype.apply, this optimization
- // just copies y and the arguments of the current function on the
- // stack, as receiver and arguments, and calls x.
- // In the implementation comments, we call x the applicand
- // and y the receiver.
- ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
- ASSERT(arguments->IsArguments());
+void CodeGenerator::LoadTypeofExpression(Expression* expr) {
+ // Special handling of identifiers as subexpressions of typeof.
+ Variable* variable = expr->AsVariableProxy()->AsVariable();
+ if (variable != NULL && !variable->is_this() && variable->is_global()) {
+ // For a global variable we build the property reference
+ // <global>.<variable> and perform a (regular non-contextual) property
+ // load to make sure we do not get reference errors.
+ Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
+ Literal key(variable->name());
+ Property property(&global, &key, RelocInfo::kNoPosition);
+ Reference ref(this, &property);
+ ref.GetValue();
+ } else if (variable != NULL && variable->slot() != NULL) {
+ // For a variable that rewrites to a slot, we signal it is the immediate
+ // subexpression of a typeof.
+ LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
+ } else {
+ // Anything else can be handled normally.
+ Load(expr);
+ }
+}
- // Load applicand.apply onto the stack. This will usually
- // give us a megamorphic load site. Not super, but it works.
- Load(applicand);
- frame()->Dup();
- Handle<String> name = Factory::LookupAsciiSymbol("apply");
- frame()->Push(name);
- Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
- __ nop();
- frame()->Push(&answer);
- // Load the receiver and the existing arguments object onto the
- // expression stack. Avoid allocating the arguments object here.
- Load(receiver);
- LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
+ if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
+ ASSERT(scope()->arguments_shadow() != NULL);
+ // We don't want to do lazy arguments allocation for functions that
+ // have heap-allocated contexts, because it interferes with the
+ // uninitialized const tracking in the context objects.
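+ // (For example, a function whose parameter is captured by a nested
+ // closure, such as function f(x) { return function() { return x; }; },
+ // has num_heap_slots() > 0 and therefore allocates eagerly.)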
+ return (scope()->num_heap_slots() > 0)
+ ? EAGER_ARGUMENTS_ALLOCATION
+ : LAZY_ARGUMENTS_ALLOCATION;
+}
- // Emit the source position information after having loaded the
- // receiver and the arguments.
- CodeForSourcePosition(position);
- // Contents of frame at this point:
- // Frame[0]: arguments object of the current function or the hole.
- // Frame[1]: receiver
- // Frame[2]: applicand.apply
- // Frame[3]: applicand.
- // Check if the arguments object has been lazily allocated
- // already. If so, just use that instead of copying the arguments
- // from the stack. This also deals with cases where a local variable
- // named 'arguments' has been introduced.
- frame_->Dup();
- Result probe = frame_->Pop();
- { VirtualFrame::SpilledScope spilled_scope;
- Label slow, done;
- bool try_lazy = true;
+Result CodeGenerator::StoreArgumentsObject(bool initial) {
+ ArgumentsAllocationMode mode = ArgumentsMode();
+ ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
+
+ Comment cmnt(masm_, "[ store arguments object");
+ if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
+ // When using lazy arguments allocation, we store the hole value
+ // as a sentinel indicating that the arguments object hasn't been
+ // allocated yet.
+ frame_->Push(Factory::the_hole_value());
+ } else {
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ frame_->PushFunction();
+ frame_->PushReceiverSlotAddress();
+ frame_->Push(Smi::FromInt(scope()->num_parameters()));
+ Result result = frame_->CallStub(&stub, 3);
+ frame_->Push(&result);
+ }
+
+
+ Variable* arguments = scope()->arguments()->var();
+ Variable* shadow = scope()->arguments_shadow()->var();
+ ASSERT(arguments != NULL && arguments->slot() != NULL);
+ ASSERT(shadow != NULL && shadow->slot() != NULL);
+ JumpTarget done;
+ bool skip_arguments = false;
+ if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
+ // We have to skip storing into the arguments slot if it has
+ // already been written to. This can happen if a function
+ // has a local variable named 'arguments'.
+ LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+ Result probe = frame_->Pop();
if (probe.is_constant()) {
- try_lazy = probe.handle()->IsTheHole();
+ // We have to skip updating the arguments object if it has been
+ // assigned a proper value.
+ skip_arguments = !probe.handle()->IsTheHole();
} else {
__ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
probe.Unuse();
- __ j(not_equal, &slow);
+ done.Branch(not_equal);
}
+ }
+ if (!skip_arguments) {
+ StoreToSlot(arguments->slot(), NOT_CONST_INIT);
+ if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
+ }
+ StoreToSlot(shadow->slot(), NOT_CONST_INIT);
+ return frame_->Pop();
+}
- if (try_lazy) {
- Label build_args;
- // Get rid of the arguments object probe.
- frame_->Drop(); // Can be called on a spilled frame.
- // Stack now has 3 elements on it.
- // Contents of stack at this point:
- // rsp[0]: receiver
- // rsp[1]: applicand.apply
- // rsp[2]: applicand.
-
- // Check that the receiver really is a JavaScript object.
- __ movq(rax, Operand(rsp, 0));
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, &build_args);
- // We allow all JSObjects including JSFunctions. As long as
- // JS_FUNCTION_TYPE is the last instance type and it is right
- // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
- // bound.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &build_args);
-
- // Check that applicand.apply is Function.prototype.apply.
- __ movq(rax, Operand(rsp, kPointerSize));
- is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, &build_args);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &build_args);
- __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
- Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ Cmp(FieldOperand(rax, SharedFunctionInfo::kCodeOffset), apply_code);
- __ j(not_equal, &build_args);
-
- // Check that applicand is a function.
- __ movq(rdi, Operand(rsp, 2 * kPointerSize));
- is_smi = masm_->CheckSmi(rdi);
- __ j(is_smi, &build_args);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &build_args);
+// -----------------------------------------------------------------------------
+// CodeGenerator implementation of variables, lookups, and stores.
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- Label invoke, adapted;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adapted);
- // No arguments adaptor frame. Copy fixed number of arguments.
- __ Set(rax, scope()->num_parameters());
- for (int i = 0; i < scope()->num_parameters(); i++) {
- __ push(frame_->ParameterAt(i));
- }
- __ jmp(&invoke);
+Reference::Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get)
+ : cgen_(cgen),
+ expression_(expression),
+ type_(ILLEGAL),
+ persist_after_get_(persist_after_get) {
+ cgen->LoadReference(this);
+}
- // Arguments adaptor frame present. Copy arguments from there, but
- // avoid copying too many arguments to avoid stack overflows.
- __ bind(&adapted);
- static const uint32_t kArgumentsLimit = 1 * KB;
- __ SmiToInteger32(rax,
- Operand(rdx,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movl(rcx, rax);
- __ cmpl(rax, Immediate(kArgumentsLimit));
- __ j(above, &build_args);
- // Loop through the arguments pushing them onto the execution
- // stack. We don't inform the virtual frame of the push, so we don't
- // have to worry about getting rid of the elements from the virtual
- // frame.
- Label loop;
- // rcx is a small non-negative integer, due to the test above.
- __ testl(rcx, rcx);
- __ j(zero, &invoke);
- __ bind(&loop);
- __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
- __ decl(rcx);
- __ j(not_zero, &loop);
+Reference::~Reference() {
+ ASSERT(is_unloaded() || is_illegal());
+}
- // Invoke the function.
- __ bind(&invoke);
- ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION);
- // Drop applicand.apply and applicand from the stack, and push
- // the result of the function call, but leave the spilled frame
- // unchanged, with 3 elements, so it is correct when we compile the
- // slow-case code.
- __ addq(rsp, Immediate(2 * kPointerSize));
- __ push(rax);
- // Stack now has 1 element:
- // rsp[0]: result
- __ jmp(&done);
- // Slow-case: Allocate the arguments object since we know it isn't
- // there, and fall-through to the slow-case where we call
- // applicand.apply.
- __ bind(&build_args);
- // Stack now has 3 elements, because we have jumped from where:
- // rsp[0]: receiver
- // rsp[1]: applicand.apply
- // rsp[2]: applicand.
+void CodeGenerator::LoadReference(Reference* ref) {
+ // References are loaded from both spilled and unspilled code. Set the
+ // state to unspilled to allow that (and explicitly spill after
+ // construction at the construction sites).
+ bool was_in_spilled_code = in_spilled_code_;
+ in_spilled_code_ = false;
- // StoreArgumentsObject requires a correct frame, and may modify it.
- Result arguments_object = StoreArgumentsObject(false);
- frame_->SpillAll();
- arguments_object.ToRegister();
- frame_->EmitPush(arguments_object.reg());
- arguments_object.Unuse();
- // Stack and frame now have 4 elements.
- __ bind(&slow);
- }
+ Comment cmnt(masm_, "[ LoadReference");
+ Expression* e = ref->expression();
+ Property* property = e->AsProperty();
+ Variable* var = e->AsVariableProxy()->AsVariable();
- // Generic computation of x.apply(y, args) with no special optimization.
- // Flip applicand.apply and applicand on the stack, so
- // applicand looks like the receiver of the applicand.apply call.
- // Then process it as a normal function call.
- __ movq(rax, Operand(rsp, 3 * kPointerSize));
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
- __ movq(Operand(rsp, 2 * kPointerSize), rax);
- __ movq(Operand(rsp, 3 * kPointerSize), rbx);
-
- CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
- Result res = frame_->CallStub(&call_function, 3);
- // The function and its two arguments have been dropped.
- frame_->Drop(1); // Drop the receiver as well.
- res.ToRegister();
- frame_->EmitPush(res.reg());
- // Stack now has 1 element:
- // rsp[0]: result
- if (try_lazy) __ bind(&done);
- } // End of spilled scope.
- // Restore the context register after a call.
- frame_->RestoreContextRegister();
-}
-
-
-class DeferredStackCheck: public DeferredCode {
- public:
- DeferredStackCheck() {
- set_comment("[ DeferredStackCheck");
+ if (property != NULL) {
+ // The expression is either a property or a variable proxy that rewrites
+ // to a property.
+ Load(property->obj());
+ if (property->key()->IsPropertyName()) {
+ ref->set_type(Reference::NAMED);
+ } else {
+ Load(property->key());
+ ref->set_type(Reference::KEYED);
+ }
+ } else if (var != NULL) {
+ // The expression is a variable proxy that does not rewrite to a
+ // property. Global variables are treated as named property references.
+ if (var->is_global()) {
+ // If rax is free, the register allocator prefers it. Thus the code
+ // generator will load the global object into rax, which is where
+ // LoadIC wants it. Most uses of Reference call LoadIC directly
+ // after the reference is created.
+ frame_->Spill(rax);
+ LoadGlobal();
+ ref->set_type(Reference::NAMED);
+ } else {
+ ASSERT(var->slot() != NULL);
+ ref->set_type(Reference::SLOT);
+ }
+ } else {
+ // Anything else is a runtime error.
+ Load(e);
+ frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
}
- virtual void Generate();
-};
-
-
-void DeferredStackCheck::Generate() {
- StackCheckStub stub;
- __ CallStub(&stub);
+ in_spilled_code_ = was_in_spilled_code;
}
-void CodeGenerator::CheckStack() {
- DeferredStackCheck* deferred = new DeferredStackCheck;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- deferred->Branch(below);
- deferred->BindExit();
+void CodeGenerator::UnloadReference(Reference* ref) {
+ // Pop a reference from the stack while preserving TOS.
+ Comment cmnt(masm_, "[ UnloadReference");
+ frame_->Nip(ref->size());
+ ref->set_unloaded();
}
-void CodeGenerator::VisitAndSpill(Statement* statement) {
- // TODO(X64): No architecture specific code. Move to shared location.
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- Visit(statement);
- if (frame_ != NULL) {
- frame_->SpillAll();
- }
- set_in_spilled_code(true);
-}
+// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
+// convert it to a boolean in the condition code register or jump to
+// 'false_target'/'true_target' as appropriate.
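+// The fast paths below handle false, true, undefined, smis, and (when
+// the TypeInfo proves a number) heap numbers compared against zero;
+// everything else is dispatched to the generic ToBooleanStub.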
+void CodeGenerator::ToBoolean(ControlDestination* dest) {
+ Comment cmnt(masm_, "[ ToBoolean");
+ // The value to convert should be popped from the frame.
+ Result value = frame_->Pop();
+ value.ToRegister();
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- VisitStatements(statements);
- if (frame_ != NULL) {
- frame_->SpillAll();
- }
- set_in_spilled_code(true);
-}
+ if (value.is_number()) {
+ // Fast case if TypeInfo indicates only numbers.
+ if (FLAG_debug_code) {
+ __ AbortIfNotNumber(value.reg());
+ }
+ // Smi => false iff zero.
+ __ SmiCompare(value.reg(), Smi::FromInt(0));
+ if (value.is_smi()) {
+ value.Unuse();
+ dest->Split(not_zero);
+ } else {
+ dest->false_target()->Branch(equal);
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ dest->true_target()->Branch(is_smi);
+ __ xorpd(xmm0, xmm0);
+ __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset));
+ value.Unuse();
+ dest->Split(not_zero);
+ }
+ } else {
+ // Fast case checks.
+ // 'false' => false.
+ __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
+ dest->false_target()->Branch(equal);
+ // 'true' => true.
+ __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
+ dest->true_target()->Branch(equal);
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
- ASSERT(!in_spilled_code());
- for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
- Visit(statements->at(i));
- }
-}
+ // 'undefined' => false.
+ __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
+ dest->false_target()->Branch(equal);
+ // Smi => false iff zero.
+ __ SmiCompare(value.reg(), Smi::FromInt(0));
+ dest->false_target()->Branch(equal);
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ dest->true_target()->Branch(is_smi);
-void CodeGenerator::VisitBlock(Block* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ Block");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- VisitStatements(node->statements());
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
+ // Call the stub for all other cases.
+ frame_->Push(&value); // Undo the Pop() from above.
+ ToBooleanStub stub;
+ Result temp = frame_->CallStub(&stub, 1);
+ // Convert the result to a condition code.
+ __ testq(temp.reg(), temp.reg());
+ temp.Unuse();
+ dest->Split(not_equal);
}
- node->break_target()->Unuse();
}
-void CodeGenerator::VisitDeclaration(Declaration* node) {
- Comment cmnt(masm_, "[ Declaration");
- Variable* var = node->proxy()->var();
- ASSERT(var != NULL); // must have been resolved
- Slot* slot = var->slot();
+class FloatingPointHelper : public AllStatic {
+ public:
+ // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
+ // If the operands are not both numbers, jump to not_numbers.
+ // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
+ // NumberOperands assumes both are smis or heap numbers.
+ static void LoadSSE2SmiOperands(MacroAssembler* masm);
+ static void LoadSSE2NumberOperands(MacroAssembler* masm);
+ static void LoadSSE2UnknownOperands(MacroAssembler* masm,
+ Label* not_numbers);
- // If it was not possible to allocate the variable at compile time,
- // we need to "declare" it at runtime to make sure it actually
- // exists in the local context.
- if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Variables with a "LOOKUP" slot were introduced as non-locals
- // during variable resolution and must have mode DYNAMIC.
- ASSERT(var->is_dynamic());
- // For now, just do a runtime call. Sync the virtual frame eagerly
- // so we can simply push the arguments into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(rsi);
- __ movq(kScratchRegister, var->name(), RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(kScratchRegister);
- // Declaration nodes are always introduced in one of two modes.
- ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
- PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
- frame_->EmitPush(Smi::FromInt(attr));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (node->mode() == Variable::CONST) {
- frame_->EmitPush(Heap::kTheHoleValueRootIndex);
- } else if (node->fun() != NULL) {
- Load(node->fun());
- } else {
- frame_->EmitPush(Smi::FromInt(0)); // no initial value!
- }
- Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
- // Ignore the return value (declarations are statements).
- return;
- }
+ // Takes the operands in rdx and rax and loads them as integers in rax
+ // and rcx.
+ static void LoadAsIntegers(MacroAssembler* masm,
+ Label* operand_conversion_failure,
+ Register heap_number_map);
+ // As above, but we know the operands to be numbers. In that case,
+ // conversion can't fail.
+ static void LoadNumbersAsIntegers(MacroAssembler* masm);
+};
- ASSERT(!var->is_global());
- // If we have a function or a constant, we need to initialize the variable.
- Expression* val = NULL;
- if (node->mode() == Variable::CONST) {
- val = new Literal(Factory::the_hole_value());
- } else {
- val = node->fun(); // NULL if we don't have a function
+const char* GenericBinaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int len = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(len);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
}
- if (val != NULL) {
- {
- // Set the initial value.
- Reference target(this, node->proxy());
- Load(val);
- target.SetValue(NOT_CONST_INIT);
- // The reference is removed from the stack (preserving TOS) when
- // it goes out of scope.
- }
- // Get rid of the assigned value (declarations are statements).
- frame_->Drop();
- }
+ OS::SNPrintF(Vector<char>(name_, len),
+ "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
+ op_name,
+ overwrite_name,
+ (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
+ args_in_registers_ ? "RegArgs" : "StackArgs",
+ args_reversed_ ? "_R" : "",
+ static_operands_type_.ToString(),
+ BinaryOpIC::GetName(runtime_operands_type_));
+ return name_;
}
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ExpressionStatement");
- CodeForStatementPosition(node);
- Expression* expression = node->expression();
- expression->MarkAsStatement();
- Load(expression);
- // Remove the lingering expression result from the top of stack.
- frame_->Drop();
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "// EmptyStatement");
- CodeForStatementPosition(node);
- // nothing to do
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ IfStatement");
- // Generate different code depending on which parts of the if statement
- // are present or not.
- bool has_then_stm = node->HasThenStatement();
- bool has_else_stm = node->HasElseStatement();
+// Call the specialized stub for a binary operation.
+class DeferredInlineBinaryOperation: public DeferredCode {
+ public:
+ DeferredInlineBinaryOperation(Token::Value op,
+ Register dst,
+ Register left,
+ Register right,
+ OverwriteMode mode)
+ : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
+ set_comment("[ DeferredInlineBinaryOperation");
+ }
- CodeForStatementPosition(node);
- JumpTarget exit;
- if (has_then_stm && has_else_stm) {
- JumpTarget then;
- JumpTarget else_;
- ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), &dest, true);
+ virtual void Generate();
- if (dest.false_was_fall_through()) {
- // The else target was bound, so we compile the else part first.
- Visit(node->else_statement());
+ private:
+ Token::Value op_;
+ Register dst_;
+ Register left_;
+ Register right_;
+ OverwriteMode mode_;
+};
- // We may have dangling jumps to the then part.
- if (then.is_linked()) {
- if (has_valid_frame()) exit.Jump();
- then.Bind();
- Visit(node->then_statement());
- }
- } else {
- // The then target was bound, so we compile the then part first.
- Visit(node->then_statement());
- if (else_.is_linked()) {
- if (has_valid_frame()) exit.Jump();
- else_.Bind();
- Visit(node->else_statement());
- }
+void DeferredInlineBinaryOperation::Generate() {
+ Label done;
+ if ((op_ == Token::ADD)
+ || (op_ == Token::SUB)
+ || (op_ == Token::MUL)
+ || (op_ == Token::DIV)) {
+ Label call_runtime;
+ Label left_smi, right_smi, load_right, do_op;
+ __ JumpIfSmi(left_, &left_smi);
+ __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_runtime);
+ __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
+ if (mode_ == OVERWRITE_LEFT) {
+ __ movq(dst_, left_);
}
+ __ jmp(&load_right);
- } else if (has_then_stm) {
- ASSERT(!has_else_stm);
- JumpTarget then;
- ControlDestination dest(&then, &exit, true);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // The exit label was bound. We may have dangling jumps to the
- // then part.
- if (then.is_linked()) {
- exit.Unuse();
- exit.Jump();
- then.Bind();
- Visit(node->then_statement());
- }
- } else {
- // The then label was bound.
- Visit(node->then_statement());
+ __ bind(&left_smi);
+ __ SmiToInteger32(left_, left_);
+ __ cvtlsi2sd(xmm0, left_);
+ __ Integer32ToSmi(left_, left_);
+ if (mode_ == OVERWRITE_LEFT) {
+ Label alloc_failure;
+ __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
}
- } else if (has_else_stm) {
- ASSERT(!has_then_stm);
- JumpTarget else_;
- ControlDestination dest(&exit, &else_, false);
- LoadCondition(node->condition(), &dest, true);
+ __ bind(&load_right);
+ __ JumpIfSmi(right_, &right_smi);
+ __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_runtime);
+ __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
+ if (mode_ == OVERWRITE_RIGHT) {
+ __ movq(dst_, right_);
+ } else if (mode_ == NO_OVERWRITE) {
+ Label alloc_failure;
+ __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
+ }
+ __ jmp(&do_op);
- if (dest.true_was_fall_through()) {
- // The exit label was bound. We may have dangling jumps to the
- // else part.
- if (else_.is_linked()) {
- exit.Unuse();
- exit.Jump();
- else_.Bind();
- Visit(node->else_statement());
- }
- } else {
- // The else label was bound.
- Visit(node->else_statement());
+ __ bind(&right_smi);
+ __ SmiToInteger32(right_, right_);
+ __ cvtlsi2sd(xmm1, right_);
+ __ Integer32ToSmi(right_, right_);
+ if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
+ Label alloc_failure;
+ __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
}
- } else {
- ASSERT(!has_then_stm && !has_else_stm);
- // We only care about the condition's side effects (not its value
- // or control flow effect). LoadCondition is called without
- // forcing control flow.
- ControlDestination dest(&exit, &exit, true);
- LoadCondition(node->condition(), &dest, false);
- if (!dest.is_used()) {
- // We got a value on the frame rather than (or in addition to)
- // control flow.
- frame_->Drop();
+ __ bind(&do_op);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
}
- }
+ __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
+ __ jmp(&done);
- if (exit.is_linked()) {
- exit.Bind();
+ __ bind(&call_runtime);
}
+ GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
+ stub.GenerateCall(masm_, left_, right_);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+ __ bind(&done);
}
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ContinueStatement");
- CodeForStatementPosition(node);
- node->target()->continue_target()->Jump();
+static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
+ Token::Value op,
+ const Result& right,
+ const Result& left) {
+ // Set TypeInfo of result according to the operation performed.
+ // We rely on the fact that smis have a 32 bit payload on x64.
+ STATIC_ASSERT(kSmiValueSize == 32);
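+ // This is why the bit operations below can return TypeInfo::Smi()
+ // unconditionally: any 32-bit result of |, &, ^, << or >> fits in a
+ // 32-bit smi payload. That would not hold on 32-bit ports, where
+ // kSmiValueSize is 31.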
+ switch (op) {
+ case Token::COMMA:
+ return right.type_info();
+ case Token::OR:
+ case Token::AND:
+ // Result type can be either of the two input types.
+ return operands_type;
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ // Result is always a smi.
+ return TypeInfo::Smi();
+ case Token::SAR:
+ case Token::SHL:
+ // Result is always a smi.
+ return TypeInfo::Smi();
+ case Token::SHR:
+ // Result of x >>> y is always a smi if masked y >= 1, otherwise a number.
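+ // (x >>> 0 can be as large as 2^32 - 1, which exceeds the maximum
+ // smi value 2^31 - 1; a masked shift count >= 1 bounds the result
+ // by 2^31 - 1, so it always fits.)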
+ return (right.is_constant() && right.handle()->IsSmi()
+ && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
+ ? TypeInfo::Smi()
+ : TypeInfo::Number();
+ case Token::ADD:
+ if (operands_type.IsNumber()) {
+ return TypeInfo::Number();
+ } else if (left.type_info().IsString() || right.type_info().IsString()) {
+ return TypeInfo::String();
+ } else {
+ return TypeInfo::Unknown();
+ }
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ // Result is always a number.
+ return TypeInfo::Number();
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return TypeInfo::Unknown();
}
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ BreakStatement");
- CodeForStatementPosition(node);
- node->target()->break_target()->Jump();
-}
+void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
+ OverwriteMode overwrite_mode) {
+ Comment cmnt(masm_, "[ BinaryOperation");
+ Token::Value op = expr->op();
+ Comment cmnt_token(masm_, Token::String(op));
+ if (op == Token::COMMA) {
+ // Simply discard left value.
+ frame_->Nip(1);
+ return;
+ }
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ReturnStatement");
+ Result right = frame_->Pop();
+ Result left = frame_->Pop();
- CodeForStatementPosition(node);
- Load(node->expression());
- Result return_value = frame_->Pop();
- if (function_return_is_shadowed_) {
- function_return_.Jump(&return_value);
- } else {
- frame_->PrepareForReturn();
- if (function_return_.is_bound()) {
- // If the function return label is already bound we reuse the
- // code by jumping to the return site.
- function_return_.Jump(&return_value);
- } else {
- function_return_.Bind(&return_value);
- GenerateReturnSequence(&return_value);
+ if (op == Token::ADD) {
+ const bool left_is_string = left.type_info().IsString();
+ const bool right_is_string = right.type_info().IsString();
+ // Make sure constant strings have string type info.
+ ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
+ left_is_string);
+ ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
+ right_is_string);
+ if (left_is_string || right_is_string) {
+ frame_->Push(&left);
+ frame_->Push(&right);
+ Result answer;
+ if (left_is_string) {
+ if (right_is_string) {
+ StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ answer = frame_->CallStub(&stub, 2);
+ } else {
+ answer =
+ frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
+ }
+ } else if (right_is_string) {
+ answer =
+ frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
+ }
+ answer.set_type_info(TypeInfo::String());
+ frame_->Push(&answer);
+ return;
}
+ // Neither operand is known to be a string.
}
-}
+ bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
+ bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
+ bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
+ bool right_is_non_smi_constant =
+ right.is_constant() && !right.handle()->IsSmi();
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WithEnterStatement");
- CodeForStatementPosition(node);
- Load(node->expression());
- Result context;
- if (node->is_catch_block()) {
- context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kPushContext, 1);
+ if (left_is_smi_constant && right_is_smi_constant) {
+ // Compute the constant result at compile time, and leave it on the frame.
+ int left_int = Smi::cast(*left.handle())->value();
+ int right_int = Smi::cast(*right.handle())->value();
+ if (FoldConstantSmis(op, left_int, right_int)) return;
}
- // Update context local.
- frame_->SaveContextRegister();
+ // Get number type of left and right sub-expressions.
+ TypeInfo operands_type =
+ TypeInfo::Combine(left.type_info(), right.type_info());
- // Verify that the runtime call result and rsi agree.
- if (FLAG_debug_code) {
- __ cmpq(context.reg(), rsi);
- __ Assert(equal, "Runtime::NewContext should end up in rsi");
- }
-}
+ TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
+ Result answer;
+ if (left_is_non_smi_constant || right_is_non_smi_constant) {
+ // Go straight to the slow case, with no smi code.
+ GenericBinaryOpStub stub(op,
+ overwrite_mode,
+ NO_SMI_CODE_IN_STUB,
+ operands_type);
+ answer = stub.GenerateCall(masm_, frame_, &left, &right);
+ } else if (right_is_smi_constant) {
+ answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
+ false, overwrite_mode);
+ } else if (left_is_smi_constant) {
+ answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
+ true, overwrite_mode);
+ } else {
+ // Set the flags based on the operation, type and loop nesting level.
+ // Bit operations are assumed to likely operate on smis, but even for
+ // them we only generate the inline smi check code when the operation
+ // is part of a loop. For all other operations, the inline smi check
+ // code is only generated for likely-smi operands inside a loop.
+ if (loop_nesting() > 0 &&
+ (Token::IsBitOp(op) ||
+ operands_type.IsInteger32() ||
+ expr->type()->IsLikelySmi())) {
+ answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
+ } else {
+ GenericBinaryOpStub stub(op,
+ overwrite_mode,
+ NO_GENERIC_BINARY_FLAGS,
+ operands_type);
+ answer = stub.GenerateCall(masm_, frame_, &left, &right);
+ }
+ }
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WithExitStatement");
- CodeForStatementPosition(node);
- // Pop context.
- __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX));
- // Update context local.
- frame_->SaveContextRegister();
+ answer.set_type_info(result_type);
+ frame_->Push(&answer);
}
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
- // TODO(X64): This code is completely generic and should be moved somewhere
- // where it can be shared between architectures.
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ SwitchStatement");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
- // Compile the switch value.
- Load(node->tag());
+bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
+ Object* answer_object = Heap::undefined_value();
+ switch (op) {
+ case Token::ADD:
+ // Use intptr_t to detect overflow of 32-bit int.
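+ // (E.g. left = right = Smi::kMaxValue: adding them as plain ints
+ // would wrap to a negative value, but the 64-bit sum 2^32 - 2 fails
+ // Smi::IsValid, so the fold is correctly skipped.)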
+ if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
+ answer_object = Smi::FromInt(left + right);
+ }
+ break;
+ case Token::SUB:
+ // Use intptr_t to detect overflow of 32-bit int.
+ if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
+ answer_object = Smi::FromInt(left - right);
+ }
+ break;
+ case Token::MUL: {
+ double answer = static_cast<double>(left) * right;
+ if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
+ // If the product is zero and the non-zero factor is negative,
+ // the spec requires us to return floating point negative zero.
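+ // (When answer == 0, at least one factor is zero, so left + right
+ // equals the other factor; its sign tells us whether we must
+ // produce -0.0, e.g. for -3 * 0, which we leave to the stub.)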
+ if (answer != 0 || (left + right) >= 0) {
+ answer_object = Smi::FromInt(static_cast<int>(answer));
+ }
+ }
+ }
+ break;
+ case Token::DIV:
+ case Token::MOD:
+ break;
+ case Token::BIT_OR:
+ answer_object = Smi::FromInt(left | right);
+ break;
+ case Token::BIT_AND:
+ answer_object = Smi::FromInt(left & right);
+ break;
+ case Token::BIT_XOR:
+ answer_object = Smi::FromInt(left ^ right);
+ break;
- ZoneList<CaseClause*>* cases = node->cases();
- int length = cases->length();
- CaseClause* default_clause = NULL;
+ case Token::SHL: {
+ int shift_amount = right & 0x1F;
+ if (Smi::IsValid(left << shift_amount)) {
+ answer_object = Smi::FromInt(left << shift_amount);
+ }
+ break;
+ }
+ case Token::SHR: {
+ int shift_amount = right & 0x1F;
+ unsigned int unsigned_left = left;
+ unsigned_left >>= shift_amount;
+ if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
+ answer_object = Smi::FromInt(unsigned_left);
+ }
+ break;
+ }
+ case Token::SAR: {
+ int shift_amount = right & 0x1F;
+ unsigned int unsigned_left = left;
+ if (left < 0) {
+ // Perform arithmetic shift of a negative number by
+ // complementing number, logical shifting, complementing again.
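+ // E.g. left = -8, shift_amount = 1: ~(-8) = 7, 7 >> 1 = 3, and
+ // ~3 = -4, which matches the arithmetic shift -8 >> 1.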
+ unsigned_left = ~unsigned_left;
+ unsigned_left >>= shift_amount;
+ unsigned_left = ~unsigned_left;
+ } else {
+ unsigned_left >>= shift_amount;
+ }
+ ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
+ answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (answer_object == Heap::undefined_value()) {
+ return false;
+ }
+ frame_->Push(Handle<Object>(answer_object));
+ return true;
+}
- JumpTarget next_test;
- // Compile the case label expressions and comparisons. Exit early
- // if a comparison is unconditionally true. The target next_test is
- // bound before the loop in order to indicate control flow to the
- // first comparison.
- next_test.Bind();
- for (int i = 0; i < length && !next_test.is_unused(); i++) {
- CaseClause* clause = cases->at(i);
- // The default is not a test, but remember it for later.
- if (clause->is_default()) {
- default_clause = clause;
- continue;
- }
- Comment cmnt(masm_, "[ Case comparison");
- // We recycle the same target next_test for each test. Bind it if
- // the previous test has not done so and then unuse it for the
- // loop.
- if (next_test.is_linked()) {
- next_test.Bind();
- }
- next_test.Unuse();
+void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg,
+ TypeInfo type,
+ DeferredCode* deferred) {
+ if (!type.IsSmi()) {
+ __ JumpIfNotSmi(reg, deferred->entry_label());
+ }
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(reg);
+ }
+}
- // Duplicate the switch value.
- frame_->Dup();
- // Compile the label expression.
- Load(clause->label());
+void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
+ Register right,
+ TypeInfo left_info,
+ TypeInfo right_info,
+ DeferredCode* deferred) {
+ if (!left_info.IsSmi() && !right_info.IsSmi()) {
+ __ JumpIfNotBothSmi(left, right, deferred->entry_label());
+ } else if (!left_info.IsSmi()) {
+ __ JumpIfNotSmi(left, deferred->entry_label());
+ } else if (!right_info.IsSmi()) {
+ __ JumpIfNotSmi(right, deferred->entry_label());
+ }
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left);
+ __ AbortIfNotSmi(right);
+ }
+}
- // Compare and branch to the body if true or the next test if
- // false. Prefer the next test as a fall through.
- ControlDestination dest(clause->body_target(), &next_test, false);
- Comparison(node, equal, true, &dest);
- // If the comparison fell through to the true target, jump to the
- // actual body.
- if (dest.true_was_fall_through()) {
- clause->body_target()->Unuse();
- clause->body_target()->Jump();
+// Implements a binary operation using a deferred code object and some
+// inline code to operate on smis quickly.
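+// The generated code has roughly this shape (a sketch, not the exact
+// instruction sequence): an inline smi check that jumps to
+// deferred->entry_label() when an operand is not a smi, the fast inline
+// smi arithmetic, and deferred->BindExit(), where the slow case (a call
+// to the generic stub) rejoins with its result in the same register.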
+Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode) {
+ // Copy the type info because left and right may be overwritten.
+ TypeInfo left_type_info = left->type_info();
+ TypeInfo right_type_info = right->type_info();
+ Token::Value op = expr->op();
+ Result answer;
+ // Special handling of div and mod because they use fixed registers.
+ if (op == Token::DIV || op == Token::MOD) {
+ // We need rax as the quotient register, rdx as the remainder
+ // register, neither left nor right in rax or rdx, and left copied
+ // to rax.
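+ // (This mirrors the x64 idiv instruction, which always leaves the
+ // quotient in rax and the remainder in rdx.)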
+ Result quotient;
+ Result remainder;
+ bool left_is_in_rax = false;
+ // Step 1: get rax for quotient.
+ if ((left->is_register() && left->reg().is(rax)) ||
+ (right->is_register() && right->reg().is(rax))) {
+ // One or both of them is in rax. Move them to a fresh
+ // register that is not rdx.
+ Result fresh = allocator_->Allocate();
+ ASSERT(fresh.is_valid());
+ if (fresh.reg().is(rdx)) {
+ remainder = fresh;
+ fresh = allocator_->Allocate();
+ ASSERT(fresh.is_valid());
+ }
+ if (left->is_register() && left->reg().is(rax)) {
+ quotient = *left;
+ *left = fresh;
+ left_is_in_rax = true;
+ }
+ if (right->is_register() && right->reg().is(rax)) {
+ quotient = *right;
+ *right = fresh;
+ }
+ __ movq(fresh.reg(), rax);
+ } else {
+ // Neither left nor right is in rax.
+ quotient = allocator_->Allocate(rax);
}
- }
+ ASSERT(quotient.is_register() && quotient.reg().is(rax));
+ ASSERT(!(left->is_register() && left->reg().is(rax)));
+ ASSERT(!(right->is_register() && right->reg().is(rax)));
- // If there was control flow to a next test from the last one
- // compiled, compile a jump to the default or break target.
- if (!next_test.is_unused()) {
- if (next_test.is_linked()) {
- next_test.Bind();
+ // Step 2: get rdx for remainder if necessary.
+ if (!remainder.is_valid()) {
+ if ((left->is_register() && left->reg().is(rdx)) ||
+ (right->is_register() && right->reg().is(rdx))) {
+ Result fresh = allocator_->Allocate();
+ ASSERT(fresh.is_valid());
+ if (left->is_register() && left->reg().is(rdx)) {
+ remainder = *left;
+ *left = fresh;
+ }
+ if (right->is_register() && right->reg().is(rdx)) {
+ remainder = *right;
+ *right = fresh;
+ }
+ __ movq(fresh.reg(), rdx);
+ } else {
+ // Neither left nor right is in rdx.
+ remainder = allocator_->Allocate(rdx);
+ }
}
- // Drop the switch value.
- frame_->Drop();
- if (default_clause != NULL) {
- default_clause->body_target()->Jump();
+ ASSERT(remainder.is_register() && remainder.reg().is(rdx));
+ ASSERT(!(left->is_register() && left->reg().is(rdx)));
+ ASSERT(!(right->is_register() && right->reg().is(rdx)));
+
+ left->ToRegister();
+ right->ToRegister();
+ frame_->Spill(rax);
+ frame_->Spill(rdx);
+
+ // Check that left and right are smi tagged.
+ DeferredInlineBinaryOperation* deferred =
+ new DeferredInlineBinaryOperation(op,
+ (op == Token::DIV) ? rax : rdx,
+ left->reg(),
+ right->reg(),
+ overwrite_mode);
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
+ left_type_info, right_type_info, deferred);
+
+ if (op == Token::DIV) {
+ __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ answer = quotient;
} else {
- node->break_target()->Jump();
+ ASSERT(op == Token::MOD);
+ __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ answer = remainder;
}
+ ASSERT(answer.is_valid());
+ return answer;
}
- // The last instruction emitted was a jump, either to the default
- // clause or the break target, or else to a case body from the loop
- // that compiles the tests.
- ASSERT(!has_valid_frame());
- // Compile case bodies as needed.
- for (int i = 0; i < length; i++) {
- CaseClause* clause = cases->at(i);
+ // Special handling of shift operations because they use fixed
+ // registers.
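+ // (A variable shift count on x64 must be in cl, the low byte of rcx.)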
+ if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
+ // Move left out of rcx if necessary.
+ if (left->is_register() && left->reg().is(rcx)) {
+ *left = allocator_->Allocate();
+ ASSERT(left->is_valid());
+ __ movq(left->reg(), rcx);
+ }
+ right->ToRegister(rcx);
+ left->ToRegister();
+ ASSERT(left->is_register() && !left->reg().is(rcx));
+ ASSERT(right->is_register() && right->reg().is(rcx));
- // There are two ways to reach the body: from the corresponding
- // test or as the fall through of the previous body.
- if (clause->body_target()->is_linked() || has_valid_frame()) {
- if (clause->body_target()->is_linked()) {
- if (has_valid_frame()) {
- // If we have both a jump to the test and a fall through, put
- // a jump on the fall through path to avoid the dropping of
- // the switch value on the test path. The exception is the
- // default which has already had the switch value dropped.
- if (clause->is_default()) {
- clause->body_target()->Bind();
- } else {
- JumpTarget body;
- body.Jump();
- clause->body_target()->Bind();
- frame_->Drop();
- body.Bind();
- }
- } else {
- // No fall through to worry about.
- clause->body_target()->Bind();
- if (!clause->is_default()) {
- frame_->Drop();
- }
+ // We will modify right (rcx), so it must be spilled.
+ frame_->Spill(rcx);
+
+ // Use a fresh answer register to avoid spilling the left operand.
+ answer = allocator_->Allocate();
+ ASSERT(answer.is_valid());
+ // Check that both operands are smis using the answer register as a
+ // temporary.
+ DeferredInlineBinaryOperation* deferred =
+ new DeferredInlineBinaryOperation(op,
+ answer.reg(),
+ left->reg(),
+ rcx,
+ overwrite_mode);
+
+ Label do_op;
+ if (right_type_info.IsSmi()) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(right->reg());
+ }
+ __ movq(answer.reg(), left->reg());
+ // If left is not known to be a smi, check if it is.
+ // If left is not known to be a number, and it isn't a smi, check if
+ // it is a HeapNumber.
+ if (!left_type_info.IsSmi()) {
+ __ JumpIfSmi(answer.reg(), &do_op);
+ if (!left_type_info.IsNumber()) {
+ // Branch if not a heap number.
+ __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ deferred->Branch(not_equal);
}
+ // Load integer value into answer register using truncation.
+ __ cvttsd2si(answer.reg(),
+ FieldOperand(answer.reg(), HeapNumber::kValueOffset));
+ // Branch if we might have overflowed: cvttsd2si yields 0x80000000
+ // when the double is outside the int32 range. (This is a false
+ // negative for Smi::kMinValue, which also truncates to 0x80000000.)
+ __ cmpl(answer.reg(), Immediate(0x80000000));
+ deferred->Branch(equal);
+ // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging.
+ __ Integer32ToSmi(answer.reg(), answer.reg());
} else {
- // Otherwise, we have only fall through.
- ASSERT(has_valid_frame());
+ // Fast case - both are actually smis.
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left->reg());
+ }
}
-
- // We are now prepared to compile the body.
- Comment cmnt(masm_, "[ Case body");
- VisitStatements(clause->statements());
+ } else {
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx,
+ left_type_info, right_type_info, deferred);
}
- clause->body_target()->Unuse();
- }
+ __ bind(&do_op);
- // We may not have a valid frame here so bind the break target only
- // if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
+ // Perform the operation.
+ switch (op) {
+ case Token::SAR:
+ __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
+ break;
+ case Token::SHR: {
+ __ SmiShiftLogicalRight(answer.reg(),
+ left->reg(),
+ rcx,
+ deferred->entry_label());
+ break;
+ }
+ case Token::SHL: {
+ __ SmiShiftLeft(answer.reg(),
+ left->reg(),
+ rcx);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ ASSERT(answer.is_valid());
+ return answer;
}
- node->break_target()->Unuse();
-}
+ // Handle the other binary operations.
+ left->ToRegister();
+ right->ToRegister();
+ // A newly allocated register answer is used to hold the answer. The
+ // registers containing left and right are not modified so they don't
+ // need to be spilled in the fast case.
+ answer = allocator_->Allocate();
+ ASSERT(answer.is_valid());
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ DoWhileStatement");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- JumpTarget body(JumpTarget::BIDIRECTIONAL);
- IncrementLoopNesting();
+ // Perform the smi tag check.
+ DeferredInlineBinaryOperation* deferred =
+ new DeferredInlineBinaryOperation(op,
+ answer.reg(),
+ left->reg(),
+ right->reg(),
+ overwrite_mode);
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
+ left_type_info, right_type_info, deferred);
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- // Label the top of the loop for the backward jump if necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // Use the continue target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
+ switch (op) {
+ case Token::ADD:
+ __ SmiAdd(answer.reg(),
+ left->reg(),
+ right->reg(),
+ deferred->entry_label());
break;
- case ALWAYS_FALSE:
- // No need to label it.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+ case Token::SUB:
+ __ SmiSub(answer.reg(),
+ left->reg(),
+ right->reg(),
+ deferred->entry_label());
break;
- case DONT_KNOW:
- // Continue is the test, so use the backward body target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- body.Bind();
+
+ case Token::MUL: {
+ __ SmiMul(answer.reg(),
+ left->reg(),
+ right->reg(),
+ deferred->entry_label());
break;
- }
+ }
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
+ case Token::BIT_OR:
+ __ SmiOr(answer.reg(), left->reg(), right->reg());
+ break;
- // Compile the test.
- switch (info) {
- case ALWAYS_TRUE:
- // If control flow can fall off the end of the body, jump back
- // to the top and bind the break target at the exit.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
+ case Token::BIT_AND:
+ __ SmiAnd(answer.reg(), left->reg(), right->reg());
break;
- case ALWAYS_FALSE:
- // We may have had continues or breaks in the body.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
+
+ case Token::BIT_XOR:
+ __ SmiXor(answer.reg(), left->reg(), right->reg());
break;
- case DONT_KNOW:
- // We have to compile the test expression if it can be reached by
- // control flow falling out of the body or via continue.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- Comment cmnt(masm_, "[ DoWhileCondition");
- CodeForDoWhileConditionPosition(node);
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
+
+ default:
+ UNREACHABLE();
break;
}
-
- DecrementLoopNesting();
- node->continue_target()->Unuse();
- node->break_target()->Unuse();
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ ASSERT(answer.is_valid());
+ return answer;
}
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WhileStatement");
- CodeForStatementPosition(node);
-
- // If the condition is always false and has no side effects, we do not
- // need to compile anything.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- if (info == ALWAYS_FALSE) return;
-
- // Do not duplicate conditions that may have function literal
- // subexpressions. This can cause us to compile the function literal
- // twice.
- bool test_at_bottom = !node->may_have_function_literal();
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- IncrementLoopNesting();
- JumpTarget body;
- if (test_at_bottom) {
- body.set_direction(JumpTarget::BIDIRECTIONAL);
+// Call the appropriate binary operation stub to compute src op value
+// and leave the result in dst.
+class DeferredInlineSmiOperation: public DeferredCode {
+ public:
+ DeferredInlineSmiOperation(Token::Value op,
+ Register dst,
+ Register src,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : op_(op),
+ dst_(dst),
+ src_(src),
+ value_(value),
+ overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiOperation");
}
- // Based on the condition analysis, compile the test as necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // We will not compile the test expression. Label the top of the
- // loop with the continue target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- break;
- case DONT_KNOW: {
- if (test_at_bottom) {
- // Continue is the test at the bottom, no need to label the test
- // at the top. The body is a backward target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- } else {
- // Label the test at the top as the continue target. The body
- // is a forward-only target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- }
- // Compile the test with the body as the true target and preferred
- // fall-through and with the break target as the false target.
- ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), &dest, true);
+ virtual void Generate();
- if (dest.false_was_fall_through()) {
- // If we got the break target as fall-through, the test may have
- // been unconditionally false (if there are no jumps to the
- // body).
- if (!body.is_linked()) {
- DecrementLoopNesting();
- return;
- }
+ private:
+ Token::Value op_;
+ Register dst_;
+ Register src_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
- // Otherwise, jump around the body on the fall through and then
- // bind the body target.
- node->break_target()->Unuse();
- node->break_target()->Jump();
- body.Bind();
- }
- break;
- }
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
+void DeferredInlineSmiOperation::Generate() {
+ // For mod we don't generate all the Smi code inline.
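+ // The stub therefore keeps its smi case for MOD, while the other
+ // operations omit it via NO_SMI_CODE_IN_STUB.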
+ GenericBinaryOpStub stub(
+ op_,
+ overwrite_mode_,
+ (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
+ stub.GenerateCall(masm_, src_, value_);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
- // Based on the condition analysis, compile the backward jump as
- // necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // The loop body has been labeled with the continue target.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- break;
- case DONT_KNOW:
- if (test_at_bottom) {
- // If we have chosen to recompile the test at the bottom,
- // then it is the continue target.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- // The break target is the fall-through (body is a backward
- // jump from here and thus an invalid fall-through).
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
- } else {
- // If we have chosen not to recompile the test at the
- // bottom, jump back to the one at the top.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- }
- break;
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
- // The break target may be already bound (by the condition), or there
- // may not be a valid frame. Bind it only if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
+// Call the appropriate binary operation stub to compute value op src
+// and leave the result in dst.
+class DeferredInlineSmiOperationReversed: public DeferredCode {
+ public:
+ DeferredInlineSmiOperationReversed(Token::Value op,
+ Register dst,
+ Smi* value,
+ Register src,
+ OverwriteMode overwrite_mode)
+ : op_(op),
+ dst_(dst),
+ value_(value),
+ src_(src),
+ overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiOperationReversed");
}
- DecrementLoopNesting();
-}
+ virtual void Generate();
-void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) {
- ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER);
- if (slot->type() == Slot::LOCAL) {
- frame_->SetTypeForLocalAt(slot->index(), info);
- } else {
- frame_->SetTypeForParamAt(slot->index(), info);
- }
- if (FLAG_debug_code && info.IsSmi()) {
- if (slot->type() == Slot::LOCAL) {
- frame_->PushLocalAt(slot->index());
- } else {
- frame_->PushParameterAt(slot->index());
- }
- Result var = frame_->Pop();
- var.ToRegister();
- __ AbortIfNotSmi(var.reg());
- }
-}
-
-
-void CodeGenerator::GenerateFastSmiLoop(ForStatement* node) {
- // A fast smi loop is a for loop with an initializer
- // that is a simple assignment of a smi to a stack variable,
- // a test that is a simple test of that variable against a smi constant,
- // and a step that is a increment/decrement of the variable, and
- // where the variable isn't modified in the loop body.
- // This guarantees that the variable is always a smi.
+ private:
+ Token::Value op_;
+ Register dst_;
+ Smi* value_;
+ Register src_;
+ OverwriteMode overwrite_mode_;
+};
- Variable* loop_var = node->loop_variable();
- Smi* initial_value = *Handle<Smi>::cast(node->init()
- ->StatementAsSimpleAssignment()->value()->AsLiteral()->handle());
- Smi* limit_value = *Handle<Smi>::cast(
- node->cond()->AsCompareOperation()->right()->AsLiteral()->handle());
- Token::Value compare_op =
- node->cond()->AsCompareOperation()->op();
- bool increments =
- node->next()->StatementAsCountOperation()->op() == Token::INC;
- // Check that the condition isn't initially false.
- bool initially_false = false;
- int initial_int_value = initial_value->value();
- int limit_int_value = limit_value->value();
- switch (compare_op) {
- case Token::LT:
- initially_false = initial_int_value >= limit_int_value;
- break;
- case Token::LTE:
- initially_false = initial_int_value > limit_int_value;
- break;
- case Token::GT:
- initially_false = initial_int_value <= limit_int_value;
- break;
- case Token::GTE:
- initially_false = initial_int_value < limit_int_value;
- break;
- default:
- UNREACHABLE();
+void DeferredInlineSmiOperationReversed::Generate() {
+ GenericBinaryOpStub stub(
+ op_,
+ overwrite_mode_,
+ NO_SMI_CODE_IN_STUB);
+ stub.GenerateCall(masm_, value_, src_);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+class DeferredInlineSmiAdd: public DeferredCode {
+ public:
+ DeferredInlineSmiAdd(Register dst,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiAdd");
}
- if (initially_false) return;
-
- // Only check loop condition at the end.
- Visit(node->init());
+ virtual void Generate();
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
- // Set type and stack height of BreakTargets.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ private:
+ Register dst_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
- IncrementLoopNesting();
- loop.Bind();
- // Set number type of the loop variable to smi.
- CheckStack(); // TODO(1222600): ignore if body contains calls.
+void DeferredInlineSmiAdd::Generate() {
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ igostub.GenerateCall(masm_, dst_, value_);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
- SetTypeForStackSlot(loop_var->slot(), TypeInfo::Smi());
- Visit(node->body());
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
+// The result of value + src is in dst. It either overflowed or was not
+// smi tagged. Undo the speculative addition and call the appropriate
+// specialized stub for add. The result is left in dst.
+class DeferredInlineSmiAddReversed: public DeferredCode {
+ public:
+ DeferredInlineSmiAddReversed(Register dst,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiAddReversed");
}
- if (has_valid_frame()) {
- CodeForStatementPosition(node);
- Slot* loop_var_slot = loop_var->slot();
- if (loop_var_slot->type() == Slot::LOCAL) {
- frame_->PushLocalAt(loop_var_slot->index());
- } else {
- ASSERT(loop_var_slot->type() == Slot::PARAMETER);
- frame_->PushParameterAt(loop_var_slot->index());
- }
- Result loop_var_result = frame_->Pop();
- if (!loop_var_result.is_register()) {
- loop_var_result.ToRegister();
- }
+ virtual void Generate();
- if (increments) {
- __ SmiAddConstant(loop_var_result.reg(),
- loop_var_result.reg(),
- Smi::FromInt(1));
- } else {
- __ SmiSubConstant(loop_var_result.reg(),
- loop_var_result.reg(),
- Smi::FromInt(1));
- }
+ private:
+ Register dst_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
- {
- __ SmiCompare(loop_var_result.reg(), limit_value);
- Condition condition;
- switch (compare_op) {
- case Token::LT:
- condition = less;
- break;
- case Token::LTE:
- condition = less_equal;
- break;
- case Token::GT:
- condition = greater;
- break;
- case Token::GTE:
- condition = greater_equal;
- break;
- default:
- condition = never;
- UNREACHABLE();
- }
- loop.Branch(condition);
- }
- loop_var_result.Unuse();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
-}
+void DeferredInlineSmiAddReversed::Generate() {
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ igostub.GenerateCall(masm_, value_, dst_);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
-void CodeGenerator::VisitForStatement(ForStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ForStatement");
- CodeForStatementPosition(node);
- if (node->is_fast_smi_loop()) {
- GenerateFastSmiLoop(node);
- return;
+class DeferredInlineSmiSub: public DeferredCode {
+ public:
+ DeferredInlineSmiSub(Register dst,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiSub");
}
- // Compile the init expression if present.
- if (node->init() != NULL) {
- Visit(node->init());
- }
+ virtual void Generate();
- // If the condition is always false and has no side effects, we do not
- // need to compile anything else.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- if (info == ALWAYS_FALSE) return;
+ private:
+ Register dst_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
- // Do not duplicate conditions that may have function literal
- // subexpressions. This can cause us to compile the function literal
- // twice.
- bool test_at_bottom = !node->may_have_function_literal();
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- IncrementLoopNesting();
- // Target for backward edge if no test at the bottom, otherwise
- // unused.
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
- // Target for backward edge if there is a test at the bottom,
- // otherwise used as target for test at the top.
- JumpTarget body;
- if (test_at_bottom) {
- body.set_direction(JumpTarget::BIDIRECTIONAL);
- }
+void DeferredInlineSmiSub::Generate() {
+ GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ igostub.GenerateCall(masm_, dst_, value_);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
- // Based on the condition analysis, compile the test as necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // We will not compile the test expression. Label the top of the
- // loop.
- if (node->next() == NULL) {
- // Use the continue target if there is no update expression.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- } else {
- // Otherwise use the backward loop target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- loop.Bind();
- }
- break;
- case DONT_KNOW: {
- if (test_at_bottom) {
- // Continue is either the update expression or the test at the
- // bottom, no need to label the test at the top.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- } else if (node->next() == NULL) {
- // We are not recompiling the test at the bottom and there is no
- // update expression.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- } else {
- // We are not recompiling the test at the bottom and there is an
- // update expression.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- loop.Bind();
- }
- // Compile the test with the body as the true target and preferred
- // fall-through and with the break target as the false target.
- ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // If we got the break target as fall-through, the test may have
- // been unconditionally false (if there are no jumps to the
- // body).
- if (!body.is_linked()) {
- DecrementLoopNesting();
- return;
- }
-
- // Otherwise, jump around the body on the fall through and then
- // bind the body target.
- node->break_target()->Unuse();
- node->break_target()->Jump();
- body.Bind();
- }
- break;
+Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
+ Result* operand,
+ Handle<Object> value,
+ bool reversed,
+ OverwriteMode overwrite_mode) {
+ // Generate inline code for a binary operation when one of the
+ // operands is a constant smi. Consumes the argument "operand".
+ if (IsUnsafeSmi(value)) {
+ Result unsafe_operand(value);
+ if (reversed) {
+ return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
+ overwrite_mode);
+ } else {
+ return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
+ overwrite_mode);
}
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
}
- CheckStack(); // TODO(1222600): ignore if body contains calls.
-
- Visit(node->body());
+ // Get the literal value.
+ Smi* smi_value = Smi::cast(*value);
+ int int_value = smi_value->value();
- // If there is an update expression, compile it if necessary.
- if (node->next() != NULL) {
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
+ Token::Value op = expr->op();
+ Result answer;
+ switch (op) {
+ case Token::ADD: {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ DeferredCode* deferred = NULL;
+ if (reversed) {
+ deferred = new DeferredInlineSmiAddReversed(operand->reg(),
+ smi_value,
+ overwrite_mode);
+ } else {
+ deferred = new DeferredInlineSmiAdd(operand->reg(),
+ smi_value,
+ overwrite_mode);
+ }
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
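+ // On overflow SmiAddConstant jumps to the deferred code with the
+ // original operand value still intact in the register.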
+ __ SmiAddConstant(operand->reg(),
+ operand->reg(),
+ smi_value,
+ deferred->entry_label());
+ deferred->BindExit();
+ answer = *operand;
+ break;
}
- // Control can reach the update by falling out of the body or by a
- // continue.
- if (has_valid_frame()) {
- // Record the source position of the statement as this code which
- // is after the code for the body actually belongs to the loop
- // statement and not the body.
- CodeForStatementPosition(node);
- Visit(node->next());
+ case Token::SUB: {
+ if (reversed) {
+ Result constant_operand(value);
+ answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
+ overwrite_mode);
+ } else {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
+ smi_value,
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+ // A smi currently fits in a 32-bit Immediate.
+ __ SmiSubConstant(operand->reg(),
+ operand->reg(),
+ smi_value,
+ deferred->entry_label());
+ deferred->BindExit();
+ answer = *operand;
+ }
+ break;
}
- }
- // Based on the condition analysis, compile the backward jump as
- // necessary.
- switch (info) {
- case ALWAYS_TRUE:
- if (has_valid_frame()) {
- if (node->next() == NULL) {
- node->continue_target()->Jump();
- } else {
- loop.Jump();
- }
+ case Token::SAR:
+ if (reversed) {
+ Result constant_operand(value);
+ answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
+ overwrite_mode);
+ } else {
+ // Only the least significant 5 bits of the shift value are used.
+ // In the slow case, this masking is done inside the runtime call.
+ int shift_value = int_value & 0x1f;
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+ __ SmiShiftArithmeticRightConstant(operand->reg(),
+ operand->reg(),
+ shift_value);
+ deferred->BindExit();
+ answer = *operand;
}
break;
- case DONT_KNOW:
- if (test_at_bottom) {
- if (node->continue_target()->is_linked()) {
- // We can have dangling jumps to the continue target if there
- // was no update expression.
- node->continue_target()->Bind();
- }
- // Control can reach the test at the bottom by falling out of
- // the body, by a continue in the body, or from the update
- // expression.
- if (has_valid_frame()) {
- // The break target is the fall-through (body is a backward
- // jump from here).
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
+
+ case Token::SHR:
+ if (reversed) {
+ Result constant_operand(value);
+ answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
+ overwrite_mode);
} else {
- // Otherwise, jump back to the test at the top.
- if (has_valid_frame()) {
- if (node->next() == NULL) {
- node->continue_target()->Jump();
- } else {
- loop.Jump();
- }
- }
+ // Only the least significant 5 bits of the shift value are used.
+ // In the slow case, this masking is done inside the runtime call.
+ int shift_value = int_value & 0x1f;
+ operand->ToRegister();
+ answer = allocator()->Allocate();
+ ASSERT(answer.is_valid());
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ answer.reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+ __ SmiShiftLogicalRightConstant(answer.reg(),
+ operand->reg(),
+ shift_value,
+ deferred->entry_label());
+ deferred->BindExit();
+ operand->Unuse();
}
break;
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
- // The break target may be already bound (by the condition), or there
- // may not be a valid frame. Bind it only if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ ForInStatement");
- CodeForStatementPosition(node);
-
- JumpTarget primitive;
- JumpTarget jsobject;
- JumpTarget fixed_array;
- JumpTarget entry(JumpTarget::BIDIRECTIONAL);
- JumpTarget end_del_check;
- JumpTarget exit;
-
- // Get the object to enumerate over (converted to JSObject).
- LoadAndSpill(node->enumerable());
+ case Token::SHL:
+ if (reversed) {
+ operand->ToRegister();
- // Both SpiderMonkey and kjs ignore null and undefined in contrast
- // to the specification. 12.6.4 mandates a call to ToObject.
- frame_->EmitPop(rax);
+ // We need rcx to be available to hold operand, and to be spilled.
+ // SmiShiftLeft implicitly modifies rcx.
+ if (operand->reg().is(rcx)) {
+ frame_->Spill(operand->reg());
+ answer = allocator()->Allocate();
+ } else {
+ Result rcx_reg = allocator()->Allocate(rcx);
+ // answer must not be rcx.
+ answer = allocator()->Allocate();
+ // rcx_reg goes out of scope.
+ }
- // rax: value to be iterated over
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- exit.Branch(equal);
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- exit.Branch(equal);
+ DeferredInlineSmiOperationReversed* deferred =
+ new DeferredInlineSmiOperationReversed(op,
+ answer.reg(),
+ smi_value,
+ operand->reg(),
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
- // Stack layout in body:
- // [iteration counter (smi)] <- slot 0
- // [length of array] <- slot 1
- // [FixedArray] <- slot 2
- // [Map or 0] <- slot 3
- // [Object] <- slot 4
+ __ Move(answer.reg(), smi_value);
+ __ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg());
+ operand->Unuse();
- // Check if enumerable is already a JSObject
- // rax: value to be iterated over
- Condition is_smi = masm_->CheckSmi(rax);
- primitive.Branch(is_smi);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- jsobject.Branch(above_equal);
-
- primitive.Bind();
- frame_->EmitPush(rax);
- frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
- // function call returns the value in rax, which is where we want it below
-
- jsobject.Bind();
- // Get the set of properties (as a FixedArray or Map).
- // rax: value to be iterated over
- frame_->EmitPush(rax); // Push the object being iterated over.
+ deferred->BindExit();
+ } else {
+ // Only the least significant 5 bits of the shift value are used.
+ // In the slow case, this masking is done inside the runtime call.
+ int shift_value = int_value & 0x1f;
+ operand->ToRegister();
+ if (shift_value == 0) {
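+ // Shifting by zero leaves a smi unchanged, so only the smi check
+ // (and its slow path) is needed here.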
+ // Spill operand so it can be overwritten in the slow case.
+ frame_->Spill(operand->reg());
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+ deferred->BindExit();
+ answer = *operand;
+ } else {
+ // Use a fresh temporary for nonzero shift values.
+ answer = allocator()->Allocate();
+ ASSERT(answer.is_valid());
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ answer.reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+ __ SmiShiftLeftConstant(answer.reg(),
+ operand->reg(),
+ shift_value);
+ deferred->BindExit();
+ operand->Unuse();
+ }
+ }
+ break;
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND: {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ if (reversed) {
+ // Bit operations with a constant smi are commutative.
+ // We can swap left and right operands with no problem.
+ // Swap left and right overwrite modes. 0->0, 1->2, 2->1.
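+ // The map (2 * x) % 3 below assumes NO_OVERWRITE == 0,
+ // OVERWRITE_LEFT == 1 and OVERWRITE_RIGHT == 2.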
+ overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
+ }
+ DeferredCode* deferred = new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+ if (op == Token::BIT_AND) {
+ __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
+ } else if (op == Token::BIT_XOR) {
+ if (int_value != 0) {
+ __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
+ }
+ } else {
+ ASSERT(op == Token::BIT_OR);
+ if (int_value != 0) {
+ __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
+ }
+ }
+ deferred->BindExit();
+ answer = *operand;
+ break;
+ }
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- JumpTarget call_runtime;
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
- JumpTarget check_prototype;
- JumpTarget use_cache;
- __ movq(rcx, rax);
- loop.Bind();
- // Check that there are no elements.
- __ movq(rdx, FieldOperand(rcx, JSObject::kElementsOffset));
- __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
- call_runtime.Branch(not_equal);
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in ebx for the subsequent
- // prototype load.
- __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
- __ CompareRoot(rdx, Heap::kEmptyDescriptorArrayRootIndex);
- call_runtime.Branch(equal);
- // Check that there in an enum cache in the non-empty instance
- // descriptors. This is the case if the next enumeration index
- // field does not contain a smi.
- __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
- is_smi = masm_->CheckSmi(rdx);
- call_runtime.Branch(is_smi);
- // For all objects but the receiver, check that the cache is empty.
- __ cmpq(rcx, rax);
- check_prototype.Branch(equal);
- __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
- call_runtime.Branch(not_equal);
- check_prototype.Bind();
- // Load the prototype from the map and loop if non-null.
- __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- __ CompareRoot(rcx, Heap::kNullValueRootIndex);
- loop.Branch(not_equal);
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
- use_cache.Jump();
+ // Generate inline code for mod of powers of 2 and negative powers of 2.
+ case Token::MOD:
+ if (!reversed &&
+ int_value != 0 &&
+ (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
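+ // For a non-negative smi x, x % d == x & (|d| - 1) when |d| is a
+ // power of two. The sign of the result follows the dividend, so
+ // negative left operands are sent to the slow path below.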
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ // Check for negative or non-Smi left hand side.
+ __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
+ if (int_value < 0) int_value = -int_value;
+ if (int_value == 1) {
+ __ Move(operand->reg(), Smi::FromInt(0));
+ } else {
+ __ SmiAndConstant(operand->reg(),
+ operand->reg(),
+ Smi::FromInt(int_value - 1));
+ }
+ deferred->BindExit();
+ answer = *operand;
+ break; // This break only applies if we generated code for MOD.
+ }
+ // Fall through if we did not find a power of 2 on the right hand side!
+ // The next case must be the default.
- call_runtime.Bind();
- // Call the runtime to get the property names for the object.
- frame_->EmitPush(rax); // push the Object (slot 4) for the runtime call
- frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ default: {
+ Result constant_operand(value);
+ if (reversed) {
+ answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
+ overwrite_mode);
+ } else {
+ answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
+ overwrite_mode);
+ }
+ break;
+ }
+ }
+ ASSERT(answer.is_valid());
+ return answer;
+}
- // If we got a Map, we can do a fast modification check.
- // Otherwise, we got a FixedArray, and we have to do a slow check.
- // rax: map or fixed array (result from call to
- // Runtime::kGetPropertyNamesFast)
- __ movq(rdx, rax);
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ CompareRoot(rcx, Heap::kMetaMapRootIndex);
- fixed_array.Branch(not_equal);
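+// Conservatively determine whether a result could hold NaN: smis,
+// int32 values and non-NaN constants cannot.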
+static bool CouldBeNaN(const Result& result) {
+ if (result.type_info().IsSmi()) return false;
+ if (result.type_info().IsInteger32()) return false;
+ if (!result.is_constant()) return true;
+ if (!result.handle()->IsHeapNumber()) return false;
+ return isnan(HeapNumber::cast(*result.handle())->value());
+}
- use_cache.Bind();
- // Get enum cache
- // rax: map (either the result from a call to
- // Runtime::kGetPropertyNamesFast or has been fetched directly from
- // the object)
- __ movq(rcx, rax);
- __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
- // Get the bridge array held in the enumeration index field.
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
- // Get the cache from the bridge array.
- __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- frame_->EmitPush(rax); // <- slot 3
- frame_->EmitPush(rdx); // <- slot 2
- __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
- frame_->EmitPush(rax); // <- slot 1
- frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
- entry.Jump();
+// Convert from signed to unsigned comparison to match the way EFLAGS are set
+// by FPU and XMM compare instructions.
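+// Unordered (NaN) operands set the parity flag instead, so callers must
+// handle NaN comparisons separately.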
+static Condition DoubleCondition(Condition cc) {
+ switch (cc) {
+ case less: return below;
+ case equal: return equal;
+ case less_equal: return below_equal;
+ case greater: return above;
+ case greater_equal: return above_equal;
+ default: UNREACHABLE();
+ }
+ UNREACHABLE();
+ return equal;
+}
- fixed_array.Bind();
- // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
- frame_->EmitPush(Smi::FromInt(0)); // <- slot 3
- frame_->EmitPush(rax); // <- slot 2
- // Push the length of the array and the initial index onto the stack.
- __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
- frame_->EmitPush(rax); // <- slot 1
- frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
+void CodeGenerator::Comparison(AstNode* node,
+ Condition cc,
+ bool strict,
+ ControlDestination* dest) {
+ // Strict only makes sense for equality comparisons.
+ ASSERT(!strict || cc == equal);
- // Condition.
- entry.Bind();
- // Grab the current frame's height for the break and continue
- // targets only after all the state is pushed on the frame.
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ Result left_side;
+ Result right_side;
+ // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
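+ // For example, a > b is compiled as b < a, with the operands popped
+ // in swapped order.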
+ if (cc == greater || cc == less_equal) {
+ cc = ReverseCondition(cc);
+ left_side = frame_->Pop();
+ right_side = frame_->Pop();
+ } else {
+ right_side = frame_->Pop();
+ left_side = frame_->Pop();
+ }
+ ASSERT(cc == less || cc == equal || cc == greater_equal);
- __ movq(rax, frame_->ElementAt(0)); // load the current count
- __ SmiCompare(frame_->ElementAt(1), rax); // compare to the array length
- node->break_target()->Branch(below_equal);
-
- // Get the i'th entry of the array.
- __ movq(rdx, frame_->ElementAt(2));
- SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
- __ movq(rbx,
- FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));
-
- // Get the expected map from the stack or a zero map in the
- // permanent slow case rax: current iteration count rbx: i'th entry
- // of the enum cache
- __ movq(rdx, frame_->ElementAt(3));
- // Check if the expected map still matches that of the enumerable.
- // If not, we have to filter the key.
- // rax: current iteration count
- // rbx: i'th entry of the enum cache
- // rdx: expected map value
- __ movq(rcx, frame_->ElementAt(4));
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ cmpq(rcx, rdx);
- end_del_check.Branch(equal);
-
- // Convert the entry to a string (or null if it isn't a property anymore).
- frame_->EmitPush(frame_->ElementAt(4)); // push enumerable
- frame_->EmitPush(rbx); // push entry
- frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
- __ movq(rbx, rax);
-
- // If the property has been removed while iterating, we just skip it.
- __ CompareRoot(rbx, Heap::kNullValueRootIndex);
- node->continue_target()->Branch(equal);
-
- end_del_check.Bind();
- // Store the entry in the 'each' expression and take another spin in the
- // loop. rdx: i'th entry of the enum cache (or string there of)
- frame_->EmitPush(rbx);
- { Reference each(this, node->each());
- // Loading a reference may leave the frame in an unspilled state.
- frame_->SpillAll();
- if (!each.is_illegal()) {
- if (each.size() > 0) {
- frame_->EmitPush(frame_->ElementAt(each.size()));
- each.SetValue(NOT_CONST_INIT);
- frame_->Drop(2); // Drop the original and the copy of the element.
- } else {
- // If the reference has size zero then we can use the value below
- // the reference as if it were above the reference, instead of pushing
- // a new copy of it above the reference.
- each.SetValue(NOT_CONST_INIT);
- frame_->Drop(); // Drop the original of the element.
- }
- }
- }
- // Unloading a reference may leave the frame in an unspilled state.
- frame_->SpillAll();
-
- // Body.
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- VisitAndSpill(node->body());
-
- // Next. Reestablish a spilled frame in case we are coming here via
- // a continue in the body.
- node->continue_target()->Bind();
- frame_->SpillAll();
- frame_->EmitPop(rax);
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- frame_->EmitPush(rax);
- entry.Jump();
-
- // Cleanup. No need to spill because VirtualFrame::Drop is safe for
- // any frame.
- node->break_target()->Bind();
- frame_->Drop(5);
-
- // Exit.
- exit.Bind();
-
- node->continue_target()->Unuse();
- node->break_target()->Unuse();
-}
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ TryCatchStatement");
- CodeForStatementPosition(node);
-
- JumpTarget try_block;
- JumpTarget exit;
-
- try_block.Call();
- // --- Catch block ---
- frame_->EmitPush(rax);
-
- // Store the caught exception in the catch variable.
- Variable* catch_var = node->catch_var()->var();
- ASSERT(catch_var != NULL && catch_var->slot() != NULL);
- StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
-
- // Remove the exception from the stack.
- frame_->Drop();
-
- VisitStatementsAndSpill(node->catch_block()->statements());
- if (has_valid_frame()) {
- exit.Jump();
- }
-
-
- // --- Try block ---
- try_block.Bind();
-
- frame_->PushTryHandler(TRY_CATCH_HANDLER);
- int handler_height = frame_->height();
-
- // Shadow the jump targets for all escapes from the try block, including
- // returns. During shadowing, the original target is hidden as the
- // ShadowTarget and operations on the original actually affect the
- // shadowing target.
- //
- // We should probably try to unify the escaping targets and the return
- // target.
- int nof_escapes = node->escaping_targets()->length();
- List<ShadowTarget*> shadows(1 + nof_escapes);
-
- // Add the shadow target for the function return.
- static const int kReturnShadowIndex = 0;
- shadows.Add(new ShadowTarget(&function_return_));
- bool function_return_was_shadowed = function_return_is_shadowed_;
- function_return_is_shadowed_ = true;
- ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
- // Add the remaining shadow targets.
- for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
- }
-
- // Generate code for the statements in the try block.
- VisitStatementsAndSpill(node->try_block()->statements());
-
- // Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original targets are unshadowed and the
- // ShadowTargets represent the formerly shadowing targets.
- bool has_unlinks = false;
- for (int i = 0; i < shadows.length(); i++) {
- shadows[i]->StopShadowing();
- has_unlinks = has_unlinks || shadows[i]->is_linked();
- }
- function_return_is_shadowed_ = function_return_was_shadowed;
-
- // Get an external reference to the handler address.
- ExternalReference handler_address(Top::k_handler_address);
-
- // Make sure that there's nothing left on the stack above the
- // handler structure.
- if (FLAG_debug_code) {
- __ movq(kScratchRegister, handler_address);
- __ cmpq(rsp, Operand(kScratchRegister, 0));
- __ Assert(equal, "stack pointer should point to top handler");
- }
-
- // If we can fall off the end of the try block, unlink from try chain.
- if (has_valid_frame()) {
- // The next handler address is on top of the frame. Unlink from
- // the handler list and drop the rest of this handler from the
- // frame.
- ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ movq(kScratchRegister, handler_address);
- frame_->EmitPop(Operand(kScratchRegister, 0));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
- if (has_unlinks) {
- exit.Jump();
- }
- }
-
- // Generate unlink code for the (formerly) shadowing targets that
- // have been jumped to. Deallocate each shadow target.
- Result return_value;
- for (int i = 0; i < shadows.length(); i++) {
- if (shadows[i]->is_linked()) {
- // Unlink from try chain; be careful not to destroy the TOS if
- // there is one.
- if (i == kReturnShadowIndex) {
- shadows[i]->Bind(&return_value);
- return_value.ToRegister(rax);
- } else {
- shadows[i]->Bind();
- }
- // Because we can be jumping here (to spilled code) from
- // unspilled code, we need to reestablish a spilled frame at
- // this block.
- frame_->SpillAll();
-
- // Reload sp from the top handler, because some statements that we
- // break from (eg, for...in) may have left stuff on the stack.
- __ movq(kScratchRegister, handler_address);
- __ movq(rsp, Operand(kScratchRegister, 0));
- frame_->Forget(frame_->height() - handler_height);
-
- ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ movq(kScratchRegister, handler_address);
- frame_->EmitPop(Operand(kScratchRegister, 0));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- if (i == kReturnShadowIndex) {
- if (!function_return_is_shadowed_) frame_->PrepareForReturn();
- shadows[i]->other_target()->Jump(&return_value);
- } else {
- shadows[i]->other_target()->Jump();
- }
- }
- }
-
- exit.Bind();
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ TryFinallyStatement");
- CodeForStatementPosition(node);
-
- // State: Used to keep track of reason for entering the finally
- // block. Should probably be extended to hold information for
- // break/continue from within the try block.
- enum { FALLING, THROWING, JUMPING };
-
- JumpTarget try_block;
- JumpTarget finally_block;
-
- try_block.Call();
-
- frame_->EmitPush(rax);
- // In case of thrown exceptions, this is where we continue.
- __ Move(rcx, Smi::FromInt(THROWING));
- finally_block.Jump();
-
- // --- Try block ---
- try_block.Bind();
-
- frame_->PushTryHandler(TRY_FINALLY_HANDLER);
- int handler_height = frame_->height();
-
- // Shadow the jump targets for all escapes from the try block, including
- // returns. During shadowing, the original target is hidden as the
- // ShadowTarget and operations on the original actually affect the
- // shadowing target.
- //
- // We should probably try to unify the escaping targets and the return
- // target.
- int nof_escapes = node->escaping_targets()->length();
- List<ShadowTarget*> shadows(1 + nof_escapes);
-
- // Add the shadow target for the function return.
- static const int kReturnShadowIndex = 0;
- shadows.Add(new ShadowTarget(&function_return_));
- bool function_return_was_shadowed = function_return_is_shadowed_;
- function_return_is_shadowed_ = true;
- ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
- // Add the remaining shadow targets.
- for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
- }
-
- // Generate code for the statements in the try block.
- VisitStatementsAndSpill(node->try_block()->statements());
-
- // Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original targets are unshadowed and the
- // ShadowTargets represent the formerly shadowing targets.
- int nof_unlinks = 0;
- for (int i = 0; i < shadows.length(); i++) {
- shadows[i]->StopShadowing();
- if (shadows[i]->is_linked()) nof_unlinks++;
- }
- function_return_is_shadowed_ = function_return_was_shadowed;
-
- // Get an external reference to the handler address.
- ExternalReference handler_address(Top::k_handler_address);
-
- // If we can fall off the end of the try block, unlink from the try
- // chain and set the state on the frame to FALLING.
- if (has_valid_frame()) {
- // The next handler address is on top of the frame.
- ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ movq(kScratchRegister, handler_address);
- frame_->EmitPop(Operand(kScratchRegister, 0));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- // Fake a top of stack value (unneeded when FALLING) and set the
- // state in ecx, then jump around the unlink blocks if any.
- frame_->EmitPush(Heap::kUndefinedValueRootIndex);
- __ Move(rcx, Smi::FromInt(FALLING));
- if (nof_unlinks > 0) {
- finally_block.Jump();
- }
- }
-
- // Generate code to unlink and set the state for the (formerly)
- // shadowing targets that have been jumped to.
- for (int i = 0; i < shadows.length(); i++) {
- if (shadows[i]->is_linked()) {
- // If we have come from the shadowed return, the return value is
- // on the virtual frame. We must preserve it until it is
- // pushed.
- if (i == kReturnShadowIndex) {
- Result return_value;
- shadows[i]->Bind(&return_value);
- return_value.ToRegister(rax);
- } else {
- shadows[i]->Bind();
- }
- // Because we can be jumping here (to spilled code) from
- // unspilled code, we need to reestablish a spilled frame at
- // this block.
- frame_->SpillAll();
-
- // Reload sp from the top handler, because some statements that
- // we break from (eg, for...in) may have left stuff on the
- // stack.
- __ movq(kScratchRegister, handler_address);
- __ movq(rsp, Operand(kScratchRegister, 0));
- frame_->Forget(frame_->height() - handler_height);
-
- // Unlink this handler and drop it from the frame.
- ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ movq(kScratchRegister, handler_address);
- frame_->EmitPop(Operand(kScratchRegister, 0));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- if (i == kReturnShadowIndex) {
- // If this target shadowed the function return, materialize
- // the return value on the stack.
- frame_->EmitPush(rax);
- } else {
- // Fake TOS for targets that shadowed breaks and continues.
- frame_->EmitPush(Heap::kUndefinedValueRootIndex);
- }
- __ Move(rcx, Smi::FromInt(JUMPING + i));
- if (--nof_unlinks > 0) {
- // If this is not the last unlink block, jump around the next.
- finally_block.Jump();
- }
- }
- }
-
- // --- Finally block ---
- finally_block.Bind();
-
- // Push the state on the stack.
- frame_->EmitPush(rcx);
-
- // We keep two elements on the stack - the (possibly faked) result
- // and the state - while evaluating the finally block.
- //
- // Generate code for the statements in the finally block.
- VisitStatementsAndSpill(node->finally_block()->statements());
-
- if (has_valid_frame()) {
- // Restore state and return value or faked TOS.
- frame_->EmitPop(rcx);
- frame_->EmitPop(rax);
- }
-
- // Generate code to jump to the right destination for all used
- // formerly shadowing targets. Deallocate each shadow target.
- for (int i = 0; i < shadows.length(); i++) {
- if (has_valid_frame() && shadows[i]->is_bound()) {
- BreakTarget* original = shadows[i]->other_target();
- __ SmiCompare(rcx, Smi::FromInt(JUMPING + i));
- if (i == kReturnShadowIndex) {
- // The return value is (already) in rax.
- Result return_value = allocator_->Allocate(rax);
- ASSERT(return_value.is_valid());
- if (function_return_is_shadowed_) {
- original->Branch(equal, &return_value);
- } else {
- // Branch around the preparation for return which may emit
- // code.
- JumpTarget skip;
- skip.Branch(not_equal);
- frame_->PrepareForReturn();
- original->Jump(&return_value);
- skip.Bind();
- }
- } else {
- original->Branch(equal);
- }
- }
- }
-
- if (has_valid_frame()) {
- // Check if we need to rethrow the exception.
- JumpTarget exit;
- __ SmiCompare(rcx, Smi::FromInt(THROWING));
- exit.Branch(not_equal);
-
- // Rethrow exception.
- frame_->EmitPush(rax); // undo pop from above
- frame_->CallRuntime(Runtime::kReThrow, 1);
-
- // Done.
- exit.Bind();
- }
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ DebuggerStatement");
- CodeForStatementPosition(node);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Spill everything, even constants, to the frame.
- frame_->SpillAll();
-
- frame_->DebugBreak();
- // Ignore the return value.
-#endif
-}
-
-
-void CodeGenerator::InstantiateFunction(
- Handle<SharedFunctionInfo> function_info) {
- // The inevitable call will sync frame elements to memory anyway, so
- // we do it eagerly to allow us to push the arguments directly into
- // place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- if (scope()->is_function_scope() && function_info->num_literals() == 0) {
- FastNewClosureStub stub;
- frame_->Push(function_info);
- Result answer = frame_->CallStub(&stub, 1);
- frame_->Push(&answer);
- } else {
- // Call the runtime to instantiate the function based on the
- // shared function info.
- frame_->EmitPush(rsi);
- frame_->EmitPush(function_info);
- Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
- frame_->Push(&result);
- }
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
- Comment cmnt(masm_, "[ FunctionLiteral");
-
- // Build the function info and instantiate it.
- Handle<SharedFunctionInfo> function_info =
- Compiler::BuildFunctionInfo(node, script(), this);
- // Check for stack-overflow exception.
- if (HasStackOverflow()) return;
- InstantiateFunction(function_info);
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
- InstantiateFunction(node->shared_function_info());
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
- Comment cmnt(masm_, "[ Conditional");
- JumpTarget then;
- JumpTarget else_;
- JumpTarget exit;
- ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // The else target was bound, so we compile the else part first.
- Load(node->else_expression());
-
- if (then.is_linked()) {
- exit.Jump();
- then.Bind();
- Load(node->then_expression());
- }
- } else {
- // The then target was bound, so we compile the then part first.
- Load(node->then_expression());
-
- if (else_.is_linked()) {
- exit.Jump();
- else_.Bind();
- Load(node->else_expression());
- }
- }
-
- exit.Bind();
-}
-
-
-void CodeGenerator::VisitSlot(Slot* node) {
- Comment cmnt(masm_, "[ Slot");
- LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
- Comment cmnt(masm_, "[ VariableProxy");
- Variable* var = node->var();
- Expression* expr = var->rewrite();
- if (expr != NULL) {
- Visit(expr);
- } else {
- ASSERT(var->is_global());
- Reference ref(this, node);
- ref.GetValue();
- }
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
- Comment cmnt(masm_, "[ Literal");
- frame_->Push(node->handle());
-}
-
-
-// Materialize the regexp literal 'node' in the literals array
-// 'literals' of the function. Leave the regexp boilerplate in
-// 'boilerplate'.
-class DeferredRegExpLiteral: public DeferredCode {
- public:
- DeferredRegExpLiteral(Register boilerplate,
- Register literals,
- RegExpLiteral* node)
- : boilerplate_(boilerplate), literals_(literals), node_(node) {
- set_comment("[ DeferredRegExpLiteral");
- }
-
- void Generate();
-
- private:
- Register boilerplate_;
- Register literals_;
- RegExpLiteral* node_;
-};
-
-
-void DeferredRegExpLiteral::Generate() {
- // Since the entry is undefined we call the runtime system to
- // compute the literal.
- // Literal array (0).
- __ push(literals_);
- // Literal index (1).
- __ Push(Smi::FromInt(node_->literal_index()));
- // RegExp pattern (2).
- __ Push(node_->pattern());
- // RegExp flags (3).
- __ Push(node_->flags());
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
- Comment cmnt(masm_, "[ RegExp Literal");
-
- // Retrieve the literals array and check the allocated entry. Begin
- // with a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ movq(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
- // Load the literal at the ast saved index.
- Result boilerplate = allocator_->Allocate();
- ASSERT(boilerplate.is_valid());
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
-
- // Check whether we need to materialize the RegExp object. If so,
- // jump to the deferred code passing the literals array.
- DeferredRegExpLiteral* deferred =
- new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
- __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
- deferred->Branch(equal);
- deferred->BindExit();
- literals.Unuse();
-
- // Push the boilerplate object.
- frame_->Push(&boilerplate);
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
- Comment cmnt(masm_, "[ ObjectLiteral");
-
- // Load a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ movq(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
- // Literal array.
- frame_->Push(&literals);
- // Literal index.
- frame_->Push(Smi::FromInt(node->literal_index()));
- // Constant properties.
- frame_->Push(node->constant_properties());
- // Should the object literal have fast elements?
- frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
- Result clone;
- if (node->depth() > 1) {
- clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
- clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
- }
- frame_->Push(&clone);
-
- for (int i = 0; i < node->properties()->length(); i++) {
- ObjectLiteral::Property* property = node->properties()->at(i);
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- break;
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
- // else fall through.
- case ObjectLiteral::Property::COMPUTED: {
- Handle<Object> key(property->key()->handle());
- if (key->IsSymbol()) {
- // Duplicate the object as the IC receiver.
- frame_->Dup();
- Load(property->value());
- frame_->Push(key);
- Result ignored = frame_->CallStoreIC();
- break;
- }
- // Fall through
- }
- case ObjectLiteral::Property::PROTOTYPE: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- Load(property->value());
- Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
- // Ignore the result.
- break;
- }
- case ObjectLiteral::Property::SETTER: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- frame_->Push(Smi::FromInt(1));
- Load(property->value());
- Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- // Ignore the result.
- break;
- }
- case ObjectLiteral::Property::GETTER: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- frame_->Push(Smi::FromInt(0));
- Load(property->value());
- Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- // Ignore the result.
- break;
- }
- default: UNREACHABLE();
- }
- }
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- // Load a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ movq(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
- frame_->Push(&literals);
- frame_->Push(Smi::FromInt(node->literal_index()));
- frame_->Push(node->constant_elements());
- int length = node->values()->length();
- Result clone;
- if (node->depth() > 1) {
- clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
- clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- } else {
- FastCloneShallowArrayStub stub(length);
- clone = frame_->CallStub(&stub, 3);
+ // If either side is a constant smi, optimize the comparison.
+ bool left_side_constant_smi = false;
+ bool left_side_constant_null = false;
+ bool left_side_constant_1_char_string = false;
+ if (left_side.is_constant()) {
+ left_side_constant_smi = left_side.handle()->IsSmi();
+ left_side_constant_null = left_side.handle()->IsNull();
+ left_side_constant_1_char_string =
+ (left_side.handle()->IsString() &&
+ String::cast(*left_side.handle())->length() == 1 &&
+ String::cast(*left_side.handle())->IsAsciiRepresentation());
}
- frame_->Push(&clone);
-
- // Generate code to set the elements in the array that are not
- // literals.
- for (int i = 0; i < length; i++) {
- Expression* value = node->values()->at(i);
-
- // If value is a literal the property value is already set in the
- // boilerplate object.
- if (value->AsLiteral() != NULL) continue;
- // If value is a materialized literal the property value is already set
- // in the boilerplate object if it is simple.
- if (CompileTimeValue::IsCompileTimeValue(value)) continue;
-
- // The property must be set by generated code.
- Load(value);
-
- // Get the property value off the stack.
- Result prop_value = frame_->Pop();
- prop_value.ToRegister();
-
- // Fetch the array literal while leaving a copy on the stack and
- // use it to get the elements array.
- frame_->Dup();
- Result elements = frame_->Pop();
- elements.ToRegister();
- frame_->Spill(elements.reg());
- // Get the elements FixedArray.
- __ movq(elements.reg(),
- FieldOperand(elements.reg(), JSObject::kElementsOffset));
-
- // Write to the indexed properties array.
- int offset = i * kPointerSize + FixedArray::kHeaderSize;
- __ movq(FieldOperand(elements.reg(), offset), prop_value.reg());
-
- // Update the write barrier for the array address.
- frame_->Spill(prop_value.reg()); // Overwritten by the write barrier.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
+ bool right_side_constant_smi = false;
+ bool right_side_constant_null = false;
+ bool right_side_constant_1_char_string = false;
+ if (right_side.is_constant()) {
+ right_side_constant_smi = right_side.handle()->IsSmi();
+ right_side_constant_null = right_side.handle()->IsNull();
+ right_side_constant_1_char_string =
+ (right_side.handle()->IsString() &&
+ String::cast(*right_side.handle())->length() == 1 &&
+ String::cast(*right_side.handle())->IsAsciiRepresentation());
}
-}
+ if (left_side_constant_smi || right_side_constant_smi) {
+ if (left_side_constant_smi && right_side_constant_smi) {
+ // Trivial case, comparing two constants.
+ int left_value = Smi::cast(*left_side.handle())->value();
+ int right_value = Smi::cast(*right_side.handle())->value();
+ switch (cc) {
+ case less:
+ dest->Goto(left_value < right_value);
+ break;
+ case equal:
+ dest->Goto(left_value == right_value);
+ break;
+ case greater_equal:
+ dest->Goto(left_value >= right_value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // Only one side is a constant Smi.
+ // If left side is a constant Smi, reverse the operands.
+ // Since one side is a constant Smi, conversion order does not matter.
+ if (left_side_constant_smi) {
+ Result temp = left_side;
+ left_side = right_side;
+ right_side = temp;
+ cc = ReverseCondition(cc);
+ // This may re-introduce greater or less_equal as the value of cc.
+ // CompareStub and the inline code both support all values of cc.
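+ // For example, (1 < x) becomes (x > 1): the operands swap and the
+ // condition is reversed, not negated.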
+ }
+ // Implement comparison against a constant Smi, inlining the case
+ // where both sides are Smis.
+ left_side.ToRegister();
+ Register left_reg = left_side.reg();
+ Handle<Object> right_val = right_side.handle();
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
- ASSERT(!in_spilled_code());
- // Call runtime routine to allocate the catch extension object and
- // assign the exception value to the catch variable.
- Comment cmnt(masm_, "[ CatchExtensionObject");
- Load(node->key());
- Load(node->value());
- Result result =
- frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- frame_->Push(&result);
-}
+ // Here we split control flow to the stub call and inlined cases
+ // before finally splitting it to the control destination. We use
+ // a jump target and branching to duplicate the virtual frame at
+ // the first split. We manually handle the off-frame references
+ // by reconstituting them on the non-fall-through path.
+ JumpTarget is_smi;
+ if (left_side.is_smi()) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left_side.reg());
+ }
+ } else {
+ Condition left_is_smi = masm_->CheckSmi(left_side.reg());
+ is_smi.Branch(left_is_smi);
-void CodeGenerator::VisitAssignment(Assignment* node) {
- Comment cmnt(masm_, "[ Assignment");
+ bool is_loop_condition = (node->AsExpression() != NULL) &&
+ node->AsExpression()->is_loop_condition();
+ if (!is_loop_condition && right_val->IsSmi()) {
+ // Right side is a constant smi and left side has been checked
+ // not to be a smi.
+ JumpTarget not_number;
+ __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ not_number.Branch(not_equal, &left_side);
+ __ movsd(xmm1,
+ FieldOperand(left_reg, HeapNumber::kValueOffset));
+ int value = Smi::cast(*right_val)->value();
+ if (value == 0) {
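+ // Zero xmm0 directly; xorpd avoids materializing a double constant.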
+ __ xorpd(xmm0, xmm0);
+ } else {
+ Result temp = allocator()->Allocate();
+ __ movl(temp.reg(), Immediate(value));
+ __ cvtlsi2sd(xmm0, temp.reg());
+ temp.Unuse();
+ }
+ __ ucomisd(xmm1, xmm0);
+ // Jump to builtin for NaN.
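+ // (ucomisd sets the parity flag when the comparison is unordered,
+ // i.e. when either operand is NaN.)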
+ not_number.Branch(parity_even, &left_side);
+ left_side.Unuse();
+ dest->true_target()->Branch(DoubleCondition(cc));
+ dest->false_target()->Jump();
+ not_number.Bind(&left_side);
+ }
- { Reference target(this, node->target(), node->is_compound());
- if (target.is_illegal()) {
- // Fool the virtual frame into thinking that we left the assignment's
- // value on the frame.
- frame_->Push(Smi::FromInt(0));
- return;
- }
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
+ // Set up and call the compare stub.
+ CompareStub stub(cc, strict, kCantBothBeNaN);
+ Result result = frame_->CallStub(&stub, &left_side, &right_side);
+ result.ToRegister();
+ __ testq(result.reg(), result.reg());
+ result.Unuse();
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
- if (node->starts_initialization_block()) {
- ASSERT(target.type() == Reference::NAMED ||
- target.type() == Reference::KEYED);
- // Change to slow case in the beginning of an initialization
- // block to avoid the quadratic behavior of repeatedly adding
- // fast properties.
+ is_smi.Bind();
+ }
- // The receiver is the argument to the runtime call. It is the
- // first value pushed when the reference was loaded to the
- // frame.
- frame_->PushElementAt(target.size() - 1);
- Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+ left_side = Result(left_reg);
+ right_side = Result(right_val);
+ // Test smi equality and comparison by signed int comparison.
+ // Both sides are smis, so we can use an Immediate.
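+ // Smi tagging preserves order, so tagged values compare like
+ // untagged integers.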
+ __ SmiCompare(left_side.reg(), Smi::cast(*right_side.handle()));
+ left_side.Unuse();
+ right_side.Unuse();
+ dest->Split(cc);
}
- if (node->ends_initialization_block()) {
- // Add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- ASSERT(target.type() == Reference::NAMED ||
- target.type() == Reference::KEYED);
- if (target.type() == Reference::NAMED) {
- frame_->Dup();
- // Dup target receiver on stack.
- } else {
- ASSERT(target.type() == Reference::KEYED);
- Result temp = frame_->Pop();
- frame_->Dup();
- frame_->Push(&temp);
- }
+ } else if (cc == equal &&
+ (left_side_constant_null || right_side_constant_null)) {
+ // To make null checks efficient, we check if either the left side or
+ // the right side is the constant 'null'.
+ // If so, we optimize the code by inlining a null check instead of
+ // calling the (very) general runtime routine for checking equality.
+ Result operand = left_side_constant_null ? right_side : left_side;
+ right_side.Unuse();
+ left_side.Unuse();
+ operand.ToRegister();
+ __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
+ if (strict) {
+ operand.Unuse();
+ dest->Split(equal);
+ } else {
+ // The 'null' value is only equal to 'undefined' if using non-strict
+ // comparisons.
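+ // In JS terms, (x == null) also holds when x is undefined or an
+ // undetectable host object.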
+ dest->true_target()->Branch(equal);
+ __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
+ dest->true_target()->Branch(equal);
+ Condition is_smi = masm_->CheckSmi(operand.reg());
+ dest->false_target()->Branch(is_smi);
+
+ // It can be an undetectable object.
+ // Use a scratch register in preference to spilling operand.reg().
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(),
+ FieldOperand(operand.reg(), HeapObject::kMapOffset));
+ __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ temp.Unuse();
+ operand.Unuse();
+ dest->Split(not_zero);
}
- if (node->op() == Token::ASSIGN ||
- node->op() == Token::INIT_VAR ||
- node->op() == Token::INIT_CONST) {
- Load(node->value());
+ } else if (left_side_constant_1_char_string ||
+ right_side_constant_1_char_string) {
+ if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
+ // Trivial case, comparing two constants.
+ int left_value = String::cast(*left_side.handle())->Get(0);
+ int right_value = String::cast(*right_side.handle())->Get(0);
+ switch (cc) {
+ case less:
+ dest->Goto(left_value < right_value);
+ break;
+ case equal:
+ dest->Goto(left_value == right_value);
+ break;
+ case greater_equal:
+ dest->Goto(left_value >= right_value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // Only one side is a constant 1-character string.
+ // If left side is a constant 1-character string, reverse the operands.
+ // Since one side is a constant string, conversion order does not matter.
+ if (left_side_constant_1_char_string) {
+ Result temp = left_side;
+ left_side = right_side;
+ right_side = temp;
+ cc = ReverseCondition(cc);
+ // This may reintroduce greater or less_equal as the value of cc.
+ // CompareStub and the inline code both support all values of cc.
+ }
+ // Implement comparison against a constant string, inlining the case
+ // where both sides are strings.
+ left_side.ToRegister();
- } else { // Assignment is a compound assignment.
- Literal* literal = node->value()->AsLiteral();
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
- Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
- // There are two cases where the target is not read in the right hand
- // side, that are easy to test for: the right hand side is a literal,
- // or the right hand side is a different variable. TakeValue invalidates
- // the target, with an implicit promise that it will be written to again
- // before it is read.
- if (literal != NULL || (right_var != NULL && right_var != var)) {
- target.TakeValue();
- } else {
- target.GetValue();
+ // Here we split control flow to the stub call and inlined cases
+ // before finally splitting it to the control destination. We use
+ // a jump target and branching to duplicate the virtual frame at
+ // the first split. We manually handle the off-frame references
+ // by reconstituting them on the non-fall-through path.
+ JumpTarget is_not_string, is_string;
+ Register left_reg = left_side.reg();
+ Handle<Object> right_val = right_side.handle();
+ ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
+ Condition is_smi = masm()->CheckSmi(left_reg);
+ is_not_string.Branch(is_smi, &left_side);
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(),
+ FieldOperand(left_reg, HeapObject::kMapOffset));
+ __ movzxbl(temp.reg(),
+ FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
+ // If we are testing for equality then make use of the symbol shortcut.
+ // Check if the left hand side has the same type as the right hand
+ // side (which is always a symbol).
+ if (cc == equal) {
+ Label not_a_symbol;
+ ASSERT(kSymbolTag != 0);
+ // Ensure that no non-strings have the symbol bit set.
+ ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+ __ testb(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
+ __ j(zero, &not_a_symbol);
+ // They are symbols, so do identity compare.
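+ // Symbols are interned, so pointer equality implies string equality.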
+ __ Cmp(left_reg, right_side.handle());
+ dest->true_target()->Branch(equal);
+ dest->false_target()->Branch(not_equal);
+ __ bind(&not_a_symbol);
}
- Load(node->value());
- BinaryOperation expr(node, node->binary_op(), node->target(),
- node->value());
- GenericBinaryOperation(&expr,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- }
+ // Call the compare stub if the left side is not a flat ascii string.
+ __ andb(temp.reg(),
+ Immediate(kIsNotStringMask |
+ kStringRepresentationMask |
+ kStringEncodingMask));
+ __ cmpb(temp.reg(),
+ Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+ temp.Unuse();
+ is_string.Branch(equal, &left_side);
- if (var != NULL &&
- var->mode() == Variable::CONST &&
- node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
- // Assignment ignored - leave the value on the stack.
- UnloadReference(&target);
- } else {
- CodeForSourcePosition(node->position());
- if (node->op() == Token::INIT_CONST) {
- // Dynamic constant initializations must use the function context
- // and initialize the actual constant declared. Dynamic variable
- // initializations are simply assignments and use SetValue.
- target.SetValue(CONST_INIT);
+ // Set up and call the compare stub.
+ is_not_string.Bind(&left_side);
+ CompareStub stub(cc, strict, kCantBothBeNaN);
+ Result result = frame_->CallStub(&stub, &left_side, &right_side);
+ result.ToRegister();
+ __ testq(result.reg(), result.reg());
+ result.Unuse();
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
+
+ is_string.Bind(&left_side);
+ // left_side is a sequential ASCII string.
+ ASSERT(left_side.reg().is(left_reg));
+ right_side = Result(right_val);
+ Result temp2 = allocator_->Allocate();
+ ASSERT(temp2.is_valid());
+ // Test string equality and comparison.
+ if (cc == equal) {
+ Label comparison_done;
+ __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
+ Smi::FromInt(1));
+ __ j(not_equal, &comparison_done);
+ uint8_t char_value =
+ static_cast<uint8_t>(String::cast(*right_val)->Get(0));
+ __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
+ Immediate(char_value));
+ __ bind(&comparison_done);
} else {
- target.SetValue(NOT_CONST_INIT);
- }
- if (node->ends_initialization_block()) {
- ASSERT(target.type() == Reference::UNLOADED);
- // End of initialization block. Revert to fast case. The
- // argument to the runtime call is the extra copy of the receiver,
- // which is below the value of the assignment.
- // Swap the receiver and the value of the assignment expression.
- Result lhs = frame_->Pop();
- Result receiver = frame_->Pop();
- frame_->Push(&lhs);
- frame_->Push(&receiver);
- Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
+ __ movq(temp2.reg(),
+ FieldOperand(left_side.reg(), String::kLengthOffset));
+ __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1));
+ Label comparison;
+ // If the length is 0 then the subtraction gave -1 which compares less
+ // than any character.
+ __ j(negative, &comparison);
+ // Otherwise load the first character.
+ __ movzxbl(temp2.reg(),
+ FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
+ __ bind(&comparison);
+ // Compare the first character of the string with the
+ // constant 1-character string.
+ uint8_t char_value =
+ static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
+ __ cmpb(temp2.reg(), Immediate(char_value));
+ Label characters_were_different;
+ __ j(not_equal, &characters_were_different);
+ // If the first character is the same then the long string sorts after
+ // the short one.
+ __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
+ Smi::FromInt(1));
+ __ bind(&characters_were_different);
}
+ temp2.Unuse();
+ left_side.Unuse();
+ right_side.Unuse();
+ dest->Split(cc);
}
- }
-}
-
+ } else {
+ // Neither side is a constant Smi, constant 1-char string, or constant null.
+ // If either side is a non-smi constant, skip the smi check.
+ bool known_non_smi =
+ (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
+ (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
+ left_side.type_info().IsDouble() ||
+ right_side.type_info().IsDouble();
-void CodeGenerator::VisitThrow(Throw* node) {
- Comment cmnt(masm_, "[ Throw");
- Load(node->exception());
- Result result = frame_->CallRuntime(Runtime::kThrow, 1);
- frame_->Push(&result);
-}
+ NaNInformation nan_info =
+ (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
+ kBothCouldBeNaN :
+ kCantBothBeNaN;
+ // Inline the number comparison, handling any combination of smis and heap
+ // numbers, if:
+ //   the code is in a loop,
+ //   the compare operation is different from equal, and
+ //   the compare is not a for-loop condition.
+ // The reason for excluding equal is that it will most likely be done
+ // with smis (not heap numbers), and the code for comparing smis is inlined
+ // separately. The same reasoning applies to for-loop conditions, which will
+ // also most likely be smi comparisons.
+ bool is_loop_condition = (node->AsExpression() != NULL)
+ && node->AsExpression()->is_loop_condition();
+ bool inline_number_compare =
+ loop_nesting() > 0 && cc != equal && !is_loop_condition;
-void CodeGenerator::VisitProperty(Property* node) {
- Comment cmnt(masm_, "[ Property");
- Reference property(this, node);
- property.GetValue();
-}
+ left_side.ToRegister();
+ right_side.ToRegister();
+ if (known_non_smi) {
+ // Inlined equality check:
+ // If at least one of the objects is not NaN, then if the objects
+ // are identical, they are equal.
+ if (nan_info == kCantBothBeNaN && cc == equal) {
+ __ cmpq(left_side.reg(), right_side.reg());
+ dest->true_target()->Branch(equal);
+ }
-void CodeGenerator::VisitCall(Call* node) {
- Comment cmnt(masm_, "[ Call");
+ // Inlined number comparison:
+ if (inline_number_compare) {
+ GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+ }
- ZoneList<Expression*>* args = node->arguments();
+ CompareStub stub(cc, strict, nan_info, !inline_number_compare);
+ Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+ __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags.
+ answer.Unuse();
+ dest->Split(cc);
+ } else {
+ // Here we split control flow to the stub call and inlined cases
+ // before finally splitting it to the control destination. We use
+ // a jump target and branching to duplicate the virtual frame at
+ // the first split. We manually handle the off-frame references
+ // by reconstituting them on the non-fall-through path.
+ JumpTarget is_smi;
+ Register left_reg = left_side.reg();
+ Register right_reg = right_side.reg();
- // Check if the function is a variable or a property.
- Expression* function = node->expression();
- Variable* var = function->AsVariableProxy()->AsVariable();
- Property* property = function->AsProperty();
+ Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
+ is_smi.Branch(both_smi);
- // ------------------------------------------------------------------------
- // Fast-case: Use inline caching.
- // ---
- // According to ECMA-262, section 11.2.3, page 44, the function to call
- // must be resolved after the arguments have been evaluated. The IC code
- // automatically handles this by loading the arguments before the function
- // is resolved in cache misses (this also holds for megamorphic calls).
- // ------------------------------------------------------------------------
+ // Inline the equality check if both operands can't be a NaN. If both
+ // objects are the same they are equal.
+ if (nan_info == kCantBothBeNaN && cc == equal) {
+ __ cmpq(left_side.reg(), right_side.reg());
+ dest->true_target()->Branch(equal);
+ }
- if (var != NULL && var->is_possibly_eval()) {
- // ----------------------------------
- // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
- // ----------------------------------
+ // Inlined number comparison:
+ if (inline_number_compare) {
+ GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+ }
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
+ CompareStub stub(cc, strict, nan_info, !inline_number_compare);
+ Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+ __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags.
+ answer.Unuse();
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
- // Prepare the stack for the call to the resolved function.
- Load(function);
+ is_smi.Bind();
+ left_side = Result(left_reg);
+ right_side = Result(right_reg);
+ __ SmiCompare(left_side.reg(), right_side.reg());
+ right_side.Unuse();
+ left_side.Unuse();
+ dest->Split(cc);
+ }
+ }
+}
- // Allocate a frame slot for the receiver.
- frame_->Push(Factory::undefined_value());
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
+// Load a comparison operand into an XMM register. Jump to the not_numbers
+// jump target, passing the left and right results, if the operand is not
+// a number.
+static void LoadComparisonOperand(MacroAssembler* masm_,
+ Result* operand,
+ XMMRegister xmm_reg,
+ Result* left_side,
+ Result* right_side,
+ JumpTarget* not_numbers) {
+ Label done;
+ if (operand->type_info().IsDouble()) {
+ // Operand is known to be a heap number, just load it.
+ __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+ } else if (operand->type_info().IsSmi()) {
+ // Operand is known to be a smi. Convert it to double and keep the original
+ // smi.
+ __ SmiToInteger32(kScratchRegister, operand->reg());
+ __ cvtlsi2sd(xmm_reg, kScratchRegister);
+ } else {
+ // Operand type not known, check for smi or heap number.
+ Label smi;
+ __ JumpIfSmi(operand->reg(), &smi);
+ if (!operand->type_info().IsNumber()) {
+ __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
+ __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset),
+ kScratchRegister);
+ not_numbers->Branch(not_equal, left_side, right_side, taken);
}
+ __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+ __ jmp(&done);
- // Result to hold the result of the function resolution and the
- // final result of the eval call.
- Result result;
+ __ bind(&smi);
+ // Convert the smi to a double and keep the original smi.
+ __ SmiToInteger32(kScratchRegister, operand->reg());
+ __ cvtlsi2sd(xmm_reg, kScratchRegister);
+ __ jmp(&done);
+ }
+ __ bind(&done);
+}
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly
- // in generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- JumpTarget done;
- if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
- ASSERT(var->slot()->type() == Slot::LOOKUP);
- JumpTarget slow;
- // Prepare the stack for the call to
- // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
- // function, the first argument to the eval call and the
- // receiver.
- Result fun = LoadFromGlobalSlotCheckExtensions(var->slot(),
- NOT_INSIDE_TYPEOF,
- &slow);
- frame_->Push(&fun);
- if (arg_count > 0) {
- frame_->PushElementAt(arg_count);
- } else {
- frame_->Push(Factory::undefined_value());
- }
- frame_->PushParameterAt(-1);
- // Resolve the call.
- result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
+void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
+ Result* right_side,
+ Condition cc,
+ ControlDestination* dest) {
+ ASSERT(left_side->is_register());
+ ASSERT(right_side->is_register());
- done.Jump(&result);
- slow.Bind();
- }
+ JumpTarget not_numbers;
+ // Load left and right operand into registers xmm0 and xmm1 and compare.
+ LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side,
+ &not_numbers);
+ LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
+ &not_numbers);
+ __ ucomisd(xmm0, xmm1);
+ // Bail out if a NaN is involved.
+ not_numbers.Branch(parity_even, left_side, right_side);
- // Prepare the stack for the call to ResolvePossiblyDirectEval by
- // pushing the loaded function, the first argument to the eval
- // call and the receiver.
- frame_->PushElementAt(arg_count + 1);
- if (arg_count > 0) {
- frame_->PushElementAt(arg_count);
- } else {
- frame_->Push(Factory::undefined_value());
- }
- frame_->PushParameterAt(-1);
+ // Split to destination targets based on comparison.
+ left_side->Unuse();
+ right_side->Unuse();
+ dest->true_target()->Branch(DoubleCondition(cc));
+ dest->false_target()->Jump();
- // Resolve the call.
- result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ not_numbers.Bind(left_side, right_side);
+}
- // If we generated fast-case code bind the jump-target where fast
- // and slow case merge.
- if (done.is_linked()) done.Bind(&result);
- // The runtime call returns a pair of values in rax (function) and
- // rdx (receiver). Touch up the stack with the right values.
- Result receiver = allocator_->Allocate(rdx);
- frame_->SetElementAt(arg_count + 1, &result);
- frame_->SetElementAt(arg_count, &receiver);
- receiver.Unuse();
+// Call the function just below TOS on the stack with the given
+// arguments. The receiver is the TOS.
+void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+ CallFunctionFlags flags,
+ int position) {
+ // Push the arguments ("left-to-right") on the stack.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ frame_->SpillTop();
+ }
- // Call the function.
- CodeForSourcePosition(node->position());
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
- result = frame_->CallStub(&call_function, arg_count + 1);
+ // Record the position for debugging purposes.
+ CodeForSourcePosition(position);
- // Restore the context and overwrite the function on the stack with
- // the result.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &result);
+ // Use the shared code stub to call the function.
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop, flags);
+ Result answer = frame_->CallStub(&call_function, arg_count + 1);
+ // Restore context and replace function on the stack with the
+ // result of the stub invocation.
+ frame_->RestoreContextRegister();
+ frame_->SetElementAt(0, &answer);
+}
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is global
- // ----------------------------------
- // Pass the global object as the receiver and let the IC stub
- // patch the stack to use the global proxy as 'this' in the
- // invoked function.
- LoadGlobal();
+void CodeGenerator::CallApplyLazy(Expression* applicand,
+ Expression* receiver,
+ VariableProxy* arguments,
+ int position) {
+ // An optimized implementation of expressions of the form
+ // x.apply(y, arguments).
+ // If the arguments object of the scope has not been allocated,
+ // and x.apply is Function.prototype.apply, this optimization
+ // just copies y and the arguments of the current function on the
+ // stack, as receiver and arguments, and calls x.
+ // In the implementation comments, we call x the applicand
+ // and y the receiver.
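+ // A typical source pattern for this optimization is, for example:
+ //   function f(y) { return x.apply(y, arguments); }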
+ ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
+ ASSERT(arguments->IsArguments());
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
+ // Load applicand.apply onto the stack. This will usually
+ // give us a megamorphic load site. Not super, but it works.
+ Load(applicand);
+ frame()->Dup();
+ Handle<String> name = Factory::LookupAsciiSymbol("apply");
+ frame()->Push(name);
+ Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
+ __ nop();
+ frame()->Push(&answer);
- // Push the name of the function on the frame.
- frame_->Push(var->name());
+ // Load the receiver and the existing arguments object onto the
+ // expression stack. Avoid allocating the arguments object here.
+ Load(receiver);
+ LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
- // Call the IC initialization code.
- CodeForSourcePosition(node->position());
- Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
- arg_count,
- loop_nesting());
- frame_->RestoreContextRegister();
- // Replace the function on the stack with the result.
- frame_->Push(&result);
+ // Emit the source position information after having loaded the
+ // receiver and the arguments.
+ CodeForSourcePosition(position);
+ // Contents of frame at this point:
+ // Frame[0]: arguments object of the current function or the hole.
+ // Frame[1]: receiver
+ // Frame[2]: applicand.apply
+ // Frame[3]: applicand.
- } else if (var != NULL && var->slot() != NULL &&
- var->slot()->type() == Slot::LOOKUP) {
- // ----------------------------------
- // JavaScript examples:
- //
- // with (obj) foo(1, 2, 3) // foo may be in obj.
- //
- // function f() {};
- // function g() {
- // eval(...);
- // f(); // f could be in extension object.
- // }
- // ----------------------------------
+ // Check if the arguments object has been lazily allocated
+ // already. If so, just use that instead of copying the arguments
+ // from the stack. This also deals with cases where a local variable
+ // named 'arguments' has been introduced.
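+ // An arguments object that was never allocated is represented by
+ // the hole value.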
+ frame_->Dup();
+ Result probe = frame_->Pop();
+ { VirtualFrame::SpilledScope spilled_scope;
+ Label slow, done;
+ bool try_lazy = true;
+ if (probe.is_constant()) {
+ try_lazy = probe.handle()->IsTheHole();
+ } else {
+ __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
+ probe.Unuse();
+ __ j(not_equal, &slow);
+ }
- JumpTarget slow, done;
- Result function;
+ if (try_lazy) {
+ Label build_args;
+ // Get rid of the arguments object probe.
+ frame_->Drop(); // Can be called on a spilled frame.
+ // Stack now has 3 elements on it.
+ // Contents of stack at this point:
+ // rsp[0]: receiver
+ // rsp[1]: applicand.apply
+ // rsp[2]: applicand.
- // Generate fast case for loading functions from slots that
- // correspond to local/global variables or arguments unless they
- // are shadowed by eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(var->slot(),
- NOT_INSIDE_TYPEOF,
- &function,
- &slow,
- &done);
+ // Check that the receiver really is a JavaScript object.
+ __ movq(rax, Operand(rsp, 0));
+ Condition is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, &build_args);
+ // We allow all JSObjects including JSFunctions. As long as
+ // JS_FUNCTION_TYPE is the last instance type and it is right
+ // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
+ // bound.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, &build_args);
- slow.Bind();
- // Load the function from the context. Sync the frame so we can
- // push the arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(rsi);
- frame_->EmitPush(var->name());
- frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- // The runtime call returns a pair of values in rax and rdx. The
- // looked-up function is in rax and the receiver is in rdx. These
- // register references are not ref counted here. We spill them
- // eagerly since they are arguments to an inevitable call (and are
- // not sharable by the arguments).
- ASSERT(!allocator()->is_used(rax));
- frame_->EmitPush(rax);
+ // Check that applicand.apply is Function.prototype.apply.
+ __ movq(rax, Operand(rsp, kPointerSize));
+ is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, &build_args);
+ __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &build_args);
+ __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+ Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
+ __ Cmp(FieldOperand(rax, SharedFunctionInfo::kCodeOffset), apply_code);
+ __ j(not_equal, &build_args);
- // Load the receiver.
- ASSERT(!allocator()->is_used(rdx));
- frame_->EmitPush(rdx);
+ // Check that applicand is a function.
+ __ movq(rdi, Operand(rsp, 2 * kPointerSize));
+ is_smi = masm_->CheckSmi(rdi);
+ __ j(is_smi, &build_args);
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &build_args);
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- JumpTarget call;
- call.Jump();
- done.Bind(&function);
- frame_->Push(&function);
- LoadGlobalReceiver();
- call.Bind();
- }
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ Label invoke, adapted;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adapted);
- // Call the function.
- CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
+ // No arguments adaptor frame. Copy fixed number of arguments.
+ __ Set(rax, scope()->num_parameters());
+ for (int i = 0; i < scope()->num_parameters(); i++) {
+ __ push(frame_->ParameterAt(i));
+ }
+ __ jmp(&invoke);
- } else if (property != NULL) {
- // Check if the key is a literal string.
- Literal* literal = property->key()->AsLiteral();
+ // Arguments adaptor frame present. Copy arguments from there, but
+ // avoid copying too many arguments to avoid stack overflows.
+ __ bind(&adapted);
+ static const uint32_t kArgumentsLimit = 1 * KB;
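+ // Copying at most 1024 arguments bounds the stack growth of this
+ // fast path.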
+ __ SmiToInteger32(rax,
+ Operand(rdx,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movl(rcx, rax);
+ __ cmpl(rax, Immediate(kArgumentsLimit));
+ __ j(above, &build_args);
- if (literal != NULL && literal->handle()->IsSymbol()) {
- // ------------------------------------------------------------------
- // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
- // ------------------------------------------------------------------
+ // Loop through the arguments pushing them onto the execution
+ // stack. We don't inform the virtual frame of the push, so we don't
+ // have to worry about getting rid of the elements from the virtual
+ // frame.
+ Label loop;
+ // rcx is a small non-negative integer, due to the test above.
+ __ testl(rcx, rcx);
+ __ j(zero, &invoke);
+ __ bind(&loop);
+ __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
+ __ decl(rcx);
+ __ j(not_zero, &loop);
- Handle<String> name = Handle<String>::cast(literal->handle());
+ // Invoke the function.
+ __ bind(&invoke);
+ ParameterCount actual(rax);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+ // Drop applicand.apply and applicand from the stack, and push
+ // the result of the function call, but leave the spilled frame
+ // unchanged, with 3 elements, so it is correct when we compile the
+ // slow-case code.
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ __ push(rax);
+ // Stack now has 1 element:
+ // rsp[0]: result
+ __ jmp(&done);
- if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
- name->IsEqualTo(CStrVector("apply")) &&
- args->length() == 2 &&
- args->at(1)->AsVariableProxy() != NULL &&
- args->at(1)->AsVariableProxy()->IsArguments()) {
- // Use the optimized Function.prototype.apply that avoids
- // allocating lazily allocated arguments objects.
- CallApplyLazy(property->obj(),
- args->at(0),
- args->at(1)->AsVariableProxy(),
- node->position());
+ // Slow-case: Allocate the arguments object since we know it isn't
+ // there, and fall-through to the slow-case where we call
+ // applicand.apply.
+ __ bind(&build_args);
+ // The stack again has 3 elements, because we jumped here from a point where:
+ // rsp[0]: receiver
+ // rsp[1]: applicand.apply
+ // rsp[2]: applicand.
- } else {
- // Push the receiver onto the frame.
- Load(property->obj());
+ // StoreArgumentsObject requires a correct frame, and may modify it.
+ Result arguments_object = StoreArgumentsObject(false);
+ frame_->SpillAll();
+ arguments_object.ToRegister();
+ frame_->EmitPush(arguments_object.reg());
+ arguments_object.Unuse();
+ // Stack and frame now have 4 elements.
+ __ bind(&slow);
+ }
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
+ // Generic computation of x.apply(y, args) with no special optimization.
+ // Flip applicand.apply and applicand on the stack, so
+ // applicand looks like the receiver of the applicand.apply call.
+ // Then process it as a normal function call.
+ __ movq(rax, Operand(rsp, 3 * kPointerSize));
+ __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+ __ movq(Operand(rsp, 2 * kPointerSize), rax);
+ __ movq(Operand(rsp, 3 * kPointerSize), rbx);
- // Push the name of the function onto the frame.
- frame_->Push(name);
+ CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
+ Result res = frame_->CallStub(&call_function, 3);
+ // The function and its two arguments have been dropped.
+ frame_->Drop(1); // Drop the receiver as well.
+ res.ToRegister();
+ frame_->EmitPush(res.reg());
+ // Stack now has 1 element:
+ // rsp[0]: result
+ if (try_lazy) __ bind(&done);
+ } // End of spilled scope.
+ // Restore the context register after a call.
+ frame_->RestoreContextRegister();
+}
- // Call the IC initialization code.
- CodeForSourcePosition(node->position());
- Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
- arg_count,
- loop_nesting());
- frame_->RestoreContextRegister();
- frame_->Push(&result);
- }
- } else {
- // -------------------------------------------
- // JavaScript example: 'array[index](1, 2, 3)'
- // -------------------------------------------
+class DeferredStackCheck: public DeferredCode {
+ public:
+ DeferredStackCheck() {
+ set_comment("[ DeferredStackCheck");
+ }
- // Load the function to call from the property through a reference.
- if (property->is_synthetic()) {
- Reference ref(this, property, false);
- ref.GetValue();
- // Use global object as receiver.
- LoadGlobalReceiver();
- // Call the function.
- CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
- } else {
- // Push the receiver onto the frame.
- Load(property->obj());
+ virtual void Generate();
+};
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
- // Load the name of the function.
- Load(property->key());
+void DeferredStackCheck::Generate() {
+ StackCheckStub stub;
+ __ CallStub(&stub);
+}
- // Call the IC initialization code.
- CodeForSourcePosition(node->position());
- Result result = frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
- arg_count,
- loop_nesting());
- frame_->RestoreContextRegister();
- frame_->Push(&result);
- }
- }
- } else {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is not global
- // ----------------------------------
- // Load the function.
- Load(function);
+void CodeGenerator::CheckStack() {
+ DeferredStackCheck* deferred = new DeferredStackCheck;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
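+ // An rsp below the stack limit triggers the deferred stack check.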
+ deferred->Branch(below);
+ deferred->BindExit();
+}
- // Pass the global proxy as the receiver.
- LoadGlobalReceiver();
- // Call the function.
- CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
+void CodeGenerator::VisitAndSpill(Statement* statement) {
+ // TODO(X64): No architecture specific code. Move to shared location.
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ Visit(statement);
+ if (frame_ != NULL) {
+ frame_->SpillAll();
}
+ set_in_spilled_code(true);
}
-void CodeGenerator::VisitCallNew(CallNew* node) {
- Comment cmnt(masm_, "[ CallNew");
+void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ VisitStatements(statements);
+ if (frame_ != NULL) {
+ frame_->SpillAll();
+ }
+ set_in_spilled_code(true);
+}
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments. This is different from ordinary calls, where the
- // actual function to call is resolved after the arguments have been
- // evaluated.
- // Compute function to call and use the global object as the
- // receiver. There is no need to use the global proxy here because
- // it will always be replaced with a newly allocated object.
- Load(node->expression());
- LoadGlobal();
+void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+ ASSERT(!in_spilled_code());
+ for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
+ Visit(statements->at(i));
+ }
+}
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = node->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
+
+void CodeGenerator::VisitBlock(Block* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ Block");
+ CodeForStatementPosition(node);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ VisitStatements(node->statements());
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
}
+ node->break_target()->Unuse();
+}
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- CodeForSourcePosition(node->position());
- Result result = frame_->CallConstructor(arg_count);
- // Replace the function on the stack with the result.
- frame_->SetElementAt(0, &result);
+
+void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals. The inevitable call
+ // will sync frame elements to memory anyway, so we do it eagerly to
+ // allow us to push the arguments directly into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+
+ __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(rsi); // The context is the first argument.
+ frame_->EmitPush(kScratchRegister);
+ frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
+ Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
+ // Return value is ignored.
}
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
- if (CheckForInlineRuntimeCall(node)) {
+void CodeGenerator::VisitDeclaration(Declaration* node) {
+ Comment cmnt(masm_, "[ Declaration");
+ Variable* var = node->proxy()->var();
+ ASSERT(var != NULL); // must have been resolved
+ Slot* slot = var->slot();
+
+ // If it was not possible to allocate the variable at compile time,
+ // we need to "declare" it at runtime to make sure it actually
+ // exists in the local context.
+ if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ // Variables with a "LOOKUP" slot were introduced as non-locals
+ // during variable resolution and must have mode DYNAMIC.
+ ASSERT(var->is_dynamic());
+ // For now, just do a runtime call. Sync the virtual frame eagerly
+ // so we can simply push the arguments into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(rsi);
+ __ movq(kScratchRegister, var->name(), RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(kScratchRegister);
+ // Declaration nodes are always introduced in one of two modes.
+ ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
+ PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
+ frame_->EmitPush(Smi::FromInt(attr));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (node->mode() == Variable::CONST) {
+ frame_->EmitPush(Heap::kTheHoleValueRootIndex);
+ } else if (node->fun() != NULL) {
+ Load(node->fun());
+ } else {
+ frame_->EmitPush(Smi::FromInt(0)); // no initial value!
+ }
+ Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
+ // Ignore the return value (declarations are statements).
return;
}
- ZoneList<Expression*>* args = node->arguments();
- Comment cmnt(masm_, "[ CallRuntime");
- Runtime::Function* function = node->function();
+ ASSERT(!var->is_global());
- if (function == NULL) {
- // Push the builtins object found in the current global object.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(), GlobalObject());
- __ movq(temp.reg(),
- FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
- frame_->Push(&temp);
+ // If we have a function or a constant, we need to initialize the variable.
+ Expression* val = NULL;
+ if (node->mode() == Variable::CONST) {
+ val = new Literal(Factory::the_hole_value());
+ } else {
+ val = node->fun(); // NULL if we don't have a function
}
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
+ if (val != NULL) {
+ {
+ // Set the initial value.
+ Reference target(this, node->proxy());
+ Load(val);
+ target.SetValue(NOT_CONST_INIT);
+ // The reference is removed from the stack (preserving TOS) when
+ // it goes out of scope.
+ }
+ // Get rid of the assigned value (declarations are statements).
+ frame_->Drop();
}
+}
- if (function == NULL) {
- // Call the JS runtime function.
- frame_->Push(node->name());
- Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
- arg_count,
- loop_nesting_);
- frame_->RestoreContextRegister();
- frame_->Push(&answer);
- } else {
- // Call the C runtime function.
- Result answer = frame_->CallRuntime(function, arg_count);
- frame_->Push(&answer);
- }
+
+void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ ExpressionStatement");
+ CodeForStatementPosition(node);
+ Expression* expression = node->expression();
+ expression->MarkAsStatement();
+ Load(expression);
+ // Remove the lingering expression result from the top of stack.
+ frame_->Drop();
}
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
- Comment cmnt(masm_, "[ UnaryOperation");
+void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "// EmptyStatement");
+ CodeForStatementPosition(node);
+ // nothing to do
+}
- Token::Value op = node->op();
- if (op == Token::NOT) {
- // Swap the true and false targets but keep the same actual label
- // as the fall through.
- destination()->Invert();
- LoadCondition(node->expression(), destination(), true);
- // Swap the labels back.
- destination()->Invert();
+void CodeGenerator::VisitIfStatement(IfStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ IfStatement");
+ // Generate different code depending on which parts of the if statement
+ // are present or not.
+ bool has_then_stm = node->HasThenStatement();
+ bool has_else_stm = node->HasElseStatement();
- } else if (op == Token::DELETE) {
- Property* property = node->expression()->AsProperty();
- if (property != NULL) {
- Load(property->obj());
- Load(property->key());
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
- frame_->Push(&answer);
- return;
- }
+ CodeForStatementPosition(node);
+ JumpTarget exit;
+ if (has_then_stm && has_else_stm) {
+ JumpTarget then;
+ JumpTarget else_;
+ ControlDestination dest(&then, &else_, true);
+ LoadCondition(node->condition(), &dest, true);
- Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
- if (variable != NULL) {
- Slot* slot = variable->slot();
- if (variable->is_global()) {
- LoadGlobal();
- frame_->Push(variable->name());
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
- CALL_FUNCTION, 2);
- frame_->Push(&answer);
- return;
+ if (dest.false_was_fall_through()) {
+ // The else target was bound, so we compile the else part first.
+ Visit(node->else_statement());
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Call the runtime to look up the context holding the named
- // variable. Sync the virtual frame eagerly so we can push the
- // arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(rsi);
- frame_->EmitPush(variable->name());
- Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
- ASSERT(context.is_register());
- frame_->EmitPush(context.reg());
- context.Unuse();
- frame_->EmitPush(variable->name());
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
- CALL_FUNCTION, 2);
- frame_->Push(&answer);
- return;
+ // We may have dangling jumps to the then part.
+ if (then.is_linked()) {
+ if (has_valid_frame()) exit.Jump();
+ then.Bind();
+ Visit(node->then_statement());
+ }
+ } else {
+ // The then target was bound, so we compile the then part first.
+ Visit(node->then_statement());
+
+ if (else_.is_linked()) {
+ if (has_valid_frame()) exit.Jump();
+ else_.Bind();
+ Visit(node->else_statement());
}
+ }
- // Default: Result of deleting non-global, not dynamically
- // introduced variables is false.
- frame_->Push(Factory::false_value());
+ } else if (has_then_stm) {
+ ASSERT(!has_else_stm);
+ JumpTarget then;
+ ControlDestination dest(&then, &exit, true);
+ LoadCondition(node->condition(), &dest, true);
+ if (dest.false_was_fall_through()) {
+ // The exit label was bound. We may have dangling jumps to the
+ // then part.
+ if (then.is_linked()) {
+ exit.Unuse();
+ exit.Jump();
+ then.Bind();
+ Visit(node->then_statement());
+ }
} else {
- // Default: Result of deleting expressions is true.
- Load(node->expression()); // may have side-effects
- frame_->SetElementAt(0, Factory::true_value());
+ // The then label was bound.
+ Visit(node->then_statement());
}
- } else if (op == Token::TYPEOF) {
- // Special case for loading the typeof expression; see comment on
- // LoadTypeofExpression().
- LoadTypeofExpression(node->expression());
- Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
- frame_->Push(&answer);
+ } else if (has_else_stm) {
+ ASSERT(!has_then_stm);
+ JumpTarget else_;
+ ControlDestination dest(&exit, &else_, false);
+ LoadCondition(node->condition(), &dest, true);
- } else if (op == Token::VOID) {
- Expression* expression = node->expression();
- if (expression && expression->AsLiteral() && (
- expression->AsLiteral()->IsTrue() ||
- expression->AsLiteral()->IsFalse() ||
- expression->AsLiteral()->handle()->IsNumber() ||
- expression->AsLiteral()->handle()->IsString() ||
- expression->AsLiteral()->handle()->IsJSRegExp() ||
- expression->AsLiteral()->IsNull())) {
- // Omit evaluating the value of the primitive literal.
- // It will be discarded anyway, and can have no side effect.
- frame_->Push(Factory::undefined_value());
+ if (dest.true_was_fall_through()) {
+ // The exit label was bound. We may have dangling jumps to the
+ // else part.
+ if (else_.is_linked()) {
+ exit.Unuse();
+ exit.Jump();
+ else_.Bind();
+ Visit(node->else_statement());
+ }
} else {
- Load(node->expression());
- frame_->SetElementAt(0, Factory::undefined_value());
+ // The else label was bound.
+ Visit(node->else_statement());
}
} else {
- bool can_overwrite =
- (node->expression()->AsBinaryOperation() != NULL &&
- node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- bool no_negative_zero = node->expression()->no_negative_zero();
- Load(node->expression());
- switch (op) {
- case Token::NOT:
- case Token::DELETE:
- case Token::TYPEOF:
- UNREACHABLE(); // handled above
- break;
+ ASSERT(!has_then_stm && !has_else_stm);
+ // We only care about the condition's side effects (not its value
+ // or control flow effect). LoadCondition is called without
+ // forcing control flow.
+ ControlDestination dest(&exit, &exit, true);
+ LoadCondition(node->condition(), &dest, false);
+ if (!dest.is_used()) {
+ // We got a value on the frame rather than (or in addition to)
+ // control flow.
+ frame_->Drop();
+ }
+ }
- case Token::SUB: {
- GenericUnaryOpStub stub(
- Token::SUB,
- overwrite,
- no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
- Result operand = frame_->Pop();
- Result answer = frame_->CallStub(&stub, &operand);
- answer.set_type_info(TypeInfo::Number());
- frame_->Push(&answer);
- break;
- }
+ if (exit.is_linked()) {
+ exit.Bind();
+ }
+}
- case Token::BIT_NOT: {
- // Smi check.
- JumpTarget smi_label;
- JumpTarget continue_label;
- Result operand = frame_->Pop();
- operand.ToRegister();
- Condition is_smi = masm_->CheckSmi(operand.reg());
- smi_label.Branch(is_smi, &operand);
+void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ ContinueStatement");
+ CodeForStatementPosition(node);
+ node->target()->continue_target()->Jump();
+}
- GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
- Result answer = frame_->CallStub(&stub, &operand);
- continue_label.Jump(&answer);
- smi_label.Bind(&answer);
- answer.ToRegister();
- frame_->Spill(answer.reg());
- __ SmiNot(answer.reg(), answer.reg());
- continue_label.Bind(&answer);
- answer.set_type_info(TypeInfo::Smi());
- frame_->Push(&answer);
- break;
- }
+void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ BreakStatement");
+ CodeForStatementPosition(node);
+ node->target()->break_target()->Jump();
+}
- case Token::ADD: {
- // Smi check.
- JumpTarget continue_label;
- Result operand = frame_->Pop();
- TypeInfo operand_info = operand.type_info();
- operand.ToRegister();
- Condition is_smi = masm_->CheckSmi(operand.reg());
- continue_label.Branch(is_smi, &operand);
- frame_->Push(&operand);
- Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
- CALL_FUNCTION, 1);
- continue_label.Bind(&answer);
- if (operand_info.IsSmi()) {
- answer.set_type_info(TypeInfo::Smi());
- } else if (operand_info.IsInteger32()) {
- answer.set_type_info(TypeInfo::Integer32());
- } else {
- answer.set_type_info(TypeInfo::Number());
- }
- frame_->Push(&answer);
- break;
- }
- default:
- UNREACHABLE();
+void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ ReturnStatement");
+
+ CodeForStatementPosition(node);
+ Load(node->expression());
+ Result return_value = frame_->Pop();
+ if (function_return_is_shadowed_) {
+ function_return_.Jump(&return_value);
+ } else {
+ frame_->PrepareForReturn();
+ if (function_return_.is_bound()) {
+ // If the function return label is already bound we reuse the
+ // code by jumping to the return site.
+ function_return_.Jump(&return_value);
+ } else {
+ function_return_.Bind(&return_value);
+ GenerateReturnSequence(&return_value);
}
}
}
-// The value in dst was optimistically incremented or decremented.
-// The result overflowed or was not smi tagged. Call into the runtime
-// to convert the argument to a number, and call the specialized add
-// or subtract stub. The result is left in dst.
-class DeferredPrefixCountOperation: public DeferredCode {
- public:
- DeferredPrefixCountOperation(Register dst,
- bool is_increment,
- TypeInfo input_type)
- : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
- set_comment("[ DeferredCountOperation");
+void CodeGenerator::GenerateReturnSequence(Result* return_value) {
+ // The return value is a live (but not currently reference counted)
+ // reference to rax. This is safe because the current frame does not
+ // contain a reference to rax (it is prepared for the return by spilling
+ // all registers).
+ if (FLAG_trace) {
+ frame_->Push(return_value);
+ *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
}
+ return_value->ToRegister(rax);
- virtual void Generate();
+ // Add a label for checking the size of the code used for returning.
+#ifdef DEBUG
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+#endif
- private:
- Register dst_;
- bool is_increment_;
- TypeInfo input_type_;
-};
+  // Leave the frame and return, popping the arguments and the
+  // receiver.
+ frame_->Exit();
+ masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Add padding that will be overwritten by a debugger breakpoint.
+ // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
+ // with length 7 (3 + 1 + 3).
+ const int kPadding = Assembler::kJSReturnSequenceLength - 7;
+ for (int i = 0; i < kPadding; ++i) {
+ masm_->int3();
+ }
+ // Check that the size of the code used for returning matches what is
+ // expected by the debugger.
+ ASSERT_EQ(Assembler::kJSReturnSequenceLength,
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+#endif
+ DeleteFrame();
+}
-void DeferredPrefixCountOperation::Generate() {
- Register left;
- if (input_type_.IsNumber()) {
- left = dst_;
+void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ WithEnterStatement");
+ CodeForStatementPosition(node);
+ Load(node->expression());
+ Result context;
+ if (node->is_catch_block()) {
+ context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
} else {
- __ push(dst_);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- left = rax;
+ context = frame_->CallRuntime(Runtime::kPushContext, 1);
}
- GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS,
- TypeInfo::Number());
- stub.GenerateCall(masm_, left, Smi::FromInt(1));
+ // Update context local.
+ frame_->SaveContextRegister();
- if (!dst_.is(rax)) __ movq(dst_, rax);
+ // Verify that the runtime call result and rsi agree.
+ if (FLAG_debug_code) {
+ __ cmpq(context.reg(), rsi);
+ __ Assert(equal, "Runtime::NewContext should end up in rsi");
+ }
}
-// The value in dst was optimistically incremented or decremented.
-// The result overflowed or was not smi tagged. Call into the runtime
-// to convert the argument to a number. Update the original value in
-// old. Call the specialized add or subtract stub. The result is
-// left in dst.
-class DeferredPostfixCountOperation: public DeferredCode {
- public:
- DeferredPostfixCountOperation(Register dst,
- Register old,
- bool is_increment,
- TypeInfo input_type)
- : dst_(dst),
- old_(old),
- is_increment_(is_increment),
- input_type_(input_type) {
- set_comment("[ DeferredCountOperation");
- }
+void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ WithExitStatement");
+ CodeForStatementPosition(node);
+ // Pop context.
+ __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX));
+ // Update context local.
+ frame_->SaveContextRegister();
+}
+
+
+void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+ // TODO(X64): This code is completely generic and should be moved somewhere
+ // where it can be shared between architectures.
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ SwitchStatement");
+ CodeForStatementPosition(node);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+ // Compile the switch value.
+ Load(node->tag());
+
+ ZoneList<CaseClause*>* cases = node->cases();
+ int length = cases->length();
+ CaseClause* default_clause = NULL;
+
+ JumpTarget next_test;
+ // Compile the case label expressions and comparisons. Exit early
+ // if a comparison is unconditionally true. The target next_test is
+ // bound before the loop in order to indicate control flow to the
+ // first comparison.
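+  // The switch value stays on the frame throughout and is Dup'ed for
+  // each comparison, so e.g. "switch (f()) { case g(): case h(): }"
+  // evaluates f once, then g and h in source order until one matches.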
+ next_test.Bind();
+ for (int i = 0; i < length && !next_test.is_unused(); i++) {
+ CaseClause* clause = cases->at(i);
+ // The default is not a test, but remember it for later.
+ if (clause->is_default()) {
+ default_clause = clause;
+ continue;
+ }
+
+ Comment cmnt(masm_, "[ Case comparison");
+ // We recycle the same target next_test for each test. Bind it if
+ // the previous test has not done so and then unuse it for the
+ // loop.
+ if (next_test.is_linked()) {
+ next_test.Bind();
+ }
+ next_test.Unuse();
- virtual void Generate();
+ // Duplicate the switch value.
+ frame_->Dup();
- private:
- Register dst_;
- Register old_;
- bool is_increment_;
- TypeInfo input_type_;
-};
+ // Compile the label expression.
+ Load(clause->label());
+ // Compare and branch to the body if true or the next test if
+ // false. Prefer the next test as a fall through.
+ ControlDestination dest(clause->body_target(), &next_test, false);
+ Comparison(node, equal, true, &dest);
-void DeferredPostfixCountOperation::Generate() {
- Register left;
- if (input_type_.IsNumber()) {
- __ push(dst_); // Save the input to use as the old value.
- left = dst_;
- } else {
- __ push(dst_);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- __ push(rax); // Save the result of ToNumber to use as the old value.
- left = rax;
+ // If the comparison fell through to the true target, jump to the
+ // actual body.
+ if (dest.true_was_fall_through()) {
+ clause->body_target()->Unuse();
+ clause->body_target()->Jump();
+ }
}
- GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS,
- TypeInfo::Number());
- stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
- if (!dst_.is(rax)) __ movq(dst_, rax);
- __ pop(old_);
-}
+ // If there was control flow to a next test from the last one
+ // compiled, compile a jump to the default or break target.
+ if (!next_test.is_unused()) {
+ if (next_test.is_linked()) {
+ next_test.Bind();
+ }
+ // Drop the switch value.
+ frame_->Drop();
+ if (default_clause != NULL) {
+ default_clause->body_target()->Jump();
+ } else {
+ node->break_target()->Jump();
+ }
+ }
+ // The last instruction emitted was a jump, either to the default
+ // clause or the break target, or else to a case body from the loop
+ // that compiles the tests.
+ ASSERT(!has_valid_frame());
+ // Compile case bodies as needed.
+ for (int i = 0; i < length; i++) {
+ CaseClause* clause = cases->at(i);
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
- Comment cmnt(masm_, "[ CountOperation");
+ // There are two ways to reach the body: from the corresponding
+ // test or as the fall through of the previous body.
+ if (clause->body_target()->is_linked() || has_valid_frame()) {
+ if (clause->body_target()->is_linked()) {
+ if (has_valid_frame()) {
+          // If we have both a jump to the test and a fall through, put
+          // a jump on the fall-through path so that it skips the drop of
+          // the switch value done on the test path. The exception is the
+          // default clause, which has already had the switch value
+          // dropped.
+ if (clause->is_default()) {
+ clause->body_target()->Bind();
+ } else {
+ JumpTarget body;
+ body.Jump();
+ clause->body_target()->Bind();
+ frame_->Drop();
+ body.Bind();
+ }
+ } else {
+ // No fall through to worry about.
+ clause->body_target()->Bind();
+ if (!clause->is_default()) {
+ frame_->Drop();
+ }
+ }
+ } else {
+ // Otherwise, we have only fall through.
+ ASSERT(has_valid_frame());
+ }
- bool is_postfix = node->is_postfix();
- bool is_increment = node->op() == Token::INC;
+ // We are now prepared to compile the body.
+ Comment cmnt(masm_, "[ Case body");
+ VisitStatements(clause->statements());
+ }
+ clause->body_target()->Unuse();
+ }
- Variable* var = node->expression()->AsVariableProxy()->AsVariable();
- bool is_const = (var != NULL && var->mode() == Variable::CONST);
+ // We may not have a valid frame here so bind the break target only
+ // if needed.
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ node->break_target()->Unuse();
+}
- // Postfix operations need a stack slot under the reference to hold
- // the old value while the new value is being stored. This is so that
- // in the case that storing the new value requires a call, the old
- // value will be in the frame to be spilled.
- if (is_postfix) frame_->Push(Smi::FromInt(0));
- // A constant reference is not saved to, so the reference is not a
- // compound assignment reference.
- { Reference target(this, node->expression(), !is_const);
- if (target.is_illegal()) {
- // Spoof the virtual frame to have the expected height (one higher
- // than on entry).
- if (!is_postfix) frame_->Push(Smi::FromInt(0));
- return;
- }
- target.TakeValue();
+void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ DoWhileStatement");
+ CodeForStatementPosition(node);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ JumpTarget body(JumpTarget::BIDIRECTIONAL);
+ IncrementLoopNesting();
- Result new_value = frame_->Pop();
- new_value.ToRegister();
+ ConditionAnalysis info = AnalyzeCondition(node->cond());
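+  // AnalyzeCondition recognizes literal conditions, so e.g.
+  // "do { ... } while (true)" is ALWAYS_TRUE and "do { ... } while (false)"
+  // is ALWAYS_FALSE; everything else is DONT_KNOW.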
+ // Label the top of the loop for the backward jump if necessary.
+ switch (info) {
+ case ALWAYS_TRUE:
+ // Use the continue target.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ break;
+ case ALWAYS_FALSE:
+ // No need to label it.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ break;
+ case DONT_KNOW:
+ // Continue is the test, so use the backward body target.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ body.Bind();
+ break;
+ }
- Result old_value; // Only allocated in the postfix case.
- if (is_postfix) {
- // Allocate a temporary to preserve the old value.
- old_value = allocator_->Allocate();
- ASSERT(old_value.is_valid());
- __ movq(old_value.reg(), new_value.reg());
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ Visit(node->body());
- // The return value for postfix operations is ToNumber(input).
- // Keep more precise type info if the input is some kind of
- // number already. If the input is not a number we have to wait
- // for the deferred code to convert it.
- if (new_value.type_info().IsNumber()) {
- old_value.set_type_info(new_value.type_info());
+ // Compile the test.
+ switch (info) {
+ case ALWAYS_TRUE:
+ // If control flow can fall off the end of the body, jump back
+ // to the top and bind the break target at the exit.
+ if (has_valid_frame()) {
+ node->continue_target()->Jump();
}
- }
- // Ensure the new value is writable.
- frame_->Spill(new_value.reg());
-
- DeferredCode* deferred = NULL;
- if (is_postfix) {
- deferred = new DeferredPostfixCountOperation(new_value.reg(),
- old_value.reg(),
- is_increment,
- new_value.type_info());
- } else {
- deferred = new DeferredPrefixCountOperation(new_value.reg(),
- is_increment,
- new_value.type_info());
- }
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ break;
+ case ALWAYS_FALSE:
+ // We may have had continues or breaks in the body.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ break;
+ case DONT_KNOW:
+ // We have to compile the test expression if it can be reached by
+ // control flow falling out of the body or via continue.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+ if (has_valid_frame()) {
+ Comment cmnt(masm_, "[ DoWhileCondition");
+ CodeForDoWhileConditionPosition(node);
+ ControlDestination dest(&body, node->break_target(), false);
+ LoadCondition(node->cond(), &dest, true);
+ }
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ break;
+ }
- if (new_value.is_smi()) {
- if (FLAG_debug_code) { __ AbortIfNotSmi(new_value.reg()); }
- } else {
- __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
- }
- if (is_increment) {
- __ SmiAddConstant(new_value.reg(),
- new_value.reg(),
- Smi::FromInt(1),
- deferred->entry_label());
- } else {
- __ SmiSubConstant(new_value.reg(),
- new_value.reg(),
- Smi::FromInt(1),
- deferred->entry_label());
- }
- deferred->BindExit();
+ DecrementLoopNesting();
+ node->continue_target()->Unuse();
+ node->break_target()->Unuse();
+}
- // Postfix count operations return their input converted to
- // number. The case when the input is already a number is covered
- // above in the allocation code for old_value.
- if (is_postfix && !new_value.type_info().IsNumber()) {
- old_value.set_type_info(TypeInfo::Number());
- }
- new_value.set_type_info(TypeInfo::Number());
+void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ WhileStatement");
+ CodeForStatementPosition(node);
- // Postfix: store the old value in the allocated slot under the
- // reference.
- if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
+ // If the condition is always false and has no side effects, we do not
+ // need to compile anything.
+ ConditionAnalysis info = AnalyzeCondition(node->cond());
+ if (info == ALWAYS_FALSE) return;
- frame_->Push(&new_value);
- // Non-constant: update the reference.
- if (!is_const) target.SetValue(NOT_CONST_INIT);
+ // Do not duplicate conditions that may have function literal
+ // subexpressions. This can cause us to compile the function literal
+ // twice.
+ bool test_at_bottom = !node->may_have_function_literal();
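+  // For example, "while (p || function () {}) { ... }" would otherwise
+  // compile the function literal once for the test at the top and once
+  // for the copy at the bottom.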
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ IncrementLoopNesting();
+ JumpTarget body;
+ if (test_at_bottom) {
+ body.set_direction(JumpTarget::BIDIRECTIONAL);
}
- // Postfix: drop the new value and use the old.
- if (is_postfix) frame_->Drop();
-}
+ // Based on the condition analysis, compile the test as necessary.
+ switch (info) {
+ case ALWAYS_TRUE:
+ // We will not compile the test expression. Label the top of the
+ // loop with the continue target.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ break;
+ case DONT_KNOW: {
+ if (test_at_bottom) {
+ // Continue is the test at the bottom, no need to label the test
+ // at the top. The body is a backward target.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ } else {
+ // Label the test at the top as the continue target. The body
+ // is a forward-only target.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ }
+ // Compile the test with the body as the true target and preferred
+ // fall-through and with the break target as the false target.
+ ControlDestination dest(&body, node->break_target(), true);
+ LoadCondition(node->cond(), &dest, true);
+ if (dest.false_was_fall_through()) {
+ // If we got the break target as fall-through, the test may have
+ // been unconditionally false (if there are no jumps to the
+ // body).
+ if (!body.is_linked()) {
+ DecrementLoopNesting();
+ return;
+ }
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
- // According to ECMA-262 section 11.11, page 58, the binary logical
- // operators must yield the result of one of the two expressions
- // before any ToBoolean() conversions. This means that the value
- // produced by a && or || operator is not necessarily a boolean.
+ // Otherwise, jump around the body on the fall through and then
+ // bind the body target.
+ node->break_target()->Unuse();
+ node->break_target()->Jump();
+ body.Bind();
+ }
+ break;
+ }
+ case ALWAYS_FALSE:
+ UNREACHABLE();
+ break;
+ }
- // NOTE: If the left hand side produces a materialized value (not
- // control flow), we force the right hand side to do the same. This
- // is necessary because we assume that if we get control flow on the
- // last path out of an expression we got it on all paths.
- if (node->op() == Token::AND) {
- JumpTarget is_true;
- ControlDestination dest(&is_true, destination()->false_target(), true);
- LoadCondition(node->left(), &dest, false);
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ Visit(node->body());
- if (dest.false_was_fall_through()) {
- // The current false target was used as the fall-through. If
- // there are no dangling jumps to is_true then the left
- // subexpression was unconditionally false. Otherwise we have
- // paths where we do have to evaluate the right subexpression.
- if (is_true.is_linked()) {
- // We need to compile the right subexpression. If the jump to
- // the current false target was a forward jump then we have a
- // valid frame, we have just bound the false target, and we
- // have to jump around the code for the right subexpression.
+ // Based on the condition analysis, compile the backward jump as
+ // necessary.
+ switch (info) {
+ case ALWAYS_TRUE:
+ // The loop body has been labeled with the continue target.
+ if (has_valid_frame()) {
+ node->continue_target()->Jump();
+ }
+ break;
+ case DONT_KNOW:
+ if (test_at_bottom) {
+ // If we have chosen to recompile the test at the bottom,
+ // then it is the continue target.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
if (has_valid_frame()) {
- destination()->false_target()->Unuse();
- destination()->false_target()->Jump();
+ // The break target is the fall-through (body is a backward
+ // jump from here and thus an invalid fall-through).
+ ControlDestination dest(&body, node->break_target(), false);
+ LoadCondition(node->cond(), &dest, true);
}
- is_true.Bind();
- // The left subexpression compiled to control flow, so the
- // right one is free to do so as well.
- LoadCondition(node->right(), destination(), false);
} else {
- // We have actually just jumped to or bound the current false
- // target but the current control destination is not marked as
- // used.
- destination()->Use(false);
+ // If we have chosen not to recompile the test at the
+ // bottom, jump back to the one at the top.
+ if (has_valid_frame()) {
+ node->continue_target()->Jump();
+ }
}
+ break;
+ case ALWAYS_FALSE:
+ UNREACHABLE();
+ break;
+ }
- } else if (dest.is_used()) {
- // The left subexpression compiled to control flow (and is_true
- // was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), destination(), false);
-
- } else {
- // We have a materialized value on the frame, so we exit with
- // one on all paths. There are possibly also jumps to is_true
- // from nested subexpressions.
- JumpTarget pop_and_continue;
- JumpTarget exit;
+ // The break target may be already bound (by the condition), or there
+ // may not be a valid frame. Bind it only if needed.
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ DecrementLoopNesting();
+}
- // Avoid popping the result if it converts to 'false' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- //
- // Duplicate the TOS value. The duplicate will be popped by
- // ToBoolean.
- frame_->Dup();
- ControlDestination dest(&pop_and_continue, &exit, true);
- ToBoolean(&dest);
- // Pop the result of evaluating the first part.
- frame_->Drop();
+void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) {
+ ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER);
+ if (slot->type() == Slot::LOCAL) {
+ frame_->SetTypeForLocalAt(slot->index(), info);
+ } else {
+ frame_->SetTypeForParamAt(slot->index(), info);
+ }
+ if (FLAG_debug_code && info.IsSmi()) {
+ if (slot->type() == Slot::LOCAL) {
+ frame_->PushLocalAt(slot->index());
+ } else {
+ frame_->PushParameterAt(slot->index());
+ }
+ Result var = frame_->Pop();
+ var.ToRegister();
+ __ AbortIfNotSmi(var.reg());
+ }
+}
- // Compile right side expression.
- is_true.Bind();
- Load(node->right());
- // Exit (always with a materialized value).
- exit.Bind();
- }
+void CodeGenerator::GenerateFastSmiLoop(ForStatement* node) {
+  // A fast smi loop is a for loop whose initializer is a simple
+  // assignment of a smi to a stack variable, whose test is a simple
+  // comparison of that variable against a smi constant, whose step is an
+  // increment/decrement of the variable, and whose body does not modify
+  // the variable. This guarantees that the variable is always a smi.
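+  // For illustration, "for (var i = 0; i < 100; i++) { sum += i; }"
+  // matches this pattern, provided the body does not assign to i.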
- } else {
- ASSERT(node->op() == Token::OR);
- JumpTarget is_false;
- ControlDestination dest(destination()->true_target(), &is_false, false);
- LoadCondition(node->left(), &dest, false);
+ Variable* loop_var = node->loop_variable();
+ Smi* initial_value = *Handle<Smi>::cast(node->init()
+ ->StatementAsSimpleAssignment()->value()->AsLiteral()->handle());
+ Smi* limit_value = *Handle<Smi>::cast(
+ node->cond()->AsCompareOperation()->right()->AsLiteral()->handle());
+ Token::Value compare_op =
+ node->cond()->AsCompareOperation()->op();
+ bool increments =
+ node->next()->StatementAsCountOperation()->op() == Token::INC;
- if (dest.true_was_fall_through()) {
- // The current true target was used as the fall-through. If
- // there are no dangling jumps to is_false then the left
- // subexpression was unconditionally true. Otherwise we have
- // paths where we do have to evaluate the right subexpression.
- if (is_false.is_linked()) {
- // We need to compile the right subexpression. If the jump to
- // the current true target was a forward jump then we have a
- // valid frame, we have just bound the true target, and we
- // have to jump around the code for the right subexpression.
- if (has_valid_frame()) {
- destination()->true_target()->Unuse();
- destination()->true_target()->Jump();
- }
- is_false.Bind();
- // The left subexpression compiled to control flow, so the
- // right one is free to do so as well.
- LoadCondition(node->right(), destination(), false);
- } else {
- // We have just jumped to or bound the current true target but
- // the current control destination is not marked as used.
- destination()->Use(true);
- }
+ // Check that the condition isn't initially false.
+ bool initially_false = false;
+ int initial_int_value = initial_value->value();
+ int limit_int_value = limit_value->value();
+ switch (compare_op) {
+ case Token::LT:
+ initially_false = initial_int_value >= limit_int_value;
+ break;
+ case Token::LTE:
+ initially_false = initial_int_value > limit_int_value;
+ break;
+ case Token::GT:
+ initially_false = initial_int_value <= limit_int_value;
+ break;
+ case Token::GTE:
+ initially_false = initial_int_value < limit_int_value;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ if (initially_false) return;
- } else if (dest.is_used()) {
- // The left subexpression compiled to control flow (and is_false
- // was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), destination(), false);
+ // Only check loop condition at the end.
- } else {
- // We have a materialized value on the frame, so we exit with
- // one on all paths. There are possibly also jumps to is_false
- // from nested subexpressions.
- JumpTarget pop_and_continue;
- JumpTarget exit;
+ Visit(node->init());
- // Avoid popping the result if it converts to 'true' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- //
- // Duplicate the TOS value. The duplicate will be popped by
- // ToBoolean.
- frame_->Dup();
- ControlDestination dest(&exit, &pop_and_continue, false);
- ToBoolean(&dest);
+ JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+ // Set type and stack height of BreakTargets.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- // Pop the result of evaluating the first part.
- frame_->Drop();
+ IncrementLoopNesting();
+ loop.Bind();
- // Compile right side expression.
- is_false.Bind();
- Load(node->right());
+ // Set number type of the loop variable to smi.
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
- // Exit (always with a materialized value).
- exit.Bind();
- }
- }
-}
+ SetTypeForStackSlot(loop_var->slot(), TypeInfo::Smi());
+ Visit(node->body());
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
- Comment cmnt(masm_, "[ BinaryOperation");
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
- if (node->op() == Token::AND || node->op() == Token::OR) {
- GenerateLogicalBooleanOperation(node);
- } else {
- // NOTE: The code below assumes that the slow cases (calls to runtime)
- // never return a constant/immutable object.
- OverwriteMode overwrite_mode = NO_OVERWRITE;
- if (node->left()->AsBinaryOperation() != NULL &&
- node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
- overwrite_mode = OVERWRITE_LEFT;
- } else if (node->right()->AsBinaryOperation() != NULL &&
- node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
- overwrite_mode = OVERWRITE_RIGHT;
+ if (has_valid_frame()) {
+ CodeForStatementPosition(node);
+ Slot* loop_var_slot = loop_var->slot();
+ if (loop_var_slot->type() == Slot::LOCAL) {
+ frame_->PushLocalAt(loop_var_slot->index());
+ } else {
+ ASSERT(loop_var_slot->type() == Slot::PARAMETER);
+ frame_->PushParameterAt(loop_var_slot->index());
+ }
+ Result loop_var_result = frame_->Pop();
+ if (!loop_var_result.is_register()) {
+ loop_var_result.ToRegister();
}
- if (node->left()->IsTrivial()) {
- Load(node->right());
- Result right = frame_->Pop();
- frame_->Push(node->left());
- frame_->Push(&right);
+ if (increments) {
+ __ SmiAddConstant(loop_var_result.reg(),
+ loop_var_result.reg(),
+ Smi::FromInt(1));
} else {
- Load(node->left());
- Load(node->right());
+ __ SmiSubConstant(loop_var_result.reg(),
+ loop_var_result.reg(),
+ Smi::FromInt(1));
}
- GenericBinaryOperation(node, overwrite_mode);
+
+ {
+ __ SmiCompare(loop_var_result.reg(), limit_value);
+ Condition condition;
+ switch (compare_op) {
+ case Token::LT:
+ condition = less;
+ break;
+ case Token::LTE:
+ condition = less_equal;
+ break;
+ case Token::GT:
+ condition = greater;
+ break;
+ case Token::GTE:
+ condition = greater_equal;
+ break;
+ default:
+ condition = never;
+ UNREACHABLE();
+ }
+ loop.Branch(condition);
+ }
+ loop_var_result.Unuse();
+ }
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
}
+ DecrementLoopNesting();
}
+void CodeGenerator::VisitForStatement(ForStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ ForStatement");
+ CodeForStatementPosition(node);
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
- Comment cmnt(masm_, "[ CompareOperation");
+ if (node->is_fast_smi_loop()) {
+ GenerateFastSmiLoop(node);
+ return;
+ }
- // Get the expressions from the node.
- Expression* left = node->left();
- Expression* right = node->right();
- Token::Value op = node->op();
- // To make typeof testing for natives implemented in JavaScript really
- // efficient, we generate special code for expressions of the form:
- // 'typeof <expression> == <string>'.
- UnaryOperation* operation = left->AsUnaryOperation();
- if ((op == Token::EQ || op == Token::EQ_STRICT) &&
- (operation != NULL && operation->op() == Token::TYPEOF) &&
- (right->AsLiteral() != NULL &&
- right->AsLiteral()->handle()->IsString())) {
- Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
+ // Compile the init expression if present.
+ if (node->init() != NULL) {
+ Visit(node->init());
+ }
- // Load the operand and move it to a register.
- LoadTypeofExpression(operation->expression());
- Result answer = frame_->Pop();
- answer.ToRegister();
+ // If the condition is always false and has no side effects, we do not
+ // need to compile anything else.
+ ConditionAnalysis info = AnalyzeCondition(node->cond());
+ if (info == ALWAYS_FALSE) return;
- if (check->Equals(Heap::number_symbol())) {
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->true_target()->Branch(is_smi);
- frame_->Spill(answer.reg());
- __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
- answer.Unuse();
- destination()->Split(equal);
+ // Do not duplicate conditions that may have function literal
+ // subexpressions. This can cause us to compile the function literal
+ // twice.
+ bool test_at_bottom = !node->may_have_function_literal();
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ IncrementLoopNesting();
- } else if (check->Equals(Heap::string_symbol())) {
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->false_target()->Branch(is_smi);
+ // Target for backward edge if no test at the bottom, otherwise
+ // unused.
+ JumpTarget loop(JumpTarget::BIDIRECTIONAL);
- // It can be an undetectable string object.
- __ movq(kScratchRegister,
- FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- destination()->false_target()->Branch(not_zero);
- __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
- answer.Unuse();
- destination()->Split(below); // Unsigned byte comparison needed.
+ // Target for backward edge if there is a test at the bottom,
+ // otherwise used as target for test at the top.
+ JumpTarget body;
+ if (test_at_bottom) {
+ body.set_direction(JumpTarget::BIDIRECTIONAL);
+ }
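+  // Note that "continue" must reach the update expression when there is
+  // one: e.g. in "for (;;) body" continue jumps straight back to the top,
+  // while in "for (;; i++) body" it must run i++ first, which is why the
+  // cases below label the loop top differently.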
- } else if (check->Equals(Heap::boolean_symbol())) {
- __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
- destination()->true_target()->Branch(equal);
- __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
- answer.Unuse();
- destination()->Split(equal);
+ // Based on the condition analysis, compile the test as necessary.
+ switch (info) {
+ case ALWAYS_TRUE:
+ // We will not compile the test expression. Label the top of the
+ // loop.
+ if (node->next() == NULL) {
+ // Use the continue target if there is no update expression.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ } else {
+ // Otherwise use the backward loop target.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ loop.Bind();
+ }
+ break;
+ case DONT_KNOW: {
+ if (test_at_bottom) {
+ // Continue is either the update expression or the test at the
+ // bottom, no need to label the test at the top.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ } else if (node->next() == NULL) {
+ // We are not recompiling the test at the bottom and there is no
+ // update expression.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ } else {
+ // We are not recompiling the test at the bottom and there is an
+ // update expression.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ loop.Bind();
+ }
- } else if (check->Equals(Heap::undefined_symbol())) {
- __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
- destination()->true_target()->Branch(equal);
+ // Compile the test with the body as the true target and preferred
+ // fall-through and with the break target as the false target.
+ ControlDestination dest(&body, node->break_target(), true);
+ LoadCondition(node->cond(), &dest, true);
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->false_target()->Branch(is_smi);
+ if (dest.false_was_fall_through()) {
+ // If we got the break target as fall-through, the test may have
+ // been unconditionally false (if there are no jumps to the
+ // body).
+ if (!body.is_linked()) {
+ DecrementLoopNesting();
+ return;
+ }
- // It can be an undetectable object.
- __ movq(kScratchRegister,
- FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- answer.Unuse();
- destination()->Split(not_zero);
+ // Otherwise, jump around the body on the fall through and then
+ // bind the body target.
+ node->break_target()->Unuse();
+ node->break_target()->Jump();
+ body.Bind();
+ }
+ break;
+ }
+ case ALWAYS_FALSE:
+ UNREACHABLE();
+ break;
+ }
- } else if (check->Equals(Heap::function_symbol())) {
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->false_target()->Branch(is_smi);
- frame_->Spill(answer.reg());
- __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
- destination()->true_target()->Branch(equal);
- // Regular expressions are callable so typeof == 'function'.
- __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
- answer.Unuse();
- destination()->Split(equal);
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
- } else if (check->Equals(Heap::object_symbol())) {
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->false_target()->Branch(is_smi);
- __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
- destination()->true_target()->Branch(equal);
+ Visit(node->body());
- // Regular expressions are typeof == 'function', not 'object'.
- __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
- destination()->false_target()->Branch(equal);
+ // If there is an update expression, compile it if necessary.
+ if (node->next() != NULL) {
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
- // It can be an undetectable object.
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- destination()->false_target()->Branch(not_zero);
- __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
- destination()->false_target()->Branch(below);
- __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
- answer.Unuse();
- destination()->Split(below_equal);
- } else {
- // Uncommon case: typeof testing against a string literal that is
- // never returned from the typeof operator.
- answer.Unuse();
- destination()->Goto(false);
+ // Control can reach the update by falling out of the body or by a
+ // continue.
+ if (has_valid_frame()) {
+ // Record the source position of the statement as this code which
+ // is after the code for the body actually belongs to the loop
+ // statement and not the body.
+ CodeForStatementPosition(node);
+ Visit(node->next());
}
- return;
}
- Condition cc = no_condition;
- bool strict = false;
- switch (op) {
- case Token::EQ_STRICT:
- strict = true;
- // Fall through
- case Token::EQ:
- cc = equal;
- break;
- case Token::LT:
- cc = less;
- break;
- case Token::GT:
- cc = greater;
- break;
- case Token::LTE:
- cc = less_equal;
+ // Based on the condition analysis, compile the backward jump as
+ // necessary.
+ switch (info) {
+ case ALWAYS_TRUE:
+ if (has_valid_frame()) {
+ if (node->next() == NULL) {
+ node->continue_target()->Jump();
+ } else {
+ loop.Jump();
+ }
+ }
break;
- case Token::GTE:
- cc = greater_equal;
+ case DONT_KNOW:
+ if (test_at_bottom) {
+ if (node->continue_target()->is_linked()) {
+ // We can have dangling jumps to the continue target if there
+ // was no update expression.
+ node->continue_target()->Bind();
+ }
+ // Control can reach the test at the bottom by falling out of
+ // the body, by a continue in the body, or from the update
+ // expression.
+ if (has_valid_frame()) {
+ // The break target is the fall-through (body is a backward
+ // jump from here).
+ ControlDestination dest(&body, node->break_target(), false);
+ LoadCondition(node->cond(), &dest, true);
+ }
+ } else {
+ // Otherwise, jump back to the test at the top.
+ if (has_valid_frame()) {
+ if (node->next() == NULL) {
+ node->continue_target()->Jump();
+ } else {
+ loop.Jump();
+ }
+ }
+ }
break;
- case Token::IN: {
- Load(left);
- Load(right);
- Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
- frame_->Push(&answer); // push the result
- return;
- }
- case Token::INSTANCEOF: {
- Load(left);
- Load(right);
- InstanceofStub stub;
- Result answer = frame_->CallStub(&stub, 2);
- answer.ToRegister();
- __ testq(answer.reg(), answer.reg());
- answer.Unuse();
- destination()->Split(zero);
- return;
- }
- default:
+ case ALWAYS_FALSE:
UNREACHABLE();
+ break;
}
- if (left->IsTrivial()) {
- Load(right);
- Result right_result = frame_->Pop();
- frame_->Push(left);
- frame_->Push(&right_result);
- } else {
- Load(left);
- Load(right);
+ // The break target may be already bound (by the condition), or there
+ // may not be a valid frame. Bind it only if needed.
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
}
-
- Comparison(node, cc, strict, destination());
+ DecrementLoopNesting();
}
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
- frame_->PushFunction();
-}
+void CodeGenerator::VisitForInStatement(ForInStatement* node) {
+ ASSERT(!in_spilled_code());
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ ForInStatement");
+ CodeForStatementPosition(node);
+ JumpTarget primitive;
+ JumpTarget jsobject;
+ JumpTarget fixed_array;
+ JumpTarget entry(JumpTarget::BIDIRECTIONAL);
+ JumpTarget end_del_check;
+ JumpTarget exit;
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
+ // Get the object to enumerate over (converted to JSObject).
+ LoadAndSpill(node->enumerable());
- // ArgumentsAccessStub expects the key in rdx and the formal
- // parameter count in rax.
- Load(args->at(0));
- Result key = frame_->Pop();
- // Explicitly create a constant result.
- Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
- // Call the shared stub to get to arguments[key].
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- Result result = frame_->CallStub(&stub, &key, &count);
- frame_->Push(&result);
-}
+  // Both SpiderMonkey and kjs ignore null and undefined, in contrast
+  // to the specification; section 12.6.4 mandates a call to ToObject.
+ frame_->EmitPop(rax);
+ // rax: value to be iterated over
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ exit.Branch(equal);
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ exit.Branch(equal);
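+  // As a result, e.g. "for (var p in null) {}" and
+  // "for (var p in undefined) {}" execute their bodies zero times.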
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition is_smi = masm_->CheckSmi(value.reg());
- destination()->false_target()->Branch(is_smi);
- // It is a heap object - get map.
- // Check if the object is a JS array or not.
- __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
- value.Unuse();
- destination()->Split(equal);
-}
+ // Stack layout in body:
+ // [iteration counter (smi)] <- slot 0
+ // [length of array] <- slot 1
+ // [FixedArray] <- slot 2
+ // [Map or 0] <- slot 3
+ // [Object] <- slot 4
+ // Check if enumerable is already a JSObject
+ // rax: value to be iterated over
+ Condition is_smi = masm_->CheckSmi(rax);
+ primitive.Branch(is_smi);
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ jsobject.Branch(above_equal);
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition is_smi = masm_->CheckSmi(value.reg());
- destination()->false_target()->Branch(is_smi);
- // It is a heap object - get map.
- // Check if the object is a regexp.
- __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister);
- value.Unuse();
- destination()->Split(equal);
-}
+ primitive.Bind();
+ frame_->EmitPush(rax);
+ frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
+  // The function call returns the value in rax, which is where we want
+  // it below.
+ jsobject.Bind();
+ // Get the set of properties (as a FixedArray or Map).
+ // rax: value to be iterated over
+ frame_->EmitPush(rax); // Push the object being iterated over.
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- Condition is_smi = masm_->CheckSmi(obj.reg());
- destination()->false_target()->Branch(is_smi);
- __ Move(kScratchRegister, Factory::null_value());
- __ cmpq(obj.reg(), kScratchRegister);
- destination()->true_target()->Branch(equal);
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ JumpTarget call_runtime;
+ JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+ JumpTarget check_prototype;
+ JumpTarget use_cache;
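+  // The loop below walks the prototype chain in rcx, starting from the
+  // object in rax. It bails out to call_runtime if any object on the
+  // chain has elements, lacks instance descriptors or an enum cache, or
+  // (for any object but the receiver) has a non-empty enum cache.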
+ __ movq(rcx, rax);
+ loop.Bind();
+ // Check that there are no elements.
+ __ movq(rdx, FieldOperand(rcx, JSObject::kElementsOffset));
+ __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
+ call_runtime.Branch(not_equal);
+ // Check that instance descriptors are not empty so that we can
+  // check for an enum cache. Leave the map in rbx for the subsequent
+ // prototype load.
+ __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
+ __ CompareRoot(rdx, Heap::kEmptyDescriptorArrayRootIndex);
+ call_runtime.Branch(equal);
+  // Check that there is an enum cache in the non-empty instance
+ // descriptors. This is the case if the next enumeration index
+ // field does not contain a smi.
+ __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
+ is_smi = masm_->CheckSmi(rdx);
+ call_runtime.Branch(is_smi);
+ // For all objects but the receiver, check that the cache is empty.
+ __ cmpq(rcx, rax);
+ check_prototype.Branch(equal);
+ __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
+ call_runtime.Branch(not_equal);
+ check_prototype.Bind();
+ // Load the prototype from the map and loop if non-null.
+ __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+ __ CompareRoot(rcx, Heap::kNullValueRootIndex);
+ loop.Branch(not_equal);
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ use_cache.Jump();
- __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- destination()->false_target()->Branch(not_zero);
- __ movzxbq(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
- __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
- destination()->false_target()->Branch(below);
- __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
- obj.Unuse();
- destination()->Split(below_equal);
-}
+ call_runtime.Bind();
+ // Call the runtime to get the property names for the object.
+ frame_->EmitPush(rax); // push the Object (slot 4) for the runtime call
+ frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ // If we got a Map, we can do a fast modification check.
+ // Otherwise, we got a FixedArray, and we have to do a slow check.
+ // rax: map or fixed array (result from call to
+ // Runtime::kGetPropertyNamesFast)
+ __ movq(rdx, rax);
+ __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ CompareRoot(rcx, Heap::kMetaMapRootIndex);
+ fixed_array.Branch(not_equal);
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (%_ClassOf(arg) === 'Function')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- Condition is_smi = masm_->CheckSmi(obj.reg());
- destination()->false_target()->Branch(is_smi);
- __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
- obj.Unuse();
- destination()->Split(equal);
-}
+ use_cache.Bind();
+ // Get enum cache
+ // rax: map (either the result from a call to
+ // Runtime::kGetPropertyNamesFast or has been fetched directly from
+ // the object)
+ __ movq(rcx, rax);
+ __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
+ // Get the bridge array held in the enumeration index field.
+ __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
+ // Get the cache from the bridge array.
+ __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ frame_->EmitPush(rax); // <- slot 3
+ frame_->EmitPush(rdx); // <- slot 2
+ __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
+ frame_->EmitPush(rax); // <- slot 1
+ frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
+ entry.Jump();
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- Condition is_smi = masm_->CheckSmi(obj.reg());
- destination()->false_target()->Branch(is_smi);
- __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ movzxbl(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kBitFieldOffset));
- __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
- obj.Unuse();
- destination()->Split(not_zero);
-}
+ fixed_array.Bind();
+ // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
+ frame_->EmitPush(Smi::FromInt(0)); // <- slot 3
+ frame_->EmitPush(rax); // <- slot 2
+ // Push the length of the array and the initial index onto the stack.
+ __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
+ frame_->EmitPush(rax); // <- slot 1
+ frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+ // Condition.
+ entry.Bind();
+ // Grab the current frame's height for the break and continue
+ // targets only after all the state is pushed on the frame.
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- // Get the frame pointer for the calling frame.
- Result fp = allocator()->Allocate();
- __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movq(rax, frame_->ElementAt(0)); // load the current count
+ __ SmiCompare(frame_->ElementAt(1), rax); // compare to the array length
+ node->break_target()->Branch(below_equal);
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &check_frame_marker);
- __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
+ // Get the i'th entry of the array.
+ __ movq(rdx, frame_->ElementAt(2));
+ SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
+ __ movq(rbx,
+ FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
- fp.Unuse();
- destination()->Split(equal);
-}
+  // Get the expected map from the stack, or a zero map in the
+  // permanent slow case.
+  // rax: current iteration count
+  // rbx: i'th entry of the enum cache
+ __ movq(rdx, frame_->ElementAt(3));
+ // Check if the expected map still matches that of the enumerable.
+ // If not, we have to filter the key.
+ // rax: current iteration count
+ // rbx: i'th entry of the enum cache
+ // rdx: expected map value
+ __ movq(rcx, frame_->ElementAt(4));
+ __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ cmpq(rcx, rdx);
+ end_del_check.Branch(equal);
+ // Convert the entry to a string (or null if it isn't a property anymore).
+ frame_->EmitPush(frame_->ElementAt(4)); // push enumerable
+ frame_->EmitPush(rbx); // push entry
+ frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
+ __ movq(rbx, rax);
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
+ // If the property has been removed while iterating, we just skip it.
+ __ CompareRoot(rbx, Heap::kNullValueRootIndex);
+ node->continue_target()->Branch(equal);
- Result fp = allocator_->Allocate();
- Result result = allocator_->Allocate();
- ASSERT(fp.is_valid() && result.is_valid());
+ end_del_check.Bind();
+  // Store the entry in the 'each' expression and take another spin in the
+  // loop.
+  // rbx: i'th entry of the enum cache (or string thereof)
+ frame_->EmitPush(rbx);
+ { Reference each(this, node->each());
+ // Loading a reference may leave the frame in an unspilled state.
+ frame_->SpillAll();
+ if (!each.is_illegal()) {
+ if (each.size() > 0) {
+ frame_->EmitPush(frame_->ElementAt(each.size()));
+ each.SetValue(NOT_CONST_INIT);
+ frame_->Drop(2); // Drop the original and the copy of the element.
+ } else {
+        // If the reference has size zero, we can use the value below the
+        // reference as if it were above it, instead of pushing a new copy
+        // of it above the reference.
+ each.SetValue(NOT_CONST_INIT);
+ frame_->Drop(); // Drop the original of the element.
+ }
+ }
+ }
+ // Unloading a reference may leave the frame in an unspilled state.
+ frame_->SpillAll();
- Label exit;
+ // Body.
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ VisitAndSpill(node->body());
- // Get the number of formal parameters.
- __ Move(result.reg(), Smi::FromInt(scope()->num_parameters()));
+ // Next. Reestablish a spilled frame in case we are coming here via
+ // a continue in the body.
+ node->continue_target()->Bind();
+ frame_->SpillAll();
+ frame_->EmitPop(rax);
+ __ SmiAddConstant(rax, rax, Smi::FromInt(1));
+ frame_->EmitPush(rax);
+ entry.Jump();
- // Check if the calling frame is an arguments adaptor frame.
- __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &exit);
+ // Cleanup. No need to spill because VirtualFrame::Drop is safe for
+ // any frame.
+ node->break_target()->Bind();
+ frame_->Drop(5);
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ movq(result.reg(),
- Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
+ // Exit.
+ exit.Bind();
- __ bind(&exit);
- result.set_type_info(TypeInfo::Smi());
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(result.reg());
- }
- frame_->Push(&result);
+ node->continue_target()->Unuse();
+ node->break_target()->Unuse();
}
+void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
+ ASSERT(!in_spilled_code());
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ TryCatchStatement");
+ CodeForStatementPosition(node);
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
- DeferredStringCharCodeAt(Register object,
- Register index,
- Register scratch,
- Register result)
- : result_(result),
- char_code_at_generator_(object,
- index,
- scratch,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
+ JumpTarget try_block;
+ JumpTarget exit;
- StringCharCodeAtGenerator* fast_case_generator() {
- return &char_code_at_generator_;
- }
+ try_block.Call();
+ // --- Catch block ---
+ frame_->EmitPush(rax);
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_code_at_generator_.GenerateSlow(masm(), call_helper);
+ // Store the caught exception in the catch variable.
+ Variable* catch_var = node->catch_var()->var();
+ ASSERT(catch_var != NULL && catch_var->slot() != NULL);
+ StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
- __ bind(&need_conversion_);
- // Move the undefined value into the result register, which will
- // trigger conversion.
- __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
- __ jmp(exit_label());
+ // Remove the exception from the stack.
+ frame_->Drop();
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ LoadRoot(result_, Heap::kNanValueRootIndex);
- __ jmp(exit_label());
+ VisitStatementsAndSpill(node->catch_block()->statements());
+ if (has_valid_frame()) {
+ exit.Jump();
}
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
- StringCharCodeAtGenerator char_code_at_generator_;
-};
+ // --- Try block ---
+ try_block.Bind();
+ frame_->PushTryHandler(TRY_CATCH_HANDLER);
+ int handler_height = frame_->height();
-// This generates code that performs a String.prototype.charCodeAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharCodeAt");
- ASSERT(args->length() == 2);
+ // Shadow the jump targets for all escapes from the try block, including
+ // returns. During shadowing, the original target is hidden as the
+ // ShadowTarget and operations on the original actually affect the
+ // shadowing target.
+ //
+ // We should probably try to unify the escaping targets and the return
+ // target.
+ int nof_escapes = node->escaping_targets()->length();
+ List<ShadowTarget*> shadows(1 + nof_escapes);
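+ // Example of an escaping target: in
+ //   for (;;) { try { break; } catch (e) { } }
+ // the 'break' must not jump straight out of the loop, because the try
+ // handler pushed above would be left linked; the shadow target
+ // intercepts the jump so the unlink code below runs first.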
- Load(args->at(0));
- Load(args->at(1));
- Result index = frame_->Pop();
- Result object = frame_->Pop();
- object.ToRegister();
- index.ToRegister();
- // We might mutate the object register.
- frame_->Spill(object.reg());
+ // Add the shadow target for the function return.
+ static const int kReturnShadowIndex = 0;
+ shadows.Add(new ShadowTarget(&function_return_));
+ bool function_return_was_shadowed = function_return_is_shadowed_;
+ function_return_is_shadowed_ = true;
+ ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
- // We need two extra registers.
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
+ // Add the remaining shadow targets.
+ for (int i = 0; i < nof_escapes; i++) {
+ shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
+ }
- DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(object.reg(),
- index.reg(),
- scratch.reg(),
- result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
+ // Generate code for the statements in the try block.
+ VisitStatementsAndSpill(node->try_block()->statements());
+ // Stop the introduced shadowing and count the number of required unlinks.
+ // After shadowing stops, the original targets are unshadowed and the
+ // ShadowTargets represent the formerly shadowing targets.
+ bool has_unlinks = false;
+ for (int i = 0; i < shadows.length(); i++) {
+ shadows[i]->StopShadowing();
+ has_unlinks = has_unlinks || shadows[i]->is_linked();
+ }
+ function_return_is_shadowed_ = function_return_was_shadowed;
-class DeferredStringCharFromCode : public DeferredCode {
- public:
- DeferredStringCharFromCode(Register code,
- Register result)
- : char_from_code_generator_(code, result) {}
+ // Get an external reference to the handler address.
+ ExternalReference handler_address(Top::k_handler_address);
- StringCharFromCodeGenerator* fast_case_generator() {
- return &char_from_code_generator_;
+ // Make sure that there's nothing left on the stack above the
+ // handler structure.
+ if (FLAG_debug_code) {
+ __ movq(kScratchRegister, handler_address);
+ __ cmpq(rsp, Operand(kScratchRegister, 0));
+ __ Assert(equal, "stack pointer should point to top handler");
}
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_from_code_generator_.GenerateSlow(masm(), call_helper);
+ // If we can fall off the end of the try block, unlink from try chain.
+ if (has_valid_frame()) {
+ // The next handler address is on top of the frame. Unlink from
+ // the handler list and drop the rest of this handler from the
+ // frame.
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ movq(kScratchRegister, handler_address);
+ frame_->EmitPop(Operand(kScratchRegister, 0));
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+ if (has_unlinks) {
+ exit.Jump();
+ }
}
- private:
- StringCharFromCodeGenerator char_from_code_generator_;
-};
+ // Generate unlink code for the (formerly) shadowing targets that
+ // have been jumped to. Deallocate each shadow target.
+ Result return_value;
+ for (int i = 0; i < shadows.length(); i++) {
+ if (shadows[i]->is_linked()) {
+ // Unlink from try chain; be careful not to destroy the TOS if
+ // there is one.
+ if (i == kReturnShadowIndex) {
+ shadows[i]->Bind(&return_value);
+ return_value.ToRegister(rax);
+ } else {
+ shadows[i]->Bind();
+ }
+ // Because we can be jumping here (to spilled code) from
+ // unspilled code, we need to reestablish a spilled frame at
+ // this block.
+ frame_->SpillAll();
+ // Reload sp from the top handler, because some statements that we
+ // break from (e.g. for...in) may have left stuff on the stack.
+ __ movq(kScratchRegister, handler_address);
+ __ movq(rsp, Operand(kScratchRegister, 0));
+ frame_->Forget(frame_->height() - handler_height);
-// Generates code for creating a one-char string from a char code.
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharFromCode");
- ASSERT(args->length() == 1);
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ movq(kScratchRegister, handler_address);
+ frame_->EmitPop(Operand(kScratchRegister, 0));
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
- Load(args->at(0));
+ if (i == kReturnShadowIndex) {
+ if (!function_return_is_shadowed_) frame_->PrepareForReturn();
+ shadows[i]->other_target()->Jump(&return_value);
+ } else {
+ shadows[i]->other_target()->Jump();
+ }
+ }
+ }
- Result code = frame_->Pop();
- code.ToRegister();
- ASSERT(code.is_valid());
+ exit.Bind();
+}
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
- DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
- code.reg(), result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
+void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
+ ASSERT(!in_spilled_code());
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ TryFinallyStatement");
+ CodeForStatementPosition(node);
+ // State: Used to keep track of reason for entering the finally
+ // block. Should probably be extended to hold information for
+ // break/continue from within the try block.
+ enum { FALLING, THROWING, JUMPING };
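+ // The states are consecutive small integers (FALLING == 0, THROWING
+ // == 1, JUMPING == 2), and JUMPING + i encodes that the i'th shadowing
+ // target was taken, so the code after the finally block dispatches on
+ // rcx with plain SmiCompare instructions. E.g. a shadowed return
+ // (shadow index 0) enters the finally block with rcx == Smi::FromInt(2).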
-class DeferredStringCharAt : public DeferredCode {
- public:
- DeferredStringCharAt(Register object,
- Register index,
- Register scratch1,
- Register scratch2,
- Register result)
- : result_(result),
- char_at_generator_(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
+ JumpTarget try_block;
+ JumpTarget finally_block;
- StringCharAtGenerator* fast_case_generator() {
- return &char_at_generator_;
- }
+ try_block.Call();
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_at_generator_.GenerateSlow(masm(), call_helper);
+ frame_->EmitPush(rax);
+ // In case of thrown exceptions, this is where we continue.
+ __ Move(rcx, Smi::FromInt(THROWING));
+ finally_block.Jump();
- __ bind(&need_conversion_);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Move(result_, Smi::FromInt(0));
- __ jmp(exit_label());
+ // --- Try block ---
+ try_block.Bind();
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
- __ jmp(exit_label());
+ frame_->PushTryHandler(TRY_FINALLY_HANDLER);
+ int handler_height = frame_->height();
+
+ // Shadow the jump targets for all escapes from the try block, including
+ // returns. During shadowing, the original target is hidden as the
+ // ShadowTarget and operations on the original actually affect the
+ // shadowing target.
+ //
+ // We should probably try to unify the escaping targets and the return
+ // target.
+ int nof_escapes = node->escaping_targets()->length();
+ List<ShadowTarget*> shadows(1 + nof_escapes);
+
+ // Add the shadow target for the function return.
+ static const int kReturnShadowIndex = 0;
+ shadows.Add(new ShadowTarget(&function_return_));
+ bool function_return_was_shadowed = function_return_is_shadowed_;
+ function_return_is_shadowed_ = true;
+ ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+ // Add the remaining shadow targets.
+ for (int i = 0; i < nof_escapes; i++) {
+ shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
}
- private:
- Register result_;
+ // Generate code for the statements in the try block.
+ VisitStatementsAndSpill(node->try_block()->statements());
- Label need_conversion_;
- Label index_out_of_range_;
+ // Stop the introduced shadowing and count the number of required unlinks.
+ // After shadowing stops, the original targets are unshadowed and the
+ // ShadowTargets represent the formerly shadowing targets.
+ int nof_unlinks = 0;
+ for (int i = 0; i < shadows.length(); i++) {
+ shadows[i]->StopShadowing();
+ if (shadows[i]->is_linked()) nof_unlinks++;
+ }
+ function_return_is_shadowed_ = function_return_was_shadowed;
- StringCharAtGenerator char_at_generator_;
-};
+ // Get an external reference to the handler address.
+ ExternalReference handler_address(Top::k_handler_address);
+
+ // If we can fall off the end of the try block, unlink from the try
+ // chain and set the state on the frame to FALLING.
+ if (has_valid_frame()) {
+ // The next handler address is on top of the frame.
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ movq(kScratchRegister, handler_address);
+ frame_->EmitPop(Operand(kScratchRegister, 0));
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+ // Fake a top of stack value (unneeded when FALLING) and set the
+ // state in rcx, then jump around the unlink blocks if any.
+ frame_->EmitPush(Heap::kUndefinedValueRootIndex);
+ __ Move(rcx, Smi::FromInt(FALLING));
+ if (nof_unlinks > 0) {
+ finally_block.Jump();
+ }
+ }
-// This generates code that performs a String.prototype.charAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharAt");
- ASSERT(args->length() == 2);
+ // Generate code to unlink and set the state for the (formerly)
+ // shadowing targets that have been jumped to.
+ for (int i = 0; i < shadows.length(); i++) {
+ if (shadows[i]->is_linked()) {
+ // If we have come from the shadowed return, the return value is
+ // on the virtual frame. We must preserve it until it is
+ // pushed.
+ if (i == kReturnShadowIndex) {
+ Result return_value;
+ shadows[i]->Bind(&return_value);
+ return_value.ToRegister(rax);
+ } else {
+ shadows[i]->Bind();
+ }
+ // Because we can be jumping here (to spilled code) from
+ // unspilled code, we need to reestablish a spilled frame at
+ // this block.
+ frame_->SpillAll();
- Load(args->at(0));
- Load(args->at(1));
- Result index = frame_->Pop();
- Result object = frame_->Pop();
- object.ToRegister();
- index.ToRegister();
- // We might mutate the object register.
- frame_->Spill(object.reg());
+ // Reload sp from the top handler, because some statements that
+ // we break from (e.g. for...in) may have left stuff on the
+ // stack.
+ __ movq(kScratchRegister, handler_address);
+ __ movq(rsp, Operand(kScratchRegister, 0));
+ frame_->Forget(frame_->height() - handler_height);
- // We need three extra registers.
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
- Result scratch1 = allocator()->Allocate();
- ASSERT(scratch1.is_valid());
- Result scratch2 = allocator()->Allocate();
- ASSERT(scratch2.is_valid());
+ // Unlink this handler and drop it from the frame.
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ movq(kScratchRegister, handler_address);
+ frame_->EmitPop(Operand(kScratchRegister, 0));
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
- DeferredStringCharAt* deferred =
- new DeferredStringCharAt(object.reg(),
- index.reg(),
- scratch1.reg(),
- scratch2.reg(),
- result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
+ if (i == kReturnShadowIndex) {
+ // If this target shadowed the function return, materialize
+ // the return value on the stack.
+ frame_->EmitPush(rax);
+ } else {
+ // Fake TOS for targets that shadowed breaks and continues.
+ frame_->EmitPush(Heap::kUndefinedValueRootIndex);
+ }
+ __ Move(rcx, Smi::FromInt(JUMPING + i));
+ if (--nof_unlinks > 0) {
+ // If this is not the last unlink block, jump around the next.
+ finally_block.Jump();
+ }
+ }
+ }
+ // --- Finally block ---
+ finally_block.Bind();
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition positive_smi = masm_->CheckPositiveSmi(value.reg());
- value.Unuse();
- destination()->Split(positive_smi);
-}
+ // Push the state on the stack.
+ frame_->EmitPush(rcx);
+ // We keep two elements on the stack - the (possibly faked) result
+ // and the state - while evaluating the finally block.
+ //
+ // Generate code for the statements in the finally block.
+ VisitStatementsAndSpill(node->finally_block()->statements());
-// Generates the Math.pow method. Only handles special cases and
-// branches to the runtime system for everything else. Please note
-// that this function assumes that the callsite has executed ToNumber
-// on both arguments.
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- Load(args->at(0));
- Load(args->at(1));
+ if (has_valid_frame()) {
+ // Restore state and return value or faked TOS.
+ frame_->EmitPop(rcx);
+ frame_->EmitPop(rax);
+ }
- Label allocate_return;
- // Load the two operands while leaving the values on the frame.
- frame()->Dup();
- Result exponent = frame()->Pop();
- exponent.ToRegister();
- frame()->Spill(exponent.reg());
- frame()->PushElementAt(1);
- Result base = frame()->Pop();
- base.ToRegister();
- frame()->Spill(base.reg());
+ // Generate code to jump to the right destination for all used
+ // formerly shadowing targets. Deallocate each shadow target.
+ for (int i = 0; i < shadows.length(); i++) {
+ if (has_valid_frame() && shadows[i]->is_bound()) {
+ BreakTarget* original = shadows[i]->other_target();
+ __ SmiCompare(rcx, Smi::FromInt(JUMPING + i));
+ if (i == kReturnShadowIndex) {
+ // The return value is (already) in rax.
+ Result return_value = allocator_->Allocate(rax);
+ ASSERT(return_value.is_valid());
+ if (function_return_is_shadowed_) {
+ original->Branch(equal, &return_value);
+ } else {
+ // Branch around the preparation for return which may emit
+ // code.
+ JumpTarget skip;
+ skip.Branch(not_equal);
+ frame_->PrepareForReturn();
+ original->Jump(&return_value);
+ skip.Bind();
+ }
+ } else {
+ original->Branch(equal);
+ }
+ }
+ }
- Result answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- ASSERT(!exponent.reg().is(base.reg()));
- JumpTarget call_runtime;
+ if (has_valid_frame()) {
+ // Check if we need to rethrow the exception.
+ JumpTarget exit;
+ __ SmiCompare(rcx, Smi::FromInt(THROWING));
+ exit.Branch(not_equal);
- // Save 1 in xmm3 - we need this several times later on.
- __ movl(answer.reg(), Immediate(1));
- __ cvtlsi2sd(xmm3, answer.reg());
+ // Rethrow exception.
+ frame_->EmitPush(rax); // undo pop from above
+ frame_->CallRuntime(Runtime::kReThrow, 1);
- Label exponent_nonsmi;
- Label base_nonsmi;
- // If the exponent is a heap number go to that specific case.
- __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi);
- __ JumpIfNotSmi(base.reg(), &base_nonsmi);
+ // Done.
+ exit.Bind();
+ }
+}
- // Optimized version when y is an integer.
- Label powi;
- __ SmiToInteger32(base.reg(), base.reg());
- __ cvtlsi2sd(xmm0, base.reg());
- __ jmp(&powi);
- // exponent is smi and base is a heapnumber.
- __ bind(&base_nonsmi);
- __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- call_runtime.Branch(not_equal);
- __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
+void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ DebuggerStatement");
+ CodeForStatementPosition(node);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Spill everything, even constants, to the frame.
+ frame_->SpillAll();
- // Optimized version of pow if y is an integer.
- __ bind(&powi);
- __ SmiToInteger32(exponent.reg(), exponent.reg());
+ frame_->DebugBreak();
+ // Ignore the return value.
+#endif
+}
- // Save exponent in base as we need to check if exponent is negative later.
- // We know that base and exponent are in different registers.
- __ movl(base.reg(), exponent.reg());
- // Get absolute value of exponent.
- Label no_neg;
- __ cmpl(exponent.reg(), Immediate(0));
- __ j(greater_equal, &no_neg);
- __ negl(exponent.reg());
- __ bind(&no_neg);
+void CodeGenerator::InstantiateFunction(
+ Handle<SharedFunctionInfo> function_info) {
+ // The inevitable call will sync frame elements to memory anyway, so
+ // we do it eagerly to allow us to push the arguments directly into
+ // place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
- // Load xmm1 with 1.
- __ movsd(xmm1, xmm3);
- Label while_true;
- Label no_multiply;
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning.
+ if (scope()->is_function_scope() && function_info->num_literals() == 0) {
+ FastNewClosureStub stub;
+ frame_->Push(function_info);
+ Result answer = frame_->CallStub(&stub, 1);
+ frame_->Push(&answer);
+ } else {
+ // Call the runtime to instantiate the function based on the
+ // shared function info.
+ frame_->EmitPush(rsi);
+ frame_->EmitPush(function_info);
+ Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
+ frame_->Push(&result);
+ }
+}
- __ bind(&while_true);
- __ shrl(exponent.reg(), Immediate(1));
- __ j(not_carry, &no_multiply);
- __ mulsd(xmm1, xmm0);
- __ bind(&no_multiply);
- __ testl(exponent.reg(), exponent.reg());
- __ mulsd(xmm0, xmm0);
- __ j(not_zero, &while_true);
- // base.reg() holds the original exponent - if it was negative, return
- // 1/result.
- __ testl(base.reg(), base.reg());
- __ j(positive, &allocate_return);
- // Special case if xmm1 has reached infinity.
- __ movl(answer.reg(), Immediate(0x7FB00000));
- __ movd(xmm0, answer.reg());
- __ cvtss2sd(xmm0, xmm0);
- __ ucomisd(xmm0, xmm1);
- call_runtime.Branch(equal);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
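+ // The loop above is binary exponentiation. An equivalent scalar
+ // sketch in plain C++ (assuming a non-negative integer exponent n):
+ //
+ //   double powi(double x, int n) {
+ //     double acc = 1.0;          // xmm1
+ //     while (n != 0) {
+ //       if (n & 1) acc *= x;     // shrl's carry flag is the low bit
+ //       x *= x;                  // mulsd xmm0, xmm0
+ //       n >>= 1;
+ //     }
+ //     return acc;                // negative exponents use 1.0 / acc
+ //   }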
+void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+ Comment cmnt(masm_, "[ FunctionLiteral");
- // The exponent (or both operands) is a heap number - either way we now
- // work on doubles.
- __ bind(&exponent_nonsmi);
- __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- call_runtime.Branch(not_equal);
- __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
- // Test if exponent is nan.
- __ ucomisd(xmm1, xmm1);
- call_runtime.Branch(parity_even);
+ // Build the function info and instantiate it.
+ Handle<SharedFunctionInfo> function_info =
+ Compiler::BuildFunctionInfo(node, script(), this);
+ // Check for stack-overflow exception.
+ if (HasStackOverflow()) return;
+ InstantiateFunction(function_info);
+}
- Label base_not_smi;
- Label handle_special_cases;
- __ JumpIfNotSmi(base.reg(), &base_not_smi);
- __ SmiToInteger32(base.reg(), base.reg());
- __ cvtlsi2sd(xmm0, base.reg());
- __ jmp(&handle_special_cases);
- __ bind(&base_not_smi);
- __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- call_runtime.Branch(not_equal);
- __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
- __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask));
- __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask));
- // base is NaN or +/-Infinity
- call_runtime.Branch(greater_equal);
- __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
- // base is in xmm0 and exponent is in xmm1.
- __ bind(&handle_special_cases);
- Label not_minus_half;
- // Test for -0.5.
- // Load xmm2 with -0.5.
- __ movl(answer.reg(), Immediate(0xBF000000));
- __ movd(xmm2, answer.reg());
- __ cvtss2sd(xmm2, xmm2);
- // xmm2 now has -0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &not_minus_half);
+void CodeGenerator::VisitSharedFunctionInfoLiteral(
+ SharedFunctionInfoLiteral* node) {
+ Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
+ InstantiateFunction(node->shared_function_info());
+}
- // Calculates reciprocal of square root.
- // Note that 1/sqrt(x) = sqrt(1/x).
- __ divsd(xmm3, xmm0);
- __ movsd(xmm1, xmm3);
- __ sqrtsd(xmm1, xmm1);
- __ jmp(&allocate_return);
- // Test for 0.5.
- __ bind(&not_minus_half);
- // Load xmm2 with 0.5.
- // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
- __ addsd(xmm2, xmm3);
- // xmm2 now has 0.5.
- __ ucomisd(xmm2, xmm1);
- call_runtime.Branch(not_equal);
+void CodeGenerator::VisitConditional(Conditional* node) {
+ Comment cmnt(masm_, "[ Conditional");
+ JumpTarget then;
+ JumpTarget else_;
+ JumpTarget exit;
+ ControlDestination dest(&then, &else_, true);
+ LoadCondition(node->condition(), &dest, true);
- // Calculates square root.
- __ movsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
+ if (dest.false_was_fall_through()) {
+ // The else target was bound, so we compile the else part first.
+ Load(node->else_expression());
- JumpTarget done;
- Label failure, success;
- __ bind(&allocate_return);
- // Make a copy of the frame to enable us to handle allocation
- // failure after the JumpTarget jump.
- VirtualFrame* clone = new VirtualFrame(frame());
- __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure);
- __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
- // Remove the two original values from the frame - we only need those
- // in the case where we branch to runtime.
- frame()->Drop(2);
- exponent.Unuse();
- base.Unuse();
- done.Jump(&answer);
- // Use the copy of the original frame as our current frame.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- // If we experience an allocation failure we branch to runtime.
- __ bind(&failure);
- call_runtime.Bind();
- answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
+ if (then.is_linked()) {
+ exit.Jump();
+ then.Bind();
+ Load(node->then_expression());
+ }
+ } else {
+ // The then target was bound, so we compile the then part first.
+ Load(node->then_expression());
- done.Bind(&answer);
- frame()->Push(&answer);
+ if (else_.is_linked()) {
+ exit.Jump();
+ else_.Bind();
+ Load(node->else_expression());
+ }
+ }
+
+ exit.Bind();
}
-// Generates the Math.sqrt method. Please note - this function assumes that
-// the callsite has executed ToNumber on the argument.
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
+void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+ if (slot->type() == Slot::LOOKUP) {
+ ASSERT(slot->var()->is_dynamic());
- // Leave original value on the frame if we need to call runtime.
- frame()->Dup();
- Result result = frame()->Pop();
- result.ToRegister();
- frame()->Spill(result.reg());
- Label runtime;
- Label non_smi;
- Label load_done;
- JumpTarget end;
+ JumpTarget slow;
+ JumpTarget done;
+ Result value;
- __ JumpIfNotSmi(result.reg(), &non_smi);
- __ SmiToInteger32(result.reg(), result.reg());
- __ cvtlsi2sd(xmm0, result.reg());
- __ jmp(&load_done);
- __ bind(&non_smi);
- __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &runtime);
- __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
+ // Generate fast case for loading from slots that correspond to
+ // local/global variables or arguments unless they are shadowed by
+ // eval-introduced bindings.
+ EmitDynamicLoadFromSlotFastCase(slot,
+ typeof_state,
+ &value,
+ &slow,
+ &done);
- __ bind(&load_done);
- __ sqrtsd(xmm0, xmm0);
- // A copy of the virtual frame to allow us to go to runtime after the
- // JumpTarget jump.
- Result scratch = allocator()->Allocate();
- VirtualFrame* clone = new VirtualFrame(frame());
- __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime);
+ slow.Bind();
+ // A runtime call is inevitable. We eagerly sync frame elements
+ // to memory so that we can push the arguments directly into place
+ // on top of the frame.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(rsi);
+ __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(kScratchRegister);
+ if (typeof_state == INSIDE_TYPEOF) {
+ value =
+ frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ } else {
+ value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+ }
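+ // The distinction matters for typeof: 'typeof someUndeclaredVariable'
+ // must evaluate to "undefined", so inside typeof the no-reference-error
+ // variant is called, while an ordinary load of an unresolvable name
+ // throws a ReferenceError.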
- __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
- frame()->Drop(1);
- scratch.Unuse();
- end.Jump(&result);
- // We only branch to runtime if we have an allocation error.
- // Use the copy of the original frame as our current frame.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- __ bind(&runtime);
- result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
+ done.Bind(&value);
+ frame_->Push(&value);
- end.Bind(&result);
- frame()->Push(&result);
-}
+ } else if (slot->var()->mode() == Variable::CONST) {
+ // Const slots may contain 'the hole' value (the constant hasn't been
+ // initialized yet) which needs to be converted into the 'undefined'
+ // value.
+ //
+ // We currently spill the virtual frame because constants use the
+ // potentially unsafe direct-frame access of SlotOperand.
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Load const");
+ JumpTarget exit;
+ __ movq(rcx, SlotOperand(slot, rcx));
+ __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
+ exit.Branch(not_equal);
+ __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
+ exit.Bind();
+ frame_->EmitPush(rcx);
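+ // Example: in 'alert(c); const c = 1;' the read of 'c' executes before
+ // the initialization, finds the hole in the slot, and the code above
+ // turns it into 'undefined'.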
+ } else if (slot->type() == Slot::PARAMETER) {
+ frame_->PushParameterAt(slot->index());
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition is_smi = masm_->CheckSmi(value.reg());
- value.Unuse();
- destination()->Split(is_smi);
+ } else if (slot->type() == Slot::LOCAL) {
+ frame_->PushLocalAt(slot->index());
+
+ } else {
+ // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
+ // here.
+ //
+ // The use of SlotOperand below is safe for an unspilled frame
+ // because it will always be a context slot.
+ ASSERT(slot->type() == Slot::CONTEXT);
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
+ frame_->Push(&temp);
+ }
}
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (ShouldGenerateLog(args->at(0))) {
- Load(args->at(1));
- Load(args->at(2));
- frame_->CallRuntime(Runtime::kLog, 2);
+void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
+ TypeofState state) {
+ LoadFromSlot(slot, state);
+
+ // Bail out quickly if we're not using lazy arguments allocation.
+ if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
+
+ // ... or if the slot isn't a non-parameter arguments slot.
+ if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
+
+ // Pop the loaded value from the stack.
+ Result value = frame_->Pop();
+
+ // If the loaded value is a constant, we know if the arguments
+ // object has been lazily loaded yet.
+ if (value.is_constant()) {
+ if (value.handle()->IsTheHole()) {
+ Result arguments = StoreArgumentsObject(false);
+ frame_->Push(&arguments);
+ } else {
+ frame_->Push(&value);
+ }
+ return;
}
-#endif
- // Finally, we're expected to leave a value on the top of the stack.
- frame_->Push(Factory::undefined_value());
+
+ // The loaded value is in a register. If it is the sentinel that
+ // indicates that we haven't loaded the arguments object yet, we
+ // need to do it now.
+ JumpTarget exit;
+ __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
+ frame_->Push(&value);
+ exit.Branch(not_equal);
+ Result arguments = StoreArgumentsObject(false);
+ frame_->SetElementAt(0, &arguments);
+ exit.Bind();
}
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
+Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
+ Slot* slot,
+ TypeofState typeof_state,
+ JumpTarget* slow) {
+ // Check that no extension objects have been created by calls to
+ // eval from the current scope to the global scope.
+ Register context = rsi;
+ Result tmp = allocator_->Allocate();
+ ASSERT(tmp.is_valid()); // All non-reserved registers were available.
- // Load the two objects into registers and perform the comparison.
- Load(args->at(0));
- Load(args->at(1));
- Result right = frame_->Pop();
- Result left = frame_->Pop();
- right.ToRegister();
- left.ToRegister();
- __ cmpq(right.reg(), left.reg());
- right.Unuse();
- left.Unuse();
- destination()->Split(equal);
-}
+ Scope* s = scope();
+ while (s != NULL) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_eval()) {
+ // Check that extension is NULL.
+ __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ Immediate(0));
+ slow->Branch(not_equal, not_taken);
+ }
+ // Load next context in chain.
+ __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
+ __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+ context = tmp.reg();
+ }
+ // If no outer scope calls eval, we do not need to check more
+ // context extensions. If we have reached an eval scope, we check
+ // all extensions from this point.
+ if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+ s = s->outer_scope();
+ }
+ if (s->is_eval_scope()) {
+ // Loop up the context chain. There is no frame effect so it is
+ // safe to use raw labels here.
+ Label next, fast;
+ if (!context.is(tmp.reg())) {
+ __ movq(tmp.reg(), context);
+ }
+ // Load map for comparison into register, outside loop.
+ __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
+ __ bind(&next);
+ // Terminate at global context.
+ __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
+ __ j(equal, &fast);
+ // Check that extension is NULL.
+ __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
+ slow->Branch(not_equal);
+ // Load next context in chain.
+ __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
+ __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+ __ jmp(&next);
+ __ bind(&fast);
+ }
+ tmp.Unuse();
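+ // Both the compile-time scope walk above and the emitted runtime loop
+ // enforce the same invariant, roughly (pseudocode):
+ //
+ //   for (ctx = current; ctx != global; ctx = ctx->closure->context)
+ //     if (ctx may have been extended by eval && ctx->extension != NULL)
+ //       goto slow;  // possibly shadowed; fall back to the runtime
+ //
+ // Only when every such context has a NULL extension is the global
+ // load IC below known to be correct.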
-void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
- // The RBP value is aligned, so it reads as a tagged smi (it is not
- // necessarily encoded as a smi, though, so it must not be treated as one).
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- Result rbp_as_smi = allocator_->Allocate();
- ASSERT(rbp_as_smi.is_valid());
- __ movq(rbp_as_smi.reg(), rbp);
- frame_->Push(&rbp_as_smi);
+ // All extension objects were empty and it is safe to use a global
+ // load IC call.
+ LoadGlobal();
+ frame_->Push(slot->var()->name());
+ RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
+ ? RelocInfo::CODE_TARGET
+ : RelocInfo::CODE_TARGET_CONTEXT;
+ Result answer = frame_->CallLoadIC(mode);
+ // A test rax instruction following the call signals that the inobject
+ // property case was inlined. Ensure that there is not a test rax
+ // instruction here.
+ masm_->nop();
+ return answer;
}
-void CodeGenerator::GenerateRandomHeapNumber(
- ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
- frame_->SpillAll();
+void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
+ TypeofState typeof_state,
+ Result* result,
+ JumpTarget* slow,
+ JumpTarget* done) {
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+ *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
+ done->Jump(result);
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
- __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
+ } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+ Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+ Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
+ if (potential_slot != NULL) {
+ // Generate fast case for locals that rewrite to slots.
+ // Allocate a fresh register to use as a temp in
+ // ContextSlotOperandCheckExtensions and to hold the result
+ // value.
+ *result = allocator_->Allocate();
+ ASSERT(result->is_valid());
+ __ movq(result->reg(),
+ ContextSlotOperandCheckExtensions(potential_slot,
+ *result,
+ slow));
+ if (potential_slot->var()->mode() == Variable::CONST) {
+ __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex);
+ done->Branch(not_equal, result);
+ __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex);
+ }
+ done->Jump(result);
+ } else if (rewrite != NULL) {
+ // Generate fast case for argument loads.
+ Property* property = rewrite->AsProperty();
+ if (property != NULL) {
+ VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+ Literal* key_literal = property->key()->AsLiteral();
+ if (obj_proxy != NULL &&
+ key_literal != NULL &&
+ obj_proxy->IsArguments() &&
+ key_literal->handle()->IsSmi()) {
+ // Load arguments object if there are no eval-introduced
+ // variables. Then load the argument from the arguments
+ // object using keyed load.
+ Result arguments = allocator()->Allocate();
+ ASSERT(arguments.is_valid());
+ __ movq(arguments.reg(),
+ ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+ arguments,
+ slow));
+ frame_->Push(&arguments);
+ frame_->Push(key_literal->handle());
+ *result = EmitKeyedLoad();
+ done->Jump(result);
+ }
+ }
+ }
+ }
+}
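+// Example of the argument fast case above: in
+//   function f(x) { eval(str); return x; }
+// the use of 'x' is rewritten to the keyed property load 'arguments[0]',
+// which the code above performs directly once the context-extension
+// checks have shown that no eval-introduced binding shadows it.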
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ movq(rbx, rax);
- __ bind(&heapnumber_allocated);
+void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
+ if (slot->type() == Slot::LOOKUP) {
+ ASSERT(slot->var()->is_dynamic());
- // Return a random uint32 number in rax.
- // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
- __ PrepareCallCFunction(0);
- __ CallCFunction(ExternalReference::random_uint32_function(), 0);
+ // For now, just do a runtime call. Since the call is inevitable,
+ // we eagerly sync the virtual frame so we can directly push the
+ // arguments into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
- // Convert 32 random bits in rax to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
- __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, rcx);
- __ movd(xmm0, rax);
- __ cvtss2sd(xmm1, xmm1);
- __ xorpd(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
- __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
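+ // Worked example of the bit trick: 2^20 as a double is
+ // 0x4130000000000000. xorpd merges the 32 random bits r into the low
+ // mantissa word, producing (1 + r * 2^-52) * 2^20 = 2^20 + r * 2^-32;
+ // subtracting 2^20 leaves r * 2^-32, i.e. 0.(32 random bits) in [0, 1).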
+ frame_->EmitPush(rsi);
+ frame_->EmitPush(slot->var()->name());
- __ movq(rax, rbx);
- Result result = allocator_->Allocate(rax);
- frame_->Push(&result);
-}
+ Result value;
+ if (init_state == CONST_INIT) {
+ // Same as the case for a normal store, but ignores the attribute
+ // (e.g. READ_ONLY) of the context slot so that we can initialize const
+ // properties (introduced via eval("const foo = (some expr);")). Also,
+ // uses the current function context instead of the top context.
+ //
+ // Note that we must declare foo upon entry of eval(), via a
+ // context slot declaration, but we cannot initialize it at the same
+ // time, because the const declaration may be at the end of the eval
+ // code (sigh...) and the const variable may have been used before
+ // (where its value is 'undefined'). Thus, we can only do the
+ // initialization when we actually encounter the expression and when
+ // the expression operands are defined and valid, and thus we need the
+ // split into 2 operations: declaration of the context slot followed
+ // by initialization.
+ value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
+ }
+ // Storing a variable must keep the (new) value on the expression
+ // stack. This is necessary for compiling chained assignment
+ // expressions.
+ frame_->Push(&value);
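+ // Example of the declaration/initialization split: for
+ //   eval("if (flag) { const kLimit = compute(); }")
+ // the context slot for 'kLimit' is created (holding the hole) when the
+ // eval code is entered, but this initialization code only runs if and
+ // when the const statement itself is reached.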
+ } else {
+ ASSERT(!slot->var()->is_dynamic());
+ JumpTarget exit;
+ if (init_state == CONST_INIT) {
+ ASSERT(slot->var()->mode() == Variable::CONST);
+ // Only the first const initialization must be executed (the slot
+ // still contains 'the hole' value). When the assignment is executed,
+ // the code is identical to a normal store (see below).
+ //
+ // We spill the frame in the code below because the direct-frame
+ // access of SlotOperand is potentially unsafe with an unspilled
+ // frame.
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Init const");
+ __ movq(rcx, SlotOperand(slot, rcx));
+ __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
+ exit.Branch(not_equal);
+ }
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 4);
+ // We must execute the store. Storing a variable must keep the (new)
+ // value on the stack. This is necessary for compiling assignment
+ // expressions.
+ //
+ // Note: We will reach here even with slot->var()->mode() ==
+ // Variable::CONST because of const declarations which will initialize
+ // consts to 'the hole' value and by doing so, end up calling this code.
+ if (slot->type() == Slot::PARAMETER) {
+ frame_->StoreToParameterAt(slot->index());
+ } else if (slot->type() == Slot::LOCAL) {
+ frame_->StoreToLocalAt(slot->index());
+ } else {
+ // The other slot types (LOOKUP and GLOBAL) cannot reach here.
+ //
+ // The use of SlotOperand below is safe for an unspilled frame
+ // because the slot is a context slot.
+ ASSERT(slot->type() == Slot::CONTEXT);
+ frame_->Dup();
+ Result value = frame_->Pop();
+ value.ToRegister();
+ Result start = allocator_->Allocate();
+ ASSERT(start.is_valid());
+ __ movq(SlotOperand(slot, start.reg()), value.reg());
+ // RecordWrite may destroy the value registers.
+ //
+ // TODO(204): Avoid actually spilling when the value is not
+ // needed (probably the common case).
+ frame_->Spill(value.reg());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
+ // The results start, value, and temp are unused by going out of
+ // scope.
+ }
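+ // RecordWrite is the write barrier: it tells the GC that a pointer was
+ // stored into the context object at 'offset'. Since it may clobber the
+ // registers handed to it, the value register was spilled back to the
+ // frame first (see the TODO above).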
- // Load the arguments on the stack and call the runtime system.
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
- Load(args->at(3));
- RegExpExecStub stub;
- Result result = frame_->CallStub(&stub, 4);
- frame_->Push(&result);
+ exit.Bind();
+ }
}
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
- // No stub. This code only occurs a few times in regexp.js.
- const int kMaxInlineLength = 100;
- ASSERT_EQ(3, args->length());
- Load(args->at(0)); // Size of array, smi.
- Load(args->at(1)); // "index" property value.
- Load(args->at(2)); // "input" property value.
- {
- VirtualFrame::SpilledScope spilled_scope;
-
- Label slowcase;
- Label done;
- __ movq(r8, Operand(rsp, kPointerSize * 2));
- __ JumpIfNotSmi(r8, &slowcase);
- __ SmiToInteger32(rbx, r8);
- __ cmpl(rbx, Immediate(kMaxInlineLength));
- __ j(above, &slowcase);
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- // Allocate RegExpResult followed by FixedArray with size in ebx.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
- times_pointer_size,
- rbx, // In: Number of elements.
- rax, // Out: Start of allocation (tagged).
- rcx, // Out: End of allocation.
- rdx, // Scratch register
- &slowcase,
- TAG_OBJECT);
- // rax: Start of allocated area, object-tagged.
- // rbx: Number of array elements as int32.
- // r8: Number of array elements as smi.
+void CodeGenerator::VisitSlot(Slot* node) {
+ Comment cmnt(masm_, "[ Slot");
+ LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
+}
- // Set JSArray map to global.regexp_result_map().
- __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_INDEX));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
- __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
- // Set empty properties FixedArray.
- __ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
- Factory::empty_fixed_array());
+void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ Variable* var = node->var();
+ Expression* expr = var->rewrite();
+ if (expr != NULL) {
+ Visit(expr);
+ } else {
+ ASSERT(var->is_global());
+ Reference ref(this, node);
+ ref.GetValue();
+ }
+}
- // Set elements to point to FixedArray allocated right after the JSArray.
- __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
- // Set input, index and length fields from arguments.
- __ pop(FieldOperand(rax, JSRegExpResult::kInputOffset));
- __ pop(FieldOperand(rax, JSRegExpResult::kIndexOffset));
- __ lea(rsp, Operand(rsp, kPointerSize));
- __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
+void CodeGenerator::VisitLiteral(Literal* node) {
+ Comment cmnt(masm_, "[ Literal");
+ frame_->Push(node->handle());
+}
- // Fill out the elements FixedArray.
- // rax: JSArray.
- // rcx: FixedArray.
- // rbx: Number of elements in array as int32.
- // Set map.
- __ Move(FieldOperand(rcx, HeapObject::kMapOffset),
- Factory::fixed_array_map());
- // Set length.
- __ Integer32ToSmi(rdx, rbx);
- __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
- // Fill contents of fixed-array with the-hole.
- __ Move(rdx, Factory::the_hole_value());
- __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
- // Fill fixed array elements with hole.
- // rax: JSArray.
- // rbx: Number of elements in array that remains to be filled, as int32.
- // rcx: Start of elements in FixedArray.
- // rdx: the hole.
- Label loop;
- __ testl(rbx, rbx);
- __ bind(&loop);
- __ j(less_equal, &done); // Jump if rbx is negative or zero.
- __ subl(rbx, Immediate(1));
- __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
- __ jmp(&loop);
+void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
+ UNIMPLEMENTED();
+ // TODO(X64): Implement security policy for loads of smis.
+}
- __ bind(&slowcase);
- __ CallRuntime(Runtime::kRegExpConstructResult, 3);
- __ bind(&done);
- }
- frame_->Forget(3);
- frame_->Push(rax);
+bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
+ return false;
}
-class DeferredSearchCache: public DeferredCode {
+// Materialize the regexp literal 'node' in the literals array
+// 'literals' of the function. Leave the regexp boilerplate in
+// 'boilerplate'.
+class DeferredRegExpLiteral: public DeferredCode {
public:
- DeferredSearchCache(Register dst,
- Register cache,
- Register key,
- Register scratch)
- : dst_(dst), cache_(cache), key_(key), scratch_(scratch) {
- set_comment("[ DeferredSearchCache");
+ DeferredRegExpLiteral(Register boilerplate,
+ Register literals,
+ RegExpLiteral* node)
+ : boilerplate_(boilerplate), literals_(literals), node_(node) {
+ set_comment("[ DeferredRegExpLiteral");
}
- virtual void Generate();
+ void Generate();
private:
- Register dst_; // On invocation, holds the finger index (as int32);
- // on exit, holds the value being looked up.
- Register cache_; // instance of JSFunctionResultCache.
- Register key_; // key being looked up.
- Register scratch_;
+ Register boilerplate_;
+ Register literals_;
+ RegExpLiteral* node_;
};
- // Return the position of the element at |index| + |additional_offset|
- // in the FixedArray a pointer to which is held in |array|. |index| is int32.
-static Operand ArrayElement(Register array,
- Register index,
- int additional_offset = 0) {
- int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
- return FieldOperand(array, index, times_pointer_size, offset);
+void DeferredRegExpLiteral::Generate() {
+ // Since the entry is undefined we call the runtime system to
+ // compute the literal.
+ // Literal array (0).
+ __ push(literals_);
+ // Literal index (1).
+ __ Push(Smi::FromInt(node_->literal_index()));
+ // RegExp pattern (2).
+ __ Push(node_->pattern());
+ // RegExp flags (3).
+ __ Push(node_->flags());
+ __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
}
-void DeferredSearchCache::Generate() {
- Label first_loop, search_further, second_loop, cache_miss;
-
- Immediate kEntriesIndexImm = Immediate(JSFunctionResultCache::kEntriesIndex);
- Immediate kEntrySizeImm = Immediate(JSFunctionResultCache::kEntrySize);
-
- // Check the cache from finger to start of the cache.
- __ bind(&first_loop);
- __ subl(dst_, kEntrySizeImm);
- __ cmpl(dst_, kEntriesIndexImm);
- __ j(less, &search_further);
-
- __ cmpq(ArrayElement(cache_, dst_), key_);
- __ j(not_equal, &first_loop);
-
- __ Integer32ToSmiField(
- FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
- __ movq(dst_, ArrayElement(cache_, dst_, 1));
- __ jmp(exit_label());
-
- __ bind(&search_further);
-
- // Check the cache from end of cache up to finger.
- __ SmiToInteger32(dst_,
- FieldOperand(cache_,
- JSFunctionResultCache::kCacheSizeOffset));
- __ SmiToInteger32(scratch_,
- FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
-
- __ bind(&second_loop);
- __ subl(dst_, kEntrySizeImm);
- __ cmpl(dst_, scratch_);
- __ j(less_equal, &cache_miss);
-
- __ cmpq(ArrayElement(cache_, dst_), key_);
- __ j(not_equal, &second_loop);
-
- __ Integer32ToSmiField(
- FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
- __ movq(dst_, ArrayElement(cache_, dst_, 1));
- __ jmp(exit_label());
+void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+ Comment cmnt(masm_, "[ RegExp Literal");
- __ bind(&cache_miss);
- __ push(cache_); // store a reference to cache
- __ push(key_); // store a key
- __ push(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ push(key_);
- // On x64 function must be in rdi.
- __ movq(rdi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
- ParameterCount expected(1);
- __ InvokeFunction(rdi, expected, CALL_FUNCTION);
+ // Retrieve the literals array and check the allocated entry. Begin
+ // with a writable copy of the function of this activation in a
+ // register.
+ frame_->PushFunction();
+ Result literals = frame_->Pop();
+ literals.ToRegister();
+ frame_->Spill(literals.reg());
- // Find a place to put new cached value into.
- Label add_new_entry, update_cache;
- __ movq(rcx, Operand(rsp, kPointerSize)); // restore the cache
- // Possible optimization: cache size is constant for the given cache
- // so technically we could use a constant here. However, if we have a
- // cache miss this optimization would hardly matter much.
+ // Load the literals array of the function.
+ __ movq(literals.reg(),
+ FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
- // Check if we could add new entry to cache.
- __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ SmiToInteger32(r9,
- FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
- __ cmpl(rbx, r9);
- __ j(greater, &add_new_entry);
+ // Load the literal at the ast saved index.
+ Result boilerplate = allocator_->Allocate();
+ ASSERT(boilerplate.is_valid());
+ int literal_offset =
+ FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+ __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
- // Check if we could evict entry after finger.
- __ SmiToInteger32(rdx,
- FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
- __ addl(rdx, kEntrySizeImm);
- Label forward;
- __ cmpl(rbx, rdx);
- __ j(greater, &forward);
- // Need to wrap over the cache.
- __ movl(rdx, kEntriesIndexImm);
- __ bind(&forward);
- __ movl(r9, rdx);
- __ jmp(&update_cache);
+ // Check whether we need to materialize the RegExp object. If so,
+ // jump to the deferred code passing the literals array.
+ DeferredRegExpLiteral* deferred =
+ new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
+ __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
+ deferred->Branch(equal);
+ deferred->BindExit();
+ literals.Unuse();
- __ bind(&add_new_entry);
- // r9 holds cache size as int32.
- __ leal(rbx, Operand(r9, JSFunctionResultCache::kEntrySize));
- __ Integer32ToSmiField(
- FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
+ // Push the boilerplate object.
+ frame_->Push(&boilerplate);
+}
- // Update the cache itself.
- // r9 holds the index as int32.
- __ bind(&update_cache);
- __ pop(rbx); // restore the key
- __ Integer32ToSmiField(
- FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
- // Store key.
- __ movq(ArrayElement(rcx, r9), rbx);
- __ RecordWrite(rcx, 0, rbx, r9);
- // Store value.
- __ pop(rcx); // restore the cache.
- __ SmiToInteger32(rdx,
- FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
- __ incl(rdx);
- // Backup rax, because the RecordWrite macro clobbers its arguments.
- __ movq(rbx, rax);
- __ movq(ArrayElement(rcx, rdx), rax);
- __ RecordWrite(rcx, 0, rbx, rdx);
+void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
- if (!dst_.is(rax)) {
- __ movq(dst_, rax);
+ // Load a writable copy of the function of this activation in a
+ // register.
+ frame_->PushFunction();
+ Result literals = frame_->Pop();
+ literals.ToRegister();
+ frame_->Spill(literals.reg());
+
+ // Load the literals array of the function.
+ __ movq(literals.reg(),
+ FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
+ // Literal array.
+ frame_->Push(&literals);
+ // Literal index.
+ frame_->Push(Smi::FromInt(node->literal_index()));
+ // Constant properties.
+ frame_->Push(node->constant_properties());
+ // Should the object literal have fast elements?
+ frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
+ Result clone;
+ if (node->depth() > 1) {
+ clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ } else {
+ clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ }
+ frame_->Push(&clone);
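+ // 'depth' counts literal nesting: {a: 1} has depth 1 and may use the
+ // shallow-clone fast path, while {a: {b: 2}} has depth 2 because the
+ // nested literal must be cloned as well, so the general runtime call
+ // is used.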
+
+ for (int i = 0; i < node->properties()->length(); i++) {
+ ObjectLiteral::Property* property = node->properties()->at(i);
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ break;
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
+ // else fall through.
+ case ObjectLiteral::Property::COMPUTED: {
+ Handle<Object> key(property->key()->handle());
+ if (key->IsSymbol()) {
+ // Duplicate the object as the IC receiver.
+ frame_->Dup();
+ Load(property->value());
+ frame_->Push(key);
+ Result ignored = frame_->CallStoreIC();
+ break;
+ }
+ // Fall through
+ }
+ case ObjectLiteral::Property::PROTOTYPE: {
+ // Duplicate the object as an argument to the runtime call.
+ frame_->Dup();
+ Load(property->key());
+ Load(property->value());
+ Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
+ // Ignore the result.
+ break;
+ }
+ case ObjectLiteral::Property::SETTER: {
+ // Duplicate the object as an argument to the runtime call.
+ frame_->Dup();
+ Load(property->key());
+ frame_->Push(Smi::FromInt(1));
+ Load(property->value());
+ Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
+ // Ignore the result.
+ break;
+ }
+ case ObjectLiteral::Property::GETTER: {
+ // Duplicate the object as an argument to the runtime call.
+ frame_->Dup();
+ Load(property->key());
+ frame_->Push(Smi::FromInt(0));
+ Load(property->value());
+ Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
+ // Ignore the result.
+ break;
+ }
+ default: UNREACHABLE();
+ }
}
}
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
+void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+ // Load a writable copy of the function of this activation in a
+ // register.
+ frame_->PushFunction();
+ Result literals = frame_->Pop();
+ literals.ToRegister();
+ frame_->Spill(literals.reg());
- Handle<FixedArray> jsfunction_result_caches(
- Top::global_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- frame_->Push(Factory::undefined_value());
- return;
+ // Load the literals array of the function.
+ __ movq(literals.reg(),
+ FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
+
+ frame_->Push(&literals);
+ frame_->Push(Smi::FromInt(node->literal_index()));
+ frame_->Push(node->constant_elements());
+ int length = node->values()->length();
+ Result clone;
+ if (node->depth() > 1) {
+ clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
+ } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
+ clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ } else {
+ FastCloneShallowArrayStub stub(length);
+ clone = frame_->CallStub(&stub, 3);
}
+ frame_->Push(&clone);
- Load(args->at(1));
- Result key = frame_->Pop();
- key.ToRegister();
+ // Generate code to set the elements in the array that are not
+ // literals.
+ for (int i = 0; i < length; i++) {
+ Expression* value = node->values()->at(i);
- Result cache = allocator()->Allocate();
- ASSERT(cache.is_valid());
- __ movq(cache.reg(), ContextOperand(rsi, Context::GLOBAL_INDEX));
- __ movq(cache.reg(),
- FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
- __ movq(cache.reg(),
- ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ movq(cache.reg(),
- FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
+ // If value is a literal the property value is already set in the
+ // boilerplate object.
+ if (value->AsLiteral() != NULL) continue;
+ // If value is a materialized literal the property value is already set
+ // in the boilerplate object if it is simple.
+ if (CompileTimeValue::IsCompileTimeValue(value)) continue;
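+    // For example (illustrative): in '[1, 2, x]' the smi literals are
+    // already part of the boilerplate and only 'x' is stored here.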
- Result tmp = allocator()->Allocate();
- ASSERT(tmp.is_valid());
+ // The property must be set by generated code.
+ Load(value);
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
+ // Get the property value off the stack.
+ Result prop_value = frame_->Pop();
+ prop_value.ToRegister();
- DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
- cache.reg(),
- key.reg(),
- scratch.reg());
+ // Fetch the array literal while leaving a copy on the stack and
+ // use it to get the elements array.
+ frame_->Dup();
+ Result elements = frame_->Pop();
+ elements.ToRegister();
+ frame_->Spill(elements.reg());
+ // Get the elements FixedArray.
+ __ movq(elements.reg(),
+ FieldOperand(elements.reg(), JSObject::kElementsOffset));
- const int kFingerOffset =
- FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
- // tmp.reg() now holds finger offset as a smi.
- __ SmiToInteger32(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
- __ cmpq(key.reg(), FieldOperand(cache.reg(),
- tmp.reg(), times_pointer_size,
- FixedArray::kHeaderSize));
- deferred->Branch(not_equal);
- __ movq(tmp.reg(), FieldOperand(cache.reg(),
- tmp.reg(), times_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
+ // Write to the indexed properties array.
+ int offset = i * kPointerSize + FixedArray::kHeaderSize;
+ __ movq(FieldOperand(elements.reg(), offset), prop_value.reg());
- deferred->BindExit();
- frame_->Push(&tmp);
+ // Update the write barrier for the array address.
+ frame_->Spill(prop_value.reg()); // Overwritten by the write barrier.
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
+ }
}
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
-
- // Load the argument on the stack and jump to the runtime.
- Load(args->at(0));
-
- NumberToStringStub stub;
- Result result = frame_->CallStub(&stub, 1);
+void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
+ ASSERT(!in_spilled_code());
+ // Call runtime routine to allocate the catch extension object and
+ // assign the exception value to the catch variable.
+ Comment cmnt(masm_, "[ CatchExtensionObject");
+ Load(node->key());
+ Load(node->value());
+ Result result =
+ frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
frame_->Push(&result);
}
-class DeferredSwapElements: public DeferredCode {
- public:
- DeferredSwapElements(Register object, Register index1, Register index2)
- : object_(object), index1_(index1), index2_(index2) {
- set_comment("[ DeferredSwapElements");
- }
+void CodeGenerator::VisitAssignment(Assignment* node) {
+ Comment cmnt(masm_, "[ Assignment");
- virtual void Generate();
+ { Reference target(this, node->target(), node->is_compound());
+ if (target.is_illegal()) {
+ // Fool the virtual frame into thinking that we left the assignment's
+ // value on the frame.
+ frame_->Push(Smi::FromInt(0));
+ return;
+ }
+ Variable* var = node->target()->AsVariableProxy()->AsVariable();
- private:
- Register object_, index1_, index2_;
-};
+ if (node->starts_initialization_block()) {
+ ASSERT(target.type() == Reference::NAMED ||
+ target.type() == Reference::KEYED);
+ // Change to slow case in the beginning of an initialization
+ // block to avoid the quadratic behavior of repeatedly adding
+ // fast properties.
+
+ // The receiver is the argument to the runtime call. It is the
+ // first value pushed when the reference was loaded to the
+ // frame.
+ frame_->PushElementAt(target.size() - 1);
+ Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+ }
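+    // Initialization blocks (illustrative): a run of consecutive stores
+    // such as 'obj.a = 1; obj.b = 2;' is bracketed by the starts/ends
+    // flags, so the receiver is made slow-mode once, mutated, and then
+    // reverted to fast properties below.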
+ if (node->ends_initialization_block()) {
+ // Add an extra copy of the receiver to the frame, so that it can be
+ // converted back to fast case after the assignment.
+ ASSERT(target.type() == Reference::NAMED ||
+ target.type() == Reference::KEYED);
+ if (target.type() == Reference::NAMED) {
+        // Dup target receiver on stack.
+        frame_->Dup();
+ } else {
+ ASSERT(target.type() == Reference::KEYED);
+ Result temp = frame_->Pop();
+ frame_->Dup();
+ frame_->Push(&temp);
+ }
+ }
+ if (node->op() == Token::ASSIGN ||
+ node->op() == Token::INIT_VAR ||
+ node->op() == Token::INIT_CONST) {
+ Load(node->value());
+ } else { // Assignment is a compound assignment.
+ Literal* literal = node->value()->AsLiteral();
+ bool overwrite_value =
+ (node->value()->AsBinaryOperation() != NULL &&
+ node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
+      // There are two cases, both easy to test for, in which the target
+      // is not read by the right hand side: the right hand side is a
+      // literal, or the right hand side is a different variable.
+      // TakeValue invalidates the target, with an implicit promise that
+      // it will be written to again before it is read.
+ if (literal != NULL || (right_var != NULL && right_var != var)) {
+ target.TakeValue();
+ } else {
+ target.GetValue();
+ }
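+      // For example (illustrative): in 'x += 1' the right hand side is a
+      // literal, so TakeValue may invalidate the target; in 'x += x' the
+      // target is read again and GetValue must be used.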
+ Load(node->value());
+ BinaryOperation expr(node, node->binary_op(), node->target(),
+ node->value());
+ GenericBinaryOperation(&expr,
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ }
-void DeferredSwapElements::Generate() {
- __ push(object_);
- __ push(index1_);
- __ push(index2_);
- __ CallRuntime(Runtime::kSwapElements, 3);
+ if (var != NULL &&
+ var->mode() == Variable::CONST &&
+ node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
+ // Assignment ignored - leave the value on the stack.
+ UnloadReference(&target);
+ } else {
+ CodeForSourcePosition(node->position());
+ if (node->op() == Token::INIT_CONST) {
+ // Dynamic constant initializations must use the function context
+ // and initialize the actual constant declared. Dynamic variable
+ // initializations are simply assignments and use SetValue.
+ target.SetValue(CONST_INIT);
+ } else {
+ target.SetValue(NOT_CONST_INIT);
+ }
+ if (node->ends_initialization_block()) {
+ ASSERT(target.type() == Reference::UNLOADED);
+ // End of initialization block. Revert to fast case. The
+ // argument to the runtime call is the extra copy of the receiver,
+ // which is below the value of the assignment.
+ // Swap the receiver and the value of the assignment expression.
+ Result lhs = frame_->Pop();
+ Result receiver = frame_->Pop();
+ frame_->Push(&lhs);
+ frame_->Push(&receiver);
+ Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
+ }
+ }
+ }
}
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
- Comment cmnt(masm_, "[ GenerateSwapElements");
-
- ASSERT_EQ(3, args->length());
+void CodeGenerator::VisitThrow(Throw* node) {
+ Comment cmnt(masm_, "[ Throw");
+ Load(node->exception());
+ Result result = frame_->CallRuntime(Runtime::kThrow, 1);
+ frame_->Push(&result);
+}
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
- Result index2 = frame_->Pop();
- index2.ToRegister();
+void CodeGenerator::VisitProperty(Property* node) {
+ Comment cmnt(masm_, "[ Property");
+ Reference property(this, node);
+ property.GetValue();
+}
- Result index1 = frame_->Pop();
- index1.ToRegister();
- Result object = frame_->Pop();
- object.ToRegister();
+void CodeGenerator::VisitCall(Call* node) {
+ Comment cmnt(masm_, "[ Call");
- Result tmp1 = allocator()->Allocate();
- tmp1.ToRegister();
- Result tmp2 = allocator()->Allocate();
- tmp2.ToRegister();
+ ZoneList<Expression*>* args = node->arguments();
- frame_->Spill(object.reg());
- frame_->Spill(index1.reg());
- frame_->Spill(index2.reg());
+ // Check if the function is a variable or a property.
+ Expression* function = node->expression();
+ Variable* var = function->AsVariableProxy()->AsVariable();
+ Property* property = function->AsProperty();
- DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
- index1.reg(),
- index2.reg());
+ // ------------------------------------------------------------------------
+ // Fast-case: Use inline caching.
+ // ---
+ // According to ECMA-262, section 11.2.3, page 44, the function to call
+ // must be resolved after the arguments have been evaluated. The IC code
+ // automatically handles this by loading the arguments before the function
+ // is resolved in cache misses (this also holds for megamorphic calls).
+ // ------------------------------------------------------------------------
- // Fetch the map and check if array is in fast case.
- // Check that object doesn't require security checks and
- // has no indexed interceptor.
- __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
- deferred->Branch(below);
- __ testb(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
- Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
- deferred->Branch(not_zero);
+ if (var != NULL && var->is_possibly_eval()) {
+ // ----------------------------------
+ // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
+ // ----------------------------------
- // Check the object's elements are in fast case.
- __ movq(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- deferred->Branch(not_equal);
+ // In a call to eval, we first call %ResolvePossiblyDirectEval to
+ // resolve the function we need to call and the receiver of the
+ // call. Then we call the resolved function using the given
+ // arguments.
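+    // For example (illustrative): for 'eval(src)' the resolution step
+    // leaves the function to call in rax and the receiver in rdx, which
+    // are written back into the frame below.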
- // Check that both indices are smis.
- Condition both_smi = __ CheckBothSmi(index1.reg(), index2.reg());
- deferred->Branch(NegateCondition(both_smi));
+ // Prepare the stack for the call to the resolved function.
+ Load(function);
- // Bring addresses into index1 and index2.
- __ SmiToInteger32(index1.reg(), index1.reg());
- __ lea(index1.reg(), FieldOperand(tmp1.reg(),
- index1.reg(),
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ SmiToInteger32(index2.reg(), index2.reg());
- __ lea(index2.reg(), FieldOperand(tmp1.reg(),
- index2.reg(),
- times_pointer_size,
- FixedArray::kHeaderSize));
+ // Allocate a frame slot for the receiver.
+ frame_->Push(Factory::undefined_value());
- // Swap elements.
- __ movq(object.reg(), Operand(index1.reg(), 0));
- __ movq(tmp2.reg(), Operand(index2.reg(), 0));
- __ movq(Operand(index2.reg(), 0), object.reg());
- __ movq(Operand(index1.reg(), 0), tmp2.reg());
+ // Load the arguments.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ frame_->SpillTop();
+ }
- Label done;
- __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
- // Possible optimization: do a check that both values are Smis
- // (or them and test against Smi mask.)
+ // Result to hold the result of the function resolution and the
+ // final result of the eval call.
+ Result result;
- __ movq(tmp2.reg(), tmp1.reg());
- RecordWriteStub recordWrite1(tmp2.reg(), index1.reg(), object.reg());
- __ CallStub(&recordWrite1);
+ // If we know that eval can only be shadowed by eval-introduced
+ // variables we attempt to load the global eval function directly
+ // in generated code. If we succeed, there is no need to perform a
+ // context lookup in the runtime system.
+ JumpTarget done;
+ if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+ ASSERT(var->slot()->type() == Slot::LOOKUP);
+ JumpTarget slow;
+ // Prepare the stack for the call to
+ // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
+ // function, the first argument to the eval call and the
+ // receiver.
+ Result fun = LoadFromGlobalSlotCheckExtensions(var->slot(),
+ NOT_INSIDE_TYPEOF,
+ &slow);
+ frame_->Push(&fun);
+ if (arg_count > 0) {
+ frame_->PushElementAt(arg_count);
+ } else {
+ frame_->Push(Factory::undefined_value());
+ }
+ frame_->PushParameterAt(-1);
- RecordWriteStub recordWrite2(tmp1.reg(), index2.reg(), object.reg());
- __ CallStub(&recordWrite2);
+ // Resolve the call.
+ result =
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
- __ bind(&done);
+ done.Jump(&result);
+ slow.Bind();
+ }
- deferred->BindExit();
- frame_->Push(Factory::undefined_value());
-}
+ // Prepare the stack for the call to ResolvePossiblyDirectEval by
+ // pushing the loaded function, the first argument to the eval
+ // call and the receiver.
+ frame_->PushElementAt(arg_count + 1);
+ if (arg_count > 0) {
+ frame_->PushElementAt(arg_count);
+ } else {
+ frame_->Push(Factory::undefined_value());
+ }
+ frame_->PushParameterAt(-1);
+ // Resolve the call.
+ result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
- Comment cmnt(masm_, "[ GenerateCallFunction");
+    // If we generated fast-case code, bind the jump-target where the
+    // fast and slow cases merge.
+ if (done.is_linked()) done.Bind(&result);
- ASSERT(args->length() >= 2);
+ // The runtime call returns a pair of values in rax (function) and
+ // rdx (receiver). Touch up the stack with the right values.
+ Result receiver = allocator_->Allocate(rdx);
+ frame_->SetElementAt(arg_count + 1, &result);
+ frame_->SetElementAt(arg_count, &receiver);
+ receiver.Unuse();
- int n_args = args->length() - 2; // for receiver and function.
- Load(args->at(0)); // receiver
- for (int i = 0; i < n_args; i++) {
- Load(args->at(i + 1));
- }
- Load(args->at(n_args + 1)); // function
- Result result = frame_->CallJSFunction(n_args);
- frame_->Push(&result);
-}
+ // Call the function.
+ CodeForSourcePosition(node->position());
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+ result = frame_->CallStub(&call_function, arg_count + 1);
+ // Restore the context and overwrite the function on the stack with
+ // the result.
+ frame_->RestoreContextRegister();
+ frame_->SetElementAt(0, &result);
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::SIN);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
+ // ----------------------------------
+ // JavaScript example: 'foo(1, 2, 3)' // foo is global
+ // ----------------------------------
+ // Pass the global object as the receiver and let the IC stub
+ // patch the stack to use the global proxy as 'this' in the
+ // invoked function.
+ LoadGlobal();
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::COS);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
+ // Load the arguments.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ frame_->SpillTop();
+ }
+ // Push the name of the function on the frame.
+ frame_->Push(var->name());
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
+ // Call the IC initialization code.
+ CodeForSourcePosition(node->position());
+ Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
+ arg_count,
+ loop_nesting());
+ frame_->RestoreContextRegister();
+ // Replace the function on the stack with the result.
+ frame_->Push(&result);
- Load(args->at(0));
- Load(args->at(1));
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ // ----------------------------------
+ // JavaScript examples:
+ //
+ // with (obj) foo(1, 2, 3) // foo may be in obj.
+ //
+ // function f() {};
+ // function g() {
+ // eval(...);
+ // f(); // f could be in extension object.
+ // }
+ // ----------------------------------
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
-}
+ JumpTarget slow, done;
+ Result function;
+ // Generate fast case for loading functions from slots that
+ // correspond to local/global variables or arguments unless they
+ // are shadowed by eval-introduced bindings.
+ EmitDynamicLoadFromSlotFastCase(var->slot(),
+ NOT_INSIDE_TYPEOF,
+ &function,
+ &slow,
+ &done);
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
+ slow.Bind();
+ // Load the function from the context. Sync the frame so we can
+ // push the arguments directly into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(rsi);
+ frame_->EmitPush(var->name());
+ frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+ // The runtime call returns a pair of values in rax and rdx. The
+ // looked-up function is in rax and the receiver is in rdx. These
+ // register references are not ref counted here. We spill them
+ // eagerly since they are arguments to an inevitable call (and are
+ // not sharable by the arguments).
+ ASSERT(!allocator()->is_used(rax));
+ frame_->EmitPush(rax);
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
+ // Load the receiver.
+ ASSERT(!allocator()->is_used(rdx));
+ frame_->EmitPush(rdx);
- SubStringStub stub;
- Result answer = frame_->CallStub(&stub, 3);
- frame_->Push(&answer);
-}
+ // If fast case code has been generated, emit code to push the
+ // function and receiver and have the slow path jump around this
+ // code.
+ if (done.is_linked()) {
+ JumpTarget call;
+ call.Jump();
+ done.Bind(&function);
+ frame_->Push(&function);
+ LoadGlobalReceiver();
+ call.Bind();
+ }
+ // Call the function.
+ CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
+ } else if (property != NULL) {
+ // Check if the key is a literal string.
+ Literal* literal = property->key()->AsLiteral();
- Load(args->at(0));
- Load(args->at(1));
+ if (literal != NULL && literal->handle()->IsSymbol()) {
+ // ------------------------------------------------------------------
+ // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
+ // ------------------------------------------------------------------
- StringCompareStub stub;
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
-}
+ Handle<String> name = Handle<String>::cast(literal->handle());
+ if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
+ name->IsEqualTo(CStrVector("apply")) &&
+ args->length() == 2 &&
+ args->at(1)->AsVariableProxy() != NULL &&
+ args->at(1)->AsVariableProxy()->IsArguments()) {
+ // Use the optimized Function.prototype.apply that avoids
+ // allocating lazily allocated arguments objects.
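+        // This matches the common pattern (illustrative):
+        //   function f() { return g.apply(this, arguments); }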
+ CallApplyLazy(property->obj(),
+ args->at(0),
+ args->at(1)->AsVariableProxy(),
+ node->position());
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- JumpTarget leave, null, function, non_function_constructor;
- Load(args->at(0)); // Load the object.
- Result obj = frame_->Pop();
- obj.ToRegister();
- frame_->Spill(obj.reg());
+ } else {
+ // Push the receiver onto the frame.
+ Load(property->obj());
- // If the object is a smi, we return null.
- Condition is_smi = masm_->CheckSmi(obj.reg());
- null.Branch(is_smi);
+ // Load the arguments.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ frame_->SpillTop();
+ }
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
+ // Push the name of the function onto the frame.
+ frame_->Push(name);
- __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
- null.Branch(below);
+ // Call the IC initialization code.
+ CodeForSourcePosition(node->position());
+ Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
+ arg_count,
+ loop_nesting());
+ frame_->RestoreContextRegister();
+ frame_->Push(&result);
+ }
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
- function.Branch(equal);
+ } else {
+ // -------------------------------------------
+ // JavaScript example: 'array[index](1, 2, 3)'
+ // -------------------------------------------
- // Check if the constructor in the map is a function.
- __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
- __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
- non_function_constructor.Branch(not_equal);
+ // Load the function to call from the property through a reference.
+ if (property->is_synthetic()) {
+ Reference ref(this, property, false);
+ ref.GetValue();
+ // Use global object as receiver.
+ LoadGlobalReceiver();
+ // Call the function.
+ CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
+ } else {
+ // Push the receiver onto the frame.
+ Load(property->obj());
- // The obj register now contains the constructor function. Grab the
- // instance class name from there.
- __ movq(obj.reg(),
- FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
- __ movq(obj.reg(),
- FieldOperand(obj.reg(),
- SharedFunctionInfo::kInstanceClassNameOffset));
- frame_->Push(&obj);
- leave.Jump();
+ // Load the arguments.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ frame_->SpillTop();
+ }
- // Functions have class 'Function'.
- function.Bind();
- frame_->Push(Factory::function_class_symbol());
- leave.Jump();
+ // Load the name of the function.
+ Load(property->key());
- // Objects with a non-function constructor have class 'Object'.
- non_function_constructor.Bind();
- frame_->Push(Factory::Object_symbol());
- leave.Jump();
+ // Call the IC initialization code.
+ CodeForSourcePosition(node->position());
+ Result result = frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
+ arg_count,
+ loop_nesting());
+ frame_->RestoreContextRegister();
+ frame_->Push(&result);
+ }
+ }
+ } else {
+ // ----------------------------------
+ // JavaScript example: 'foo(1, 2, 3)' // foo is not global
+ // ----------------------------------
- // Non-JS objects have class null.
- null.Bind();
- frame_->Push(Factory::null_value());
+ // Load the function.
+ Load(function);
- // All done.
- leave.Bind();
+ // Pass the global proxy as the receiver.
+ LoadGlobalReceiver();
+
+ // Call the function.
+ CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
+ }
}
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- JumpTarget leave;
- Load(args->at(0)); // Load the object.
- Load(args->at(1)); // Load the value.
- Result value = frame_->Pop();
- Result object = frame_->Pop();
- value.ToRegister();
- object.ToRegister();
+void CodeGenerator::VisitCallNew(CallNew* node) {
+ Comment cmnt(masm_, "[ CallNew");
- // if (object->IsSmi()) return value.
- Condition is_smi = masm_->CheckSmi(object.reg());
- leave.Branch(is_smi, &value);
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments. This is different from ordinary calls, where the
+ // actual function to call is resolved after the arguments have been
+ // evaluated.
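+  // For example (illustrative): in 'new f(g())' the value of 'f' is
+  // loaded before 'g()' runs, whereas in the plain call 'f(g())' the
+  // function is resolved only after the argument is evaluated.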
- // It is a heap object - get its map.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- // if (!object->IsJSValue()) return value.
- __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
- leave.Branch(not_equal, &value);
+ // Compute function to call and use the global object as the
+ // receiver. There is no need to use the global proxy here because
+ // it will always be replaced with a newly allocated object.
+ Load(node->expression());
+ LoadGlobal();
- // Store the value.
- __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- Result duplicate_value = allocator_->Allocate();
- ASSERT(duplicate_value.is_valid());
- __ movq(duplicate_value.reg(), value.reg());
- // The object register is also overwritten by the write barrier and
- // possibly aliased in the frame.
- frame_->Spill(object.reg());
- __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
- scratch.reg());
- object.Unuse();
- scratch.Unuse();
- duplicate_value.Unuse();
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = node->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
- // Leave.
- leave.Bind(&value);
- frame_->Push(&value);
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ CodeForSourcePosition(node->position());
+ Result result = frame_->CallConstructor(arg_count);
+ // Replace the function on the stack with the result.
+ frame_->SetElementAt(0, &result);
}
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- JumpTarget leave;
- Load(args->at(0)); // Load the object.
- frame_->Dup();
- Result object = frame_->Pop();
- object.ToRegister();
- ASSERT(object.is_valid());
- // if (object->IsSmi()) return object.
- Condition is_smi = masm_->CheckSmi(object.reg());
- leave.Branch(is_smi);
- // It is a heap object - get map.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- // if (!object->IsJSValue()) return object.
- __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
- leave.Branch(not_equal);
- __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
- object.Unuse();
- frame_->SetElementAt(0, &temp);
- leave.Bind();
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ value.Unuse();
+ destination()->Split(is_smi);
}
-// -----------------------------------------------------------------------------
-// CodeGenerator implementation of Expressions
+void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
+ // Conditionally generate a log call.
+ // Args:
+ // 0 (literal string): The type of logging (corresponds to the flags).
+ // This is used to determine whether or not to generate the log call.
+ // 1 (string): Format string. Access the string at argument index 2
+ // with '%2s' (see Logger::LogRuntime for all the formats).
+ // 2 (array): Arguments to the format string.
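+  // Illustrative call (hypothetical tag and format string):
+  //   %_Log('mytag', 'value: %2s', [x])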
+ ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (ShouldGenerateLog(args->at(0))) {
+ Load(args->at(1));
+ Load(args->at(2));
+ frame_->CallRuntime(Runtime::kLog, 2);
+ }
+#endif
+ // Finally, we're expected to leave a value on the top of the stack.
+ frame_->Push(Factory::undefined_value());
+}
-void CodeGenerator::LoadAndSpill(Expression* expression) {
- // TODO(x64): No architecture specific code. Move to shared location.
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- Load(expression);
- frame_->SpillAll();
- set_in_spilled_code(true);
+
+void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ Condition positive_smi = masm_->CheckPositiveSmi(value.reg());
+ value.Unuse();
+ destination()->Split(positive_smi);
}
-void CodeGenerator::Load(Expression* expr) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- ASSERT(!in_spilled_code());
- JumpTarget true_target;
- JumpTarget false_target;
- ControlDestination dest(&true_target, &false_target, true);
- LoadCondition(expr, &dest, false);
+class DeferredStringCharCodeAt : public DeferredCode {
+ public:
+ DeferredStringCharCodeAt(Register object,
+ Register index,
+ Register scratch,
+ Register result)
+ : result_(result),
+ char_code_at_generator_(object,
+ index,
+ scratch,
+ result,
+ &need_conversion_,
+ &need_conversion_,
+ &index_out_of_range_,
+ STRING_INDEX_IS_NUMBER) {}
- if (dest.false_was_fall_through()) {
- // The false target was just bound.
- JumpTarget loaded;
- frame_->Push(Factory::false_value());
- // There may be dangling jumps to the true target.
- if (true_target.is_linked()) {
- loaded.Jump();
- true_target.Bind();
- frame_->Push(Factory::true_value());
- loaded.Bind();
- }
+ StringCharCodeAtGenerator* fast_case_generator() {
+ return &char_code_at_generator_;
+ }
- } else if (dest.is_used()) {
- // There is true, and possibly false, control flow (with true as
- // the fall through).
- JumpTarget loaded;
- frame_->Push(Factory::true_value());
- if (false_target.is_linked()) {
- loaded.Jump();
- false_target.Bind();
- frame_->Push(Factory::false_value());
- loaded.Bind();
- }
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_code_at_generator_.GenerateSlow(masm(), call_helper);
- } else {
- // We have a valid value on top of the frame, but we still may
- // have dangling jumps to the true and false targets from nested
- // subexpressions (eg, the left subexpressions of the
- // short-circuited boolean operators).
- ASSERT(has_valid_frame());
- if (true_target.is_linked() || false_target.is_linked()) {
- JumpTarget loaded;
- loaded.Jump(); // Don't lose the current TOS.
- if (true_target.is_linked()) {
- true_target.Bind();
- frame_->Push(Factory::true_value());
- if (false_target.is_linked()) {
- loaded.Jump();
- }
- }
- if (false_target.is_linked()) {
- false_target.Bind();
- frame_->Push(Factory::false_value());
- }
- loaded.Bind();
- }
+ __ bind(&need_conversion_);
+ // Move the undefined value into the result register, which will
+ // trigger conversion.
+ __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
+ __ jmp(exit_label());
+
+ __ bind(&index_out_of_range_);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ LoadRoot(result_, Heap::kNanValueRootIndex);
+ __ jmp(exit_label());
}
- ASSERT(has_valid_frame());
- ASSERT(frame_->height() == original_height + 1);
-}
+ private:
+ Register result_;
+
+ Label need_conversion_;
+ Label index_out_of_range_;
+ StringCharCodeAtGenerator char_code_at_generator_;
+};
-// Emit code to load the value of an expression to the top of the
-// frame. If the expression is boolean-valued it may be compiled (or
-// partially compiled) into control flow to the control destination.
-// If force_control is true, control flow is forced.
-void CodeGenerator::LoadCondition(Expression* x,
- ControlDestination* dest,
- bool force_control) {
- ASSERT(!in_spilled_code());
- int original_height = frame_->height();
- { CodeGenState new_state(this, dest);
- Visit(x);
+// This generates code that performs a String.prototype.charCodeAt() call
+// or returns undefined in order to trigger conversion.
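+// For example (illustrative): %_StringCharCodeAt('abc', 1) yields the
+// char code 98 ('b'); an out-of-range index yields NaN via the deferred
+// code above.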
+void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
+  Comment cmnt(masm_, "[ GenerateStringCharCodeAt");
+ ASSERT(args->length() == 2);
- // If we hit a stack overflow, we may not have actually visited
- // the expression. In that case, we ensure that we have a
- // valid-looking frame state because we will continue to generate
- // code as we unwind the C++ stack.
- //
- // It's possible to have both a stack overflow and a valid frame
- // state (eg, a subexpression overflowed, visiting it returned
- // with a dummied frame state, and visiting this expression
- // returned with a normal-looking state).
- if (HasStackOverflow() &&
- !dest->is_used() &&
- frame_->height() == original_height) {
- dest->Goto(true);
- }
- }
+ Load(args->at(0));
+ Load(args->at(1));
+ Result index = frame_->Pop();
+ Result object = frame_->Pop();
+ object.ToRegister();
+ index.ToRegister();
+ // We might mutate the object register.
+ frame_->Spill(object.reg());
- if (force_control && !dest->is_used()) {
- // Convert the TOS value into flow to the control destination.
- // TODO(X64): Make control flow to control destinations work.
- ToBoolean(dest);
- }
+ // We need two extra registers.
+ Result result = allocator()->Allocate();
+ ASSERT(result.is_valid());
+ Result scratch = allocator()->Allocate();
+ ASSERT(scratch.is_valid());
- ASSERT(!(force_control && !dest->is_used()));
- ASSERT(dest->is_used() || frame_->height() == original_height + 1);
+ DeferredStringCharCodeAt* deferred =
+ new DeferredStringCharCodeAt(object.reg(),
+ index.reg(),
+ scratch.reg(),
+ result.reg());
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
+ frame_->Push(&result);
}
-// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
-// convert it to a boolean in the condition code register or jump to
-// 'false_target'/'true_target' as appropriate.
-void CodeGenerator::ToBoolean(ControlDestination* dest) {
- Comment cmnt(masm_, "[ ToBoolean");
+class DeferredStringCharFromCode : public DeferredCode {
+ public:
+ DeferredStringCharFromCode(Register code,
+ Register result)
+ : char_from_code_generator_(code, result) {}
- // The value to convert should be popped from the frame.
- Result value = frame_->Pop();
- value.ToRegister();
+ StringCharFromCodeGenerator* fast_case_generator() {
+ return &char_from_code_generator_;
+ }
- if (value.is_number()) {
- // Fast case if TypeInfo indicates only numbers.
- if (FLAG_debug_code) {
- __ AbortIfNotNumber(value.reg());
- }
- // Smi => false iff zero.
- __ SmiCompare(value.reg(), Smi::FromInt(0));
- if (value.is_smi()) {
- value.Unuse();
- dest->Split(not_zero);
- } else {
- dest->false_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(value.reg());
- dest->true_target()->Branch(is_smi);
- __ xorpd(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset));
- value.Unuse();
- dest->Split(not_zero);
- }
- } else {
- // Fast case checks.
- // 'false' => false.
- __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
- dest->false_target()->Branch(equal);
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_from_code_generator_.GenerateSlow(masm(), call_helper);
+ }
- // 'true' => true.
- __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
- dest->true_target()->Branch(equal);
+ private:
+ StringCharFromCodeGenerator char_from_code_generator_;
+};
- // 'undefined' => false.
- __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
- dest->false_target()->Branch(equal);
- // Smi => false iff zero.
- __ SmiCompare(value.reg(), Smi::FromInt(0));
- dest->false_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(value.reg());
- dest->true_target()->Branch(is_smi);
+// Generates code for creating a one-char string from a char code.
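+// For example (illustrative): %_StringCharFromCode(98) produces the
+// one-character string 'b'.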
+void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
+  Comment cmnt(masm_, "[ GenerateStringCharFromCode");
+ ASSERT(args->length() == 1);
- // Call the stub for all other cases.
- frame_->Push(&value); // Undo the Pop() from above.
- ToBooleanStub stub;
- Result temp = frame_->CallStub(&stub, 1);
- // Convert the result to a condition code.
- __ testq(temp.reg(), temp.reg());
- temp.Unuse();
- dest->Split(not_equal);
- }
-}
+ Load(args->at(0));
+
+ Result code = frame_->Pop();
+ code.ToRegister();
+ ASSERT(code.is_valid());
+ Result result = allocator()->Allocate();
+ ASSERT(result.is_valid());
-void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
- UNIMPLEMENTED();
- // TODO(X64): Implement security policy for loads of smis.
+ DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
+ code.reg(), result.reg());
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
+ frame_->Push(&result);
}
-bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
- return false;
-}
+class DeferredStringCharAt : public DeferredCode {
+ public:
+ DeferredStringCharAt(Register object,
+ Register index,
+ Register scratch1,
+ Register scratch2,
+ Register result)
+ : result_(result),
+ char_at_generator_(object,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &need_conversion_,
+ &need_conversion_,
+ &index_out_of_range_,
+ STRING_INDEX_IS_NUMBER) {}
-//------------------------------------------------------------------------------
-// CodeGenerator implementation of variables, lookups, and stores.
+ StringCharAtGenerator* fast_case_generator() {
+ return &char_at_generator_;
+ }
-Reference::Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get)
- : cgen_(cgen),
- expression_(expression),
- type_(ILLEGAL),
- persist_after_get_(persist_after_get) {
- cgen->LoadReference(this);
-}
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_at_generator_.GenerateSlow(masm(), call_helper);
+ __ bind(&need_conversion_);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ Move(result_, Smi::FromInt(0));
+ __ jmp(exit_label());
-Reference::~Reference() {
- ASSERT(is_unloaded() || is_illegal());
-}
+ __ bind(&index_out_of_range_);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
+ __ jmp(exit_label());
+ }
+ private:
+ Register result_;
-void CodeGenerator::LoadReference(Reference* ref) {
- // References are loaded from both spilled and unspilled code. Set the
- // state to unspilled to allow that (and explicitly spill after
- // construction at the construction sites).
- bool was_in_spilled_code = in_spilled_code_;
- in_spilled_code_ = false;
+ Label need_conversion_;
+ Label index_out_of_range_;
- Comment cmnt(masm_, "[ LoadReference");
- Expression* e = ref->expression();
- Property* property = e->AsProperty();
- Variable* var = e->AsVariableProxy()->AsVariable();
+ StringCharAtGenerator char_at_generator_;
+};
- if (property != NULL) {
- // The expression is either a property or a variable proxy that rewrites
- // to a property.
- Load(property->obj());
- if (property->key()->IsPropertyName()) {
- ref->set_type(Reference::NAMED);
- } else {
- Load(property->key());
- ref->set_type(Reference::KEYED);
- }
- } else if (var != NULL) {
- // The expression is a variable proxy that does not rewrite to a
- // property. Global variables are treated as named property references.
- if (var->is_global()) {
- // If rax is free, the register allocator prefers it. Thus the code
- // generator will load the global object into rax, which is where
- // LoadIC wants it. Most uses of Reference call LoadIC directly
- // after the reference is created.
- frame_->Spill(rax);
- LoadGlobal();
- ref->set_type(Reference::NAMED);
- } else {
- ASSERT(var->slot() != NULL);
- ref->set_type(Reference::SLOT);
- }
- } else {
- // Anything else is a runtime error.
- Load(e);
- frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
- }
- in_spilled_code_ = was_in_spilled_code;
+// This generates code that performs a String.prototype.charAt() call
+// or returns a smi in order to trigger conversion.
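+// For example (illustrative): %_StringCharAt('abc', 1) yields 'b'; an
+// out-of-range index yields the empty string via the deferred code above.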
+void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
+  Comment cmnt(masm_, "[ GenerateStringCharAt");
+ ASSERT(args->length() == 2);
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Result index = frame_->Pop();
+ Result object = frame_->Pop();
+ object.ToRegister();
+ index.ToRegister();
+ // We might mutate the object register.
+ frame_->Spill(object.reg());
+
+ // We need three extra registers.
+ Result result = allocator()->Allocate();
+ ASSERT(result.is_valid());
+ Result scratch1 = allocator()->Allocate();
+ ASSERT(scratch1.is_valid());
+ Result scratch2 = allocator()->Allocate();
+ ASSERT(scratch2.is_valid());
+
+ DeferredStringCharAt* deferred =
+ new DeferredStringCharAt(object.reg(),
+ index.reg(),
+ scratch1.reg(),
+ scratch2.reg(),
+ result.reg());
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
+ frame_->Push(&result);
}
-void CodeGenerator::UnloadReference(Reference* ref) {
- // Pop a reference from the stack while preserving TOS.
- Comment cmnt(masm_, "[ UnloadReference");
- frame_->Nip(ref->size());
- ref->set_unloaded();
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ destination()->false_target()->Branch(is_smi);
+ // It is a heap object - get map.
+ // Check if the object is a JS array or not.
+ __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
+ value.Unuse();
+ destination()->Split(equal);
}
-Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
- // Currently, this assertion will fail if we try to assign to
- // a constant variable that is constant because it is read-only
- // (such as the variable referring to a named function expression).
- // We need to implement assignments to read-only variables.
- // Ideally, we should do this during AST generation (by converting
- // such assignments into expression statements); however, in general
- // we may not be able to make the decision until past AST generation,
- // that is when the entire program is known.
- ASSERT(slot != NULL);
- int index = slot->index();
- switch (slot->type()) {
- case Slot::PARAMETER:
- return frame_->ParameterAt(index);
+void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ destination()->false_target()->Branch(is_smi);
+ // It is a heap object - get map.
+ // Check if the object is a regexp.
+ __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister);
+ value.Unuse();
+ destination()->Split(equal);
+}
- case Slot::LOCAL:
- return frame_->LocalAt(index);
- case Slot::CONTEXT: {
- // Follow the context chain if necessary.
- ASSERT(!tmp.is(rsi)); // do not overwrite context register
- Register context = rsi;
- int chain_length = scope()->ContextChainLength(slot->var()->scope());
- for (int i = 0; i < chain_length; i++) {
- // Load the closure.
- // (All contexts, even 'with' contexts, have a closure,
- // and it is the same for all contexts inside a function.
- // There is no need to go to the function context first.)
- __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- // Load the function context (which is the incoming, outer context).
- __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
- }
- // We may have a 'with' context now. Get the function context.
- // (In fact this mov may never be the needed, since the scope analysis
- // may not permit a direct context access in this case and thus we are
- // always at a function context. However it is safe to dereference be-
- // cause the function context of a function context is itself. Before
- // deleting this mov we should try to create a counter-example first,
- // though...)
- __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp, index);
- }
+void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
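+  // For example (illustrative): true for {}, [] and null; false for
+  // functions, undefined and undetectable objects.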
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ Condition is_smi = masm_->CheckSmi(obj.reg());
+ destination()->false_target()->Branch(is_smi);
- default:
- UNREACHABLE();
- return Operand(rsp, 0);
- }
+ __ Move(kScratchRegister, Factory::null_value());
+ __ cmpq(obj.reg(), kScratchRegister);
+ destination()->true_target()->Branch(equal);
+
+ __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ destination()->false_target()->Branch(not_zero);
+ __ movzxbq(kScratchRegister,
+ FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
+ __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
+ destination()->false_target()->Branch(below);
+ __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
+ obj.Unuse();
+ destination()->Split(below_equal);
}
-Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
- Result tmp,
- JumpTarget* slow) {
- ASSERT(slot->type() == Slot::CONTEXT);
- ASSERT(tmp.is_register());
- Register context = rsi;
+void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (%_ClassOf(arg) === 'Function')
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ Condition is_smi = masm_->CheckSmi(obj.reg());
+ destination()->false_target()->Branch(is_smi);
+ __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
+ obj.Unuse();
+ destination()->Split(equal);
+}
- for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- slow->Branch(not_equal, not_taken);
- }
- __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
- __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp.reg();
- }
- }
- // Check that last extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- slow->Branch(not_equal, not_taken);
- __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp.reg(), slot->index());
+
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ Condition is_smi = masm_->CheckSmi(obj.reg());
+ destination()->false_target()->Branch(is_smi);
+ __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ __ movzxbl(kScratchRegister,
+ FieldOperand(kScratchRegister, Map::kBitFieldOffset));
+ __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
+ obj.Unuse();
+ destination()->Split(not_zero);
}
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
+void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
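+  // %_IsConstructCall() is true when the current function was invoked
+  // with 'new' (illustrative): true inside 'new Foo()', false for a
+  // plain 'Foo()' call.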
- JumpTarget slow;
- JumpTarget done;
- Result value;
+ // Get the frame pointer for the calling frame.
+ Result fp = allocator()->Allocate();
+ __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- // Generate fast case for loading from slots that correspond to
- // local/global variables or arguments unless they are shadowed by
- // eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(slot,
- typeof_state,
- &value,
- &slow,
- &done);
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &check_frame_marker);
+ __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
- slow.Bind();
- // A runtime call is inevitable. We eagerly sync frame elements
- // to memory so that we can push the arguments directly into place
- // on top of the frame.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(rsi);
- __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(kScratchRegister);
- if (typeof_state == INSIDE_TYPEOF) {
- value =
- frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- } else {
- value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- }
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
+ Smi::FromInt(StackFrame::CONSTRUCT));
+ fp.Unuse();
+ destination()->Split(equal);
+}
- done.Bind(&value);
- frame_->Push(&value);
- } else if (slot->var()->mode() == Variable::CONST) {
- // Const slots may contain 'the hole' value (the constant hasn't been
- // initialized yet) which needs to be converted into the 'undefined'
- // value.
- //
- // We currently spill the virtual frame because constants use the
- // potentially unsafe direct-frame access of SlotOperand.
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Load const");
- JumpTarget exit;
- __ movq(rcx, SlotOperand(slot, rcx));
- __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
- exit.Branch(not_equal);
- __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
- exit.Bind();
- frame_->EmitPush(rcx);
+void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
- } else if (slot->type() == Slot::PARAMETER) {
- frame_->PushParameterAt(slot->index());
+ Result fp = allocator_->Allocate();
+ Result result = allocator_->Allocate();
+ ASSERT(fp.is_valid() && result.is_valid());
+
+ Label exit;
+
+ // Get the number of formal parameters.
+ __ Move(result.reg(), Smi::FromInt(scope()->num_parameters()));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &exit);
- } else if (slot->type() == Slot::LOCAL) {
- frame_->PushLocalAt(slot->index());
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame.
+ __ movq(result.reg(),
+ Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
- } else {
- // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
- // here.
- //
- // The use of SlotOperand below is safe for an unspilled frame
- // because it will always be a context slot.
- ASSERT(slot->type() == Slot::CONTEXT);
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
- frame_->Push(&temp);
+ __ bind(&exit);
+ result.set_type_info(TypeInfo::Smi());
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(result.reg());
}
+ frame_->Push(&result);
}
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
- TypeofState state) {
- LoadFromSlot(slot, state);
-
- // Bail out quickly if we're not using lazy arguments allocation.
- if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
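+// Returns the class name of an object as a string, e.g. (illustrative)
+// 'Array' for arrays and 'Function' for functions; smis and other
+// non-JS objects yield the null value.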
+void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ JumpTarget leave, null, function, non_function_constructor;
+ Load(args->at(0)); // Load the object.
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ frame_->Spill(obj.reg());
- // ... or if the slot isn't a non-parameter arguments slot.
- if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
+ // If the object is a smi, we return null.
+ Condition is_smi = masm_->CheckSmi(obj.reg());
+ null.Branch(is_smi);
- // Pop the loaded value from the stack.
- Result value = frame_->Pop();
+ // Check that the object is a JS object but take special care of JS
+ // functions to make sure they have 'Function' as their class.
- // If the loaded value is a constant, we know if the arguments
- // object has been lazily loaded yet.
- if (value.is_constant()) {
- if (value.handle()->IsTheHole()) {
- Result arguments = StoreArgumentsObject(false);
- frame_->Push(&arguments);
- } else {
- frame_->Push(&value);
- }
- return;
- }
+ __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
+ null.Branch(below);
- // The loaded value is in a register. If it is the sentinel that
- // indicates that we haven't loaded the arguments object yet, we
- // need to do it now.
- JumpTarget exit;
- __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
- frame_->Push(&value);
- exit.Branch(not_equal);
- Result arguments = StoreArgumentsObject(false);
- frame_->SetElementAt(0, &arguments);
- exit.Bind();
-}
+ // As long as JS_FUNCTION_TYPE is the last instance type and it is
+ // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+ // LAST_JS_OBJECT_TYPE.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
+ function.Branch(equal);
+ // Check if the constructor in the map is a function.
+ __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
+ __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
+ non_function_constructor.Branch(not_equal);
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
+ // The obj register now contains the constructor function. Grab the
+ // instance class name from there.
+ __ movq(obj.reg(),
+ FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
+ __ movq(obj.reg(),
+ FieldOperand(obj.reg(),
+ SharedFunctionInfo::kInstanceClassNameOffset));
+ frame_->Push(&obj);
+ leave.Jump();
- // For now, just do a runtime call. Since the call is inevitable,
- // we eagerly sync the virtual frame so we can directly push the
- // arguments into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
+ // Functions have class 'Function'.
+ function.Bind();
+ frame_->Push(Factory::function_class_symbol());
+ leave.Jump();
- frame_->EmitPush(rsi);
- frame_->EmitPush(slot->var()->name());
+ // Objects with a non-function constructor have class 'Object'.
+ non_function_constructor.Bind();
+ frame_->Push(Factory::Object_symbol());
+ leave.Jump();
- Result value;
- if (init_state == CONST_INIT) {
- // Same as the case for a normal store, but ignores attribute
- // (e.g. READ_ONLY) of context slot so that we can initialize const
- // properties (introduced via eval("const foo = (some expr);")). Also,
- // uses the current function context instead of the top context.
- //
- // Note that we must declare the foo upon entry of eval(), via a
- // context slot declaration, but we cannot initialize it at the same
- // time, because the const declaration may be at the end of the eval
- // code (sigh...) and the const variable may have been used before
- // (where its value is 'undefined'). Thus, we can only do the
- // initialization when we actually encounter the expression and when
- // the expression operands are defined and valid, and thus we need the
- // split into 2 operations: declaration of the context slot followed
- // by initialization.
- value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
- }
- // Storing a variable must keep the (new) value on the expression
- // stack. This is necessary for compiling chained assignment
- // expressions.
- frame_->Push(&value);
- } else {
- ASSERT(!slot->var()->is_dynamic());
+ // Non-JS objects have class null.
+ null.Bind();
+ frame_->Push(Factory::null_value());
- JumpTarget exit;
- if (init_state == CONST_INIT) {
- ASSERT(slot->var()->mode() == Variable::CONST);
- // Only the first const initialization must be executed (the slot
- // still contains 'the hole' value). When the assignment is executed,
- // the code is identical to a normal store (see below).
- //
- // We spill the frame in the code below because the direct-frame
- // access of SlotOperand is potentially unsafe with an unspilled
- // frame.
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Init const");
- __ movq(rcx, SlotOperand(slot, rcx));
- __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
- exit.Branch(not_equal);
- }
+ // All done.
+ leave.Bind();
+}
- // We must execute the store. Storing a variable must keep the (new)
- // value on the stack. This is necessary for compiling assignment
- // expressions.
- //
- // Note: We will reach here even with slot->var()->mode() ==
- // Variable::CONST because of const declarations which will initialize
- // consts to 'the hole' value and by doing so, end up calling this code.
- if (slot->type() == Slot::PARAMETER) {
- frame_->StoreToParameterAt(slot->index());
- } else if (slot->type() == Slot::LOCAL) {
- frame_->StoreToLocalAt(slot->index());
- } else {
- // The other slot types (LOOKUP and GLOBAL) cannot reach here.
- //
- // The use of SlotOperand below is safe for an unspilled frame
- // because the slot is a context slot.
- ASSERT(slot->type() == Slot::CONTEXT);
- frame_->Dup();
- Result value = frame_->Pop();
- value.ToRegister();
- Result start = allocator_->Allocate();
- ASSERT(start.is_valid());
- __ movq(SlotOperand(slot, start.reg()), value.reg());
- // RecordWrite may destroy the value registers.
- //
- // TODO(204): Avoid actually spilling when the value is not
- // needed (probably the common case).
- frame_->Spill(value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
- // The results start, value, and temp are unused by going out of
- // scope.
- }
- exit.Bind();
- }
+void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ JumpTarget leave;
+ Load(args->at(0)); // Load the object.
+ frame_->Dup();
+ Result object = frame_->Pop();
+ object.ToRegister();
+ ASSERT(object.is_valid());
+ // if (object->IsSmi()) return object.
+ Condition is_smi = masm_->CheckSmi(object.reg());
+ leave.Branch(is_smi);
+ // It is a heap object - get map.
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ // if (!object->IsJSValue()) return object.
+ __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
+ leave.Branch(not_equal);
+ __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
+ object.Unuse();
+ frame_->SetElementAt(0, &temp);
+ leave.Bind();
}
-Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
- Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow) {
- // Check that no extension objects have been created by calls to
- // eval from the current scope to the global scope.
- Register context = rsi;
- Result tmp = allocator_->Allocate();
- ASSERT(tmp.is_valid()); // All non-reserved registers were available.
+void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+ JumpTarget leave;
+ Load(args->at(0)); // Load the object.
+ Load(args->at(1)); // Load the value.
+ Result value = frame_->Pop();
+ Result object = frame_->Pop();
+ value.ToRegister();
+ object.ToRegister();
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- slow->Branch(not_equal, not_taken);
- }
- // Load next context in chain.
- __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
- __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp.reg();
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions. If we have reached an eval scope, we check
- // all extensions from this point.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
+ // if (object->IsSmi()) return value.
+ Condition is_smi = masm_->CheckSmi(object.reg());
+ leave.Branch(is_smi, &value);
+
+ // It is a heap object - get its map.
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ // if (!object->IsJSValue()) return value.
+ __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
+ leave.Branch(not_equal, &value);
- if (s->is_eval_scope()) {
- // Loop up the context chain. There is no frame effect so it is
- // safe to use raw labels here.
- Label next, fast;
- if (!context.is(tmp.reg())) {
- __ movq(tmp.reg(), context);
- }
- // Load map for comparison into register, outside loop.
- __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
- __ bind(&next);
- // Terminate at global context.
- __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
- __ j(equal, &fast);
- // Check that extension is NULL.
- __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
- slow->Branch(not_equal);
- // Load next context in chain.
- __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
- __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- __ jmp(&next);
- __ bind(&fast);
- }
- tmp.Unuse();
+ // Store the value.
+ __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
+ // Update the write barrier. Save the value as it will be
+ // overwritten by the write barrier code and is needed afterward.
+ Result duplicate_value = allocator_->Allocate();
+ ASSERT(duplicate_value.is_valid());
+ __ movq(duplicate_value.reg(), value.reg());
+ // The object register is also overwritten by the write barrier and
+ // possibly aliased in the frame.
+ frame_->Spill(object.reg());
+ __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
+ scratch.reg());
+ object.Unuse();
+ scratch.Unuse();
+ duplicate_value.Unuse();
- // All extension objects were empty and it is safe to use a global
- // load IC call.
- LoadGlobal();
- frame_->Push(slot->var()->name());
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Result answer = frame_->CallLoadIC(mode);
- // A test rax instruction following the call signals that the inobject
- // property case was inlined. Ensure that there is not a test rax
- // instruction here.
- masm_->nop();
- return answer;
+ // Leave.
+ leave.Bind(&value);
+ frame_->Push(&value);
}
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- Result* result,
- JumpTarget* slow,
- JumpTarget* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
- done->Jump(result);
+void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
- } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
- Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
- if (potential_slot != NULL) {
- // Generate fast case for locals that rewrite to slots.
- // Allocate a fresh register to use as a temp in
- // ContextSlotOperandCheckExtensions and to hold the result
- // value.
- *result = allocator_->Allocate();
- ASSERT(result->is_valid());
- __ movq(result->reg(),
- ContextSlotOperandCheckExtensions(potential_slot,
- *result,
- slow));
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex);
- done->Branch(not_equal, result);
- __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex);
- }
- done->Jump(result);
- } else if (rewrite != NULL) {
- // Generate fast case for argument loads.
- Property* property = rewrite->AsProperty();
- if (property != NULL) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- Literal* key_literal = property->key()->AsLiteral();
- if (obj_proxy != NULL &&
- key_literal != NULL &&
- obj_proxy->IsArguments() &&
- key_literal->handle()->IsSmi()) {
- // Load arguments object if there are no eval-introduced
- // variables. Then load the argument from the arguments
- // object using keyed load.
- Result arguments = allocator()->Allocate();
- ASSERT(arguments.is_valid());
- __ movq(arguments.reg(),
- ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
- arguments,
- slow));
- frame_->Push(&arguments);
- frame_->Push(key_literal->handle());
- *result = EmitKeyedLoad();
- done->Jump(result);
- }
- }
- }
- }
+ // ArgumentsAccessStub expects the key in rdx and the formal
+ // parameter count in rax.
+ Load(args->at(0));
+ Result key = frame_->Pop();
+ // Explicitly create a constant result.
+ Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
+ // Call the shared stub to get to arguments[key].
+ ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ Result result = frame_->CallStub(&stub, &key, &count);
+ frame_->Push(&result);
}
-void CodeGenerator::LoadGlobal() {
- if (in_spilled_code()) {
- frame_->EmitPush(GlobalObject());
- } else {
- Result temp = allocator_->Allocate();
- __ movq(temp.reg(), GlobalObject());
- frame_->Push(&temp);
- }
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ // Load the two objects into registers and perform the comparison.
+ Load(args->at(0));
+ Load(args->at(1));
+ Result right = frame_->Pop();
+ Result left = frame_->Pop();
+ right.ToRegister();
+ left.ToRegister();
+ __ cmpq(right.reg(), left.reg());
+ right.Unuse();
+ left.Unuse();
+ destination()->Split(equal);
}
-void CodeGenerator::LoadGlobalReceiver() {
- Result temp = allocator_->Allocate();
- Register reg = temp.reg();
- __ movq(reg, GlobalObject());
- __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
- frame_->Push(&temp);
+void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+ // The RBP value is aligned, so its low bit is clear and it already
+ // looks like a smi. It is not actually padded as a smi, though, so it
+ // must not be interpreted as one.
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
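+ // rbp is at least 8-byte aligned, so (rbp & kSmiTagMask) == 0 and the
+ // untagged value already satisfies every smi check.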
+ Result rbp_as_smi = allocator_->Allocate();
+ ASSERT(rbp_as_smi.is_valid());
+ __ movq(rbp_as_smi.reg(), rbp);
+ frame_->Push(&rbp_as_smi);
}
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
- if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
- ASSERT(scope()->arguments_shadow() != NULL);
- // We don't want to do lazy arguments allocation for functions that
- // have heap-allocated contexts, because it interferes with the
- // uninitialized const tracking in the context objects.
- return (scope()->num_heap_slots() > 0)
- ? EAGER_ARGUMENTS_ALLOCATION
- : LAZY_ARGUMENTS_ALLOCATION;
-}
+void CodeGenerator::GenerateRandomHeapNumber(
+ ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+ frame_->SpillAll();
+ Label slow_allocate_heapnumber;
+ Label heapnumber_allocated;
+ __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
-Result CodeGenerator::StoreArgumentsObject(bool initial) {
- ArgumentsAllocationMode mode = ArgumentsMode();
- ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
+ __ bind(&slow_allocate_heapnumber);
+ // Allocate a heap number.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ movq(rbx, rax);
- Comment cmnt(masm_, "[ store arguments object");
- if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
- // When using lazy arguments allocation, we store the hole value
- // as a sentinel indicating that the arguments object hasn't been
- // allocated yet.
- frame_->Push(Factory::the_hole_value());
- } else {
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- frame_->PushFunction();
- frame_->PushReceiverSlotAddress();
- frame_->Push(Smi::FromInt(scope()->num_parameters()));
- Result result = frame_->CallStub(&stub, 3);
- frame_->Push(&result);
- }
+ __ bind(&heapnumber_allocated);
+ // Return a random uint32 number in rax.
+ // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
+ __ PrepareCallCFunction(0);
+ __ CallCFunction(ExternalReference::random_uint32_function(), 0);
- Variable* arguments = scope()->arguments()->var();
- Variable* shadow = scope()->arguments_shadow()->var();
- ASSERT(arguments != NULL && arguments->slot() != NULL);
- ASSERT(shadow != NULL && shadow->slot() != NULL);
- JumpTarget done;
- bool skip_arguments = false;
- if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
- // We have to skip storing into the arguments slot if it has
- // already been written to. This can happen if a function
- // has a local variable named 'arguments'.
- LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
- Result probe = frame_->Pop();
- if (probe.is_constant()) {
- // We have to skip updating the arguments object if it has been
- // assigned a proper value.
- skip_arguments = !probe.handle()->IsTheHole();
- } else {
- __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
- probe.Unuse();
- done.Branch(not_equal);
- }
- }
- if (!skip_arguments) {
- StoreToSlot(arguments->slot(), NOT_CONST_INIT);
- if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
- }
- StoreToSlot(shadow->slot(), NOT_CONST_INIT);
- return frame_->Pop();
+ // Convert 32 random bits in rax to 0.(32 random bits) in a double
+ // by computing:
+ // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
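+ // As a single, 0x49800000 is 1.0 x 2^20; widened to a double its 52-bit
+ // mantissa is all zeros, so XORing the 32 random bits into the low word
+ // produces 1.(20 0s)(32 random bits) x 2^20. Subtracting 1.0 x 2^20
+ // leaves random32 / 2^32, a double in [0, 1).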
+ __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
+ __ movd(xmm1, rcx);
+ __ movd(xmm0, rax);
+ __ cvtss2sd(xmm1, xmm1);
+ __ xorpd(xmm0, xmm1);
+ __ subsd(xmm0, xmm1);
+ __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
+
+ __ movq(rax, rbx);
+ Result result = allocator_->Allocate(rax);
+ frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+
+ StringAddStub stub(NO_STRING_ADD_FLAGS);
+ Result answer = frame_->CallStub(&stub, 2);
+ frame_->Push(&answer);
}
-void CodeGenerator::LoadTypeofExpression(Expression* expr) {
- // Special handling of identifiers as subexpressions of typeof.
- Variable* variable = expr->AsVariableProxy()->AsVariable();
- if (variable != NULL && !variable->is_this() && variable->is_global()) {
- // For a global variable we build the property reference
- // <global>.<variable> and perform a (regular non-contextual) property
- // load to make sure we do not get reference errors.
- Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
- Literal key(variable->name());
- Property property(&global, &key, RelocInfo::kNoPosition);
- Reference ref(this, &property);
- ref.GetValue();
- } else if (variable != NULL && variable->slot() != NULL) {
- // For a variable that rewrites to a slot, we signal it is the immediate
- // subexpression of a typeof.
- LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
- } else {
- // Anything else can be handled normally.
- Load(expr);
- }
+void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
+ ASSERT_EQ(3, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+
+ SubStringStub stub;
+ Result answer = frame_->CallStub(&stub, 3);
+ frame_->Push(&answer);
}
-static bool CouldBeNaN(const Result& result) {
- if (result.type_info().IsSmi()) return false;
- if (result.type_info().IsInteger32()) return false;
- if (!result.is_constant()) return true;
- if (!result.handle()->IsHeapNumber()) return false;
- return isnan(HeapNumber::cast(*result.handle())->value());
+void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+
+ StringCompareStub stub;
+ Result answer = frame_->CallStub(&stub, 2);
+ frame_->Push(&answer);
}
-// Convert from signed to unsigned comparison to match the way EFLAGS are set
-// by FPU and XMM compare instructions.
-static Condition DoubleCondition(Condition cc) {
- switch (cc) {
- case less: return below;
- case equal: return equal;
- case less_equal: return below_equal;
- case greater: return above;
- case greater_equal: return above_equal;
- default: UNREACHABLE();
- }
- UNREACHABLE();
- return equal;
+void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 4);
+
+ // Load the arguments on the stack and call the runtime system.
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+ Load(args->at(3));
+ RegExpExecStub stub;
+ Result result = frame_->CallStub(&stub, 4);
+ frame_->Push(&result);
}
-void CodeGenerator::Comparison(AstNode* node,
- Condition cc,
- bool strict,
- ControlDestination* dest) {
- // Strict only makes sense for equality comparisons.
- ASSERT(!strict || cc == equal);
+void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
+ // No stub. This code only occurs a few times in regexp.js.
+ const int kMaxInlineLength = 100;
+ ASSERT_EQ(3, args->length());
+ Load(args->at(0)); // Size of array, smi.
+ Load(args->at(1)); // "index" property value.
+ Load(args->at(2)); // "input" property value.
+ {
+ VirtualFrame::SpilledScope spilled_scope;
- Result left_side;
- Result right_side;
- // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
- if (cc == greater || cc == less_equal) {
- cc = ReverseCondition(cc);
- left_side = frame_->Pop();
- right_side = frame_->Pop();
- } else {
- right_side = frame_->Pop();
- left_side = frame_->Pop();
- }
- ASSERT(cc == less || cc == equal || cc == greater_equal);
+ Label slowcase;
+ Label done;
+ __ movq(r8, Operand(rsp, kPointerSize * 2));
+ __ JumpIfNotSmi(r8, &slowcase);
+ __ SmiToInteger32(rbx, r8);
+ __ cmpl(rbx, Immediate(kMaxInlineLength));
+ __ j(above, &slowcase);
+ // Smi-tagging is equivalent to multiplying by 2.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ // Allocate RegExpResult followed by FixedArray with size in rbx.
+ // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
+ // Elements: [Map][Length][..elements..]
+ __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
+ times_pointer_size,
+ rbx, // In: Number of elements.
+ rax, // Out: Start of allocation (tagged).
+ rcx, // Out: End of allocation.
+ rdx, // Scratch register
+ &slowcase,
+ TAG_OBJECT);
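+ // This allocates JSRegExpResult::kSize + FixedArray::kHeaderSize bytes
+ // plus rbx * kPointerSize bytes for the elements themselves.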
+ // rax: Start of allocated area, object-tagged.
+ // rbx: Number of array elements as int32.
+ // r8: Number of array elements as smi.
- // If either side is a constant smi, optimize the comparison.
- bool left_side_constant_smi = false;
- bool left_side_constant_null = false;
- bool left_side_constant_1_char_string = false;
- if (left_side.is_constant()) {
- left_side_constant_smi = left_side.handle()->IsSmi();
- left_side_constant_null = left_side.handle()->IsNull();
- left_side_constant_1_char_string =
- (left_side.handle()->IsString() &&
- String::cast(*left_side.handle())->length() == 1 &&
- String::cast(*left_side.handle())->IsAsciiRepresentation());
- }
- bool right_side_constant_smi = false;
- bool right_side_constant_null = false;
- bool right_side_constant_1_char_string = false;
- if (right_side.is_constant()) {
- right_side_constant_smi = right_side.handle()->IsSmi();
- right_side_constant_null = right_side.handle()->IsNull();
- right_side_constant_1_char_string =
- (right_side.handle()->IsString() &&
- String::cast(*right_side.handle())->length() == 1 &&
- String::cast(*right_side.handle())->IsAsciiRepresentation());
+ // Set JSArray map to global.regexp_result_map().
+ __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_INDEX));
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
+ __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
+
+ // Set empty properties FixedArray.
+ __ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
+ Factory::empty_fixed_array());
+
+ // Set elements to point to FixedArray allocated right after the JSArray.
+ __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
+ __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
+
+ // Set input, index and length fields from arguments.
+ __ pop(FieldOperand(rax, JSRegExpResult::kInputOffset));
+ __ pop(FieldOperand(rax, JSRegExpResult::kIndexOffset));
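+ // Skip over the size argument still on the stack; its value was loaded
+ // into r8 above.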
+ __ lea(rsp, Operand(rsp, kPointerSize));
+ __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
+
+ // Fill out the elements FixedArray.
+ // rax: JSArray.
+ // rcx: FixedArray.
+ // rbx: Number of elements in array as int32.
+
+ // Set map.
+ __ Move(FieldOperand(rcx, HeapObject::kMapOffset),
+ Factory::fixed_array_map());
+ // Set length.
+ __ Integer32ToSmi(rdx, rbx);
+ __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
+ // Fill contents of fixed-array with the-hole.
+ __ Move(rdx, Factory::the_hole_value());
+ __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
+ // Fill fixed array elements with hole.
+ // rax: JSArray.
+ // rbx: Number of elements in array that remains to be filled, as int32.
+ // rcx: Start of elements in FixedArray.
+ // rdx: the hole.
+ Label loop;
+ __ testl(rbx, rbx);
+ __ bind(&loop);
+ __ j(less_equal, &done); // Jump if rbx is negative or zero.
+ __ subl(rbx, Immediate(1));
+ __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
+ __ jmp(&loop);
+
+ __ bind(&slowcase);
+ __ CallRuntime(Runtime::kRegExpConstructResult, 3);
+
+ __ bind(&done);
}
+ frame_->Forget(3);
+ frame_->Push(rax);
+}
- if (left_side_constant_smi || right_side_constant_smi) {
- if (left_side_constant_smi && right_side_constant_smi) {
- // Trivial case, comparing two constants.
- int left_value = Smi::cast(*left_side.handle())->value();
- int right_value = Smi::cast(*right_side.handle())->value();
- switch (cc) {
- case less:
- dest->Goto(left_value < right_value);
- break;
- case equal:
- dest->Goto(left_value == right_value);
- break;
- case greater_equal:
- dest->Goto(left_value >= right_value);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Only one side is a constant Smi.
- // If left side is a constant Smi, reverse the operands.
- // Since one side is a constant Smi, conversion order does not matter.
- if (left_side_constant_smi) {
- Result temp = left_side;
- left_side = right_side;
- right_side = temp;
- cc = ReverseCondition(cc);
- // This may re-introduce greater or less_equal as the value of cc.
- // CompareStub and the inline code both support all values of cc.
- }
- // Implement comparison against a constant Smi, inlining the case
- // where both sides are Smis.
- left_side.ToRegister();
- Register left_reg = left_side.reg();
- Handle<Object> right_val = right_side.handle();
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_smi;
+class DeferredSearchCache: public DeferredCode {
+ public:
+ DeferredSearchCache(Register dst,
+ Register cache,
+ Register key,
+ Register scratch)
+ : dst_(dst), cache_(cache), key_(key), scratch_(scratch) {
+ set_comment("[ DeferredSearchCache");
+ }
- if (left_side.is_smi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left_side.reg());
- }
- } else {
- Condition left_is_smi = masm_->CheckSmi(left_side.reg());
- is_smi.Branch(left_is_smi);
+ virtual void Generate();
- bool is_loop_condition = (node->AsExpression() != NULL) &&
- node->AsExpression()->is_loop_condition();
- if (!is_loop_condition && right_val->IsSmi()) {
- // Right side is a constant smi and left side has been checked
- // not to be a smi.
- JumpTarget not_number;
- __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
- Factory::heap_number_map());
- not_number.Branch(not_equal, &left_side);
- __ movsd(xmm1,
- FieldOperand(left_reg, HeapNumber::kValueOffset));
- int value = Smi::cast(*right_val)->value();
- if (value == 0) {
- __ xorpd(xmm0, xmm0);
- } else {
- Result temp = allocator()->Allocate();
- __ movl(temp.reg(), Immediate(value));
- __ cvtlsi2sd(xmm0, temp.reg());
- temp.Unuse();
- }
- __ ucomisd(xmm1, xmm0);
- // Jump to builtin for NaN.
- not_number.Branch(parity_even, &left_side);
- left_side.Unuse();
- dest->true_target()->Branch(DoubleCondition(cc));
- dest->false_target()->Jump();
- not_number.Bind(&left_side);
- }
+ private:
+ Register dst_; // On invocation, the finger index (as an int32);
+ // on exit, the value that was looked up.
+ Register cache_; // instance of JSFunctionResultCache.
+ Register key_; // key being looked up.
+ Register scratch_;
+};
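+// As the offsets used below suggest, a JSFunctionResultCache is a
+// FixedArray holding a factory function, a finger index, the cache size,
+// and then key/value pairs (kEntrySize slots each) from kEntriesIndex on.
+// The finger marks the most recently used entry, so Generate() searches
+// from the finger down to the first entry, then from the end of the
+// cache back down to the finger.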
- // Setup and call the compare stub.
- CompareStub stub(cc, strict, kCantBothBeNaN);
- Result result = frame_->CallStub(&stub, &left_side, &right_side);
- result.ToRegister();
- __ testq(result.reg(), result.reg());
- result.Unuse();
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
- is_smi.Bind();
- }
+// Returns an Operand addressing the element at |index| + |additional_offset|
+// in the FixedArray pointed to by |array|. |index| is an int32.
+static Operand ArrayElement(Register array,
+ Register index,
+ int additional_offset = 0) {
+ int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
+ return FieldOperand(array, index, times_pointer_size, offset);
+}
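+// For example, ArrayElement(cache, index, 1) addresses the value slot
+// stored immediately after the key at |index|.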
- left_side = Result(left_reg);
- right_side = Result(right_val);
- // Test smi equality and comparison by signed int comparison.
- // Both sides are smis, so we can use an Immediate.
- __ SmiCompare(left_side.reg(), Smi::cast(*right_side.handle()));
- left_side.Unuse();
- right_side.Unuse();
- dest->Split(cc);
- }
- } else if (cc == equal &&
- (left_side_constant_null || right_side_constant_null)) {
- // To make null checks efficient, we check if either the left side or
- // the right side is the constant 'null'.
- // If so, we optimize the code by inlining a null check instead of
- // calling the (very) general runtime routine for checking equality.
- Result operand = left_side_constant_null ? right_side : left_side;
- right_side.Unuse();
- left_side.Unuse();
- operand.ToRegister();
- __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
- if (strict) {
- operand.Unuse();
- dest->Split(equal);
- } else {
- // The 'null' value is only equal to 'undefined' if using non-strict
- // comparisons.
- dest->true_target()->Branch(equal);
- __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
- dest->true_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(operand.reg());
- dest->false_target()->Branch(is_smi);
- // It can be an undetectable object.
- // Use a scratch register in preference to spilling operand.reg().
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(),
- FieldOperand(operand.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- temp.Unuse();
- operand.Unuse();
- dest->Split(not_zero);
- }
- } else if (left_side_constant_1_char_string ||
- right_side_constant_1_char_string) {
- if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
- // Trivial case, comparing two constants.
- int left_value = String::cast(*left_side.handle())->Get(0);
- int right_value = String::cast(*right_side.handle())->Get(0);
- switch (cc) {
- case less:
- dest->Goto(left_value < right_value);
- break;
- case equal:
- dest->Goto(left_value == right_value);
- break;
- case greater_equal:
- dest->Goto(left_value >= right_value);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Only one side is a constant 1 character string.
- // If left side is a constant 1-character string, reverse the operands.
- // Since one side is a constant string, conversion order does not matter.
- if (left_side_constant_1_char_string) {
- Result temp = left_side;
- left_side = right_side;
- right_side = temp;
- cc = ReverseCondition(cc);
- // This may reintroduce greater or less_equal as the value of cc.
- // CompareStub and the inline code both support all values of cc.
- }
- // Implement comparison against a constant string, inlining the case
- // where both sides are strings.
- left_side.ToRegister();
+void DeferredSearchCache::Generate() {
+ Label first_loop, search_further, second_loop, cache_miss;
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_not_string, is_string;
- Register left_reg = left_side.reg();
- Handle<Object> right_val = right_side.handle();
- ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
- Condition is_smi = masm()->CheckSmi(left_reg);
- is_not_string.Branch(is_smi, &left_side);
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(),
- FieldOperand(left_reg, HeapObject::kMapOffset));
- __ movzxbl(temp.reg(),
- FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
- // If we are testing for equality then make use of the symbol shortcut.
- // Check if the left hand side has the same type as the right hand
- // side (which is always a symbol).
- if (cc == equal) {
- Label not_a_symbol;
- ASSERT(kSymbolTag != 0);
- // Ensure that no non-strings have the symbol bit set.
- ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
- __ testb(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
- __ j(zero, &not_a_symbol);
- // They are symbols, so do identity compare.
- __ Cmp(left_reg, right_side.handle());
- dest->true_target()->Branch(equal);
- dest->false_target()->Branch(not_equal);
- __ bind(&not_a_symbol);
- }
- // Call the compare stub if the left side is not a flat ascii string.
- __ andb(temp.reg(),
- Immediate(kIsNotStringMask |
- kStringRepresentationMask |
- kStringEncodingMask));
- __ cmpb(temp.reg(),
- Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
- temp.Unuse();
- is_string.Branch(equal, &left_side);
+ Immediate kEntriesIndexImm = Immediate(JSFunctionResultCache::kEntriesIndex);
+ Immediate kEntrySizeImm = Immediate(JSFunctionResultCache::kEntrySize);
- // Setup and call the compare stub.
- is_not_string.Bind(&left_side);
- CompareStub stub(cc, strict, kCantBothBeNaN);
- Result result = frame_->CallStub(&stub, &left_side, &right_side);
- result.ToRegister();
- __ testq(result.reg(), result.reg());
- result.Unuse();
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
+ // Check the cache from finger to start of the cache.
+ __ bind(&first_loop);
+ __ subl(dst_, kEntrySizeImm);
+ __ cmpl(dst_, kEntriesIndexImm);
+ __ j(less, &search_further);
- is_string.Bind(&left_side);
- // left_side is a sequential ASCII string.
- ASSERT(left_side.reg().is(left_reg));
- right_side = Result(right_val);
- Result temp2 = allocator_->Allocate();
- ASSERT(temp2.is_valid());
- // Test string equality and comparison.
- if (cc == equal) {
- Label comparison_done;
- __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
- Smi::FromInt(1));
- __ j(not_equal, &comparison_done);
- uint8_t char_value =
- static_cast<uint8_t>(String::cast(*right_val)->Get(0));
- __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
- Immediate(char_value));
- __ bind(&comparison_done);
- } else {
- __ movq(temp2.reg(),
- FieldOperand(left_side.reg(), String::kLengthOffset));
- __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1));
- Label comparison;
- // If the length is 0 then the subtraction gave -1 which compares less
- // than any character.
- __ j(negative, &comparison);
- // Otherwise load the first character.
- __ movzxbl(temp2.reg(),
- FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
- __ bind(&comparison);
- // Compare the first character of the string with the
- // constant 1-character string.
- uint8_t char_value =
- static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
- __ cmpb(temp2.reg(), Immediate(char_value));
- Label characters_were_different;
- __ j(not_equal, &characters_were_different);
- // If the first character is the same then the long string sorts after
- // the short one.
- __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
- Smi::FromInt(1));
- __ bind(&characters_were_different);
- }
- temp2.Unuse();
- left_side.Unuse();
- right_side.Unuse();
- dest->Split(cc);
- }
- } else {
- // Neither side is a constant Smi, constant 1-char string, or constant null.
- // If either side is a non-smi constant, skip the smi check.
- bool known_non_smi =
- (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
- (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
- left_side.type_info().IsDouble() ||
- right_side.type_info().IsDouble();
+ __ cmpq(ArrayElement(cache_, dst_), key_);
+ __ j(not_equal, &first_loop);
+
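+ // Cache hit: point the finger at this entry and load the value stored
+ // next to the key.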
+ __ Integer32ToSmiField(
+ FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
+ __ movq(dst_, ArrayElement(cache_, dst_, 1));
+ __ jmp(exit_label());
- NaNInformation nan_info =
- (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
- kBothCouldBeNaN :
- kCantBothBeNaN;
+ __ bind(&search_further);
- // Inline number comparison handling any combination of smi's and heap
- // numbers if:
- // code is in a loop
- // the compare operation is different from equal
- // compare is not a for-loop comparison
- // The reason for excluding equal is that it will most likely be done
- // with smis (not heap numbers) and the code for comparing smis is inlined
- // separately. The same reason applies to for-loop comparisons, which will
- // also most likely be smi comparisons.
- bool is_loop_condition = (node->AsExpression() != NULL)
- && node->AsExpression()->is_loop_condition();
- bool inline_number_compare =
- loop_nesting() > 0 && cc != equal && !is_loop_condition;
+ // Check the cache from end of cache up to finger.
+ __ SmiToInteger32(dst_,
+ FieldOperand(cache_,
+ JSFunctionResultCache::kCacheSizeOffset));
+ __ SmiToInteger32(scratch_,
+ FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
- left_side.ToRegister();
- right_side.ToRegister();
+ __ bind(&second_loop);
+ __ subl(dst_, kEntrySizeImm);
+ __ cmpl(dst_, scratch_);
+ __ j(less_equal, &cache_miss);
- if (known_non_smi) {
- // Inlined equality check:
- // If at least one of the objects is not NaN, then if the objects
- // are identical, they are equal.
- if (nan_info == kCantBothBeNaN && cc == equal) {
- __ cmpq(left_side.reg(), right_side.reg());
- dest->true_target()->Branch(equal);
- }
+ __ cmpq(ArrayElement(cache_, dst_), key_);
+ __ j(not_equal, &second_loop);
- // Inlined number comparison:
- if (inline_number_compare) {
- GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
- }
+ __ Integer32ToSmiField(
+ FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
+ __ movq(dst_, ArrayElement(cache_, dst_, 1));
+ __ jmp(exit_label());
- CompareStub stub(cc, strict, nan_info, !inline_number_compare);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flag.
- answer.Unuse();
- dest->Split(cc);
- } else {
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_smi;
- Register left_reg = left_side.reg();
- Register right_reg = right_side.reg();
+ __ bind(&cache_miss);
+ __ push(cache_); // store a reference to cache
+ __ push(key_); // store a key
+ __ push(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ push(key_);
+ // On x64 the function to invoke must be in rdi.
+ __ movq(rdi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
+ ParameterCount expected(1);
+ __ InvokeFunction(rdi, expected, CALL_FUNCTION);
- Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
- is_smi.Branch(both_smi);
+ // Find a place to put the new cached value.
+ Label add_new_entry, update_cache;
+ __ movq(rcx, Operand(rsp, kPointerSize)); // restore the cache
+ // Possible optimization: the cache size is constant for a given cache,
+ // so technically we could use a constant here. However, on a cache
+ // miss this optimization would hardly matter.
- // Inline the equality check if both operands can't be a NaN. If both
- // objects are the same they are equal.
- if (nan_info == kCantBothBeNaN && cc == equal) {
- __ cmpq(left_side.reg(), right_side.reg());
- dest->true_target()->Branch(equal);
- }
+ // Check if we could add new entry to cache.
+ __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ SmiToInteger32(r9,
+ FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
+ __ cmpl(rbx, r9);
+ __ j(greater, &add_new_entry);
- // Inlined number comparison:
- if (inline_number_compare) {
- GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
- }
+ // Check if we could evict entry after finger.
+ __ SmiToInteger32(rdx,
+ FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
+ __ addl(rdx, kEntrySizeImm);
+ Label forward;
+ __ cmpl(rbx, rdx);
+ __ j(greater, &forward);
+ // Need to wrap over the cache.
+ __ movl(rdx, kEntriesIndexImm);
+ __ bind(&forward);
+ __ movl(r9, rdx);
+ __ jmp(&update_cache);
- CompareStub stub(cc, strict, nan_info, !inline_number_compare);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags.
- answer.Unuse();
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
+ __ bind(&add_new_entry);
+ // r9 holds cache size as int32.
+ __ leal(rbx, Operand(r9, JSFunctionResultCache::kEntrySize));
+ __ Integer32ToSmiField(
+ FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
- is_smi.Bind();
- left_side = Result(left_reg);
- right_side = Result(right_reg);
- __ SmiCompare(left_side.reg(), right_side.reg());
- right_side.Unuse();
- left_side.Unuse();
- dest->Split(cc);
- }
+ // Update the cache itself.
+ // r9 holds the index as int32.
+ __ bind(&update_cache);
+ __ pop(rbx); // restore the key
+ __ Integer32ToSmiField(
+ FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
+ // Store key.
+ __ movq(ArrayElement(rcx, r9), rbx);
+ __ RecordWrite(rcx, 0, rbx, r9);
+
+ // Store value.
+ __ pop(rcx); // restore the cache.
+ __ SmiToInteger32(rdx,
+ FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
+ __ incl(rdx);
+ // Backup rax, because the RecordWrite macro clobbers its arguments.
+ __ movq(rbx, rax);
+ __ movq(ArrayElement(rcx, rdx), rax);
+ __ RecordWrite(rcx, 0, rbx, rdx);
+
+ if (!dst_.is(rax)) {
+ __ movq(dst_, rax);
}
}
-// Load a comparison operand into an XMM register. Jump to the not_numbers
-// target, passing the left and right result, if the operand is not a number.
-static void LoadComparisonOperand(MacroAssembler* masm_,
- Result* operand,
- XMMRegister xmm_reg,
- Result* left_side,
- Result* right_side,
- JumpTarget* not_numbers) {
- Label done;
- if (operand->type_info().IsDouble()) {
- // Operand is known to be a heap number, just load it.
- __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- } else if (operand->type_info().IsSmi()) {
- // Operand is known to be a smi. Convert it to double and keep the original
- // smi.
- __ SmiToInteger32(kScratchRegister, operand->reg());
- __ cvtlsi2sd(xmm_reg, kScratchRegister);
- } else {
- // Operand type not known, check for smi or heap number.
- Label smi;
- __ JumpIfSmi(operand->reg(), &smi);
- if (!operand->type_info().IsNumber()) {
- __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
- __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset),
- kScratchRegister);
- not_numbers->Branch(not_equal, left_side, right_side, taken);
- }
- __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- __ jmp(&done);
+void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
- __ bind(&smi);
- // Convert smi to double and keep the original smi.
- __ SmiToInteger32(kScratchRegister, operand->reg());
- __ cvtlsi2sd(xmm_reg, kScratchRegister);
- __ jmp(&done);
+ ASSERT_NE(NULL, args->at(0)->AsLiteral());
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+
+ Handle<FixedArray> jsfunction_result_caches(
+ Top::global_context()->jsfunction_result_caches());
+ if (jsfunction_result_caches->length() <= cache_id) {
+ __ Abort("Attempt to use undefined cache.");
+ frame_->Push(Factory::undefined_value());
+ return;
}
- __ bind(&done);
-}
+ Load(args->at(1));
+ Result key = frame_->Pop();
+ key.ToRegister();
-void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
- Result* right_side,
- Condition cc,
- ControlDestination* dest) {
- ASSERT(left_side->is_register());
- ASSERT(right_side->is_register());
+ Result cache = allocator()->Allocate();
+ ASSERT(cache.is_valid());
+ __ movq(cache.reg(), ContextOperand(rsi, Context::GLOBAL_INDEX));
+ __ movq(cache.reg(),
+ FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
+ __ movq(cache.reg(),
+ ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
+ __ movq(cache.reg(),
+ FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
- JumpTarget not_numbers;
- // Load left and right operand into registers xmm0 and xmm1 and compare.
- LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side,
- &not_numbers);
- LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
- &not_numbers);
- __ ucomisd(xmm0, xmm1);
- // Bail out if a NaN is involved.
- not_numbers.Branch(parity_even, left_side, right_side);
+ Result tmp = allocator()->Allocate();
+ ASSERT(tmp.is_valid());
+
+ Result scratch = allocator()->Allocate();
+ ASSERT(scratch.is_valid());
+
+ DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
+ cache.reg(),
+ key.reg(),
+ scratch.reg());
+
+ const int kFingerOffset =
+ FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
+ // Load the cache finger index into tmp.reg() as an int32.
+ __ SmiToInteger32(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
+ __ cmpq(key.reg(), FieldOperand(cache.reg(),
+ tmp.reg(), times_pointer_size,
+ FixedArray::kHeaderSize));
+ deferred->Branch(not_equal);
+ __ movq(tmp.reg(), FieldOperand(cache.reg(),
+ tmp.reg(), times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+
+ deferred->BindExit();
+ frame_->Push(&tmp);
+}
+
+
+void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
- // Split to destination targets based on comparison.
- left_side->Unuse();
- right_side->Unuse();
- dest->true_target()->Branch(DoubleCondition(cc));
- dest->false_target()->Jump();
+ // Load the argument on the stack and jump to the runtime.
+ Load(args->at(0));
- not_numbers.Bind(left_side, right_side);
+ NumberToStringStub stub;
+ Result result = frame_->CallStub(&stub, 1);
+ frame_->Push(&result);
}
-class DeferredInlineBinaryOperation: public DeferredCode {
+class DeferredSwapElements: public DeferredCode {
public:
- DeferredInlineBinaryOperation(Token::Value op,
- Register dst,
- Register left,
- Register right,
- OverwriteMode mode)
- : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
- set_comment("[ DeferredInlineBinaryOperation");
+ DeferredSwapElements(Register object, Register index1, Register index2)
+ : object_(object), index1_(index1), index2_(index2) {
+ set_comment("[ DeferredSwapElements");
}
virtual void Generate();
private:
- Token::Value op_;
- Register dst_;
- Register left_;
- Register right_;
- OverwriteMode mode_;
+ Register object_, index1_, index2_;
};
-void DeferredInlineBinaryOperation::Generate() {
- Label done;
- if ((op_ == Token::ADD)
- || (op_ == Token::SUB)
- || (op_ == Token::MUL)
- || (op_ == Token::DIV)) {
- Label call_runtime;
- Label left_smi, right_smi, load_right, do_op;
- __ JumpIfSmi(left_, &left_smi);
- __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_LEFT) {
- __ movq(dst_, left_);
- }
- __ jmp(&load_right);
+void DeferredSwapElements::Generate() {
+ __ push(object_);
+ __ push(index1_);
+ __ push(index2_);
+ __ CallRuntime(Runtime::kSwapElements, 3);
+}
- __ bind(&left_smi);
- __ SmiToInteger32(left_, left_);
- __ cvtlsi2sd(xmm0, left_);
- __ Integer32ToSmi(left_, left_);
- if (mode_ == OVERWRITE_LEFT) {
- Label alloc_failure;
- __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
- }
- __ bind(&load_right);
- __ JumpIfSmi(right_, &right_smi);
- __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_RIGHT) {
- __ movq(dst_, right_);
- } else if (mode_ == NO_OVERWRITE) {
- Label alloc_failure;
- __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
- }
- __ jmp(&do_op);
+void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
+ Comment cmnt(masm_, "[ GenerateSwapElements");
- __ bind(&right_smi);
- __ SmiToInteger32(right_, right_);
- __ cvtlsi2sd(xmm1, right_);
- __ Integer32ToSmi(right_, right_);
- if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
- Label alloc_failure;
- __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
- }
+ ASSERT_EQ(3, args->length());
- __ bind(&do_op);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
- __ jmp(&done);
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
- __ bind(&call_runtime);
- }
- GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
- stub.GenerateCall(masm_, left_, right_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
- __ bind(&done);
-}
+ Result index2 = frame_->Pop();
+ index2.ToRegister();
+ Result index1 = frame_->Pop();
+ index1.ToRegister();
-static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
- Token::Value op,
- const Result& right,
- const Result& left) {
- // Set TypeInfo of result according to the operation performed.
- // We rely on the fact that smis have a 32 bit payload on x64.
- STATIC_ASSERT(kSmiValueSize == 32);
- switch (op) {
- case Token::COMMA:
- return right.type_info();
- case Token::OR:
- case Token::AND:
- // Result type can be either of the two input types.
- return operands_type;
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- // Result is always a smi.
- return TypeInfo::Smi();
- case Token::SAR:
- case Token::SHL:
- // Result is always a smi.
- return TypeInfo::Smi();
- case Token::SHR:
- // Result of x >>> y is always a smi if masked y >= 1, otherwise a number.
- return (right.is_constant() && right.handle()->IsSmi()
- && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
- ? TypeInfo::Smi()
- : TypeInfo::Number();
- case Token::ADD:
- if (operands_type.IsNumber()) {
- return TypeInfo::Number();
- } else if (left.type_info().IsString() || right.type_info().IsString()) {
- return TypeInfo::String();
- } else {
- return TypeInfo::Unknown();
- }
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- // Result is always a number.
- return TypeInfo::Number();
- default:
- UNREACHABLE();
- }
- UNREACHABLE();
- return TypeInfo::Unknown();
-}
+ Result object = frame_->Pop();
+ object.ToRegister();
+ Result tmp1 = allocator()->Allocate();
+ tmp1.ToRegister();
+ Result tmp2 = allocator()->Allocate();
+ tmp2.ToRegister();
-void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
- OverwriteMode overwrite_mode) {
- Comment cmnt(masm_, "[ BinaryOperation");
- Token::Value op = expr->op();
- Comment cmnt_token(masm_, Token::String(op));
+ frame_->Spill(object.reg());
+ frame_->Spill(index1.reg());
+ frame_->Spill(index2.reg());
- if (op == Token::COMMA) {
- // Simply discard left value.
- frame_->Nip(1);
- return;
- }
+ DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
+ index1.reg(),
+ index2.reg());
- Result right = frame_->Pop();
- Result left = frame_->Pop();
+ // Fetch the map and check if array is in fast case.
+ // Check that object doesn't require security checks and
+ // has no indexed interceptor.
+ __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
+ deferred->Branch(below);
+ __ testb(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
+ Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
+ deferred->Branch(not_zero);
- if (op == Token::ADD) {
- const bool left_is_string = left.type_info().IsString();
- const bool right_is_string = right.type_info().IsString();
- // Make sure constant strings have string type info.
- ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
- left_is_string);
- ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
- right_is_string);
- if (left_is_string || right_is_string) {
- frame_->Push(&left);
- frame_->Push(&right);
- Result answer;
- if (left_is_string) {
- if (right_is_string) {
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- answer = frame_->CallStub(&stub, 2);
- } else {
- answer =
- frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
- }
- } else if (right_is_string) {
- answer =
- frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
- }
- answer.set_type_info(TypeInfo::String());
- frame_->Push(&answer);
- return;
- }
- // Neither operand is known to be a string.
- }
+ // Check the object's elements are in fast case.
+ __ movq(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
+ __ CompareRoot(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ deferred->Branch(not_equal);
+
+ // Check that both indices are smis.
+ Condition both_smi = __ CheckBothSmi(index1.reg(), index2.reg());
+ deferred->Branch(NegateCondition(both_smi));
+
+ // Bring addresses into index1 and index2.
+ __ SmiToInteger32(index1.reg(), index1.reg());
+ __ lea(index1.reg(), FieldOperand(tmp1.reg(),
+ index1.reg(),
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ SmiToInteger32(index2.reg(), index2.reg());
+ __ lea(index2.reg(), FieldOperand(tmp1.reg(),
+ index2.reg(),
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+
+ // Swap elements.
+ __ movq(object.reg(), Operand(index1.reg(), 0));
+ __ movq(tmp2.reg(), Operand(index2.reg(), 0));
+ __ movq(Operand(index2.reg(), 0), object.reg());
+ __ movq(Operand(index1.reg(), 0), tmp2.reg());
+
+ Label done;
+ __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
+ // Possible optimization: check that both values are smis
+ // (OR them together and test against the smi tag mask).
+
+ __ movq(tmp2.reg(), tmp1.reg());
+ RecordWriteStub recordWrite1(tmp2.reg(), index1.reg(), object.reg());
+ __ CallStub(&recordWrite1);
+
+ RecordWriteStub recordWrite2(tmp1.reg(), index2.reg(), object.reg());
+ __ CallStub(&recordWrite2);
- bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
- bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
- bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
- bool right_is_non_smi_constant =
- right.is_constant() && !right.handle()->IsSmi();
+ __ bind(&done);
- if (left_is_smi_constant && right_is_smi_constant) {
- // Compute the constant result at compile time, and leave it on the frame.
- int left_int = Smi::cast(*left.handle())->value();
- int right_int = Smi::cast(*right.handle())->value();
- if (FoldConstantSmis(op, left_int, right_int)) return;
- }
+ deferred->BindExit();
+ frame_->Push(Factory::undefined_value());
+}
- // Get number type of left and right sub-expressions.
- TypeInfo operands_type =
- TypeInfo::Combine(left.type_info(), right.type_info());
- TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
+void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
+ Comment cmnt(masm_, "[ GenerateCallFunction");
- Result answer;
- if (left_is_non_smi_constant || right_is_non_smi_constant) {
- // Go straight to the slow case, with no smi code.
- GenericBinaryOpStub stub(op,
- overwrite_mode,
- NO_SMI_CODE_IN_STUB,
- operands_type);
- answer = stub.GenerateCall(masm_, frame_, &left, &right);
- } else if (right_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
- false, overwrite_mode);
- } else if (left_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
- true, overwrite_mode);
- } else {
- // Set the flags based on the operation, type and loop nesting level.
- // Bit operations always assume they likely operate on Smis. Still only
- // generate the inline Smi check code if this operation is part of a loop.
- // For all other operations only inline the Smi check code for likely smis
- // if the operation is part of a loop.
- if (loop_nesting() > 0 &&
- (Token::IsBitOp(op) ||
- operands_type.IsInteger32() ||
- expr->type()->IsLikelySmi())) {
- answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
- } else {
- GenericBinaryOpStub stub(op,
- overwrite_mode,
- NO_GENERIC_BINARY_FLAGS,
- operands_type);
- answer = stub.GenerateCall(masm_, frame_, &left, &right);
- }
- }
+ ASSERT(args->length() >= 2);
- answer.set_type_info(result_type);
- frame_->Push(&answer);
+ int n_args = args->length() - 2; // for receiver and function.
+ Load(args->at(0)); // receiver
+ for (int i = 0; i < n_args; i++) {
+ Load(args->at(i + 1));
+ }
+ Load(args->at(n_args + 1)); // function
+ Result result = frame_->CallJSFunction(n_args);
+ frame_->Push(&result);
}
-// Emit a LoadIC call to get the value from receiver and leave it in
-// dst. The receiver register is restored after the call.
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
- DeferredReferenceGetNamedValue(Register dst,
- Register receiver,
- Handle<String> name)
- : dst_(dst), receiver_(receiver), name_(name) {
- set_comment("[ DeferredReferenceGetNamedValue");
- }
+// Generates the Math.pow method. Only handles special cases and
+// branches to the runtime system for everything else. Please note
+// that this function assumes that the callsite has executed ToNumber
+// on both arguments.
+void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+ Load(args->at(0));
+ Load(args->at(1));
- virtual void Generate();
+ Label allocate_return;
+ // Load the two operands while leaving the values on the frame.
+ frame()->Dup();
+ Result exponent = frame()->Pop();
+ exponent.ToRegister();
+ frame()->Spill(exponent.reg());
+ frame()->PushElementAt(1);
+ Result base = frame()->Pop();
+ base.ToRegister();
+ frame()->Spill(base.reg());
- Label* patch_site() { return &patch_site_; }
+ Result answer = allocator()->Allocate();
+ ASSERT(answer.is_valid());
+ ASSERT(!exponent.reg().is(base.reg()));
+ JumpTarget call_runtime;
- private:
- Label patch_site_;
- Register dst_;
- Register receiver_;
- Handle<String> name_;
-};
+ // Save 1 in xmm3 - we need this several times later on.
+ __ movl(answer.reg(), Immediate(1));
+ __ cvtlsi2sd(xmm3, answer.reg());
+ Label exponent_nonsmi;
+ Label base_nonsmi;
+ // If the exponent is a heap number go to that specific case.
+ __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi);
+ __ JumpIfNotSmi(base.reg(), &base_nonsmi);
-void DeferredReferenceGetNamedValue::Generate() {
- if (!receiver_.is(rax)) {
- __ movq(rax, receiver_);
- }
- __ Move(rcx, name_);
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The call must be followed by a test rax instruction to indicate
- // that the inobject property case was inlined.
- //
- // Store the delta to the map check instruction here in the test
- // instruction. Use masm_-> instead of the __ macro since the
- // latter can't return a value.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- masm_->testl(rax, Immediate(-delta_to_patch_site));
- __ IncrementCounter(&Counters::named_load_inline_miss, 1);
+ // Optimized version when the exponent is an integer.
+ Label powi;
+ __ SmiToInteger32(base.reg(), base.reg());
+ __ cvtlsi2sd(xmm0, base.reg());
+ __ jmp(&powi);
+ // The exponent is a smi and the base is a heap number.
+ __ bind(&base_nonsmi);
+ __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ call_runtime.Branch(not_equal);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
+ __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
+ // Optimized version of pow when the exponent is an integer.
+ __ bind(&powi);
+ __ SmiToInteger32(exponent.reg(), exponent.reg());
-void DeferredInlineSmiAdd::Generate() {
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
- igostub.GenerateCall(masm_, dst_, value_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
+ // Save the exponent in the base register, as we need to check below whether
+ // it is negative. We know that base and exponent are in different registers.
+ __ movl(base.reg(), exponent.reg());
+ // Get absolute value of exponent.
+ Label no_neg;
+ __ cmpl(exponent.reg(), Immediate(0));
+ __ j(greater_equal, &no_neg);
+ __ negl(exponent.reg());
+ __ bind(&no_neg);
-void DeferredInlineSmiAddReversed::Generate() {
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
- igostub.GenerateCall(masm_, value_, dst_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
+ // Load xmm1 with 1.
+ __ movsd(xmm1, xmm3);
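+ // The loop below is binary exponentiation (square-and-multiply): each
+ // iteration shifts the lowest exponent bit into the carry flag and, if it
+ // was set, multiplies the current power of the base (xmm0) into the result
+ // (xmm1); the base is squared on every iteration. For exponent 13 (0b1101)
+ // this accumulates x * x^4 * x^8 = x^13.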
+ Label while_true;
+ Label no_multiply;
+ __ bind(&while_true);
+ __ shrl(exponent.reg(), Immediate(1));
+ __ j(not_carry, &no_multiply);
+ __ mulsd(xmm1, xmm0);
+ __ bind(&no_multiply);
+ __ testl(exponent.reg(), exponent.reg());
+ __ mulsd(xmm0, xmm0);
+ __ j(not_zero, &while_true);
-void DeferredInlineSmiSub::Generate() {
- GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
- igostub.GenerateCall(masm_, dst_, value_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
+ // The base register holds the original exponent - if it was negative,
+ // return 1/result.
+ __ testl(base.reg(), base.reg());
+ __ j(positive, &allocate_return);
+ // Special case if xmm1 has reached infinity.
+ __ movl(answer.reg(), Immediate(0x7FB00000));
+ __ movd(xmm0, answer.reg());
+ __ cvtss2sd(xmm0, xmm0);
+ __ ucomisd(xmm0, xmm1);
+ call_runtime.Branch(equal);
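+ // xmm3 still holds 1.0 (loaded above), so xmm3/xmm1 computes the
+ // reciprocal of the result, which is then moved back into xmm1.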
+ __ divsd(xmm3, xmm1);
+ __ movsd(xmm1, xmm3);
+ __ jmp(&allocate_return);
+ // The exponent (or both operands) is a heap number - either way we now
+ // work on doubles.
+ __ bind(&exponent_nonsmi);
+ __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ call_runtime.Branch(not_equal);
+ __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
+ // Test if the exponent is NaN.
+ __ ucomisd(xmm1, xmm1);
+ call_runtime.Branch(parity_even);
-void DeferredInlineSmiOperation::Generate() {
- // For mod we don't generate all the Smi code inline.
- GenericBinaryOpStub stub(
- op_,
- overwrite_mode_,
- (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
- stub.GenerateCall(masm_, src_, value_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
+ Label base_not_smi;
+ Label handle_special_cases;
+ __ JumpIfNotSmi(base.reg(), &base_not_smi);
+ __ SmiToInteger32(base.reg(), base.reg());
+ __ cvtlsi2sd(xmm0, base.reg());
+ __ jmp(&handle_special_cases);
+ __ bind(&base_not_smi);
+ __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ call_runtime.Branch(not_equal);
+ __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
+ __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask));
+ __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask));
+ // base is NaN or +/-Infinity
+ call_runtime.Branch(greater_equal);
+ __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
+ // base is in xmm0 and exponent is in xmm1.
+ __ bind(&handle_special_cases);
+ Label not_minus_half;
+ // Test for -0.5.
+ // Load xmm2 with -0.5.
+ __ movl(answer.reg(), Immediate(0xBF000000));
+ __ movd(xmm2, answer.reg());
+ __ cvtss2sd(xmm2, xmm2);
+ // xmm2 now has -0.5.
+ __ ucomisd(xmm2, xmm1);
+ __ j(not_equal, &not_minus_half);
-void DeferredInlineSmiOperationReversed::Generate() {
- GenericBinaryOpStub stub(
- op_,
- overwrite_mode_,
- NO_SMI_CODE_IN_STUB);
- stub.GenerateCall(masm_, value_, src_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
+ // Calculates the reciprocal of the square root.
+ // Note that 1/sqrt(x) = sqrt(1/x).
+ __ divsd(xmm3, xmm0);
+ __ movsd(xmm1, xmm3);
+ __ sqrtsd(xmm1, xmm1);
+ __ jmp(&allocate_return);
+ // Test for 0.5.
+ __ bind(&not_minus_half);
+ // Load xmm2 with 0.5.
+ // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
+ __ addsd(xmm2, xmm3);
+ // xmm2 now has 0.5.
+ __ ucomisd(xmm2, xmm1);
+ call_runtime.Branch(not_equal);
-Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
- Result* operand,
- Handle<Object> value,
- bool reversed,
- OverwriteMode overwrite_mode) {
- // Generate inline code for a binary operation when one of the
- // operands is a constant smi. Consumes the argument "operand".
- if (IsUnsafeSmi(value)) {
- Result unsafe_operand(value);
- if (reversed) {
- return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
- overwrite_mode);
- } else {
- return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
- overwrite_mode);
- }
- }
+ // Calculates the square root.
+ __ movsd(xmm1, xmm0);
+ __ sqrtsd(xmm1, xmm1);
- // Get the literal value.
- Smi* smi_value = Smi::cast(*value);
- int int_value = smi_value->value();
+ JumpTarget done;
+ Label failure, success;
+ __ bind(&allocate_return);
+ // Make a copy of the frame to enable us to handle allocation
+ // failure after the JumpTarget jump.
+ VirtualFrame* clone = new VirtualFrame(frame());
+ __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure);
+ __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
+ // Remove the two original values from the frame - we only need those
+ // in the case where we branch to runtime.
+ frame()->Drop(2);
+ exponent.Unuse();
+ base.Unuse();
+ done.Jump(&answer);
+ // Use the copy of the original frame as our current frame.
+ RegisterFile empty_regs;
+ SetFrame(clone, &empty_regs);
+ // If we experience an allocation failure we branch to runtime.
+ __ bind(&failure);
+ call_runtime.Bind();
+ answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
- Token::Value op = expr->op();
- Result answer;
- switch (op) {
- case Token::ADD: {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredCode* deferred = NULL;
- if (reversed) {
- deferred = new DeferredInlineSmiAddReversed(operand->reg(),
- smi_value,
- overwrite_mode);
- } else {
- deferred = new DeferredInlineSmiAdd(operand->reg(),
- smi_value,
- overwrite_mode);
- }
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiAddConstant(operand->reg(),
- operand->reg(),
- smi_value,
- deferred->entry_label());
- deferred->BindExit();
- answer = *operand;
- break;
- }
+ done.Bind(&answer);
+ frame()->Push(&answer);
+}
- case Token::SUB: {
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- // A smi currently fits in a 32-bit Immediate.
- __ SmiSubConstant(operand->reg(),
- operand->reg(),
- smi_value,
- deferred->entry_label());
- deferred->BindExit();
- answer = *operand;
- }
- break;
- }
- case Token::SAR:
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiShiftArithmeticRightConstant(operand->reg(),
- operand->reg(),
- shift_value);
- deferred->BindExit();
- answer = *operand;
- }
- break;
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+ Load(args->at(0));
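+ // The stub consults the transcendental cache first, so repeated calls
+ // with the same input avoid recomputing the result.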
+ TranscendentalCacheStub stub(TranscendentalCache::SIN);
+ Result result = frame_->CallStub(&stub, 1);
+ frame_->Push(&result);
+}
- case Token::SHR:
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- answer.reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiShiftLogicalRightConstant(answer.reg(),
- operand->reg(),
- shift_value,
- deferred->entry_label());
- deferred->BindExit();
- operand->Unuse();
- }
- break;
- case Token::SHL:
- if (reversed) {
- operand->ToRegister();
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+ Load(args->at(0));
+ TranscendentalCacheStub stub(TranscendentalCache::COS);
+ Result result = frame_->CallStub(&stub, 1);
+ frame_->Push(&result);
+}
- // We need rcx to be available to hold operand, and to be spilled.
- // SmiShiftLeft implicitly modifies rcx.
- if (operand->reg().is(rcx)) {
- frame_->Spill(operand->reg());
- answer = allocator()->Allocate();
- } else {
- Result rcx_reg = allocator()->Allocate(rcx);
- // answer must not be rcx.
- answer = allocator()->Allocate();
- // rcx_reg goes out of scope.
- }
- DeferredInlineSmiOperationReversed* deferred =
- new DeferredInlineSmiOperationReversed(op,
- answer.reg(),
- smi_value,
- operand->reg(),
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
+// Generates the Math.sqrt method. Please note - this function assumes that
+// the call site has executed ToNumber on the argument.
+void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
- __ Move(answer.reg(), smi_value);
- __ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg());
- operand->Unuse();
+ // Leave original value on the frame if we need to call runtime.
+ frame()->Dup();
+ Result result = frame()->Pop();
+ result.ToRegister();
+ frame()->Spill(result.reg());
+ Label runtime;
+ Label non_smi;
+ Label load_done;
+ JumpTarget end;
- deferred->BindExit();
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- if (shift_value == 0) {
- // Spill operand so it can be overwritten in the slow case.
- frame_->Spill(operand->reg());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- deferred->BindExit();
- answer = *operand;
- } else {
- // Use a fresh temporary for nonzero shift values.
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- answer.reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiShiftLeftConstant(answer.reg(),
- operand->reg(),
- shift_value);
- deferred->BindExit();
- operand->Unuse();
- }
- }
- break;
+ __ JumpIfNotSmi(result.reg(), &non_smi);
+ __ SmiToInteger32(result.reg(), result.reg());
+ __ cvtlsi2sd(xmm0, result.reg());
+ __ jmp(&load_done);
+ __ bind(&non_smi);
+ __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &runtime);
+ __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND: {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- if (reversed) {
- // Bit operations with a constant smi are commutative.
- // We can swap left and right operands with no problem.
- // Swap left and right overwrite modes. 0->0, 1->2, 2->1.
- overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
- }
- DeferredCode* deferred = new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- if (op == Token::BIT_AND) {
- __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
- } else if (op == Token::BIT_XOR) {
- if (int_value != 0) {
- __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
- }
- } else {
- ASSERT(op == Token::BIT_OR);
- if (int_value != 0) {
- __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
- }
- }
- deferred->BindExit();
- answer = *operand;
- break;
- }
+ __ bind(&load_done);
+ __ sqrtsd(xmm0, xmm0);
+ // A copy of the virtual frame to allow us to go to runtime after the
+ // JumpTarget jump.
+ Result scratch = allocator()->Allocate();
+ VirtualFrame* clone = new VirtualFrame(frame());
+ __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime);
- // Generate inline code for mod of powers of 2 and negative powers of 2.
- case Token::MOD:
- if (!reversed &&
- int_value != 0 &&
- (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- // Check for negative or non-Smi left hand side.
- __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
- if (int_value < 0) int_value = -int_value;
- if (int_value == 1) {
- __ Move(operand->reg(), Smi::FromInt(0));
- } else {
- __ SmiAndConstant(operand->reg(),
- operand->reg(),
- Smi::FromInt(int_value - 1));
- }
- deferred->BindExit();
- answer = *operand;
- break; // This break only applies if we generated code for MOD.
- }
- // Fall through if we did not find a power of 2 on the right hand side!
- // The next case must be the default.
+ __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
+ frame()->Drop(1);
+ scratch.Unuse();
+ end.Jump(&result);
+ // We only branch to runtime if we have an allocation error.
+ // Use the copy of the original frame as our current frame.
+ RegisterFile empty_regs;
+ SetFrame(clone, &empty_regs);
+ __ bind(&runtime);
+ result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
- default: {
- Result constant_operand(value);
- if (reversed) {
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
- overwrite_mode);
- }
- break;
- }
- }
- ASSERT(answer.is_valid());
- return answer;
+ end.Bind(&result);
+ frame()->Push(&result);
}
-void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg,
- TypeInfo type,
- DeferredCode* deferred) {
- if (!type.IsSmi()) {
- __ JumpIfNotSmi(reg, deferred->entry_label());
- }
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(reg);
+void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
+ if (CheckForInlineRuntimeCall(node)) {
+ return;
}
-}
+ ZoneList<Expression*>* args = node->arguments();
+ Comment cmnt(masm_, "[ CallRuntime");
+ Runtime::Function* function = node->function();
-void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- TypeInfo left_info,
- TypeInfo right_info,
- DeferredCode* deferred) {
- if (!left_info.IsSmi() && !right_info.IsSmi()) {
- __ JumpIfNotBothSmi(left, right, deferred->entry_label());
- } else if (!left_info.IsSmi()) {
- __ JumpIfNotSmi(left, deferred->entry_label());
- } else if (!right_info.IsSmi()) {
- __ JumpIfNotSmi(right, deferred->entry_label());
+ if (function == NULL) {
+ // Push the builtins object found in the current global object.
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(), GlobalObject());
+ __ movq(temp.reg(),
+ FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
+ frame_->Push(&temp);
}
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
+
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ if (function == NULL) {
+ // Call the JS runtime function.
+ frame_->Push(node->name());
+ Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
+ arg_count,
+ loop_nesting_);
+ frame_->RestoreContextRegister();
+ frame_->Push(&answer);
+ } else {
+ // Call the C runtime function.
+ Result answer = frame_->CallRuntime(function, arg_count);
+ frame_->Push(&answer);
}
}
-// Implements a binary operation using a deferred code object and some
-// inline code to operate on smis quickly.
-Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode) {
- // Copy the type info because left and right may be overwritten.
- TypeInfo left_type_info = left->type_info();
- TypeInfo right_type_info = right->type_info();
- Token::Value op = expr->op();
- Result answer;
- // Special handling of div and mod because they use fixed registers.
- if (op == Token::DIV || op == Token::MOD) {
- // We need rax as the quotient register, rdx as the remainder
- // register, neither left nor right in rax or rdx, and left copied
- // to rax.
- Result quotient;
- Result remainder;
- bool left_is_in_rax = false;
- // Step 1: get rax for quotient.
- if ((left->is_register() && left->reg().is(rax)) ||
- (right->is_register() && right->reg().is(rax))) {
- // One or both is in rax. Use a fresh non-rdx register for
- // them.
- Result fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- if (fresh.reg().is(rdx)) {
- remainder = fresh;
- fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- }
- if (left->is_register() && left->reg().is(rax)) {
- quotient = *left;
- *left = fresh;
- left_is_in_rax = true;
- }
- if (right->is_register() && right->reg().is(rax)) {
- quotient = *right;
- *right = fresh;
- }
- __ movq(fresh.reg(), rax);
- } else {
- // Neither left nor right is in rax.
- quotient = allocator_->Allocate(rax);
- }
- ASSERT(quotient.is_register() && quotient.reg().is(rax));
- ASSERT(!(left->is_register() && left->reg().is(rax)));
- ASSERT(!(right->is_register() && right->reg().is(rax)));
+void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+ Comment cmnt(masm_, "[ UnaryOperation");
- // Step 2: get rdx for remainder if necessary.
- if (!remainder.is_valid()) {
- if ((left->is_register() && left->reg().is(rdx)) ||
- (right->is_register() && right->reg().is(rdx))) {
- Result fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- if (left->is_register() && left->reg().is(rdx)) {
- remainder = *left;
- *left = fresh;
- }
- if (right->is_register() && right->reg().is(rdx)) {
- remainder = *right;
- *right = fresh;
- }
- __ movq(fresh.reg(), rdx);
- } else {
- // Neither left nor right is in rdx.
- remainder = allocator_->Allocate(rdx);
- }
+ Token::Value op = node->op();
+
+ if (op == Token::NOT) {
+ // Swap the true and false targets but keep the same actual label
+ // as the fall through.
+ destination()->Invert();
+ LoadCondition(node->expression(), destination(), true);
+ // Swap the labels back.
+ destination()->Invert();
+
+ } else if (op == Token::DELETE) {
+ Property* property = node->expression()->AsProperty();
+ if (property != NULL) {
+ Load(property->obj());
+ Load(property->key());
+ Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
+ frame_->Push(&answer);
+ return;
}
- ASSERT(remainder.is_register() && remainder.reg().is(rdx));
- ASSERT(!(left->is_register() && left->reg().is(rdx)));
- ASSERT(!(right->is_register() && right->reg().is(rdx)));
- left->ToRegister();
- right->ToRegister();
- frame_->Spill(rax);
- frame_->Spill(rdx);
+ Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
+ if (variable != NULL) {
+ Slot* slot = variable->slot();
+ if (variable->is_global()) {
+ LoadGlobal();
+ frame_->Push(variable->name());
+ Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
+ CALL_FUNCTION, 2);
+ frame_->Push(&answer);
+ return;
+
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ // Call the runtime to look up the context holding the named
+ // variable. Sync the virtual frame eagerly so we can push the
+ // arguments directly into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(rsi);
+ frame_->EmitPush(variable->name());
+ Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
+ ASSERT(context.is_register());
+ frame_->EmitPush(context.reg());
+ context.Unuse();
+ frame_->EmitPush(variable->name());
+ Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
+ CALL_FUNCTION, 2);
+ frame_->Push(&answer);
+ return;
+ }
- // Check that left and right are smi tagged.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- (op == Token::DIV) ? rax : rdx,
- left->reg(),
- right->reg(),
- overwrite_mode);
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
- left_type_info, right_type_info, deferred);
+ // Default: Result of deleting non-global, not dynamically
+ // introduced variables is false.
+ frame_->Push(Factory::false_value());
- if (op == Token::DIV) {
- __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- answer = quotient;
} else {
- ASSERT(op == Token::MOD);
- __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- answer = remainder;
- }
- ASSERT(answer.is_valid());
- return answer;
- }
-
- // Special handling of shift operations because they use fixed
- // registers.
- if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
- // Move left out of rcx if necessary.
- if (left->is_register() && left->reg().is(rcx)) {
- *left = allocator_->Allocate();
- ASSERT(left->is_valid());
- __ movq(left->reg(), rcx);
+ // Default: Result of deleting expressions is true.
+ Load(node->expression()); // may have side-effects
+ frame_->SetElementAt(0, Factory::true_value());
}
- right->ToRegister(rcx);
- left->ToRegister();
- ASSERT(left->is_register() && !left->reg().is(rcx));
- ASSERT(right->is_register() && right->reg().is(rcx));
-
- // We will modify right, it must be spilled.
- frame_->Spill(rcx);
- // Use a fresh answer register to avoid spilling the left operand.
- answer = allocator_->Allocate();
- ASSERT(answer.is_valid());
- // Check that both operands are smis using the answer register as a
- // temporary.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- answer.reg(),
- left->reg(),
- rcx,
- overwrite_mode);
+ } else if (op == Token::TYPEOF) {
+ // Special case for loading the typeof expression; see comment on
+ // LoadTypeofExpression().
+ LoadTypeofExpression(node->expression());
+ Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
+ frame_->Push(&answer);
- Label do_op;
- if (right_type_info.IsSmi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(right->reg());
- }
- __ movq(answer.reg(), left->reg());
- // If left is not known to be a smi, check if it is.
- // If left is not known to be a number, and it isn't a smi, check if
- // it is a HeapNumber.
- if (!left_type_info.IsSmi()) {
- __ JumpIfSmi(answer.reg(), &do_op);
- if (!left_type_info.IsNumber()) {
- // Branch if not a heap number.
- __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
- Factory::heap_number_map());
- deferred->Branch(not_equal);
- }
- // Load integer value into answer register using truncation.
- __ cvttsd2si(answer.reg(),
- FieldOperand(answer.reg(), HeapNumber::kValueOffset));
- // Branch if we might have overflowed.
- // (False negative for Smi::kMinValue)
- __ cmpq(answer.reg(), Immediate(0x80000000));
- deferred->Branch(equal);
- // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging.
- __ Integer32ToSmi(answer.reg(), answer.reg());
- } else {
- // Fast case - both are actually smis.
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left->reg());
- }
- }
+ } else if (op == Token::VOID) {
+ Expression* expression = node->expression();
+ if (expression && expression->AsLiteral() && (
+ expression->AsLiteral()->IsTrue() ||
+ expression->AsLiteral()->IsFalse() ||
+ expression->AsLiteral()->handle()->IsNumber() ||
+ expression->AsLiteral()->handle()->IsString() ||
+ expression->AsLiteral()->handle()->IsJSRegExp() ||
+ expression->AsLiteral()->IsNull())) {
+ // Omit evaluating the value of the primitive literal.
+ // It will be discarded anyway, and can have no side effect.
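+ // The canonical case is 'void 0', which yields undefined without
+ // emitting any evaluation code.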
+ frame_->Push(Factory::undefined_value());
} else {
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx,
- left_type_info, right_type_info, deferred);
+ Load(node->expression());
+ frame_->SetElementAt(0, Factory::undefined_value());
}
- __ bind(&do_op);
- // Perform the operation.
+ } else {
+ bool can_overwrite =
+ (node->expression()->AsBinaryOperation() != NULL &&
+ node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ UnaryOverwriteMode overwrite =
+ can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+ bool no_negative_zero = node->expression()->no_negative_zero();
+ Load(node->expression());
switch (op) {
- case Token::SAR:
- __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
+ case Token::NOT:
+ case Token::DELETE:
+ case Token::TYPEOF:
+ UNREACHABLE(); // handled above
break;
- case Token::SHR: {
- __ SmiShiftLogicalRight(answer.reg(),
- left->reg(),
- rcx,
- deferred->entry_label());
+
+ case Token::SUB: {
+ GenericUnaryOpStub stub(
+ Token::SUB,
+ overwrite,
+ no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
+ Result operand = frame_->Pop();
+ Result answer = frame_->CallStub(&stub, &operand);
+ answer.set_type_info(TypeInfo::Number());
+ frame_->Push(&answer);
break;
}
- case Token::SHL: {
- __ SmiShiftLeft(answer.reg(),
- left->reg(),
- rcx);
+
+ case Token::BIT_NOT: {
+ // Smi check.
+ JumpTarget smi_label;
+ JumpTarget continue_label;
+ Result operand = frame_->Pop();
+ operand.ToRegister();
+
+ Condition is_smi = masm_->CheckSmi(operand.reg());
+ smi_label.Branch(is_smi, &operand);
+
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ Result answer = frame_->CallStub(&stub, &operand);
+ continue_label.Jump(&answer);
+
+ smi_label.Bind(&answer);
+ answer.ToRegister();
+ frame_->Spill(answer.reg());
+ __ SmiNot(answer.reg(), answer.reg());
+ continue_label.Bind(&answer);
+ answer.set_type_info(TypeInfo::Smi());
+ frame_->Push(&answer);
+ break;
+ }
+
+ case Token::ADD: {
+ // Smi check.
+ JumpTarget continue_label;
+ Result operand = frame_->Pop();
+ TypeInfo operand_info = operand.type_info();
+ operand.ToRegister();
+ Condition is_smi = masm_->CheckSmi(operand.reg());
+ continue_label.Branch(is_smi, &operand);
+ frame_->Push(&operand);
+ Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
+ CALL_FUNCTION, 1);
+
+ continue_label.Bind(&answer);
+ if (operand_info.IsSmi()) {
+ answer.set_type_info(TypeInfo::Smi());
+ } else if (operand_info.IsInteger32()) {
+ answer.set_type_info(TypeInfo::Integer32());
+ } else {
+ answer.set_type_info(TypeInfo::Number());
+ }
+ frame_->Push(&answer);
break;
}
default:
UNREACHABLE();
}
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
}
+}
- // Handle the other binary operations.
- left->ToRegister();
- right->ToRegister();
- // A newly allocated register answer is used to hold the answer. The
- // registers containing left and right are not modified so they don't
- // need to be spilled in the fast case.
- answer = allocator_->Allocate();
- ASSERT(answer.is_valid());
-
- // Perform the smi tag check.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- answer.reg(),
- left->reg(),
- right->reg(),
- overwrite_mode);
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
- left_type_info, right_type_info, deferred);
- switch (op) {
- case Token::ADD:
- __ SmiAdd(answer.reg(),
- left->reg(),
- right->reg(),
- deferred->entry_label());
- break;
+// The value in dst was optimistically incremented or decremented.
+// The result overflowed or was not smi tagged. Call into the runtime
+// to convert the argument to a number, and call the specialized add
+// or subtract stub. The result is left in dst.
+class DeferredPrefixCountOperation: public DeferredCode {
+ public:
+ DeferredPrefixCountOperation(Register dst,
+ bool is_increment,
+ TypeInfo input_type)
+ : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
+ set_comment("[ DeferredCountOperation");
+ }
- case Token::SUB:
- __ SmiSub(answer.reg(),
- left->reg(),
- right->reg(),
- deferred->entry_label());
- break;
+ virtual void Generate();
- case Token::MUL: {
- __ SmiMul(answer.reg(),
- left->reg(),
- right->reg(),
- deferred->entry_label());
- break;
- }
+ private:
+ Register dst_;
+ bool is_increment_;
+ TypeInfo input_type_;
+};
- case Token::BIT_OR:
- __ SmiOr(answer.reg(), left->reg(), right->reg());
- break;
- case Token::BIT_AND:
- __ SmiAnd(answer.reg(), left->reg(), right->reg());
- break;
+void DeferredPrefixCountOperation::Generate() {
+ Register left;
+ if (input_type_.IsNumber()) {
+ left = dst_;
+ } else {
+ __ push(dst_);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
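+ // The builtin returns the converted number in rax.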
+ left = rax;
+ }
- case Token::BIT_XOR:
- __ SmiXor(answer.reg(), left->reg(), right->reg());
- break;
+ GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
+ NO_OVERWRITE,
+ NO_GENERIC_BINARY_FLAGS,
+ TypeInfo::Number());
+ stub.GenerateCall(masm_, left, Smi::FromInt(1));
- default:
- UNREACHABLE();
- break;
- }
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
+ if (!dst_.is(rax)) __ movq(dst_, rax);
}
-Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Result result;
- // Do not inline the inobject property case for loads from the global
- // object. Also do not inline for unoptimized code. This saves time
- // in the code generator. Unoptimized code is toplevel code or code
- // that is not in a loop.
- if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
- Comment cmnt(masm(), "[ Load from named Property");
- frame()->Push(name);
-
- RelocInfo::Mode mode = is_contextual
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- result = frame()->CallLoadIC(mode);
- // A test rax instruction following the call signals that the
- // inobject property case was inlined. Ensure that there is not
- // a test rax instruction here.
- __ nop();
- } else {
- // Inline the inobject property case.
- Comment cmnt(masm(), "[ Inlined named property load");
- Result receiver = frame()->Pop();
- receiver.ToRegister();
- result = allocator()->Allocate();
- ASSERT(result.is_valid());
+// The value in dst was optimistically incremented or decremented.
+// The result overflowed or was not smi tagged. Call into the runtime
+// to convert the argument to a number. Update the original value in
+// old. Call the specialized add or subtract stub. The result is
+// left in dst.
+class DeferredPostfixCountOperation: public DeferredCode {
+ public:
+ DeferredPostfixCountOperation(Register dst,
+ Register old,
+ bool is_increment,
+ TypeInfo input_type)
+ : dst_(dst),
+ old_(old),
+ is_increment_(is_increment),
+ input_type_(input_type) {
+ set_comment("[ DeferredCountOperation");
+ }
- // Cannot use r12 for receiver, because that changes
- // the distance between a call and a fixup location,
- // due to a special encoding of r12 as r/m in a ModR/M byte.
- if (receiver.reg().is(r12)) {
- frame()->Spill(receiver.reg()); // It will be overwritten with result.
- // Swap receiver and value.
- __ movq(result.reg(), receiver.reg());
- Result temp = receiver;
- receiver = result;
- result = temp;
- }
+ virtual void Generate();
- DeferredReferenceGetNamedValue* deferred =
- new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
+ private:
+ Register dst_;
+ Register old_;
+ bool is_increment_;
+ TypeInfo input_type_;
+};
- // Check that the receiver is a heap object.
- __ JumpIfSmi(receiver.reg(), deferred->entry_label());
- __ bind(deferred->patch_site());
- // This is the map check instruction that will be patched (so we can't
- // use the double underscore macro that may insert instructions).
- // Initially use an invalid map to force a failure.
- masm()->Move(kScratchRegister, Factory::null_value());
- masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- kScratchRegister);
- // This branch is always a forwards branch so it's always a fixed
- // size which allows the assert below to succeed and patching to work.
- // Don't use deferred->Branch(...), since that might add coverage code.
- masm()->j(not_equal, deferred->entry_label());
+void DeferredPostfixCountOperation::Generate() {
+ Register left;
+ if (input_type_.IsNumber()) {
+ __ push(dst_); // Save the input to use as the old value.
+ left = dst_;
+ } else {
+ __ push(dst_);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ __ push(rax); // Save the result of ToNumber to use as the old value.
+ left = rax;
+ }
- // The delta from the patch label to the load offset must be
- // statically known.
- ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
- LoadIC::kOffsetToLoadInstruction);
- // The initial (invalid) offset has to be large enough to force
- // a 32-bit instruction encoding to allow patching with an
- // arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
- int offset = kMaxInt;
- masm()->movq(result.reg(), FieldOperand(receiver.reg(), offset));
+ GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
+ NO_OVERWRITE,
+ NO_GENERIC_BINARY_FLAGS,
+ TypeInfo::Number());
+ stub.GenerateCall(masm_, left, Smi::FromInt(1));
- __ IncrementCounter(&Counters::named_load_inline, 1);
- deferred->BindExit();
- }
- ASSERT(frame()->height() == original_height - 1);
- return result;
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+ __ pop(old_);
}
-Result CodeGenerator::EmitKeyedLoad() {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Result result;
- // Inline array load code if inside of a loop. We do not know
- // the receiver map yet, so we initially generate the code with
- // a check against an invalid map. In the inline cache code, we
- // patch the map check if appropriate.
- if (loop_nesting() > 0) {
- Comment cmnt(masm_, "[ Inlined load from keyed Property");
+void CodeGenerator::VisitCountOperation(CountOperation* node) {
+ Comment cmnt(masm_, "[ CountOperation");
- // Use a fresh temporary to load the elements without destroying
- // the receiver which is needed for the deferred slow case.
- // Allocate the temporary early so that we use rax if it is free.
- Result elements = allocator()->Allocate();
- ASSERT(elements.is_valid());
+ bool is_postfix = node->is_postfix();
+ bool is_increment = node->op() == Token::INC;
- Result key = frame_->Pop();
- Result receiver = frame_->Pop();
- key.ToRegister();
- receiver.ToRegister();
+ Variable* var = node->expression()->AsVariableProxy()->AsVariable();
+ bool is_const = (var != NULL && var->mode() == Variable::CONST);
- // If key and receiver are shared registers on the frame, their values will
- // be automatically saved and restored when going to deferred code.
- // The result is returned in elements, which is not shared.
- DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(elements.reg(),
- receiver.reg(),
- key.reg());
+ // Postfix operations need a stack slot under the reference to hold
+ // the old value while the new value is being stored. This ensures
+ // that if storing the new value requires a call, the old value is
+ // kept on the frame, where it can be spilled.
+ if (is_postfix) frame_->Push(Smi::FromInt(0));
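+ // For example, in 'o.x++' the store to o.x goes through an IC call; the
+ // slot keeps the old value safely on the frame across that call.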
- __ JumpIfSmi(receiver.reg(), deferred->entry_label());
+ // A constant reference is not saved to, so the reference is not a
+ // compound assignment reference.
+ { Reference target(this, node->expression(), !is_const);
+ if (target.is_illegal()) {
+ // Spoof the virtual frame to have the expected height (one higher
+ // than on entry).
+ if (!is_postfix) frame_->Push(Smi::FromInt(0));
+ return;
+ }
+ target.TakeValue();
- // Check that the receiver has the expected map.
- // Initially, use an invalid map. The map is patched in the IC
- // initialization code.
- __ bind(deferred->patch_site());
- // Use masm-> here instead of the double underscore macro since extra
- // coverage code can interfere with the patching. Do not use a load
- // from the root array to load null_value, since the load must be patched
- // with the expected receiver map, which is not in the root array.
- masm_->movq(kScratchRegister, Factory::null_value(),
- RelocInfo::EMBEDDED_OBJECT);
- masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- kScratchRegister);
- deferred->Branch(not_equal);
+ Result new_value = frame_->Pop();
+ new_value.ToRegister();
+
+ Result old_value; // Only allocated in the postfix case.
+ if (is_postfix) {
+ // Allocate a temporary to preserve the old value.
+ old_value = allocator_->Allocate();
+ ASSERT(old_value.is_valid());
+ __ movq(old_value.reg(), new_value.reg());
+
+ // The return value for postfix operations is ToNumber(input).
+ // Keep more precise type info if the input is some kind of
+ // number already. If the input is not a number we have to wait
+ // for the deferred code to convert it.
+ if (new_value.type_info().IsNumber()) {
+ old_value.set_type_info(new_value.type_info());
+ }
+ }
+ // Ensure the new value is writable.
+ frame_->Spill(new_value.reg());
+
+ DeferredCode* deferred = NULL;
+ if (is_postfix) {
+ deferred = new DeferredPostfixCountOperation(new_value.reg(),
+ old_value.reg(),
+ is_increment,
+ new_value.type_info());
+ } else {
+ deferred = new DeferredPrefixCountOperation(new_value.reg(),
+ is_increment,
+ new_value.type_info());
+ }
- // Check that the key is a non-negative smi.
- __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
+ if (new_value.is_smi()) {
+ if (FLAG_debug_code) { __ AbortIfNotSmi(new_value.reg()); }
+ } else {
+ __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
+ }
+ if (is_increment) {
+ __ SmiAddConstant(new_value.reg(),
+ new_value.reg(),
+ Smi::FromInt(1),
+ deferred->entry_label());
+ } else {
+ __ SmiSubConstant(new_value.reg(),
+ new_value.reg(),
+ Smi::FromInt(1),
+ deferred->entry_label());
+ }
+ deferred->BindExit();
- // Get the elements array from the receiver and check that it
- // is not a dictionary.
- __ movq(elements.reg(),
- FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
- Factory::fixed_array_map());
- __ Assert(equal, "JSObject with fast elements map has slow elements");
+ // Postfix count operations return their input converted to
+ // number. The case when the input is already a number is covered
+ // above in the allocation code for old_value.
+ if (is_postfix && !new_value.type_info().IsNumber()) {
+ old_value.set_type_info(TypeInfo::Number());
}
- // Check that key is within bounds.
- __ SmiCompare(key.reg(),
- FieldOperand(elements.reg(), FixedArray::kLengthOffset));
- deferred->Branch(above_equal);
+ new_value.set_type_info(TypeInfo::Number());
- // Load and check that the result is not the hole. We could
- // reuse the index or elements register for the value.
- //
- // TODO(206): Consider whether it makes sense to try some
- // heuristic about which register to reuse. For example, if
- // one is rax, then we can reuse that one because the value
- // coming from the deferred code will be in rax.
- SmiIndex index =
- masm_->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
- __ movq(elements.reg(),
- FieldOperand(elements.reg(),
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- result = elements;
- __ CompareRoot(result.reg(), Heap::kTheHoleValueRootIndex);
- deferred->Branch(equal);
- __ IncrementCounter(&Counters::keyed_load_inline, 1);
+ // Postfix: store the old value in the allocated slot under the
+ // reference.
+ if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
- deferred->BindExit();
- } else {
- Comment cmnt(masm_, "[ Load from keyed Property");
- result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed load. The explicit nop instruction is here because
- // the push that follows might be peep-hole optimized away.
- __ nop();
+ frame_->Push(&new_value);
+ // Non-constant: update the reference.
+ if (!is_const) target.SetValue(NOT_CONST_INIT);
}
- ASSERT(frame()->height() == original_height - 2);
- return result;
+
+ // Postfix: drop the new value and use the old.
+ if (is_postfix) frame_->Drop();
}
-#undef __
-#define __ ACCESS_MASM(masm)
+void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
+ // According to ECMA-262 section 11.11, page 58, the binary logical
+ // operators must yield the result of one of the two expressions
+ // before any ToBoolean() conversions. This means that the value
+ // produced by a && or || operator is not necessarily a boolean.
+ // NOTE: If the left hand side produces a materialized value (not
+ // control flow), we force the right hand side to do the same. This
+ // is necessary because we assume that if we get control flow on the
+ // last path out of an expression we got it on all paths.
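+ // For example, ('' || 'fallback') yields 'fallback' and (0 && f()) yields
+ // 0 - neither result is necessarily a boolean.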
+ if (node->op() == Token::AND) {
+ JumpTarget is_true;
+ ControlDestination dest(&is_true, destination()->false_target(), true);
+ LoadCondition(node->left(), &dest, false);
-Handle<String> Reference::GetName() {
- ASSERT(type_ == NAMED);
- Property* property = expression_->AsProperty();
- if (property == NULL) {
- // Global variable reference treated as a named property reference.
- VariableProxy* proxy = expression_->AsVariableProxy();
- ASSERT(proxy->AsVariable() != NULL);
- ASSERT(proxy->AsVariable()->is_global());
- return proxy->name();
- } else {
- Literal* raw_name = property->key()->AsLiteral();
- ASSERT(raw_name != NULL);
- return Handle<String>(String::cast(*raw_name->handle()));
- }
-}
+ if (dest.false_was_fall_through()) {
+ // The current false target was used as the fall-through. If
+ // there are no dangling jumps to is_true then the left
+ // subexpression was unconditionally false. Otherwise we have
+ // paths where we do have to evaluate the right subexpression.
+ if (is_true.is_linked()) {
+ // We need to compile the right subexpression. If the jump to
+ // the current false target was a forward jump then we have a
+ // valid frame, we have just bound the false target, and we
+ // have to jump around the code for the right subexpression.
+ if (has_valid_frame()) {
+ destination()->false_target()->Unuse();
+ destination()->false_target()->Jump();
+ }
+ is_true.Bind();
+ // The left subexpression compiled to control flow, so the
+ // right one is free to do so as well.
+ LoadCondition(node->right(), destination(), false);
+ } else {
+ // We have actually just jumped to or bound the current false
+ // target but the current control destination is not marked as
+ // used.
+ destination()->Use(false);
+ }
+ } else if (dest.is_used()) {
+ // The left subexpression compiled to control flow (and is_true
+ // was just bound), so the right is free to do so as well.
+ LoadCondition(node->right(), destination(), false);
-void Reference::GetValue() {
- ASSERT(!cgen_->in_spilled_code());
- ASSERT(cgen_->HasValidEntryRegisters());
- ASSERT(!is_illegal());
- MacroAssembler* masm = cgen_->masm();
+ } else {
+ // We have a materialized value on the frame, so we exit with
+ // one on all paths. There are possibly also jumps to is_true
+ // from nested subexpressions.
+ JumpTarget pop_and_continue;
+ JumpTarget exit;
- // Record the source position for the property load.
- Property* property = expression_->AsProperty();
- if (property != NULL) {
- cgen_->CodeForSourcePosition(property->position());
- }
+ // Avoid popping the result if it converts to 'false' using the
+ // standard ToBoolean() conversion as described in ECMA-262,
+ // section 9.2, page 30.
+ //
+ // Duplicate the TOS value. The duplicate will be popped by
+ // ToBoolean.
+ frame_->Dup();
+ ControlDestination dest(&pop_and_continue, &exit, true);
+ ToBoolean(&dest);
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Load from Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
- ASSERT(slot != NULL);
- cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- break;
- }
+ // Pop the result of evaluating the first part.
+ frame_->Drop();
- case NAMED: {
- Variable* var = expression_->AsVariableProxy()->AsVariable();
- bool is_global = var != NULL;
- ASSERT(!is_global || var->is_global());
- if (persist_after_get_) {
- cgen_->frame()->Dup();
- }
- Result result = cgen_->EmitNamedLoad(GetName(), is_global);
- cgen_->frame()->Push(&result);
- break;
- }
+ // Compile right side expression.
+ is_true.Bind();
+ Load(node->right());
- case KEYED: {
- // A load of a bare identifier (load from global) cannot be keyed.
- ASSERT(expression_->AsVariableProxy()->AsVariable() == NULL);
- if (persist_after_get_) {
- cgen_->frame()->PushElementAt(1);
- cgen_->frame()->PushElementAt(1);
- }
- Result value = cgen_->EmitKeyedLoad();
- cgen_->frame()->Push(&value);
- break;
+ // Exit (always with a materialized value).
+ exit.Bind();
}
- default:
- UNREACHABLE();
- }
+ } else {
+ ASSERT(node->op() == Token::OR);
+ JumpTarget is_false;
+ ControlDestination dest(destination()->true_target(), &is_false, false);
+ LoadCondition(node->left(), &dest, false);
- if (!persist_after_get_) {
- set_unloaded();
- }
-}
+ if (dest.true_was_fall_through()) {
+ // The current true target was used as the fall-through. If
+ // there are no dangling jumps to is_false then the left
+ // subexpression was unconditionally true. Otherwise we have
+ // paths where we do have to evaluate the right subexpression.
+ if (is_false.is_linked()) {
+ // We need to compile the right subexpression. If the jump to
+ // the current true target was a forward jump then we have a
+ // valid frame, we have just bound the true target, and we
+ // have to jump around the code for the right subexpression.
+ if (has_valid_frame()) {
+ destination()->true_target()->Unuse();
+ destination()->true_target()->Jump();
+ }
+ is_false.Bind();
+ // The left subexpression compiled to control flow, so the
+ // right one is free to do so as well.
+ LoadCondition(node->right(), destination(), false);
+ } else {
+ // We have just jumped to or bound the current true target but
+ // the current control destination is not marked as used.
+ destination()->Use(true);
+ }
+ } else if (dest.is_used()) {
+ // The left subexpression compiled to control flow (and is_false
+ // was just bound), so the right is free to do so as well.
+ LoadCondition(node->right(), destination(), false);
-void Reference::TakeValue() {
- // TODO(X64): This function is completely architecture independent. Move
- // it somewhere shared.
+ } else {
+ // We have a materialized value on the frame, so we exit with
+ // one on all paths. There are possibly also jumps to is_false
+ // from nested subexpressions.
+ JumpTarget pop_and_continue;
+ JumpTarget exit;
- // For non-constant frame-allocated slots, we invalidate the value in the
- // slot. For all others, we fall back on GetValue.
- ASSERT(!cgen_->in_spilled_code());
- ASSERT(!is_illegal());
- if (type_ != SLOT) {
- GetValue();
- return;
- }
+ // Avoid popping the result if it converts to 'true' using the
+ // standard ToBoolean() conversion as described in ECMA-262,
+ // section 9.2, page 30.
+ //
+ // Duplicate the TOS value. The duplicate will be popped by
+ // ToBoolean.
+ frame_->Dup();
+ ControlDestination dest(&exit, &pop_and_continue, false);
+ ToBoolean(&dest);
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
- ASSERT(slot != NULL);
- if (slot->type() == Slot::LOOKUP ||
- slot->type() == Slot::CONTEXT ||
- slot->var()->mode() == Variable::CONST ||
- slot->is_arguments()) {
- GetValue();
- return;
- }
+ // Pop the result of evaluating the first part.
+ frame_->Drop();
- // Only non-constant, frame-allocated parameters and locals can reach
- // here. Be careful not to use the optimizations for arguments
- // object access since it may not have been initialized yet.
- ASSERT(!slot->is_arguments());
- if (slot->type() == Slot::PARAMETER) {
- cgen_->frame()->TakeParameterAt(slot->index());
- } else {
- ASSERT(slot->type() == Slot::LOCAL);
- cgen_->frame()->TakeLocalAt(slot->index());
- }
+ // Compile right side expression.
+ is_false.Bind();
+ Load(node->right());
- ASSERT(persist_after_get_);
- // Do not unload the reference, because it is used in SetValue.
+ // Exit (always with a materialized value).
+ exit.Bind();
+ }
+ }
}
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+ Comment cmnt(masm_, "[ BinaryOperation");
-void Reference::SetValue(InitState init_state) {
- ASSERT(cgen_->HasValidEntryRegisters());
- ASSERT(!is_illegal());
- MacroAssembler* masm = cgen_->masm();
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Store to Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
- ASSERT(slot != NULL);
- cgen_->StoreToSlot(slot, init_state);
- cgen_->UnloadReference(this);
- break;
+ if (node->op() == Token::AND || node->op() == Token::OR) {
+ GenerateLogicalBooleanOperation(node);
+ } else {
+ // NOTE: The code below assumes that the slow cases (calls to runtime)
+ // never return a constant/immutable object.
+ OverwriteMode overwrite_mode = NO_OVERWRITE;
+ if (node->left()->AsBinaryOperation() != NULL &&
+ node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+ overwrite_mode = OVERWRITE_LEFT;
+ } else if (node->right()->AsBinaryOperation() != NULL &&
+ node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+ overwrite_mode = OVERWRITE_RIGHT;
}
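+    // For example, in (a * b) + c the heap number allocated for a * b may be
+    // reused to hold the result, which is what OVERWRITE_LEFT allows.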
- case NAMED: {
- Comment cmnt(masm, "[ Store to named Property");
- cgen_->frame()->Push(GetName());
- Result answer = cgen_->frame()->CallStoreIC();
- cgen_->frame()->Push(&answer);
- set_unloaded();
- break;
+ if (node->left()->IsTrivial()) {
+ Load(node->right());
+ Result right = frame_->Pop();
+ frame_->Push(node->left());
+ frame_->Push(&right);
+ } else {
+ Load(node->left());
+ Load(node->right());
}
+ GenericBinaryOperation(node, overwrite_mode);
+ }
+}
- case KEYED: {
- Comment cmnt(masm, "[ Store to keyed Property");
- // Generate inlined version of the keyed store if the code is in
- // a loop and the key is likely to be a smi.
- Property* property = expression()->AsProperty();
- ASSERT(property != NULL);
- StaticType* key_smi_analysis = property->key()->type();
+void CodeGenerator::VisitThisFunction(ThisFunction* node) {
+ frame_->PushFunction();
+}
- if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
- Comment cmnt(masm, "[ Inlined store to keyed Property");
- // Get the receiver, key and value into registers.
- Result value = cgen_->frame()->Pop();
- Result key = cgen_->frame()->Pop();
- Result receiver = cgen_->frame()->Pop();
+void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
+ Comment cmnt(masm_, "[ CompareOperation");
- Result tmp = cgen_->allocator_->Allocate();
- ASSERT(tmp.is_valid());
- Result tmp2 = cgen_->allocator_->Allocate();
- ASSERT(tmp2.is_valid());
+ // Get the expressions from the node.
+ Expression* left = node->left();
+ Expression* right = node->right();
+ Token::Value op = node->op();
+ // To make typeof testing for natives implemented in JavaScript really
+ // efficient, we generate special code for expressions of the form:
+ // 'typeof <expression> == <string>'.
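+ // For example, 'typeof x == "number"' compiles to direct type checks on x
+ // without materializing the typeof result string.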
+ UnaryOperation* operation = left->AsUnaryOperation();
+ if ((op == Token::EQ || op == Token::EQ_STRICT) &&
+ (operation != NULL && operation->op() == Token::TYPEOF) &&
+ (right->AsLiteral() != NULL &&
+ right->AsLiteral()->handle()->IsString())) {
+ Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
- // Determine whether the value is a constant before putting it
- // in a register.
- bool value_is_constant = value.is_constant();
+ // Load the operand and move it to a register.
+ LoadTypeofExpression(operation->expression());
+ Result answer = frame_->Pop();
+ answer.ToRegister();
- // Make sure that value, key and receiver are in registers.
- value.ToRegister();
- key.ToRegister();
- receiver.ToRegister();
+ if (check->Equals(Heap::number_symbol())) {
+ Condition is_smi = masm_->CheckSmi(answer.reg());
+ destination()->true_target()->Branch(is_smi);
+ frame_->Spill(answer.reg());
+ __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
+ answer.Unuse();
+ destination()->Split(equal);
- DeferredReferenceSetKeyedValue* deferred =
- new DeferredReferenceSetKeyedValue(value.reg(),
- key.reg(),
- receiver.reg());
+ } else if (check->Equals(Heap::string_symbol())) {
+ Condition is_smi = masm_->CheckSmi(answer.reg());
+ destination()->false_target()->Branch(is_smi);
- // Check that the receiver is not a smi.
- __ JumpIfSmi(receiver.reg(), deferred->entry_label());
+ // It can be an undetectable string object.
+ __ movq(kScratchRegister,
+ FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ destination()->false_target()->Branch(not_zero);
+ __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
+ answer.Unuse();
+ destination()->Split(below); // Unsigned byte comparison needed.
- // Check that the key is a smi.
- if (!key.is_smi()) {
- __ JumpIfNotSmi(key.reg(), deferred->entry_label());
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(key.reg());
- }
+ } else if (check->Equals(Heap::boolean_symbol())) {
+ __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
+ destination()->true_target()->Branch(equal);
+ __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
+ answer.Unuse();
+ destination()->Split(equal);
- // Check that the receiver is a JSArray.
- __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
- deferred->Branch(not_equal);
+ } else if (check->Equals(Heap::undefined_symbol())) {
+ __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
+ destination()->true_target()->Branch(equal);
- // Check that the key is within bounds. Both the key and the
- // length of the JSArray are smis. Use unsigned comparison to handle
- // negative keys.
- __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
- key.reg());
- deferred->Branch(below_equal);
+ Condition is_smi = masm_->CheckSmi(answer.reg());
+ destination()->false_target()->Branch(is_smi);
- // Get the elements array from the receiver and check that it
- // is a flat array (not a dictionary).
- __ movq(tmp.reg(),
- FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+ // It can be an undetectable object.
+ __ movq(kScratchRegister,
+ FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ answer.Unuse();
+ destination()->Split(not_zero);
- // Check whether it is possible to omit the write barrier. If the
- // elements array is in new space or the value written is a smi we can
- // safely update the elements array without write barrier.
- Label in_new_space;
- __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
- if (!value_is_constant) {
- __ JumpIfNotSmi(value.reg(), deferred->entry_label());
- }
+ } else if (check->Equals(Heap::function_symbol())) {
+ Condition is_smi = masm_->CheckSmi(answer.reg());
+ destination()->false_target()->Branch(is_smi);
+ frame_->Spill(answer.reg());
+ __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
+ destination()->true_target()->Branch(equal);
+ // Regular expressions are callable so typeof == 'function'.
+ __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
+ answer.Unuse();
+ destination()->Split(equal);
- __ bind(&in_new_space);
- // Bind the deferred code patch site to be able to locate the
- // fixed array map comparison. When debugging, we patch this
- // comparison to always fail so that we will hit the IC call
- // in the deferred code which will allow the debugger to
- // break for fast case stores.
- __ bind(deferred->patch_site());
- // Avoid using __ to ensure the distance from patch_site
- // to the map address is always the same.
- masm->movq(kScratchRegister, Factory::fixed_array_map(),
- RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
- kScratchRegister);
- deferred->Branch(not_equal);
+ } else if (check->Equals(Heap::object_symbol())) {
+ Condition is_smi = masm_->CheckSmi(answer.reg());
+ destination()->false_target()->Branch(is_smi);
+ __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
+ destination()->true_target()->Branch(equal);
- // Store the value.
- SmiIndex index =
- masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
- __ movq(FieldOperand(tmp.reg(),
- index.reg,
- index.scale,
- FixedArray::kHeaderSize),
- value.reg());
- __ IncrementCounter(&Counters::keyed_store_inline, 1);
+ // Regular expressions are typeof == 'function', not 'object'.
+ __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
+ destination()->false_target()->Branch(equal);
- deferred->BindExit();
+ // It can be an undetectable object.
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ destination()->false_target()->Branch(not_zero);
+ __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
+ destination()->false_target()->Branch(below);
+ __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+ answer.Unuse();
+ destination()->Split(below_equal);
+ } else {
+ // Uncommon case: typeof testing against a string literal that is
+ // never returned from the typeof operator.
+ answer.Unuse();
+ destination()->Goto(false);
+ }
+ return;
+ }
- cgen_->frame()->Push(&value);
- } else {
- Result answer = cgen_->frame()->CallKeyedStoreIC();
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed store.
- masm->nop();
- cgen_->frame()->Push(&answer);
- }
- set_unloaded();
+ Condition cc = no_condition;
+ bool strict = false;
+ switch (op) {
+ case Token::EQ_STRICT:
+ strict = true;
+ // Fall through
+ case Token::EQ:
+ cc = equal;
+ break;
+ case Token::LT:
+ cc = less;
+ break;
+ case Token::GT:
+ cc = greater;
+ break;
+ case Token::LTE:
+ cc = less_equal;
break;
+ case Token::GTE:
+ cc = greater_equal;
+ break;
+ case Token::IN: {
+ Load(left);
+ Load(right);
+ Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
+ frame_->Push(&answer); // Push the result.
+ return;
+ }
+ case Token::INSTANCEOF: {
+ Load(left);
+ Load(right);
+ InstanceofStub stub;
+ Result answer = frame_->CallStub(&stub, 2);
+ answer.ToRegister();
+ __ testq(answer.reg(), answer.reg());
+ answer.Unuse();
+ destination()->Split(zero);
+ return;
}
-
default:
UNREACHABLE();
}
-}
-
-
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in rsi.
- Label gc;
- __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
-
- // Get the function info from the stack.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
- // Compute the function map in the current global context and set that
- // as the map of the allocated object.
- __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
- __ movq(rcx, Operand(rcx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
- __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
+ if (left->IsTrivial()) {
+ Load(right);
+ Result right_result = frame_->Pop();
+ frame_->Push(left);
+ frame_->Push(&right_result);
+ } else {
+ Load(left);
+ Load(right);
+ }
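+
+ // Reordering note: when the left operand is trivial (a literal or
+ // other side-effect-free value), the right operand is evaluated
+ // first and left is then pushed directly beneath it. Left never
+ // occupies a frame element while right is computed, and evaluation
+ // order is observably unchanged precisely because trivial
+ // expressions have no side effects.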
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
- __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
- __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
- __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
- __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
+ Comparison(node, cc, strict, destination());
+}
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ pop(rcx); // Temporarily remove return address.
- __ pop(rdx);
- __ push(rsi);
- __ push(rdx);
- __ push(rcx); // Restore return address.
- __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
+#ifdef DEBUG
+bool CodeGenerator::HasValidEntryRegisters() {
+ return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
+ && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
+ && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
+ && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
+ && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
+ && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
+ && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
+ && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
+ && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
+ && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
}
+#endif
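+
+// The registers checked above are the allocatable set; rsp, rbp,
+// rsi (the context register) and the reserved registers (r10 is
+// kScratchRegister and r13 is kRootRegister on x64) are deliberately
+// absent because the allocator never hands them out. The register
+// roles named here are our reading of the port, not stated in this
+// patch.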
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
- rax, rbx, rcx, &gc, TAG_OBJECT);
- // Get the function from the stack.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+// Emit a LoadIC call to get the value from receiver and leave it in
+// dst. The receiver register is restored after the call.
+class DeferredReferenceGetNamedValue: public DeferredCode {
+ public:
+ DeferredReferenceGetNamedValue(Register dst,
+ Register receiver,
+ Handle<String> name)
+ : dst_(dst), receiver_(receiver), name_(name) {
+ set_comment("[ DeferredReferenceGetNamedValue");
+ }
- // Setup the object header.
- __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
- __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
+ virtual void Generate();
- // Setup the fixed slots.
- __ xor_(rbx, rbx); // Set to NULL.
- __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
- __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
- __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
- __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
+ Label* patch_site() { return &patch_site_; }
- // Copy the global object from the surrounding context.
- __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
+ private:
+ Label patch_site_;
+ Register dst_;
+ Register receiver_;
+ Handle<String> name_;
+};
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
- }
- // Return and remove the on-stack parameter.
- __ movq(rsi, rax);
- __ ret(1 * kPointerSize);
+void DeferredReferenceGetNamedValue::Generate() {
+ if (!receiver_.is(rax)) {
+ __ movq(rax, receiver_);
+ }
+ __ Move(rcx, name_);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // The call must be followed by a test rax instruction to indicate
+ // that the inobject property case was inlined.
+ //
+ // Encode the delta back to the map check instruction in the test
+ // instruction's immediate. Use masm_-> instead of the __ macro since
+ // the latter may not expand to an expression returning a value.
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+ // Here we use masm_-> instead of the __ macro because this is the
+ // instruction that gets patched and coverage code gets in the way.
+ masm_->testl(rax, Immediate(-delta_to_patch_site));
+ __ IncrementCounter(&Counters::named_load_inline_miss, 1);
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
}
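+
+// In outline, the protocol between this deferred code and the inline
+// site is: the IC call is immediately followed by a test-rax
+// instruction whose 32-bit immediate is the negative distance back to
+// the map-check movq. A runtime patcher can then locate the site
+// roughly like this (an illustrative sketch, not the exact V8 helper;
+// ReadInt32 and PatchEmbeddedMap are hypothetical):
+//   Address test_addr = address_after_ic_call;  // the 'testl rax'
+//   int32_t delta = ReadInt32(test_addr + 1);   // imm32, negative
+//   Address map_check = test_addr + delta;      // start of movq/cmpq
+//   PatchEmbeddedMap(map_check, receiver_map);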
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [rsp + kPointerSize]: constant elements.
- // [rsp + (2 * kPointerSize)]: literal index.
- // [rsp + (3 * kPointerSize)]: literals array.
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
- int size = JSArray::kSize + elements_size;
+class DeferredReferenceGetKeyedValue: public DeferredCode {
+ public:
+ DeferredReferenceGetKeyedValue(Register dst,
+ Register receiver,
+ Register key)
+ : dst_(dst), receiver_(receiver), key_(key) {
+ set_comment("[ DeferredReferenceGetKeyedValue");
+ }
- // Load boilerplate object into rcx and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ movq(rcx, Operand(rsp, 3 * kPointerSize));
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ movq(rcx,
- FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
- __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &slow_case);
+ virtual void Generate();
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
+ Label* patch_site() { return &patch_site_; }
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rax, i), rbx);
- }
- }
+ private:
+ Label patch_site_;
+ Register dst_;
+ Register receiver_;
+ Register key_;
+};
- if (length_ > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
- __ lea(rdx, Operand(rax, JSArray::kSize));
- __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
- // Copy the elements array.
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rdx, i), rbx);
+void DeferredReferenceGetKeyedValue::Generate() {
+ if (receiver_.is(rdx)) {
+ if (!key_.is(rax)) {
+ __ movq(rax, key_);
+ } // else do nothing.
+ } else if (receiver_.is(rax)) {
+ if (key_.is(rdx)) {
+ __ xchg(rax, rdx);
+ } else if (key_.is(rax)) {
+ __ movq(rdx, receiver_);
+ } else {
+ __ movq(rdx, receiver_);
+ __ movq(rax, key_);
}
- }
-
- // Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
+ } else if (key_.is(rax)) {
+ __ movq(rdx, receiver_);
+ } else {
+ __ movq(rax, key_);
+ __ movq(rdx, receiver_);
+ }
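+
+ // The if-chain above is a careful two-register permutation: receiver
+ // must end up in rdx and key in rax with neither value clobbered
+ // before it is read. Each alias pattern gets the minimal move
+ // sequence; the (receiver in rax, key in rdx) case, for instance,
+ // needs only a single xchg.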
+ // Calculate the delta from the IC call instruction to the map check
+ // movq instruction in the inlined version. This delta is stored in
+ // a test(rax, delta) instruction after the call so that we can find
+ // it in the IC initialization code and patch the movq instruction.
+ // This means that we cannot allow test instructions after calls to
+ // KeyedLoadIC stubs in other places.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // The delta from the start of the map-compare instruction to the
+ // test instruction. We use masm_-> directly here instead of the __
+ // macro because the macro can expand into instrumentation code that
+ // can't be used as an expression; this happens in generated-code
+ // coverage builds.
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+ // Here we use masm_-> instead of the __ macro because this is the
+ // instruction that gets patched and coverage code gets in the way.
+ // TODO(X64): Consider whether it's worth switching the test to a
+ // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
+ // be generated normally.
+ masm_->testl(rax, Immediate(-delta_to_patch_site));
+ __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
}
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- Label false_result, true_result, not_string;
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
-
- // 'null' => false.
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- __ j(equal, &false_result);
+class DeferredReferenceSetKeyedValue: public DeferredCode {
+ public:
+ DeferredReferenceSetKeyedValue(Register value,
+ Register key,
+ Register receiver)
+ : value_(value), key_(key), receiver_(receiver) {
+ set_comment("[ DeferredReferenceSetKeyedValue");
+ }
- // Get the map and type of the heap object.
- // We don't use CmpObjectType because we manipulate the type field.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
+ virtual void Generate();
- // Undetectable => false.
- __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
- __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &false_result);
+ Label* patch_site() { return &patch_site_; }
- // JavaScript object => true.
- __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
- __ j(above_equal, &true_result);
+ private:
+ Register value_;
+ Register key_;
+ Register receiver_;
+ Label patch_site_;
+};
- // String value => false iff empty.
- __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
- __ j(above_equal, &not_string);
- __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
- __ SmiTest(rdx);
- __ j(zero, &false_result);
- __ jmp(&true_result);
- __ bind(&not_string);
- __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &true_result);
- // HeapNumber => false iff +0, -0, or NaN.
- // These three cases set the zero flag when compared to zero using ucomisd.
- __ xorpd(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ j(zero, &false_result);
- // Fall through to |true_result|.
+void DeferredReferenceSetKeyedValue::Generate() {
+ __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
+ // Move value, receiver, and key to registers rax, rdx, and rcx, as
+ // the IC stub expects.
+ // Move value to rax, using xchg if the receiver or key is in rax.
+ if (!value_.is(rax)) {
+ if (!receiver_.is(rax) && !key_.is(rax)) {
+ __ movq(rax, value_);
+ } else {
+ __ xchg(rax, value_);
+ // Update receiver_ and key_ if they are affected by the swap.
+ if (receiver_.is(rax)) {
+ receiver_ = value_;
+ } else if (receiver_.is(value_)) {
+ receiver_ = rax;
+ }
+ if (key_.is(rax)) {
+ key_ = value_;
+ } else if (key_.is(value_)) {
+ key_ = rax;
+ }
+ }
+ }
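+
+ // Example of the renaming above: if value_ is rcx and receiver_ is
+ // rax, the xchg leaves the value in rax and the old receiver in rcx,
+ // so receiver_ is renamed to rcx (the old value_ register) and the
+ // later moves read the right data.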
+ // Value is now in rax. Its original location is remembered in value_,
+ // and the value is restored to value_ before returning.
+ // The variables receiver_ and key_ are not preserved.
+ // Move receiver and key to rdx and rcx, swapping if necessary.
+ if (receiver_.is(rdx)) {
+ if (!key_.is(rcx)) {
+ __ movq(rcx, key_);
+ } // Else everything is already in the right place.
+ } else if (receiver_.is(rcx)) {
+ if (key_.is(rdx)) {
+ __ xchg(rcx, rdx);
+ } else if (key_.is(rcx)) {
+ __ movq(rdx, receiver_);
+ } else {
+ __ movq(rdx, receiver_);
+ __ movq(rcx, key_);
+ }
+ } else if (key_.is(rcx)) {
+ __ movq(rdx, receiver_);
+ } else {
+ __ movq(rcx, key_);
+ __ movq(rdx, receiver_);
+ }
- // Return 1/0 for true/false in rax.
- __ bind(&true_result);
- __ movq(rax, Immediate(1));
- __ ret(1 * kPointerSize);
- __ bind(&false_result);
- __ xor_(rax, rax);
- __ ret(1 * kPointerSize);
+ // Call the IC stub.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // The delta from the start of the map-compare instructions (initial movq)
+ // to the test instruction. We use masm_-> directly here instead of the
+ // __ macro because the macro can expand into instrumentation code that
+ // can't be used as an expression; this happens in generated-code
+ // coverage builds.
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+ // Here we use masm_-> instead of the __ macro because this is the
+ // instruction that gets patched and coverage code gets in the way.
+ masm_->testl(rax, Immediate(-delta_to_patch_site));
+ // Restore value (returned from store IC).
+ if (!value_.is(rax)) __ movq(value_, rax);
}
-bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
- Object* answer_object = Heap::undefined_value();
- switch (op) {
- case Token::ADD:
- // Use intptr_t to detect overflow of 32-bit int.
- if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
- answer_object = Smi::FromInt(left + right);
- }
- break;
- case Token::SUB:
- // Use intptr_t to detect overflow of 32-bit int.
- if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
- answer_object = Smi::FromInt(left - right);
- }
- break;
- case Token::MUL: {
- double answer = static_cast<double>(left) * right;
- if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
- // If the product is zero and the non-zero factor is negative,
- // the spec requires us to return floating point negative zero.
- if (answer != 0 || (left + right) >= 0) {
- answer_object = Smi::FromInt(static_cast<int>(answer));
- }
- }
- }
- break;
- case Token::DIV:
- case Token::MOD:
- break;
- case Token::BIT_OR:
- answer_object = Smi::FromInt(left | right);
- break;
- case Token::BIT_AND:
- answer_object = Smi::FromInt(left & right);
- break;
- case Token::BIT_XOR:
- answer_object = Smi::FromInt(left ^ right);
- break;
+Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
+#ifdef DEBUG
+ int original_height = frame()->height();
+#endif
+ Result result;
+ // Do not inline the inobject property case for loads from the global
+ // object. Also do not inline for unoptimized code. This saves time
+ // in the code generator. Unoptimized code is toplevel code or code
+ // that is not in a loop.
+ if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+ Comment cmnt(masm(), "[ Load from named Property");
+ frame()->Push(name);
- case Token::SHL: {
- int shift_amount = right & 0x1F;
- if (Smi::IsValid(left << shift_amount)) {
- answer_object = Smi::FromInt(left << shift_amount);
- }
- break;
- }
- case Token::SHR: {
- int shift_amount = right & 0x1F;
- unsigned int unsigned_left = left;
- unsigned_left >>= shift_amount;
- if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
- answer_object = Smi::FromInt(unsigned_left);
- }
- break;
- }
- case Token::SAR: {
- int shift_amount = right & 0x1F;
- unsigned int unsigned_left = left;
- if (left < 0) {
- // Perform arithmetic shift of a negative number by
- // complementing number, logical shifting, complementing again.
- unsigned_left = ~unsigned_left;
- unsigned_left >>= shift_amount;
- unsigned_left = ~unsigned_left;
- } else {
- unsigned_left >>= shift_amount;
- }
- ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
- answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- if (answer_object == Heap::undefined_value()) {
- return false;
+ RelocInfo::Mode mode = is_contextual
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ result = frame()->CallLoadIC(mode);
+ // A test rax instruction following the call signals that the
+ // inobject property case was inlined. Ensure that there is not
+ // a test rax instruction here.
+ __ nop();
+ } else {
+ // Inline the inobject property case.
+ Comment cmnt(masm(), "[ Inlined named property load");
+ Result receiver = frame()->Pop();
+ receiver.ToRegister();
+ result = allocator()->Allocate();
+ ASSERT(result.is_valid());
+
+ // Cannot use r12 for the receiver: encoding r12 as a base register
+ // needs an extra SIB byte, which would change the distance between
+ // the call and the fixup location and break patching.
+ if (receiver.reg().is(r12)) {
+ frame()->Spill(receiver.reg()); // It will be overwritten with result.
+ // Swap receiver and result.
+ __ movq(result.reg(), receiver.reg());
+ Result temp = receiver;
+ receiver = result;
+ result = temp;
+ }
+
+ DeferredReferenceGetNamedValue* deferred =
+ new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
+
+ // Check that the receiver is a heap object.
+ __ JumpIfSmi(receiver.reg(), deferred->entry_label());
+
+ __ bind(deferred->patch_site());
+ // This is the map check instruction that will be patched (so we can't
+ // use the double underscore macro that may insert instructions).
+ // Initially use an invalid map to force a failure.
+ masm()->Move(kScratchRegister, Factory::null_value());
+ masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+ kScratchRegister);
+ // This branch is always a forwards branch so it's always a fixed
+ // size which allows the assert below to succeed and patching to work.
+ // Don't use deferred->Branch(...), since that might add coverage code.
+ masm()->j(not_equal, deferred->entry_label());
+
+ // The delta from the patch label to the load offset must be
+ // statically known.
+ ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
+ LoadIC::kOffsetToLoadInstruction);
+ // The initial (invalid) offset has to be large enough to force
+ // a 32-bit instruction encoding to allow patching with an
+ // arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
+ int offset = kMaxInt;
+ masm()->movq(result.reg(), FieldOperand(receiver.reg(), offset));
+
+ __ IncrementCounter(&Counters::named_load_inline, 1);
+ deferred->BindExit();
}
- frame_->Push(Handle<Object>(answer_object));
- return true;
+ ASSERT(frame()->height() == original_height - 1);
+ return result;
}
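+
+// Shape of the inlined fast path above before any patching (a sketch;
+// exact encodings are the assembler's concern):
+//   movq kScratchRegister, <null_value>   ; patched to the real map
+//   cmpq [receiver + HeapObject::kMapOffset], kScratchRegister
+//   jne  <deferred LoadIC call>
+//   movq result, [receiver + kMaxInt]     ; offset patched to the
+//                                         ; actual inobject field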
-// End of CodeGenerator implementation.
+Result CodeGenerator::EmitKeyedLoad() {
+#ifdef DEBUG
+ int original_height = frame()->height();
+#endif
+ Result result;
+ // Inline array load code if inside of a loop. We do not know
+ // the receiver map yet, so we initially generate the code with
+ // a check against an invalid map. In the inline cache code, we
+ // patch the map check if appropriate.
+ if (loop_nesting() > 0) {
+ Comment cmnt(masm_, "[ Inlined load from keyed Property");
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Input on stack:
- // rsp[8]: argument (should be number).
- // rsp[0]: return address.
- Label runtime_call;
- Label runtime_call_clear_stack;
- Label input_not_smi;
- Label loaded;
- // Test that rax is a number.
- __ movq(rax, Operand(rsp, kPointerSize));
- __ JumpIfNotSmi(rax, &input_not_smi);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the bits of the double into rbx.
- __ SmiToInteger32(rax, rax);
- __ subq(rsp, Immediate(kPointerSize));
- __ cvtlsi2sd(xmm1, rax);
- __ movsd(Operand(rsp, 0), xmm1);
- __ movq(rbx, xmm1);
- __ movq(rdx, xmm1);
- __ fld_d(Operand(rsp, 0));
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(&loaded);
+ // Use a fresh temporary to load the elements without destroying
+ // the receiver which is needed for the deferred slow case.
+ // Allocate the temporary early so that we use rax if it is free.
+ Result elements = allocator()->Allocate();
+ ASSERT(elements.is_valid());
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ Move(rbx, Factory::heap_number_map());
- __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // bits into rbx.
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rdx, rbx);
- __ bind(&loaded);
- // ST[0] == double value
- // rbx = bits of double value.
- // rdx = also bits of double value.
- // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
- // h = h0 = bits ^ (bits >> 32);
- // h ^= h >> 16;
- // h ^= h >> 8;
- // h = h & (cacheSize - 1);
- // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
- __ sar(rdx, Immediate(32));
- __ xorl(rdx, rbx);
- __ movl(rcx, rdx);
- __ movl(rax, rdx);
- __ movl(rdi, rdx);
- __ sarl(rdx, Immediate(8));
- __ sarl(rcx, Immediate(16));
- __ sarl(rax, Immediate(24));
- __ xorl(rcx, rdx);
- __ xorl(rax, rdi);
- __ xorl(rcx, rax);
- ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
- __ andl(rcx, Immediate(TranscendentalCache::kCacheSize - 1));
+ Result key = frame_->Pop();
+ Result receiver = frame_->Pop();
+ key.ToRegister();
+ receiver.ToRegister();
- // ST[0] == double value.
- // rbx = bits of double value.
- // rcx = TranscendentalCache::hash(double value).
- __ movq(rax, ExternalReference::transcendental_cache_array_address());
- // rax points to cache array.
- __ movq(rax, Operand(rax, type_ * sizeof(TranscendentalCache::caches_[0])));
- // rax points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ testq(rax, rax);
- __ j(zero, &runtime_call_clear_stack);
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { // NOLINT - doesn't like a single brace on a line.
- TranscendentalCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- // Two uint_32's and a pointer per element.
- CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
- CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
- CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
- CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
+ // If key and receiver are shared registers on the frame, their values will
+ // be automatically saved and restored when going to deferred code.
+ // The result is returned in elements, which is not shared.
+ DeferredReferenceGetKeyedValue* deferred =
+ new DeferredReferenceGetKeyedValue(elements.reg(),
+ receiver.reg(),
+ key.reg());
+
+ __ JumpIfSmi(receiver.reg(), deferred->entry_label());
+
+ // Check that the receiver has the expected map.
+ // Initially, use an invalid map. The map is patched in the IC
+ // initialization code.
+ __ bind(deferred->patch_site());
+ // Use masm_-> here instead of the double underscore macro since extra
+ // coverage code can interfere with the patching. Do not use a load
+ // from the root array to load null_value, since the load must be patched
+ // with the expected receiver map, which is not in the root array.
+ masm_->movq(kScratchRegister, Factory::null_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+ kScratchRegister);
+ deferred->Branch(not_equal);
+
+ // Check that the key is a non-negative smi.
+ __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
+
+ // Get the elements array from the receiver and check that it
+ // is not a dictionary.
+ __ movq(elements.reg(),
+ FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+ if (FLAG_debug_code) {
+ __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+ Factory::fixed_array_map());
+ __ Assert(equal, "JSObject with fast elements map has slow elements");
+ }
+
+ // Check that key is within bounds.
+ __ SmiCompare(key.reg(),
+ FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+ deferred->Branch(above_equal);
+
+ // Load and check that the result is not the hole. We could
+ // reuse the index or elements register for the value.
+ //
+ // TODO(206): Consider whether it makes sense to try some
+ // heuristic about which register to reuse. For example, if
+ // one is rax, then we can reuse that one because the value
+ // coming from the deferred code will be in rax.
+ SmiIndex index =
+ masm_->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
+ __ movq(elements.reg(),
+ FieldOperand(elements.reg(),
+ index.reg,
+ index.scale,
+ FixedArray::kHeaderSize));
+ result = elements;
+ __ CompareRoot(result.reg(), Heap::kTheHoleValueRootIndex);
+ deferred->Branch(equal);
+ __ IncrementCounter(&Counters::keyed_load_inline, 1);
+
+ deferred->BindExit();
+ } else {
+ Comment cmnt(masm_, "[ Load from keyed Property");
+ result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed load. The explicit nop instruction is here because
+ // the push that follows might be peep-hole optimized away.
+ __ nop();
}
-#endif
- // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
- __ addl(rcx, rcx);
- __ lea(rcx, Operand(rax, rcx, times_8, 0));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- Label cache_miss;
- __ cmpq(rbx, Operand(rcx, 0));
- __ j(not_equal, &cache_miss);
- // Cache hit!
- __ movq(rax, Operand(rcx, 2 * kIntSize));
- __ fstp(0); // Clear FPU stack.
- __ ret(kPointerSize);
+ ASSERT(frame()->height() == original_height - 2);
+ return result;
+}
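+
+// Taken together, the inlined keyed load above bails out to the
+// deferred IC call whenever a fast-path assumption fails: the receiver
+// is a smi, the receiver's map is not the (patched) expected map, the
+// key is not a non-negative smi, the key is out of bounds, or the
+// loaded element is the hole. A debug-only assert additionally checks
+// that the elements array has the fast (fixed array) map.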
- __ bind(&cache_miss);
- // Update cache with new value.
- Label nan_result;
- GenerateOperation(masm, &nan_result);
- __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
- __ movq(Operand(rcx, 0), rbx);
- __ movq(Operand(rcx, 2 * kIntSize), rax);
- __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ ret(kPointerSize);
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+#undef __
+#define __ ACCESS_MASM(masm)
- __ bind(&nan_result);
- __ fstp(0); // Remove argument from FPU stack.
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- __ movq(Operand(rcx, 0), rbx);
- __ movq(Operand(rcx, 2 * kIntSize), rax);
- __ ret(kPointerSize);
+
+Handle<String> Reference::GetName() {
+ ASSERT(type_ == NAMED);
+ Property* property = expression_->AsProperty();
+ if (property == NULL) {
+ // Global variable reference treated as a named property reference.
+ VariableProxy* proxy = expression_->AsVariableProxy();
+ ASSERT(proxy->AsVariable() != NULL);
+ ASSERT(proxy->AsVariable()->is_global());
+ return proxy->name();
+ } else {
+ Literal* raw_name = property->key()->AsLiteral();
+ ASSERT(raw_name != NULL);
+ return Handle<String>(String::cast(*raw_name->handle()));
+ }
}
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+void Reference::GetValue() {
+ ASSERT(!cgen_->in_spilled_code());
+ ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(!is_illegal());
+ MacroAssembler* masm = cgen_->masm();
+
+ // Record the source position for the property load.
+ Property* property = expression_->AsProperty();
+ if (property != NULL) {
+ cgen_->CodeForSourcePosition(property->position());
+ }
+
switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
+ case SLOT: {
+ Comment cmnt(masm, "[ Load from Slot");
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ ASSERT(slot != NULL);
+ cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
+ break;
+ }
+
+ case NAMED: {
+ Variable* var = expression_->AsVariableProxy()->AsVariable();
+ bool is_global = var != NULL;
+ ASSERT(!is_global || var->is_global());
+ if (persist_after_get_) {
+ cgen_->frame()->Dup();
+ }
+ Result result = cgen_->EmitNamedLoad(GetName(), is_global);
+ cgen_->frame()->Push(&result);
+ break;
+ }
+
+ case KEYED: {
+ // A load of a bare identifier (load from global) cannot be keyed.
+ ASSERT(expression_->AsVariableProxy()->AsVariable() == NULL);
+ if (persist_after_get_) {
+ cgen_->frame()->PushElementAt(1);
+ cgen_->frame()->PushElementAt(1);
+ }
+ Result value = cgen_->EmitKeyedLoad();
+ cgen_->frame()->Push(&value);
+ break;
+ }
+
default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
+ UNREACHABLE();
+ }
+
+ if (!persist_after_get_) {
+ set_unloaded();
}
}
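+
+// Stack effect of the persist_after_get_ duplication above (sketch):
+//   NAMED: ... receiver     =>  ... receiver receiver
+//   KEYED: ... receiver key =>  ... receiver key receiver key
+// The topmost copies are consumed by the load itself, so the original
+// reference components survive for a later SetValue.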
-void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
- Label* on_nan_result) {
- // Registers:
- // rbx: Bits of input double. Must be preserved.
- // rcx: Pointer to cache entry. Must be preserved.
- // st(0): Input double
- Label done;
- ASSERT(type_ == TranscendentalCache::SIN ||
- type_ == TranscendentalCache::COS);
- // More transcendental types can be added later.
+void Reference::TakeValue() {
+ // TODO(X64): This function is completely architecture independent. Move
+ // it somewhere shared.
- // Both fsin and fcos require arguments in the range +/-2^63 and
- // return NaN for infinities and NaN. They can share all code except
- // the actual fsin/fcos operation.
- Label in_range;
- // If argument is outside the range -2^63..2^63, fsin/cos doesn't
- // work. We must reduce it to the appropriate range.
- __ movq(rdi, rbx);
- // Move exponent and sign bits to low bits.
- __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
- // Remove sign bit.
- __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
- int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
- __ cmpl(rdi, Immediate(supported_exponent_limit));
- __ j(below, &in_range);
- // Check for infinity and NaN. Both return NaN for sin.
- __ cmpl(rdi, Immediate(0x7ff));
- __ j(equal, on_nan_result);
+ // For non-constant frame-allocated slots, we invalidate the value in the
+ // slot. For all others, we fall back on GetValue.
+ ASSERT(!cgen_->in_spilled_code());
+ ASSERT(!is_illegal());
+ if (type_ != SLOT) {
+ GetValue();
+ return;
+ }
- // Use fpmod to restrict argument to the range +/-2*PI.
- __ fldpi();
- __ fadd(0);
- __ fld(1);
- // FPU Stack: input, 2*pi, input.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word.
- __ j(zero, &no_exceptions);
- __ fnclex();
- __ bind(&no_exceptions);
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ ASSERT(slot != NULL);
+ if (slot->type() == Slot::LOOKUP ||
+ slot->type() == Slot::CONTEXT ||
+ slot->var()->mode() == Variable::CONST ||
+ slot->is_arguments()) {
+ GetValue();
+ return;
}
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem1();
- __ fwait();
- __ fnstsw_ax();
- __ testl(rax, Immediate(0x400)); // Check C2 bit of FPU status word.
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
+ // Only non-constant, frame-allocated parameters and locals can reach
+ // here. Be careful not to use the optimizations for arguments
+ // object access since it may not have been initialized yet.
+ ASSERT(!slot->is_arguments());
+ if (slot->type() == Slot::PARAMETER) {
+ cgen_->frame()->TakeParameterAt(slot->index());
+ } else {
+ ASSERT(slot->type() == Slot::LOCAL);
+ cgen_->frame()->TakeLocalAt(slot->index());
}
- // FPU Stack: input, 2*pi, input % 2*pi
- __ fstp(2);
- // FPU Stack: input % 2*pi, 2*pi,
- __ fstp(0);
- // FPU Stack: input % 2*pi
- __ bind(&in_range);
+
+ ASSERT(persist_after_get_);
+ // Do not unload the reference, because it is used in SetValue.
+}
+
+
+void Reference::SetValue(InitState init_state) {
+ ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(!is_illegal());
+ MacroAssembler* masm = cgen_->masm();
switch (type_) {
- case TranscendentalCache::SIN:
- __ fsin();
+ case SLOT: {
+ Comment cmnt(masm, "[ Store to Slot");
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ ASSERT(slot != NULL);
+ cgen_->StoreToSlot(slot, init_state);
+ cgen_->UnloadReference(this);
break;
- case TranscendentalCache::COS:
- __ fcos();
+ }
+
+ case NAMED: {
+ Comment cmnt(masm, "[ Store to named Property");
+ cgen_->frame()->Push(GetName());
+ Result answer = cgen_->frame()->CallStoreIC();
+ cgen_->frame()->Push(&answer);
+ set_unloaded();
+ break;
+ }
+
+ case KEYED: {
+ Comment cmnt(masm, "[ Store to keyed Property");
+
+ // Generate inlined version of the keyed store if the code is in
+ // a loop and the key is likely to be a smi.
+ Property* property = expression()->AsProperty();
+ ASSERT(property != NULL);
+ StaticType* key_smi_analysis = property->key()->type();
+
+ if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
+ Comment cmnt(masm, "[ Inlined store to keyed Property");
+
+ // Get the receiver, key and value into registers.
+ Result value = cgen_->frame()->Pop();
+ Result key = cgen_->frame()->Pop();
+ Result receiver = cgen_->frame()->Pop();
+
+ Result tmp = cgen_->allocator_->Allocate();
+ ASSERT(tmp.is_valid());
+ Result tmp2 = cgen_->allocator_->Allocate();
+ ASSERT(tmp2.is_valid());
+
+ // Determine whether the value is a constant before putting it
+ // in a register.
+ bool value_is_constant = value.is_constant();
+
+ // Make sure that value, key and receiver are in registers.
+ value.ToRegister();
+ key.ToRegister();
+ receiver.ToRegister();
+
+ DeferredReferenceSetKeyedValue* deferred =
+ new DeferredReferenceSetKeyedValue(value.reg(),
+ key.reg(),
+ receiver.reg());
+
+ // Check that the receiver is not a smi.
+ __ JumpIfSmi(receiver.reg(), deferred->entry_label());
+
+ // Check that the key is a smi.
+ if (!key.is_smi()) {
+ __ JumpIfNotSmi(key.reg(), deferred->entry_label());
+ } else if (FLAG_debug_code) {
+ __ AbortIfNotSmi(key.reg());
+ }
+
+ // Check that the receiver is a JSArray.
+ __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
+ deferred->Branch(not_equal);
+
+ // Check that the key is within bounds. Both the key and the
+ // length of the JSArray are smis. Use unsigned comparison to handle
+ // negative keys.
+ __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
+ key.reg());
+ deferred->Branch(below_equal);
+
+ // Get the elements array from the receiver and check that it
+ // is a flat array (not a dictionary).
+ __ movq(tmp.reg(),
+ FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+
+ // Check whether it is possible to omit the write barrier. If the
+ // elements array is in new space or the value written is a smi we can
+ // safely update the elements array without write barrier.
+ Label in_new_space;
+ __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
+ if (!value_is_constant) {
+ __ JumpIfNotSmi(value.reg(), deferred->entry_label());
+ }
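+
+ // Rationale (sketch): the write barrier exists to record pointers
+ // from old space into new space. If the elements array itself is in
+ // new space no such record is needed, and a smi is not a heap
+ // pointer at all, so either condition lets the store below skip the
+ // barrier entirely.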
+
+ __ bind(&in_new_space);
+ // Bind the deferred code patch site to be able to locate the
+ // fixed array map comparison. When debugging, we patch this
+ // comparison to always fail so that we will hit the IC call
+ // in the deferred code which will allow the debugger to
+ // break for fast case stores.
+ __ bind(deferred->patch_site());
+ // Avoid using __ to ensure the distance from patch_site
+ // to the map address is always the same.
+ masm->movq(kScratchRegister, Factory::fixed_array_map(),
+ RelocInfo::EMBEDDED_OBJECT);
+ __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
+ kScratchRegister);
+ deferred->Branch(not_equal);
+
+ // Store the value.
+ SmiIndex index =
+ masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
+ __ movq(FieldOperand(tmp.reg(),
+ index.reg,
+ index.scale,
+ FixedArray::kHeaderSize),
+ value.reg());
+ __ IncrementCounter(&Counters::keyed_store_inline, 1);
+
+ deferred->BindExit();
+
+ cgen_->frame()->Push(&value);
+ } else {
+ Result answer = cgen_->frame()->CallKeyedStoreIC();
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed store.
+ masm->nop();
+ cgen_->frame()->Push(&answer);
+ }
+ set_unloaded();
break;
+ }
+
default:
UNREACHABLE();
}
- __ bind(&done);
}
-// Get the integer part of a heap number.
-// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
-void IntegerConvert(MacroAssembler* masm,
- Register result,
- Register source) {
- // Result may be rcx. If result and source are the same register, source will
- // be overwritten.
- ASSERT(!result.is(rdi) && !result.is(rbx));
- // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
- // cvttsd2si (32-bit version) directly.
- Register double_exponent = rbx;
- Register double_value = rdi;
- Label done, exponent_63_plus;
- // Get double and extract exponent.
- __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
- // Clear result preemptively, in case we need to return zero.
- __ xorl(result, result);
- __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
- // Double to remove sign bit, shift exponent down to least significant bits.
- // and subtract bias to get the unshifted, unbiased exponent.
- __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
- __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
- __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
- // Check whether the exponent is too big for a 63 bit unsigned integer.
- __ cmpl(double_exponent, Immediate(63));
- __ j(above_equal, &exponent_63_plus);
- // Handle exponent range 0..62.
- __ cvttsd2siq(result, xmm0);
- __ jmp(&done);
+void FastNewClosureStub::Generate(MacroAssembler* masm) {
+ // Create a new closure from the given function info in new
+ // space. Set the context to the current context in rsi.
+ Label gc;
+ __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
- __ bind(&exponent_63_plus);
- // Exponent negative or 63+.
- __ cmpl(double_exponent, Immediate(83));
- // If exponent negative or above 83, number contains no significant bits in
- // the range 0..2^31, so result is zero, and rcx already holds zero.
- __ j(above, &done);
+ // Get the function info from the stack.
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize));
- // Exponent in range 63..83.
- // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
- // the least significant exponent-52 bits.
+ // Compute the function map in the current global context and set that
+ // as the map of the allocated object.
+ __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
+ __ movq(rcx, Operand(rcx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
+ __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
- // Negate low bits of mantissa if value is negative.
- __ addq(double_value, double_value); // Move sign bit to carry.
- __ sbbl(result, result); // And convert carry to -1 in result register.
- // if scratch2 is negative, do (scratch2-1)^-1, otherwise (scratch2-0)^0.
- __ addl(double_value, result);
- // Do xor in opposite directions depending on where we want the result
- // (depending on whether result is rcx or not).
+ // Initialize the rest of the function. We don't have to update the
+ // write barrier because the allocated object is in new space.
+ __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
+ __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
+ __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
+ __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
+ __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
+ __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
+ __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
+ __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
- if (result.is(rcx)) {
- __ xorl(double_value, result);
- // Left shift mantissa by (exponent - mantissabits - 1) to save the
- // bits that have positional values below 2^32 (the extra -1 comes from the
- // doubling done above to move the sign bit into the carry flag).
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(double_value);
- __ movl(result, double_value);
- } else {
- // As the then-branch, but move double-value to result before shifting.
- __ xorl(result, double_value);
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(result);
+ // Return and remove the on-stack parameter.
+ __ ret(1 * kPointerSize);
+
+ // Create a new closure through the slower runtime call.
+ __ bind(&gc);
+ __ pop(rcx); // Temporarily remove return address.
+ __ pop(rdx);
+ __ push(rsi);
+ __ push(rdx);
+ __ push(rcx); // Restore return address.
+ __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
+}
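+
+// Note the slow-path shuffle above: the stub is entered with
+// [function info][return address] on the stack, but Runtime::kNewClosure
+// expects [context][function info][return address]. Popping the return
+// address, pushing rsi and the info, then re-pushing the return address
+// splices the context argument in beneath it (sketch):
+//   before: ... info ret      after: ... rsi info ret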
+
+
+void FastNewContextStub::Generate(MacroAssembler* masm) {
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+ __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
+ rax, rbx, rcx, &gc, TAG_OBJECT);
+
+ // Get the function from the stack.
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+
+ // Setup the object header.
+ __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
+ __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
+
+ // Setup the fixed slots.
+ __ xor_(rbx, rbx); // Set to NULL.
+ __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
+ __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
+ __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
+ __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
+
+ // Copy the global object from the surrounding context.
+ __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
+
+ // Initialize the rest of the slots to undefined.
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+ __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
}
- __ bind(&done);
+ // Return and remove the on-stack parameter.
+ __ movq(rsi, rax);
+ __ ret(1 * kPointerSize);
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(Runtime::kNewContext, 1, 1);
}
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- Label slow, done;
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [rsp + kPointerSize]: constant elements.
+ // [rsp + (2 * kPointerSize)]: literal index.
+ // [rsp + (3 * kPointerSize)]: literals array.
- if (op_ == Token::SUB) {
- // Check whether the value is a smi.
- Label try_float;
- __ JumpIfNotSmi(rax, &try_float);
+ // All sizes here are multiples of kPointerSize.
+ int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+ int size = JSArray::kSize + elements_size;
- if (negative_zero_ == kIgnoreNegativeZero) {
- __ SmiCompare(rax, Smi::FromInt(0));
- __ j(equal, &done);
- }
+ // Load boilerplate object into rcx and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ movq(rcx, Operand(rsp, 3 * kPointerSize));
+ __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+ __ movq(rcx,
+ FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
+ __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &slow_case);
- // Enter runtime system if the value of the smi is zero
- // to make sure that we switch between 0 and -0.
- // Also enter it if the value of the smi is Smi::kMinValue.
- __ SmiNeg(rax, rax, &done);
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
- // Either zero or Smi::kMinValue, neither of which become a smi when
- // negated.
- if (negative_zero_ == kStrictNegativeZero) {
- __ SmiCompare(rax, Smi::FromInt(0));
- __ j(not_equal, &slow);
- __ Move(rax, Factory::minus_zero_value());
- __ jmp(&done);
- } else {
- __ jmp(&slow);
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rax, i), rbx);
}
+ }
- // Try floating point case.
- __ bind(&try_float);
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &slow);
- // Operand is a float, negate its value by flipping sign bit.
- __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(kScratchRegister, Immediate(0x01));
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(rdx, kScratchRegister); // Flip sign.
- // rdx is value to store.
- if (overwrite_ == UNARY_OVERWRITE) {
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
- } else {
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // rcx: allocated 'empty' number
- __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
- __ movq(rax, rcx);
+ if (length_ > 0) {
+ // Get hold of the elements array of the boilerplate and setup the
+ // elements pointer in the resulting object.
+ __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
+ __ lea(rdx, Operand(rax, JSArray::kSize));
+ __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
+
+ // Copy the elements array.
+ for (int i = 0; i < elements_size; i += kPointerSize) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rdx, i), rbx);
}
- } else if (op_ == Token::BIT_NOT) {
- // Check if the operand is a heap number.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &slow);
+ }
+
+ // Return and remove the on-stack parameters.
+ __ ret(3 * kPointerSize);
+
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+}
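+
+// Because length_ is a compile-time constant of each stub instance,
+// both copy loops above are fully unrolled when the stub is generated,
+// and for length_ > 0 the elements pointer is rewritten rather than
+// copied so the clone gets its own backing store, allocated
+// contiguously after the JSArray header.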
- // Convert the heap number in rax to an untagged integer in rcx.
- IntegerConvert(masm, rax, rax);
- // Do the bitwise operation and smi tag the result.
- __ notl(rax);
- __ Integer32ToSmi(rax, rax);
- }
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+ Label false_result, true_result, not_string;
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
- // Return from the stub.
- __ bind(&done);
- __ StubReturn(1);
+ // 'null' => false.
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ __ j(equal, &false_result);
- // Handle the slow case by jumping to the JavaScript builtin.
- __ bind(&slow);
- __ pop(rcx); // pop return address
- __ push(rax);
- __ push(rcx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
+ // Get the map and type of the heap object.
+ // We don't use CmpObjectType because we manipulate the type field.
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
+ // Undetectable => false.
+ __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
+ __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, &false_result);
-void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Just jump directly to runtime if native RegExp is not selected at compile
- // time or if regexp entry in generated code is turned off runtime switch or
- // at compilation.
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
+ // JavaScript object => true.
+ __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ j(above_equal, &true_result);
- // Stack frame on entry.
- // esp[0]: return address
- // esp[8]: last_match_info (expected JSArray)
- // esp[16]: previous index
- // esp[24]: subject string
- // esp[32]: JSRegExp object
+ // String value => false iff empty.
+ __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
+ __ j(above_equal, &not_string);
+ __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
+ __ SmiTest(rdx);
+ __ j(zero, &false_result);
+ __ jmp(&true_result);
- static const int kLastMatchInfoOffset = 1 * kPointerSize;
- static const int kPreviousIndexOffset = 2 * kPointerSize;
- static const int kSubjectOffset = 3 * kPointerSize;
- static const int kJSRegExpOffset = 4 * kPointerSize;
+ __ bind(&not_string);
+ __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &true_result);
+ // HeapNumber => false iff +0, -0, or NaN.
+ // These three cases set the zero flag when compared to zero using ucomisd.
+ __ xorpd(xmm0, xmm0);
+ __ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ j(zero, &false_result);
+ // Fall through to |true_result|.
- Label runtime;
+ // Return 1/0 for true/false in rax.
+ __ bind(&true_result);
+ __ movq(rax, Immediate(1));
+ __ ret(1 * kPointerSize);
+ __ bind(&false_result);
+ __ xor_(rax, rax);
+ __ ret(1 * kPointerSize);
+}
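+
+// Summary of the mapping implemented above (smi, true, false and
+// undefined inputs are presumably filtered by the code generator's
+// inline fast checks before the stub is reached):
+//   null                 -> false
+//   undetectable object  -> false
+//   JS object            -> true
+//   string               -> length != 0
+//   heap number          -> value not +0, -0 or NaN
+//   anything else        -> true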
- // Ensure that a RegExp stack is allocated.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address();
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size();
- __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
- __ movq(kScratchRegister, Operand(kScratchRegister, 0));
- __ testq(kScratchRegister, kScratchRegister);
- __ j(zero, &runtime);
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Register left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(left);
+ __ push(right);
+ } else {
+ // The register calling convention is: left in rdx, right in rax.
+ Register left_arg = rdx;
+ Register right_arg = rax;
+ if (!(left.is(left_arg) && right.is(right_arg))) {
+ if (left.is(right_arg) && right.is(left_arg)) {
+ if (IsOperationCommutative()) {
+ SetArgsReversed();
+ } else {
+ __ xchg(left, right);
+ }
+ } else if (left.is(left_arg)) {
+ __ movq(right_arg, right);
+ } else if (right.is(right_arg)) {
+ __ movq(left_arg, left);
+ } else if (left.is(right_arg)) {
+ if (IsOperationCommutative()) {
+ __ movq(left_arg, right);
+ SetArgsReversed();
+ } else {
+ // The order of moves is important to avoid destroying the left argument.
+ __ movq(left_arg, left);
+ __ movq(right_arg, right);
+ }
+ } else if (right.is(left_arg)) {
+ if (IsOperationCommutative()) {
+ __ movq(right_arg, left);
+ SetArgsReversed();
+ } else {
+ // The order of moves is important to avoid destroying the right argument.
+ __ movq(right_arg, right);
+ __ movq(left_arg, left);
+ }
+ } else {
+ // Order of moves is not important.
+ __ movq(left_arg, left);
+ __ movq(right_arg, right);
+ }
+ }
- // Check that the first argument is a JSRegExp object.
- __ movq(rax, Operand(rsp, kJSRegExpOffset));
- __ JumpIfSmi(rax, &runtime);
- __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- Condition is_smi = masm->CheckSmi(rcx);
- __ Check(NegateCondition(is_smi),
- "Unexpected type for RegExp data, FixedArray expected");
- __ CmpObjectType(rcx, FIXED_ARRAY_TYPE, kScratchRegister);
- __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
}
- // rcx: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
- __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
- __ j(not_equal, &runtime);
+ // Call the stub.
+ __ CallStub(this);
+}
- // rcx: RegExp data (FixedArray)
- // Check that the number of captures fit in the static offsets vector buffer.
- __ SmiToInteger32(rdx,
- FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- __ leal(rdx, Operand(rdx, rdx, times_1, 2));
- // Check that the static offsets vector buffer is large enough.
- __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
- __ j(above, &runtime);
- // rcx: RegExp data (FixedArray)
- // rdx: Number of capture registers
- // Check that the second argument is a string.
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ JumpIfSmi(rax, &runtime);
- Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
- __ j(NegateCondition(is_string), &runtime);
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Register left,
+ Smi* right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(left);
+ __ Push(right);
+ } else {
+ // The calling convention with registers is left in rdx and right in rax.
+ Register left_arg = rdx;
+ Register right_arg = rax;
+ if (left.is(left_arg)) {
+ __ Move(right_arg, right);
+ } else if (left.is(right_arg) && IsOperationCommutative()) {
+ __ Move(left_arg, right);
+ SetArgsReversed();
+ } else {
+ // For non-commutative operations, left and right_arg might be
+ // the same register. Therefore, the order of the moves is
+ // important here in order to not overwrite left before moving
+ // it to left_arg.
+ __ movq(left_arg, left);
+ __ Move(right_arg, right);
+ }
- // rax: Subject string.
- // rcx: RegExp data (FixedArray).
- // rdx: Number of capture registers.
- // Check that the third argument is a positive smi less than the string
- // length. A negative value will be greater (unsigned comparison).
- __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
- __ JumpIfNotSmi(rbx, &runtime);
- __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset));
- __ j(above_equal, &runtime);
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Smi* left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ Push(left);
+ __ push(right);
+ } else {
+ // The calling convention with registers is left in rdx and right in rax.
+ Register left_arg = rdx;
+ Register right_arg = rax;
+ if (right.is(right_arg)) {
+ __ Move(left_arg, left);
+ } else if (right.is(left_arg) && IsOperationCommutative()) {
+ __ Move(right_arg, left);
+ SetArgsReversed();
+ } else {
+ // For non-commutative operations, right and left_arg might be
+ // the same register. Therefore, the order of the moves is
+ // important here in order to not overwrite right before moving
+ // it to right_arg.
+ __ movq(right_arg, right);
+ __ Move(left_arg, left);
+ }
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
- // rcx: RegExp data (FixedArray)
- // rdx: Number of capture registers
- // Check that the fourth object is a JSArray object.
- __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
- __ JumpIfSmi(rax, &runtime);
- __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
- __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
- __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
- __ Cmp(rax, Factory::fixed_array_map());
- __ j(not_equal, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information. Ensure no overflow in add.
- ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
- __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmpl(rdx, rax);
- __ j(greater, &runtime);
+ // Call the stub.
+ __ CallStub(this);
+}
- // rcx: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_ascii_string, seq_two_byte_string, check_code;
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- // First check for flat two byte string.
- __ andb(rbx, Immediate(
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
- ASSERT_EQ(0, kStringTag | kSeqStringTag | kTwoByteStringTag);
- __ j(zero, &seq_two_byte_string);
- // Any other flat string must be a flat ascii string.
- __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
- __ j(zero, &seq_ascii_string);
- // Check for flat cons string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- ASSERT(kExternalStringTag !=0);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
- __ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
- __ j(not_zero, &runtime);
- // String is a cons string.
- __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
- __ Cmp(rdx, Factory::empty_string());
- __ j(not_equal, &runtime);
- __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- // String is a cons string with empty second part.
- // eax: first part of cons string.
- // ebx: map of first part of cons string.
- // Is first part a flat two byte string?
- __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
- Immediate(kStringRepresentationMask | kStringEncodingMask));
- ASSERT_EQ(0, kSeqStringTag | kTwoByteStringTag);
- __ j(zero, &seq_two_byte_string);
- // Any other flat string must be ascii.
- __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
- Immediate(kStringRepresentationMask));
- __ j(not_zero, &runtime);
+Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
+ VirtualFrame* frame,
+ Result* left,
+ Result* right) {
+ if (ArgsInRegistersSupported()) {
+ SetArgsInRegisters();
+ return frame->CallStub(this, left, right);
+ } else {
+ frame->Push(left);
+ frame->Push(right);
+ return frame->CallStub(this, 2);
+ }
+}
- __ bind(&seq_ascii_string);
- // rax: subject string (sequential ascii)
- // rcx: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(rdi, 1); // Type is ascii.
- __ jmp(&check_code);
- __ bind(&seq_two_byte_string);
- // rax: subject string (flat two-byte)
- // rcx: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
- __ Set(rdi, 0); // Type is two byte.
+void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
+ // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
+ // dividend in rax and rdx free for the division. Use rax, rbx for those.
+ Comment load_comment(masm, "-- Load arguments");
+ Register left = rdx;
+ Register right = rax;
+ if (op_ == Token::DIV || op_ == Token::MOD) {
+ left = rax;
+ right = rbx;
+ if (HasArgsInRegisters()) {
+ __ movq(rbx, rax);
+ __ movq(rax, rdx);
+ }
+ }
+ if (!HasArgsInRegisters()) {
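+ // Stack on entry: rsp[0] return address, rsp[8] right operand,
+ // rsp[16] left operand.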
+ __ movq(right, Operand(rsp, 1 * kPointerSize));
+ __ movq(left, Operand(rsp, 2 * kPointerSize));
+ }
- __ bind(&check_code);
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object otherwise it contains
- // the hole.
- __ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
+ Label not_smis;
+ // 2. Smi check both operands.
+ if (static_operands_type_.IsSmi()) {
+ // Skip smi check if we know that both arguments are smis.
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left);
+ __ AbortIfNotSmi(right);
+ }
+ if (op_ == Token::BIT_OR) {
+ // Handle OR here, since we do extra smi-checking in the or code below.
+ __ SmiOr(right, right, left);
+ GenerateReturn(masm);
+ return;
+ }
+ } else {
+ if (op_ != Token::BIT_OR) {
+ // Skip the check for OR as it is better combined with the
+ // actual operation.
+ Comment smi_check_comment(masm, "-- Smi check arguments");
+ __ JumpIfNotBothSmi(left, right, &not_smis);
+ }
+ }
- // rax: subject string
- // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
- // r11: code
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
+ // 3. Operands are both smis (except for OR), perform the operation leaving
+ // the result in rax and check the result if necessary.
+ Comment perform_smi(masm, "-- Perform smi operation");
+ Label use_fp_on_smis;
+ switch (op_) {
+ case Token::ADD: {
+ ASSERT(right.is(rax));
+ __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
+ break;
+ }
- // rax: subject string
- // rbx: previous index
- // rdi: encoding of subject string (1 if ascii 0 if two_byte);
- // r11: code
- // All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(&Counters::regexp_entry_native, 1);
+ case Token::SUB: {
+ __ SmiSub(left, left, right, &use_fp_on_smis);
+ __ movq(rax, left);
+ break;
+ }
- // rsi is caller save on Windows and used to pass parameter on Linux.
- __ push(rsi);
+ case Token::MUL:
+ ASSERT(right.is(rax));
+ __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
+ break;
- static const int kRegExpExecuteArguments = 7;
- __ PrepareCallCFunction(kRegExpExecuteArguments);
- int argument_slots_on_stack =
- masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
+ case Token::DIV:
+ ASSERT(left.is(rax));
+ __ SmiDiv(left, left, right, &use_fp_on_smis);
+ break;
- // Argument 7: Indicate that this is a direct call from JavaScript.
- __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
- Immediate(1));
+ case Token::MOD:
+ ASSERT(left.is(rax));
+ __ SmiMod(left, left, right, slow);
+ break;
- // Argument 6: Start (high end) of backtracking stack memory area.
- __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
- __ movq(r9, Operand(kScratchRegister, 0));
- __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
- __ addq(r9, Operand(kScratchRegister, 0));
- // Argument 6 passed in r9 on Linux and on the stack on Windows.
-#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), r9);
-#endif
+ case Token::BIT_OR:
+ ASSERT(right.is(rax));
+ __ movq(rcx, right); // Save the right operand.
+ __ SmiOr(right, right, left); // BIT_OR is commutative.
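+ // The smi tag bit of an OR is the OR of the operands' tag bits, so the
+ // result's tag is clear only if both operands were smis; a single test
+ // after the OR replaces the usual up-front smi check.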
+ __ testb(right, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_smis);
+ break;
- // Argument 5: static offsets vector buffer.
- __ movq(r8, ExternalReference::address_of_static_offsets_vector());
- // Argument 5 passed in r8 on Linux and on the stack on Windows.
-#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r8);
-#endif
+ case Token::BIT_AND:
+ ASSERT(right.is(rax));
+ __ SmiAnd(right, right, left); // BIT_AND is commutative.
+ break;
- // First four arguments are passed in registers on both Linux and Windows.
-#ifdef _WIN64
- Register arg4 = r9;
- Register arg3 = r8;
- Register arg2 = rdx;
- Register arg1 = rcx;
-#else
- Register arg4 = rcx;
- Register arg3 = rdx;
- Register arg2 = rsi;
- Register arg1 = rdi;
-#endif
+ case Token::BIT_XOR:
+ ASSERT(right.is(rax));
+ __ SmiXor(right, right, left); // BIT_XOR is commutative.
+ break;
- // Keep track on aliasing between argX defined above and the registers used.
- // rax: subject string
- // rbx: previous index
- // rdi: encoding of subject string (1 if ascii 0 if two_byte);
- // r11: code
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR:
+ switch (op_) {
+ case Token::SAR:
+ __ SmiShiftArithmeticRight(left, left, right);
+ break;
+ case Token::SHR:
+ __ SmiShiftLogicalRight(left, left, right, slow);
+ break;
+ case Token::SHL:
+ __ SmiShiftLeft(left, left, right);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ movq(rax, left);
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // 4. Emit return of result in rax.
+ GenerateReturn(masm);
+
+ // 5. For some operations emit inline code to perform floating point
+ // operations on known smis (e.g., if the result of the operation
+ // overflowed the smi range).
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ ASSERT(use_fp_on_smis.is_linked());
+ __ bind(&use_fp_on_smis);
+ if (op_ == Token::DIV) {
+ __ movq(rdx, rax);
+ __ movq(rax, rbx);
+ }
+ // left is rdx, right is rax.
+ __ AllocateHeapNumber(rbx, rcx, slow);
+ FloatingPointHelper::LoadSSE2SmiOperands(masm);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
+ __ movq(rax, rbx);
+ GenerateReturn(masm);
+ break;
+ }
+ default:
+ break;
+ }
- // Argument 4: End of string data
- // Argument 3: Start of string data
- Label setup_two_byte, setup_rest;
- __ testb(rdi, rdi);
- __ j(zero, &setup_two_byte);
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
- __ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
- __ jmp(&setup_rest);
- __ bind(&setup_two_byte);
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
- __ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
+ // 6. Non-smi operands, fall out to the non-smi code with the operands in
+ // rdx and rax.
+ Comment done_comment(masm, "-- Enter non-smi code");
+ __ bind(&not_smis);
- __ bind(&setup_rest);
- // Argument 2: Previous index.
- __ movq(arg2, rbx);
+ switch (op_) {
+ case Token::DIV:
+ case Token::MOD:
+ // Operands are in rax, rbx at this point.
+ __ movq(rdx, rax);
+ __ movq(rax, rbx);
+ break;
- // Argument 1: Subject string.
- __ movq(arg1, rax);
+ case Token::BIT_OR:
+ // Right operand is saved in rcx and rax was destroyed by the smi
+ // operation.
+ __ movq(rax, rcx);
+ break;
- // Locate the code entry and call it.
- __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ CallCFunction(r11, kRegExpExecuteArguments);
+ default:
+ break;
+ }
+}
- // rsi is caller save, as it is used to pass parameter.
- __ pop(rsi);
- // Check the result.
- Label success;
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
- __ j(equal, &success);
- Label failure;
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
- __ j(equal, &failure);
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
- // If not exception it can only be retry. Handle that in the runtime system.
- __ j(not_equal, &runtime);
- // Result must now be exception. If there is no pending exception already a
- // stack overflow (on the backtrack stack) was detected in RegExp code but
- // haven't created the exception yet. Handle that in the runtime system.
- // TODO(592): Rerunning the RegExp to get the stack overflow exception.
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
- __ movq(kScratchRegister, pending_exception_address);
- __ Cmp(kScratchRegister, Factory::the_hole_value());
- __ j(equal, &runtime);
- __ bind(&failure);
- // For failure and exception return null.
- __ Move(rax, Factory::null_value());
- __ ret(4 * kPointerSize);
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+ Label call_runtime;
- // Load RegExp data.
- __ bind(&success);
- __ movq(rax, Operand(rsp, kJSRegExpOffset));
- __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
- __ SmiToInteger32(rax,
- FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- __ leal(rdx, Operand(rax, rax, times_1, 2));
+ if (ShouldGenerateSmiCode()) {
+ GenerateSmiCode(masm, &call_runtime);
+ } else if (op_ != Token::MOD) {
+ if (!HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
+ }
+ // Floating point case.
+ if (ShouldGenerateFPCode()) {
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+ HasSmiCodeInStub()) {
+ // Execution reaches this point when the first non-smi argument occurs
+ // (and only if smi code is generated). This is the right moment to
+ // patch to HEAP_NUMBERS state. The transition is attempted only for
+ // the four basic operations. The stub stays in the DEFAULT state
+ // forever for all other operations (also if smi code is skipped).
+ GenerateTypeTransition(masm);
+ break;
+ }
- // rdx: Number of capture registers
- // Load last_match_info which is still known to be a fast case JSArray.
- __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
- __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
+ Label not_floats;
+ // rax: y
+ // rdx: x
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(rdx);
+ __ AbortIfNotNumber(rax);
+ }
+ FloatingPointHelper::LoadSSE2NumberOperands(masm);
+ } else {
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
+ }
- // rbx: last_match_info backing store (FixedArray)
- // rdx: number of capture registers
- // Store the capture count.
- __ Integer32ToSmi(kScratchRegister, rdx);
- __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
- kScratchRegister);
- // Store last subject and last input.
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
- __ movq(rcx, rbx);
- __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
- __ movq(rcx, rbx);
- __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ // Allocate a heap number, if needed.
+ Label skip_allocation;
+ OverwriteMode mode = mode_;
+ if (HasArgsReversed()) {
+ if (mode == OVERWRITE_RIGHT) {
+ mode = OVERWRITE_LEFT;
+ } else if (mode == OVERWRITE_LEFT) {
+ mode = OVERWRITE_RIGHT;
+ }
+ }
+ switch (mode) {
+ case OVERWRITE_LEFT:
+ __ JumpIfNotSmi(rdx, &skip_allocation);
+ __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+ __ movq(rdx, rbx);
+ __ bind(&skip_allocation);
+ __ movq(rax, rdx);
+ break;
+ case OVERWRITE_RIGHT:
+ // If the argument in rax is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(rax, &skip_allocation);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate a heap number for the result. Keep rax and rdx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+ __ movq(rax, rbx);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ GenerateReturn(masm);
+ __ bind(¬_floats);
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+ !HasSmiCodeInStub()) {
+ // Execution reaches this point when the first non-number argument
+ // occurs (and only if smi code is skipped from the stub, otherwise
+ // the patching has already been done earlier in this case branch).
+ // A perfect moment to try patching to STRINGS for ADD operation.
+ if (op_ == Token::ADD) {
+ GenerateTypeTransition(masm);
+ }
+ }
+ break;
+ }
+ case Token::MOD: {
+ // For MOD we go directly to runtime in the non-smi case.
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ Label skip_allocation, non_smi_shr_result;
+ Register heap_number_map = r9;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(rdx);
+ __ AbortIfNotNumber(rax);
+ }
+ FloatingPointHelper::LoadNumbersAsIntegers(masm);
+ } else {
+ FloatingPointHelper::LoadAsIntegers(masm,
+ &call_runtime,
+ heap_number_map);
+ }
+ switch (op_) {
+ case Token::BIT_OR: __ orl(rax, rcx); break;
+ case Token::BIT_AND: __ andl(rax, rcx); break;
+ case Token::BIT_XOR: __ xorl(rax, rcx); break;
+ case Token::SAR: __ sarl_cl(rax); break;
+ case Token::SHL: __ shll_cl(rax); break;
+ case Token::SHR: {
+ __ shrl_cl(rax);
+ // Check if result is negative. This can only happen for a shift
+ // by zero.
+ __ testl(rax, rax);
+ __ j(negative, &non_smi_shr_result);
+ break;
+ }
+ default: UNREACHABLE();
+ }
- // Get the static offsets vector filled by the native regexp code.
- __ movq(rcx, ExternalReference::address_of_static_offsets_vector());
+ STATIC_ASSERT(kSmiValueSize == 32);
+ // Tag smi result and return.
+ __ Integer32ToSmi(rax, rax);
+ GenerateReturn(masm);
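+ // With 32-bit smi values (asserted above), tagging is a plain shift.
+ // A C sketch of the x64 encoding (value in the upper word, lower word
+ // zero); names are illustrative:
+ //   int64_t Tag(int32_t v) { return static_cast<int64_t>(v) << 32; }
+ //   int32_t Untag(int64_t s) { return static_cast<int32_t>(s >> 32); }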
- // rbx: last_match_info backing store (FixedArray)
- // rcx: offsets vector
- // rdx: number of capture registers
- Label next_capture, done;
- // Capture register counter starts from number of capture registers and
- // counts down until wraping after zero.
- __ bind(&next_capture);
- __ subq(rdx, Immediate(1));
- __ j(negative, &done);
- // Read the value from the static offsets vector buffer and make it a smi.
- __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
- __ Integer32ToSmi(rdi, rdi, &runtime);
- // Store the smi value in the last match info.
- __ movq(FieldOperand(rbx,
- rdx,
- times_pointer_size,
- RegExpImpl::kFirstCaptureOffset),
- rdi);
- __ jmp(&next_capture);
- __ bind(&done);
+ // All bit-ops except SHR return a signed int32 that can be
+ // returned immediately as a smi.
+ // We might need to allocate a HeapNumber if we shift a negative
+ // number right by zero (i.e., convert to UInt32).
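+ // Example: in JS, -1 >>> 0 evaluates to 4294967295, which does not fit
+ // in the signed 32-bit payload of a smi and so needs a heap number.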
+ if (op_ == Token::SHR) {
+ ASSERT(non_smi_shr_result.is_linked());
+ __ bind(&non_smi_shr_result);
+ // Allocate a heap number if needed.
+ __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ case OVERWRITE_RIGHT:
+ // If the operand was an object, we skip the
+ // allocation of a heap number.
+ __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
+ 1 * kPointerSize : 2 * kPointerSize));
+ __ JumpIfNotSmi(rax, &skip_allocation);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate heap number in new space.
+ // Not using AllocateHeapNumber macro in order to reuse
+ // already loaded heap_number_map.
+ __ AllocateInNewSpace(HeapNumber::kSize,
+ rax,
+ rcx,
+ no_reg,
+ &call_runtime,
+ TAG_OBJECT);
+ // Set the map.
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset),
+ heap_number_map);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ // Store the result in the HeapNumber and return.
+ __ cvtqsi2sd(xmm0, rbx);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ GenerateReturn(masm);
+ }
- // Return last match info.
- __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
- __ ret(4 * kPointerSize);
+ break;
+ }
+ default: UNREACHABLE(); break;
+ }
+ }
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#endif // V8_INTERPRETED_REGEXP
-}
+ // If all else fails, use the runtime system to get the correct
+ // result. If arguments was passed in registers now place them on the
+ // stack in the correct order below the return address.
+ __ bind(&call_runtime);
+ if (HasArgsInRegisters()) {
+ GenerateRegisterArgsPush(masm);
+ }
-void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask) {
- __ and_(hash, mask);
- // Each entry in string cache consists of two pointer sized fields,
- // but times_twice_pointer_size (multiplication by 16) scale factor
- // is not supported by addrmode on x64 platform.
- // So we have to premultiply entry index before lookup.
- __ shl(hash, Immediate(kPointerSizeLog2 + 1));
-}
+ switch (op_) {
+ case Token::ADD: {
+ // Registers containing left and right operands respectively.
+ Register lhs, rhs;
+ if (HasArgsReversed()) {
+ lhs = rax;
+ rhs = rdx;
+ } else {
+ lhs = rdx;
+ rhs = rax;
+ }
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
+ // Test for string arguments before calling runtime.
+ Label not_strings, both_strings, not_string1, string1, string1_smi2;
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+ // If this stub has already generated FP-specific code then the arguments
+ // are already in rdx and rax.
+ if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ SmiToInteger32(
- mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shrl(mask, Immediate(1));
- __ subq(mask, Immediate(1)); // Make mask.
+ Condition is_smi;
+ is_smi = masm->CheckSmi(lhs);
+ __ j(is_smi, &not_string1);
+ __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
+ __ j(above_equal, &not_string1);
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label is_smi;
- Label load_result_from_cache;
- if (!object_is_smi) {
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object, Factory::heap_number_map(), not_found, true);
+ // First argument is a string, test the second.
+ is_smi = masm->CheckSmi(rhs);
+ __ j(is_smi, &string1_smi2);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
+ __ j(above_equal, &string1);
- ASSERT_EQ(8, kDoubleSize);
- __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
+ // First and second argument are strings.
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ __ TailCallStub(&string_add_stub);
- Register index = scratch;
- Register probe = mask;
- __ movq(probe,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- ASSERT(CpuFeatures::IsSupported(SSE2));
- CpuFeatures::Scope fscope(SSE2);
- __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache);
- }
+ __ bind(&string1_smi2);
+ // First argument is a string, second is a smi. Try to look up the number
+ // string for the smi in the number string cache.
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm, rhs, rbx, rcx, r8, true, &string1);
- __ bind(&is_smi);
- __ SmiToInteger32(scratch, object);
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
+ // Replace second argument on stack and tailcall string add stub to make
+ // the result.
+ __ movq(Operand(rsp, 1 * kPointerSize), rbx);
+ __ TailCallStub(&string_add_stub);
- Register index = scratch;
- // Check if the entry is the smi we are looking for.
- __ cmpq(object,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ movq(result,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(&Counters::number_to_string_native, 1);
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ is_smi = masm->CheckSmi(rhs);
+ __ j(is_smi, &not_strings);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
+ __ j(above_equal, &not_strings);
+
+ // Only second argument is a string.
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+
+ __ bind(&not_strings);
+ // Neither argument is a string.
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ }
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ movq(rbx, Operand(rsp, kPointerSize));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
+void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
+ ASSERT(!HasArgsInRegisters());
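+ // Stack: rsp[0] return address, rsp[8] right operand, rsp[16] left operand.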
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
}
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- masm->RecordWriteHelper(object_, addr_, scratch_);
- masm->ret(0);
+void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
+ // If arguments are not passed in registers remove them from the stack before
+ // returning.
+ if (!HasArgsInRegisters()) {
+ __ ret(2 * kPointerSize); // Remove both operands.
+ } else {
+ __ ret(0);
+ }
}
-static int NegativeComparisonResult(Condition cc) {
- ASSERT(cc != equal);
- ASSERT((cc == less) || (cc == less_equal)
- || (cc == greater) || (cc == greater_equal));
- return (cc == greater || cc == greater_equal) ? LESS : GREATER;
+void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ ASSERT(HasArgsInRegisters());
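+ // Stack effect: [ret] becomes [left][right][ret]. In the reversed case the
+ // push order compensates, so the stack always ends up in left, right order.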
+ __ pop(rcx);
+ if (HasArgsReversed()) {
+ __ push(rax);
+ __ push(rdx);
+ } else {
+ __ push(rdx);
+ __ push(rax);
+ }
+ __ push(rcx);
}
-void CompareStub::Generate(MacroAssembler* masm) {
- Label check_unequal_objects, done;
- // The compare stub returns a positive, negative, or zero 64-bit integer
- // value in rax, corresponding to result of comparing the two inputs.
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ Label get_result;
- // Two identical objects are equal unless they are both NaN or undefined.
- {
- Label not_identical;
- __ cmpq(rax, rdx);
- __ j(not_equal, &not_identical);
+ // Ensure the operands are on the stack.
+ if (HasArgsInRegisters()) {
+ GenerateRegisterArgsPush(masm);
+ }
- if (cc_ != equal) {
- // Check for undefined. undefined OP undefined is false even though
- // undefined == undefined.
- Label check_for_nan;
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &check_for_nan);
- __ Set(rax, NegativeComparisonResult(cc_));
- __ ret(0);
- __ bind(&check_for_nan);
- }
+ // Left and right arguments are already on stack.
+ __ pop(rcx); // Save the return address.
- // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
- // so we do the second best thing - test it ourselves.
- // Note: if cc_ != equal, never_nan_nan_ is not used.
- // We cannot set rax to EQUAL until just before return because
- // rax must be unchanged on jump to not_identical.
+ // Push this stub's key.
+ __ Push(Smi::FromInt(MinorKey()));
- if (never_nan_nan_ && (cc_ == equal)) {
- __ Set(rax, EQUAL);
- __ ret(0);
- } else {
- Label heap_number;
- // If it's not a heap number, then return equal for (in)equality operator.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Factory::heap_number_map());
- __ j(equal, &heap_number);
- if (cc_ != equal) {
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(above_equal, &not_identical);
- }
- __ Set(rax, EQUAL);
- __ ret(0);
+ // Although the operation and the type info are encoded into the key,
+ // the encoding is opaque, so push them too.
+ __ Push(Smi::FromInt(op_));
- __ bind(&heap_number);
- // It is a heap number, so return equal if it's not NaN.
- // For NaN, return 1 for every condition except greater and
- // greater-equal. Return -1 for them, so the comparison yields
- // false for all conditions except not-equal.
- __ Set(rax, EQUAL);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm0);
- __ setcc(parity_even, rax);
- // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
- if (cc_ == greater_equal || cc_ == greater) {
- __ neg(rax);
- }
- __ ret(0);
- }
+ __ Push(Smi::FromInt(runtime_operands_type_));
- __ bind(&not_identical);
- }
+ __ push(rcx); // The return address.
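+ // The patch helper below thus receives five stack arguments: left, right,
+ // the stub's key, the operation token, and the runtime type info.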
- if (cc_ == equal) { // Both strict and non-strict.
- Label slow; // Fallthrough label.
+ // Perform patching to an appropriate fast case and return the result.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+ 5,
+ 1);
+}
- // If we're doing a strict equality comparison, we don't have to do
- // type conversion, so we generate code to do fast comparison for objects
- // and oddballs. Non-smi numbers and strings still go through the usual
- // slow-case code.
- if (strict_) {
- // If either is a Smi (we know that not both are), then they can only
- // be equal if the other is a HeapNumber. If so, use the slow case.
- {
- Label not_smis;
- __ SelectNonSmi(rbx, rax, rdx, &not_smis);
- // Check if the non-smi operand is a heap number.
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- Factory::heap_number_map());
- // If heap number, handle it in the slow case.
- __ j(equal, &slow);
- // Return non-equal. ebx (the lower half of rbx) is not zero.
- __ movq(rax, rbx);
- __ ret(0);
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+ GenericBinaryOpStub stub(key, type_info);
+ return stub.GetCode();
+}
- __ bind(&not_smis);
- }
- // If either operand is a JSObject or an oddball value, then they are not
- // equal since their pointers are different
- // There is no test for undetectability in strict equality.
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+ // Input on stack:
+ // rsp[8]: argument (should be number).
+ // rsp[0]: return address.
+ Label runtime_call;
+ Label runtime_call_clear_stack;
+ Label input_not_smi;
+ Label loaded;
+ // Test that rax is a number.
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ JumpIfNotSmi(rax, &input_not_smi);
+ // Input is a smi. Untag and load it onto the FPU stack.
+ // Then load the bits of the double into rbx.
+ __ SmiToInteger32(rax, rax);
+ __ subq(rsp, Immediate(kPointerSize));
+ __ cvtlsi2sd(xmm1, rax);
+ __ movsd(Operand(rsp, 0), xmm1);
+ __ movq(rbx, xmm1);
+ __ movq(rdx, xmm1);
+ __ fld_d(Operand(rsp, 0));
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(&loaded);
- // If the first object is a JS object, we have done pointer comparison.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- Label first_non_object;
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &first_non_object);
- // Return non-zero (eax (not rax) is not zero)
- Label return_not_equal;
- ASSERT(kHeapObjectTag != 0);
- __ bind(&return_not_equal);
- __ ret(0);
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ Move(rbx, Factory::heap_number_map());
+ __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ j(not_equal, &runtime_call);
+ // Input is a HeapNumber. Push it on the FPU stack and load its
+ // bits into rbx.
+ __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rdx, rbx);
+ __ bind(&loaded);
+ // ST[0] == double value
+ // rbx = bits of double value.
+ // rdx = also bits of double value.
+ // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
+ // h = h0 = bits ^ (bits >> 32);
+ // h ^= h >> 16;
+ // h ^= h >> 8;
+ // h = h & (cacheSize - 1);
+ // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
+ __ sar(rdx, Immediate(32));
+ __ xorl(rdx, rbx);
+ __ movl(rcx, rdx);
+ __ movl(rax, rdx);
+ __ movl(rdi, rdx);
+ __ sarl(rdx, Immediate(8));
+ __ sarl(rcx, Immediate(16));
+ __ sarl(rax, Immediate(24));
+ __ xorl(rcx, rdx);
+ __ xorl(rax, rdi);
+ __ xorl(rcx, rax);
+ ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
+ __ andl(rcx, Immediate(TranscendentalCache::kCacheSize - 1));
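+ // An equivalent C sketch of the hash (illustrative only; int32_t because
+ // the shifts above are arithmetic):
+ //   int32_t h = static_cast<int32_t>(bits) ^ static_cast<int32_t>(bits >> 32);
+ //   h = (h ^ (h >> 8) ^ (h >> 16) ^ (h >> 24)) &
+ //       (TranscendentalCache::kCacheSize - 1);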
+
+ // ST[0] == double value.
+ // rbx = bits of double value.
+ // rcx = TranscendentalCache::hash(double value).
+ __ movq(rax, ExternalReference::transcendental_cache_array_address());
+ // rax points to cache array.
+ __ movq(rax, Operand(rax, type_ * sizeof(TranscendentalCache::caches_[0])));
+ // rax points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ testq(rax, rax);
+ __ j(zero, &runtime_call_clear_stack);
+#ifdef DEBUG
+ // Check that the layout of cache elements match expectations.
+ { // NOLINT - doesn't like a single brace on a line.
+ TranscendentalCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ // Two uint32_t's and a pointer per element.
+ CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
+ CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
+ CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
+ CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
+ }
+#endif
+ // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
+ __ addl(rcx, rcx);
+ __ lea(rcx, Operand(rax, rcx, times_8, 0));
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ Label cache_miss;
+ __ cmpq(rbx, Operand(rcx, 0));
+ __ j(not_equal, &cache_miss);
+ // Cache hit!
+ __ movq(rax, Operand(rcx, 2 * kIntSize));
+ __ fstp(0); // Clear FPU stack.
+ __ ret(kPointerSize);
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(rcx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
+ __ bind(&cache_miss);
+ // Update cache with new value.
+ Label nan_result;
+ GenerateOperation(masm, &nan_result);
+ __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
+ __ movq(Operand(rcx, 0), rbx);
+ __ movq(Operand(rcx, 2 * kIntSize), rax);
+ __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ ret(kPointerSize);
- __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(above_equal, &return_not_equal);
+ __ bind(&runtime_call_clear_stack);
+ __ fstp(0);
+ __ bind(&runtime_call);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(rcx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
+ __ bind(&nan_result);
+ __ fstp(0); // Remove argument from FPU stack.
+ __ LoadRoot(rax, Heap::kNanValueRootIndex);
+ __ movq(Operand(rcx, 0), rbx);
+ __ movq(Operand(rcx, 2 * kIntSize), rax);
+ __ ret(kPointerSize);
+}
- // Fall through to the general case.
- }
- __ bind(&slow);
- }
- // Push arguments below the return address to prepare jump to builtin.
- __ pop(rcx);
- __ push(rax);
- __ push(rdx);
- __ push(rcx);
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+ switch (type_) {
+ // Add more cases when necessary.
+ case TranscendentalCache::SIN: return Runtime::kMath_sin;
+ case TranscendentalCache::COS: return Runtime::kMath_cos;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
+}
- // Generate the number comparison code.
- if (include_number_compare_) {
- Label non_number_comparison;
- Label unordered;
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
- __ xorl(rax, rax);
- __ xorl(rcx, rcx);
- __ ucomisd(xmm0, xmm1);
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ setcc(above, rax);
- __ setcc(below, rcx);
- __ subq(rax, rcx);
- __ ret(2 * kPointerSize); // rax, rdx were pushed
+void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
+ Label* on_nan_result) {
+ // Registers:
+ // rbx: Bits of input double. Must be preserved.
+ // rcx: Pointer to cache entry. Must be preserved.
+ // st(0): Input double
+ Label done;
+ ASSERT(type_ == TranscendentalCache::SIN ||
+ type_ == TranscendentalCache::COS);
+ // More transcendental types can be added later.
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc_ != not_equal);
- if (cc_ == less || cc_ == less_equal) {
- __ Set(rax, 1);
- } else {
- __ Set(rax, -1);
- }
- __ ret(2 * kPointerSize); // rax, rdx were pushed
+ // Both fsin and fcos require arguments in the range +/-2^63 and
+ // return NaN for infinities and NaN. They can share all code except
+ // the actual fsin/fcos operation.
+ Label in_range;
+ // If argument is outside the range -2^63..2^63, fsin/cos doesn't
+ // work. We must reduce it to the appropriate range.
+ __ movq(rdi, rbx);
+ // Move exponent and sign bits to low bits.
+ __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
+ // Remove sign bit.
+ __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
+ int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
+ __ cmpl(rdi, Immediate(supported_exponent_limit));
+ __ j(below, &in_range);
+ // Check for infinity and NaN; both yield NaN for sin and cos.
+ __ cmpl(rdi, Immediate(0x7ff));
+ __ j(equal, on_nan_result);
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
+ // Use fprem1 to restrict the argument to the range +/-2*PI.
+ __ fldpi();
+ __ fadd(0);
+ __ fld(1);
+ // FPU Stack: input, 2*pi, input.
+ {
+ Label no_exceptions;
+ __ fwait();
+ __ fnstsw_ax();
+ // Clear if Illegal Operand or Zero Division exceptions are set.
+ __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word.
+ __ j(zero, &no_exceptions);
+ __ fnclex();
+ __ bind(&no_exceptions);
}
- // Fast negative check for symbol-to-symbol equality.
- Label check_for_strings;
- if (cc_ == equal) {
- BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
- BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
-
- // We've already checked for object identity, so if both operands
- // are symbols they aren't equal. Register eax (not rax) already holds a
- // non-zero value, which indicates not equal, so just return.
- __ ret(2 * kPointerSize);
+ // Compute st(0) % st(1)
+ {
+ Label partial_remainder_loop;
+ __ bind(&partial_remainder_loop);
+ __ fprem1();
+ __ fwait();
+ __ fnstsw_ax();
+ __ testl(rax, Immediate(0x400)); // Check C2 bit of FPU status word.
+ // If C2 is set, computation only has partial result. Loop to
+ // continue computation.
+ __ j(not_zero, &partial_remainder_loop);
}
+ // FPU Stack: input, 2*pi, input % 2*pi
+ __ fstp(2);
+ // FPU Stack: input % 2*pi, 2*pi,
+ __ fstp(0);
+ // FPU Stack: input % 2*pi
+ __ bind(&in_range);
+ switch (type_) {
+ case TranscendentalCache::SIN:
+ __ fsin();
+ break;
+ case TranscendentalCache::COS:
+ __ fcos();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ bind(&done);
+}
- __ bind(&check_for_strings);
-
- __ JumpIfNotBothSequentialAsciiStrings(
- rdx, rax, rcx, rbx, &check_unequal_objects);
- // Inline comparison of ascii strings.
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- rdx,
- rax,
- rcx,
- rbx,
- rdi,
- r8);
+// Get the integer part of a heap number.
+// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
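+// An illustrative C model of the low 32 bits this routine computes (not the
+// literal algorithm; assumes IEEE-754 doubles and <math.h> trunc/fmod):
+//   uint32_t LowBits(double d) {
+//     double t = fmod(trunc(d), 4294967296.0); // integer part mod 2^32
+//     if (t < 0.0) t += 4294967296.0;
+//     return static_cast<uint32_t>(t);
+//   }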
+void IntegerConvert(MacroAssembler* masm,
+ Register result,
+ Register source) {
+ // Result may be rcx. If result and source are the same register, source will
+ // be overwritten.
+ ASSERT(!result.is(rdi) && !result.is(rbx));
+ // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
+ // cvttsd2si (32-bit version) directly.
+ Register double_exponent = rbx;
+ Register double_value = rdi;
+ Label done, exponent_63_plus;
+ // Get double and extract exponent.
+ __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
+ // Clear result preemptively, in case we need to return zero.
+ __ xorl(result, result);
+ __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
+ // Double the value to remove the sign bit, shift the exponent down to the
+ // least significant bits, and subtract the bias to get the unbiased exponent.
+ __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
+ __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
+ __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
+ // Check whether the exponent is too big for a 63 bit unsigned integer.
+ __ cmpl(double_exponent, Immediate(63));
+ __ j(above_equal, &exponent_63_plus);
+ // Handle exponent range 0..62.
+ __ cvttsd2siq(result, xmm0);
+ __ jmp(&done);
-#ifdef DEBUG
- __ Abort("Unexpected fall-through from string comparison");
-#endif
+ __ bind(&exponent_63_plus);
+ // Exponent negative or 63+.
+ __ cmpl(double_exponent, Immediate(83));
+ // If exponent negative or above 83, number contains no significant bits in
+ // the range 0..2^31, so the result is zero, and the result register was
+ // already cleared above.
+ __ j(above, &done);
- __ bind(&check_unequal_objects);
- if (cc_ == equal && !strict_) {
- // Not strict equality. Objects are unequal if
- // they are both JSObjects and not undetectable,
- // and their pointers are different.
- Label not_both_objects, return_unequal;
- // At most one is a smi, so we can test for smi by adding the two.
- // A smi plus a heap object has the low bit set, a heap object plus
- // a heap object has the low bit clear.
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(V8_UINT64_C(1), kSmiTagMask);
- __ lea(rcx, Operand(rax, rdx, times_1, 0));
- __ testb(rcx, Immediate(kSmiTagMask));
- __ j(not_zero, &not_both_objects);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
- __ j(below, &not_both_objects);
- __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &not_both_objects);
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal);
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal);
- // The objects are both undetectable, so they both compare as the value
- // undefined, and are equal.
- __ Set(rax, EQUAL);
- __ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in eax,
- // or return equal if we fell through to here.
- __ ret(2 * kPointerSize); // rax, rdx were pushed
- __ bind(&not_both_objects);
- }
+ // Exponent in range 63..83.
+ // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
+ // the least significant exponent-52 bits.
- // must swap argument order
- __ pop(rcx);
- __ pop(rdx);
- __ pop(rax);
- __ push(rdx);
- __ push(rax);
+ // Negate low bits of mantissa if value is negative.
+ __ addq(double_value, double_value); // Move sign bit to carry.
+ __ sbbl(result, result); // And convert carry to -1 in result register.
+ // If the value was negative (result is -1), compute (double_value - 1) ^ -1;
+ // otherwise (result is 0), compute (double_value - 0) ^ 0, a no-op.
+ __ addl(double_value, result);
+ // The xor runs in opposite directions depending on whether result is rcx,
+ // since the shll_cl below needs its shift count in rcx.
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript builtin;
- if (cc_ == equal) {
- builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (result.is(rcx)) {
+ __ xorl(double_value, result);
+ // Left shift mantissa by (exponent - mantissabits - 1) to save the
+ // bits that have positional values below 2^32 (the extra -1 comes from the
+ // doubling done above to move the sign bit into the carry flag).
+ __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+ __ shll_cl(double_value);
+ __ movl(result, double_value);
} else {
- builtin = Builtins::COMPARE;
- __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
+ // As in the branch above, but move double_value into result before shifting.
+ __ xorl(result, double_value);
+ __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+ __ shll_cl(result);
}
- // Restore return address on the stack.
- __ push(rcx);
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+ __ bind(&done);
}
-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(scratch,
- FieldOperand(scratch, Map::kInstanceTypeOffset));
- // Ensure that no non-strings have the symbol bit set.
- ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
- ASSERT(kSymbolTag != 0);
- __ testb(scratch, Immediate(kIsSymbolMask));
- __ j(zero, label);
-}
+// Input: rdx, rax are the left and right objects of a bit op.
+// Output: rax, rcx are left and right integers for a bit op.
+void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
+ // Check float operands.
+ Label done;
+ Label rax_is_smi;
+ Label rax_is_object;
+ Label rdx_is_object;
+ __ JumpIfNotSmi(rdx, &rdx_is_object);
+ __ SmiToInteger32(rdx, rdx);
+ __ JumpIfSmi(rax, &rax_is_smi);
-// Call the function just below TOS on the stack with the given
-// arguments. The receiver is the TOS.
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
- CallFunctionFlags flags,
- int position) {
- // Push the arguments ("left-to-right") on the stack.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
+ __ bind(&rax_is_object);
+ IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
+ __ jmp(&done);
- // Record the position for debugging purposes.
- CodeForSourcePosition(position);
+ __ bind(&rdx_is_object);
+ IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
+ __ JumpIfNotSmi(rax, &rax_is_object);
+ __ bind(&rax_is_smi);
+ __ SmiToInteger32(rcx, rax);
- // Use the shared code stub to call the function.
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, flags);
- Result answer = frame_->CallStub(&call_function, arg_count + 1);
- // Restore context and replace function on the stack with the
- // result of the stub invocation.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &answer);
+ __ bind(&done);
+ __ movl(rax, rdx);
}
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Implements "value instanceof function" operator.
- // Expected input state:
- // rsp[0] : return address
- // rsp[1] : function pointer
- // rsp[2] : value
- // Returns a bitwise zero to indicate that the value
- // is and instance of the function and anything else to
- // indicate that the value is not an instance.
-
- // Get the object - go slow case if it's a smi.
- Label slow;
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
- __ JumpIfSmi(rax, &slow);
+// Input: rdx, rax are the left and right objects of a bit op.
+// Output: rax, rcx are left and right integers for a bit op.
+void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
+ Label* conversion_failure,
+ Register heap_number_map) {
+ // Check float operands.
+ Label arg1_is_object, check_undefined_arg1;
+ Label arg2_is_object, check_undefined_arg2;
+ Label load_arg2, done;
- // Check that the left hand is a JS object. Leave its map in rax.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
- __ j(below, &slow);
- __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
- __ j(above, &slow);
+ __ JumpIfNotSmi(rdx, &arg1_is_object);
+ __ SmiToInteger32(rdx, rdx);
+ __ jmp(&load_arg2);
- // Get the prototype of the function.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
- // rdx is function, rax is map.
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg1);
+ __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, conversion_failure);
+ __ movl(rdx, Immediate(0));
+ __ jmp(&load_arg2);
- // Look up the function and the map in the instanceof cache.
- Label miss;
- __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
- __ j(not_equal, &miss);
- __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
- __ j(not_equal, &miss);
- __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
+ __ bind(&arg1_is_object);
+ __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, &check_undefined_arg1);
+ // Get the untagged integer version of the rdx heap number in rdx.
+ IntegerConvert(masm, rdx, rdx);
- __ bind(&miss);
- __ TryGetFunctionPrototype(rdx, rbx, &slow);
+ // Here rdx has the untagged integer, rax has a Smi or a heap number.
+ __ bind(&load_arg2);
+ // Test if arg2 is a Smi.
+ __ JumpIfNotSmi(rax, &arg2_is_object);
+ __ SmiToInteger32(rax, rax);
+ __ movl(rcx, rax);
+ __ jmp(&done);
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(rbx, &slow);
- __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
- __ j(below, &slow);
- __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
- __ j(above, &slow);
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg2);
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, conversion_failure);
+ __ movl(rcx, Immediate(0));
+ __ jmp(&done);
- // Register mapping:
- // rax is object map.
- // rdx is function.
- // rbx is function prototype.
- __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+ __ bind(&arg2_is_object);
+ __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, &check_undefined_arg2);
+ // Get the untagged integer version of the rax heap number in rcx.
+ IntegerConvert(masm, rcx, rax);
+ __ bind(&done);
+ __ movl(rax, rdx);
+}
- __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
- // Loop through the prototype chain looking for the function prototype.
- Label loop, is_instance, is_not_instance;
- __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
- __ bind(&loop);
- __ cmpq(rcx, rbx);
- __ j(equal, &is_instance);
- __ cmpq(rcx, kScratchRegister);
- // The code at is_not_instance assumes that kScratchRegister contains a
- // non-zero GCable value (the null object in this case).
- __ j(equal, &is_not_instance);
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
- __ jmp(&loop);
+void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
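+ // Both operands are known to be smis: untag each and convert it to a double.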
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
+}
- __ bind(&is_instance);
- __ xorl(rax, rax);
- // Store bitwise zero in the cache. This is a Smi in GC terms.
- ASSERT_EQ(0, kSmiTag);
- __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
- __ bind(&is_not_instance);
- // We have to store a non-zero value in the cache.
- __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
+void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
+ Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
+ // Load operand in rdx into xmm0.
+ __ JumpIfSmi(rdx, &load_smi_rdx);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ // Load operand in rax into xmm1.
+ __ JumpIfSmi(rax, &load_smi_rax);
+ __ bind(&load_nonsmi_rax);
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&done);
- // Slow-case: Go through the JavaScript implementation.
- __ bind(&slow);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
-}
+ __ bind(&load_smi_rdx);
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ JumpIfNotSmi(rax, &load_nonsmi_rax);
+ __ bind(&load_smi_rax);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
- // rsp[0] : return address
- // rsp[8] : number of parameters
- // rsp[16] : receiver displacement
- // rsp[24] : function
+ __ bind(&done);
+}
- // The displacement is used for skipping the return address and the
- // frame pointer on the stack. It is the offset of the last
- // parameter (if any) relative to the frame pointer.
- static const int kDisplacement = 2 * kPointerSize;
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor_frame);
+void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
+ Label* not_numbers) {
+ Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
+ // Load operand in rdx into xmm0, or branch to not_numbers.
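+ // rcx caches the heap number map; it is compared against both operands.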
+ __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
+ __ JumpIfSmi(rdx, &load_smi_rdx);
+ __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
+ __ j(not_equal, not_numbers); // Argument in rdx is not a number.
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ // Load operand in rax into xmm1, or branch to not_numbers.
+ __ JumpIfSmi(rax, &load_smi_rax);
- // Get the length from the frame.
- __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
- __ jmp(&try_allocate);
+ __ bind(&load_nonsmi_rax);
+ __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+ __ j(not_equal, not_numbers);
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&done);
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ SmiToInteger32(rcx,
- Operand(rdx,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- // Space on stack must already hold a smi.
- __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
- // Do not clobber the length index for the indexing operation since
- // it is used compute the size for allocation later.
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+ __ bind(&load_smi_rdx);
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ JumpIfNotSmi(rax, &load_nonsmi_rax);
- // Try the new space allocation. Start out with computing the size of
- // the arguments object and the elements array.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ testl(rcx, rcx);
- __ j(zero, &add_arguments_object);
- __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
- __ bind(&add_arguments_object);
- __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
+ __ bind(&load_smi_rax);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
+ __ bind(&done);
+}
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
- // Get the arguments boilerplate from the current (global) context.
- int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
- __ movq(rdi, Operand(rdi, offset));
+void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
+ Label slow, done;
- // Copy the JS object part.
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
- __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
- __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
- __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
- __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
- __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
+ if (op_ == Token::SUB) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ JumpIfNotSmi(rax, &try_float);
- // Setup the callee in-object property.
- ASSERT(Heap::arguments_callee_index == 0);
- __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister);
+ if (negative_zero_ == kIgnoreNegativeZero) {
+ __ SmiCompare(rax, Smi::FromInt(0));
+ __ j(equal, &done);
+ }
- // Get the length (smi tagged) and set that as an in-object property too.
- ASSERT(Heap::arguments_length_index == 1);
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);
+ // Enter runtime system if the value of the smi is zero
+ // to make sure that we switch between 0 and -0.
+ // Also enter it if the value of the smi is Smi::kMinValue.
+ __ SmiNeg(rax, rax, &done);
- // If there are no actual arguments, we're done.
- Label done;
- __ SmiTest(rcx);
- __ j(zero, &done);
+ // Either zero or Smi::kMinValue, neither of which becomes a smi when
+ // negated.
+ if (negative_zero_ == kStrictNegativeZero) {
+ __ SmiCompare(rax, Smi::FromInt(0));
+ __ j(not_equal, &slow);
+ __ Move(rax, Factory::minus_zero_value());
+ __ jmp(&done);
+ } else {
+ __ jmp(&slow);
+ }
- // Get the parameters pointer from the stack and untag the length.
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ // Try floating point case.
+ __ bind(&try_float);
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &slow);
+ // Operand is a float, negate its value by flipping sign bit.
+ __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(kScratchRegister, Immediate(0x01));
+ __ shl(kScratchRegister, Immediate(63));
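+ // kScratchRegister is now 1 << 63, the sign bit of an IEEE double.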
+ __ xor_(rdx, kScratchRegister); // Flip sign.
+ // rdx is value to store.
+ if (overwrite_ == UNARY_OVERWRITE) {
+ __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
+ } else {
+ __ AllocateHeapNumber(rcx, rbx, &slow);
+ // rcx: allocated 'empty' number
+ __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
+ __ movq(rax, rcx);
+ }
+ } else if (op_ == Token::BIT_NOT) {
+ // Check if the operand is a heap number.
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &slow);
- // Setup the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
- __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
- __ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
+ // Convert the heap number in rax to an untagged integer in rcx.
+ IntegerConvert(masm, rax, rax);
- // Copy the fixed array slots.
- Label loop;
- __ bind(&loop);
- __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
- __ addq(rdi, Immediate(kPointerSize));
- __ subq(rdx, Immediate(kPointerSize));
- __ decl(rcx);
- __ j(not_zero, &loop);
+ // Do the bitwise operation and smi tag the result.
+ __ notl(rax);
+ __ Integer32ToSmi(rax, rax);
+ }
- // Return and remove the on-stack parameters.
+ // Return from the stub.
__ bind(&done);
- __ ret(3 * kPointerSize);
+ __ StubReturn(1);
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ // Handle the slow case by jumping to the JavaScript builtin.
+ __ bind(&slow);
+ __ pop(rcx); // pop return address
+ __ push(rax);
+ __ push(rcx); // push return address
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
}
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- // Check that stack should contain next handler, frame pointer, state and
- // return address in that order.
- ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
- StackHandlerConstants::kStateOffset);
- ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
- StackHandlerConstants::kPCOffset);
-
- ExternalReference handler_address(Top::k_handler_address);
- __ movq(kScratchRegister, handler_address);
- __ movq(rsp, Operand(kScratchRegister, 0));
- // get next in chain
- __ pop(rcx);
- __ movq(Operand(kScratchRegister, 0), rcx);
- __ pop(rbp); // pop frame pointer
- __ pop(rdx); // remove state
-
- // Before returning we restore the context from the frame pointer if not NULL.
- // The frame pointer is NULL in the exception handler of a JS entry frame.
- __ xor_(rsi, rsi); // tentatively set context pointer to NULL
- Label skip;
- __ cmpq(rbp, Immediate(0));
- __ j(equal, &skip);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ bind(&skip);
- __ ret(0);
-}
-
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope,
- int /* alignment_skew */) {
- // rax: result parameter for PerformGC, if any.
- // rbx: pointer to C function (C callee-saved).
- // rbp: frame pointer (restored after C call).
- // rsp: stack pointer (restored after C call).
- // r14: number of arguments including receiver (C callee-saved).
- // r12: pointer to the first argument (C callee-saved).
- // This pointer is reused in LeaveExitFrame(), so it is stored in a
- // callee-saved register.
-
- // Simple results returned in rax (both AMD64 and Win64 calling conventions).
- // Complex results must be written to address passed as first argument.
- // AMD64 calling convention: a struct of two pointers in rax+rdx
-
- // Check stack alignment.
- if (FLAG_debug_code) {
- __ CheckStackAlignment();
- }
-
- if (do_gc) {
- // Pass failure code returned from last attempt as first argument to
- // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
- // stack is known to be aligned. This function takes one argument which is
- // passed in register.
-#ifdef _WIN64
- __ movq(rcx, rax);
-#else // _WIN64
- __ movq(rdi, rax);
-#endif
- __ movq(kScratchRegister,
- FUNCTION_ADDR(Runtime::PerformGC),
- RelocInfo::RUNTIME_ENTRY);
- __ call(kScratchRegister);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth();
- if (always_allocate_scope) {
- __ movq(kScratchRegister, scope_depth);
- __ incl(Operand(kScratchRegister, 0));
- }
-
- // Call C function.
-#ifdef _WIN64
- // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
- // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
- __ movq(Operand(rsp, 4 * kPointerSize), r14); // argc.
- __ movq(Operand(rsp, 5 * kPointerSize), r12); // argv.
- if (result_size_ < 2) {
- // Pass a pointer to the Arguments object as the first argument.
- // Return result in single register (rax).
- __ lea(rcx, Operand(rsp, 4 * kPointerSize));
- } else {
- ASSERT_EQ(2, result_size_);
- // Pass a pointer to the result location as the first argument.
- __ lea(rcx, Operand(rsp, 6 * kPointerSize));
- // Pass a pointer to the Arguments object as the second argument.
- __ lea(rdx, Operand(rsp, 4 * kPointerSize));
- }
-
-#else // _WIN64
- // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
- __ movq(rdi, r14); // argc.
- __ movq(rsi, r12); // argv.
-#endif
- __ call(rbx);
- // Result is in rax - do not destroy this register!
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+ // rsp[0] : return address
+ // rsp[8] : number of parameters
+ // rsp[16] : receiver displacement
+ // rsp[24] : function
- if (always_allocate_scope) {
- __ movq(kScratchRegister, scope_depth);
- __ decl(Operand(kScratchRegister, 0));
- }
+ // The displacement is used for skipping the return address and the
+ // frame pointer on the stack. It is the offset of the last
+ // parameter (if any) relative to the frame pointer.
+ static const int kDisplacement = 2 * kPointerSize;
- // Check for failure result.
- Label failure_returned;
- ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
-#ifdef _WIN64
- // If return value is on the stack, pop it to registers.
- if (result_size_ > 1) {
- ASSERT_EQ(2, result_size_);
- // Read result values stored on stack. Result is stored
- // above the four argument mirror slots and the two
- // Arguments object slots.
- __ movq(rax, Operand(rsp, 6 * kPointerSize));
- __ movq(rdx, Operand(rsp, 7 * kPointerSize));
- }
-#endif
- __ lea(rcx, Operand(rax, 1));
- // Lower 2 bits of rcx are 0 iff rax has failure tag.
- __ testl(rcx, Immediate(kFailureTagMask));
- __ j(zero, &failure_returned);
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adaptor_frame);
- // Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(mode_, result_size_);
- __ ret(0);
+ // Get the length from the frame.
+ __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
+ __ jmp(&try_allocate);
- // Handling of failure.
- __ bind(&failure_returned);
+ // Patch the arguments.length and the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ SmiToInteger32(rcx,
+ Operand(rdx,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ // Space on stack must already hold a smi.
+ __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
+ // Do not clobber the length index for the indexing operation since
+ // it is used to compute the size for allocation later.
+ __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
+ __ movq(Operand(rsp, 2 * kPointerSize), rdx);
- Label retry;
- // If the returned exception is RETRY_AFTER_GC continue at retry label
- ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry);
+ // Try the new space allocation. Start out with computing the size of
+ // the arguments object and the elements array.
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ testl(rcx, rcx);
+ __ j(zero, &add_arguments_object);
+ __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ bind(&add_arguments_object);
+ __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
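+ // rcx now holds the total allocation size: the arguments object plus the
+ // elements array, if any.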
- // Special handling of out of memory exceptions.
- __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
- __ cmpq(rax, kScratchRegister);
- __ j(equal, throw_out_of_memory_exception);
+ // Do the allocation of both objects in one go.
+ __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
- // Retrieve the pending exception and clear the variable.
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
- __ movq(kScratchRegister, pending_exception_address);
- __ movq(rax, Operand(kScratchRegister, 0));
- __ movq(rdx, ExternalReference::the_hole_value_location());
- __ movq(rdx, Operand(rdx, 0));
- __ movq(Operand(kScratchRegister, 0), rdx);
+ // Get the arguments boilerplate from the current (global) context.
+ int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+ __ movq(rdi, Operand(rdi, offset));
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
- __ j(equal, throw_termination_exception);
+ // Copy the JS object part.
+ STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+ __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
+ __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
+ __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
+ __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
+ __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
+ __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
- // Handle normal exception.
- __ jmp(throw_normal_exception);
+ // Setup the callee in-object property.
+ ASSERT(Heap::arguments_callee_index == 0);
+ __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
+ __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister);
- // Retry.
- __ bind(&retry);
-}
+ // Get the length (smi tagged) and set that as an in-object property too.
+ ASSERT(Heap::arguments_length_index == 1);
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ SmiTest(rcx);
+ __ j(zero, &done);
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- // Fetch top stack handler.
- ExternalReference handler_address(Top::k_handler_address);
- __ movq(kScratchRegister, handler_address);
- __ movq(rsp, Operand(kScratchRegister, 0));
+ // Get the parameters pointer from the stack and untag the length.
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
+ // Setup the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
+ __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+ __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
+ __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+ __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+ __ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
+
+ // Copy the fixed array slots.
+ Label loop;
__ bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
- __ j(equal, &done);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- __ movq(rsp, Operand(rsp, kNextOffset));
- __ jmp(&loop);
+ __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
+ __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
+ __ addq(rdi, Immediate(kPointerSize));
+ __ subq(rdx, Immediate(kPointerSize));
+ __ decl(rcx);
+ __ j(not_zero, &loop);
+
+ // Return and remove the on-stack parameters.
__ bind(&done);
+ __ ret(3 * kPointerSize);
- // Set the top handler address to next handler past the current ENTRY handler.
- __ movq(kScratchRegister, handler_address);
- __ pop(Operand(kScratchRegister, 0));
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
- __ movq(rax, Immediate(false));
- __ store_rax(external_caught);
- // Set pending exception and rax to out of memory exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
- __ store_rax(pending_exception);
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+ // Jump straight to the runtime if native RegExp is not selected at compile
+ // time (V8_INTERPRETED_REGEXP) or if entering generated regexp code is
+ // disabled by the runtime switch.
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ return;
}
- // Clear the context pointer.
- __ xor_(rsi, rsi);
+ // Stack frame on entry.
+ // rsp[0]: return address
+ // rsp[8]: last_match_info (expected JSArray)
+ // rsp[16]: previous index
+ // rsp[24]: subject string
+ // rsp[32]: JSRegExp object
- // Restore registers from handler.
- ASSERT_EQ(StackHandlerConstants::kNextOffset + kPointerSize,
- StackHandlerConstants::kFPOffset);
- __ pop(rbp); // FP
- ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
- StackHandlerConstants::kStateOffset);
- __ pop(rdx); // State
+ static const int kLastMatchInfoOffset = 1 * kPointerSize;
+ static const int kPreviousIndexOffset = 2 * kPointerSize;
+ static const int kSubjectOffset = 3 * kPointerSize;
+ static const int kJSRegExpOffset = 4 * kPointerSize;
- ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
- StackHandlerConstants::kPCOffset);
- __ ret(0);
-}
+ Label runtime;
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size();
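+ // A size of zero means the RegExp stack has not been allocated yet.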
+ __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
+ __ movq(kScratchRegister, Operand(kScratchRegister, 0));
+ __ testq(kScratchRegister, kScratchRegister);
+ __ j(zero, &runtime);
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- Label slow;
- // If the receiver might be a value (string, number or boolean) check for this
- // and box it if it is.
- if (ReceiverMightBeValue()) {
- // Get the receiver from the stack.
- // +1 ~ return address
- Label receiver_is_value, receiver_is_js_object;
- __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
+ // Check that the first argument is a JSRegExp object.
+ __ movq(rax, Operand(rsp, kJSRegExpOffset));
+ __ JumpIfSmi(rax, &runtime);
+ __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
+ __ j(not_equal, &runtime);
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ Condition is_smi = masm->CheckSmi(rcx);
+ __ Check(NegateCondition(is_smi),
+ "Unexpected type for RegExp data, FixedArray expected");
+ __ CmpObjectType(rcx, FIXED_ARRAY_TYPE, kScratchRegister);
+ __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
+ }
+
+ // rcx: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
+ __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
+ __ j(not_equal, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // Check that the number of captures fit in the static offsets vector buffer.
+ __ SmiToInteger32(rdx,
+ FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ __ leal(rdx, Operand(rdx, rdx, times_1, 2));
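+ // The lea computes rdx * 2 + 2 in a single instruction.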
+ // Check that the static offsets vector buffer is large enough.
+ __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
+ __ j(above, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // rdx: Number of capture registers
+ // Check that the second argument is a string.
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ JumpIfSmi(rax, &runtime);
+ Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
+ __ j(NegateCondition(is_string), &runtime);
- // Check if receiver is a smi (which is a number value).
- __ JumpIfSmi(rax, &receiver_is_value);
+ // rax: Subject string.
+ // rcx: RegExp data (FixedArray).
+ // rdx: Number of capture registers.
+ // Check that the third argument is a positive smi less than the string
+ // length. A negative value will be greater (unsigned comparison).
+ __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(rbx, &runtime);
+ __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset));
+ __ j(above_equal, &runtime);
- // Check if the receiver is a valid JS object.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
- __ j(above_equal, &receiver_is_js_object);
+ // rcx: RegExp data (FixedArray)
+ // rdx: Number of capture registers
+ // Check that the fourth object is a JSArray object.
+ __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
+ __ JumpIfSmi(rax, &runtime);
+ __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister);
+ __ j(not_equal, &runtime);
+ // Check that the JSArray is in fast case.
+ __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
+ __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ Cmp(rax, Factory::fixed_array_map());
+ __ j(not_equal, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information. Ensure no overflow in add.
+ ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
+ __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmpl(rdx, rax);
+ __ j(greater, &runtime);
- // Call the runtime to box the value.
- __ bind(&receiver_is_value);
- __ EnterInternalFrame();
- __ push(rax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ LeaveInternalFrame();
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
+ // rcx: RegExp data (FixedArray)
+ // Check the representation and encoding of the subject string.
+ Label seq_ascii_string, seq_two_byte_string, check_code;
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ // First check for flat two byte string.
+ __ andb(rbx, Immediate(
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
+ ASSERT_EQ(0, kStringTag | kSeqStringTag | kTwoByteStringTag);
+ __ j(zero, &seq_two_byte_string);
+ // Any other flat string must be a flat ascii string.
+ __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
+ __ j(zero, &seq_ascii_string);
- __ bind(&receiver_is_js_object);
- }
+ // Check for flat cons string.
+ // A flat cons string is a cons string where the second part is the empty
+ // string. In that case the subject string is just the first part of the cons
+ // string. Also in this case the first part of the cons string is known to be
+ // a sequential string or an external string.
+ ASSERT(kExternalStringTag != 0);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ __ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
+ __ j(not_zero, &runtime);
+ // String is a cons string.
+ __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
+ __ Cmp(rdx, Factory::empty_string());
+ __ j(not_equal, &runtime);
+ __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ // String is a cons string with empty second part.
+ // rax: first part of cons string.
+ // rbx: map of first part of cons string.
+ // Is first part a flat two byte string?
+ __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
+ Immediate(kStringRepresentationMask | kStringEncodingMask));
+ ASSERT_EQ(0, kSeqStringTag | kTwoByteStringTag);
+ __ j(zero, &seq_two_byte_string);
+ // Any other flat string must be ascii.
+ __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
+ Immediate(kStringRepresentationMask));
+ __ j(not_zero, &runtime);
- // Get the function to call from the stack.
- // +2 ~ receiver, return address
- __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
+ __ bind(&seq_ascii_string);
+ // rax: subject string (sequential ascii)
+ // rcx: RegExp data (FixedArray)
+ __ movq(r11, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
+ __ Set(rdi, 1); // Type is ascii.
+ __ jmp(&check_code);
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(rdi, &slow);
- // Goto slow case if we do not have a function.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
+ __ bind(&seq_two_byte_string);
+ // rax: subject string (flat two-byte)
+ // rcx: RegExp data (FixedArray)
+ __ movq(r11, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
+ __ Set(rdi, 0); // Type is two byte.
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc_);
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+ __ bind(&check_code);
+ // Check that the irregexp code has been generated for the actual string
+ // encoding. If it has, the field contains a code object; otherwise it
+ // contains the hole.
+ __ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
+ __ j(not_equal, &runtime);
- // Slow-case: Non-function called.
- __ bind(&slow);
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
- __ Set(rax, argc_);
- __ Set(rbx, 0);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
-}
+ // rax: subject string
+ // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
+ // r11: code
+ // Load used arguments before starting to push arguments for call to native
+ // RegExp code to avoid handling changing stack height.
+ __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
+ // rax: subject string
+ // rbx: previous index
+ // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
+ // r11: code
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(&Counters::regexp_entry_native, 1);
-void CEntryStub::Generate(MacroAssembler* masm) {
- // rax: number of arguments including receiver
- // rbx: pointer to C function (C callee-saved)
- // rbp: frame pointer of calling JS frame (restored after C call)
- // rsp: stack pointer (restored after C call)
- // rsi: current context (restored)
+ // rsi is callee save on Windows and is used to pass a parameter on Linux,
+ // so preserve it across the call.
+ __ push(rsi);
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
+ static const int kRegExpExecuteArguments = 7;
+ __ PrepareCallCFunction(kRegExpExecuteArguments);
+ int argument_slots_on_stack =
+ masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
- // Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(mode_, result_size_);
+ // Argument 7: Indicate that this is a direct call from JavaScript.
+ __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
+ Immediate(1));
- // rax: Holds the context at this point, but should not be used.
- // On entry to code generated by GenerateCore, it must hold
- // a failure result if the collect_garbage argument to GenerateCore
- // is true. This failure result can be the result of code
- // generated by a previous call to GenerateCore. The value
- // of rax is then passed to Runtime::PerformGC.
- // rbx: pointer to builtin function (C callee-saved).
- // rbp: frame pointer of exit frame (restored after C call).
- // rsp: stack pointer (restored after C call).
- // r14: number of arguments including receiver (C callee-saved).
- // r12: argv pointer (C callee-saved).
+ // Argument 6: Start (high end) of backtracking stack memory area.
+ __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
+ __ movq(r9, Operand(kScratchRegister, 0));
+ __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
+ __ addq(r9, Operand(kScratchRegister, 0));
+ // Argument 6 passed in r9 on Linux and on the stack on Windows.
+#ifdef _WIN64
+ __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), r9);
+#endif
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
+ // Argument 5: static offsets vector buffer.
+ __ movq(r8, ExternalReference::address_of_static_offsets_vector());
+ // Argument 5 passed in r8 on Linux and on the stack on Windows.
+#ifdef _WIN64
+ __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r8);
+#endif
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
+ // First four arguments are passed in registers on both Linux and Windows.
+#ifdef _WIN64
+ Register arg4 = r9;
+ Register arg3 = r8;
+ Register arg2 = rdx;
+ Register arg1 = rcx;
+#else
+ Register arg4 = rcx;
+ Register arg3 = rdx;
+ Register arg2 = rsi;
+ Register arg1 = rdi;
+#endif
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
+ // Keep track of aliasing between argX defined above and the registers used.
+ // rax: subject string
+ // rbx: previous index
+ // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
+ // r11: code
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ movq(rax, failure, RelocInfo::NONE);
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
+ // Argument 4: End of string data
+ // Argument 3: Start of string data
+ Label setup_two_byte, setup_rest;
+ __ testb(rdi, rdi);
+ __ j(zero, &setup_two_byte);
+ __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
+ __ jmp(&setup_rest);
+ __ bind(&setup_two_byte);
+ __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
+ __ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
- __ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+ __ bind(&setup_rest);
+ // Argument 2: Previous index.
+ __ movq(arg2, rbx);
- __ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
+ // Argument 1: Subject string.
+ __ movq(arg1, rax);
- __ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
-}
+ // Locate the code entry and call it.
+ __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ CallCFunction(r11, kRegExpExecuteArguments);
+ // Restore rsi, which was saved before the call.
+ __ pop(rsi);
-void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
- UNREACHABLE();
-}
+ // Check the result.
+ Label success;
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
+ __ j(equal, &success);
+ Label failure;
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
+ __ j(equal, &failure);
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
+ // If not exception, it can only be retry. Handle that in the runtime system.
+ __ j(not_equal, &runtime);
+ // The result must now be exception. If there is no pending exception
+ // already, a stack overflow (on the backtrack stack) was detected in the
+ // RegExp code, but the exception has not been created yet. Handle that in
+ // the runtime system.
+ // TODO(592): Rerun the RegExp to get the stack overflow exception.
+ ExternalReference pending_exception_address(Top::k_pending_exception_address);
+ __ movq(kScratchRegister, pending_exception_address);
+ // Dereference the address to get the pending exception value itself.
+ __ movq(kScratchRegister, Operand(kScratchRegister, 0));
+ __ Cmp(kScratchRegister, Factory::the_hole_value());
+ __ j(equal, &runtime);
+ __ bind(&failure);
+ // For failure and exception return null.
+ __ Move(rax, Factory::null_value());
+ __ ret(4 * kPointerSize);
+ // Load RegExp data.
+ __ bind(&success);
+ __ movq(rax, Operand(rsp, kJSRegExpOffset));
+ __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
+ __ SmiToInteger32(rax,
+ FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ __ leal(rdx, Operand(rax, rax, times_1, 2));
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, exit;
-#ifdef ENABLE_LOGGING_AND_PROFILING
- Label not_outermost_js, not_outermost_js_2;
-#endif
+ // rdx: Number of capture registers
+ // Load last_match_info which is still known to be a fast case JSArray.
+ __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
+ __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
- // Setup frame.
- __ push(rbp);
- __ movq(rbp, rsp);
+ // rbx: last_match_info backing store (FixedArray)
+ // rdx: number of capture registers
+ // Store the capture count.
+ __ Integer32ToSmi(kScratchRegister, rdx);
+ __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
+ kScratchRegister);
+ // Store last subject and last input.
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
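+ // RecordWrite may clobber its register arguments, so operate on a copy of
+ // rbx and keep rbx live for later use.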
+ __ movq(rcx, rbx);
+ __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
+ __ movq(rcx, rbx);
+ __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
- // Push the stack frame type marker twice.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- // Scratch register is neither callee-save, nor an argument register on any
- // platform. It's free to use at this point.
- // Cannot use smi-register for loading yet.
- __ movq(kScratchRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
- RelocInfo::NONE);
- __ push(kScratchRegister); // context slot
- __ push(kScratchRegister); // function slot
- // Save callee-saved registers (X64/Win64 calling conventions).
- __ push(r12);
- __ push(r13);
- __ push(r14);
- __ push(r15);
-#ifdef _WIN64
- __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
- __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
-#endif
- __ push(rbx);
- // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low low 64 bits are
- // callee save as well.
+ // Get the static offsets vector filled by the native regexp code.
+ __ movq(rcx, ExternalReference::address_of_static_offsets_vector());
- // Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
- __ load_rax(c_entry_fp);
- __ push(rax);
+ // rbx: last_match_info backing store (FixedArray)
+ // rcx: offsets vector
+ // rdx: number of capture registers
+ Label next_capture, done;
+ // The capture register counter starts from the number of capture registers
+ // and counts down until wrapping after zero.
+ __ bind(&next_capture);
+ __ subq(rdx, Immediate(1));
+ __ j(negative, &done);
+ // Read the value from the static offsets vector buffer and make it a smi.
+ __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
+ __ Integer32ToSmi(rdi, rdi, &runtime);
+ // Store the smi value in the last match info.
+ __ movq(FieldOperand(rbx,
+ rdx,
+ times_pointer_size,
+ RegExpImpl::kFirstCaptureOffset),
+ rdi);
+ __ jmp(&next_capture);
+ __ bind(&done);
- // Set up the roots and smi constant registers.
- // Needs to be done before any further smi loads.
- ExternalReference roots_address = ExternalReference::roots_address();
- __ movq(kRootRegister, roots_address);
- __ InitializeSmiConstantRegister();
+ // Return last match info.
+ __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
+ __ ret(4 * kPointerSize);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
- __ load_rax(js_entry_sp);
- __ testq(rax, rax);
- __ j(not_zero, ¬_outermost_js);
- __ movq(rax, rbp);
- __ store_rax(js_entry_sp);
- __ bind(¬_outermost_js);
-#endif
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#endif // V8_INTERPRETED_REGEXP
+}
- // Call a faked try-block that does the invoke.
- __ call(&invoke);
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ store_rax(pending_exception);
- __ movq(rax, Failure::Exception(), RelocInfo::NONE);
- __ jmp(&exit);
+void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ bool object_is_smi,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+ // Load the number string cache.
+ __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
- // Clear any pending exceptions.
- __ load_rax(ExternalReference::the_hole_value_location());
- __ store_rax(pending_exception);
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ __ SmiToInteger32(
+ mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ __ shrl(mask, Immediate(1));
+ __ subq(mask, Immediate(1)); // Make mask.
- // Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ if (!object_is_smi) {
+ __ JumpIfSmi(object, &is_smi);
+ __ CheckMap(object, Factory::heap_number_map(), not_found, true);
- // Invoke the function by calling through JS entry trampoline
- // builtin and pop the faked function when we return. We load the address
- // from an external reference instead of inlining the call target address
- // directly in the code, because the builtin stubs may not have been
- // generated yet at the time this code is generated.
- if (is_construct) {
- ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
- __ load_rax(construct_entry);
- } else {
- ExternalReference entry(Builtins::JSEntryTrampoline);
- __ load_rax(entry);
- }
- __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
- __ call(kScratchRegister);
+ ASSERT_EQ(8, kDoubleSize);
+ __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ GenerateConvertHashCodeToIndex(masm, scratch, mask);
- // Unlink this frame from the handler chain.
- __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
- __ pop(Operand(kScratchRegister, 0));
- // Pop next_sp.
- __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+ Register index = scratch;
+ Register probe = mask;
+ __ movq(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ __ JumpIfSmi(probe, not_found);
+ ASSERT(CpuFeatures::IsSupported(SSE2));
+ CpuFeatures::Scope fscope(SSE2);
+ __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm1);
+ __ j(parity_even, not_found); // Bail out if NaN is involved.
+ __ j(not_equal, not_found); // The cache did not contain this value.
+ __ jmp(&load_result_from_cache);
+ }
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If current EBP value is the same as js_entry_sp value, it means that
- // the current function is the outermost.
- __ movq(kScratchRegister, js_entry_sp);
- __ cmpq(rbp, Operand(kScratchRegister, 0));
- __ j(not_equal, ¬_outermost_js_2);
- __ movq(Operand(kScratchRegister, 0), Immediate(0));
- __ bind(¬_outermost_js_2);
-#endif
+ __ bind(&is_smi);
+ __ SmiToInteger32(scratch, object);
+ GenerateConvertHashCodeToIndex(masm, scratch, mask);
- // Restore the top frame descriptor from the stack.
- __ bind(&exit);
- __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
- __ pop(Operand(kScratchRegister, 0));
+ Register index = scratch;
+ // Check if the entry is the smi we are looking for.
+ __ cmpq(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ __ j(not_equal, not_found);
- // Restore callee-saved registers (X64 conventions).
- __ pop(rbx);
-#ifdef _WIN64
- // Callee save on in Win64 ABI, arguments/volatile in AMD64 ABI.
- __ pop(rsi);
- __ pop(rdi);
-#endif
- __ pop(r15);
- __ pop(r14);
- __ pop(r13);
- __ pop(r12);
- __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
+ // Get the result from the cache.
+ __ bind(&load_result_from_cache);
+ __ movq(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ IncrementCounter(&Counters::number_to_string_native, 1);
+}
- // Restore frame pointer and return.
- __ pop(rbp);
- __ ret(0);
+
+void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
+ Register hash,
+ Register mask) {
+ __ and_(hash, mask);
+ // Each entry in the string cache consists of two pointer-sized fields,
+ // but the times_twice_pointer_size (multiplication by 16) scale factor
+ // is not supported by the addressing modes on x64.
+ // So we have to premultiply the entry index before the lookup.
+ __ shl(hash, Immediate(kPointerSizeLog2 + 1));
}
-// -----------------------------------------------------------------------------
-// Implementation of stubs.
+void NumberToStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
-// Stub classes have public member named masm, not masm_.
+ __ movq(rbx, Operand(rsp, kPointerSize));
-void StackCheckStub::Generate(MacroAssembler* masm) {
- // Because builtins always remove the receiver from the stack, we
- // have to fake one to avoid underflowing the stack. The receiver
- // must be inserted below the return address on the stack so we
- // temporarily store that in a register.
- __ pop(rax);
- __ Push(Smi::FromInt(0));
- __ push(rax);
+ // Generate code to lookup number in the number string cache.
+ GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
+ __ ret(1 * kPointerSize);
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
+ __ bind(&runtime);
+ // Handle number to string in the runtime system if not found in the cache.
+ __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
}
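+// Returns a comparison result that makes the condition cc come out false:
+// LESS for greater/greater_equal, GREATER for less/less_equal.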
-void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
+static int NegativeComparisonResult(Condition cc) {
+ ASSERT(cc != equal);
+ ASSERT((cc == less) || (cc == less_equal)
+ || (cc == greater) || (cc == greater_equal));
+ return (cc == greater || cc == greater_equal) ? LESS : GREATER;
}
-void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
- // Load operand in rdx into xmm0.
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1.
- __ JumpIfSmi(rax, &load_smi_rax);
- __ bind(&load_nonsmi_rax);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
+void CompareStub::Generate(MacroAssembler* masm) {
+ Label check_unequal_objects, done;
+ // The compare stub returns a positive, negative, or zero 64-bit integer
+ // value in rax, corresponding to the result of comparing the two inputs.
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
+ // Two identical objects are equal unless they are both NaN or undefined.
+ {
+ Label not_identical;
+ __ cmpq(rax, rdx);
+ __ j(not_equal, ¬_identical);
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
+ if (cc_ != equal) {
+ // Check for undefined. undefined OP undefined is false even though
+ // undefined == undefined.
+ Label check_for_nan;
+ __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &check_for_nan);
+ __ Set(rax, NegativeComparisonResult(cc_));
+ __ ret(0);
+ __ bind(&check_for_nan);
+ }
- __ bind(&done);
-}
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // Note: if cc_ != equal, never_nan_nan_ is not used.
+ // We cannot set rax to EQUAL until just before return because
+ // rax must be unchanged on jump to not_identical.
+ if (never_nan_nan_ && (cc_ == equal)) {
+ __ Set(rax, EQUAL);
+ __ ret(0);
+ } else {
+ Label heap_number;
+ // If it's not a heap number, then return equal for the (in)equality operator.
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ __ j(equal, &heap_number);
+ if (cc_ != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(above_equal, ¬_identical);
+ }
+ __ Set(rax, EQUAL);
+ __ ret(0);
-void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
- Label* not_numbers) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
- // Load operand in rdx into xmm0, or branch to not_numbers.
- __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
- __ j(not_equal, not_numbers); // Argument in rdx is not a number.
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1, or branch to not_numbers.
- __ JumpIfSmi(rax, &load_smi_rax);
+ __ bind(&heap_number);
+ // It is a heap number, so return equal if it's not NaN.
+ // For NaN, return 1 for every condition except greater and
+ // greater-equal. Return -1 for them, so the comparison yields
+ // false for all conditions except not-equal.
+ __ Set(rax, EQUAL);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm0);
+ __ setcc(parity_even, rax);
+ // rax is 0 for equal non-NaN heap numbers, 1 for NaNs.
+ if (cc_ == greater_equal || cc_ == greater) {
+ __ neg(rax);
+ }
+ __ ret(0);
+ }
- __ bind(&load_nonsmi_rax);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
- __ j(not_equal, not_numbers);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
+ __ bind(¬_identical);
+ }
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
+ if (cc_ == equal) { // Both strict and non-strict.
+ Label slow; // Fallthrough label.
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
- __ bind(&done);
-}
+ // If we're doing a strict equality comparison, we don't have to do
+ // type conversion, so we generate code to do fast comparison for objects
+ // and oddballs. Non-smi numbers and strings still go through the usual
+ // slow-case code.
+ if (strict_) {
+ // If either is a Smi (we know that not both are), then they can only
+ // be equal if the other is a HeapNumber. If so, use the slow case.
+ {
+ Label not_smis;
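+ // SelectNonSmi moves the single non-smi operand into rbx, or jumps to
+ // not_smis if neither operand is a smi.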
+ __ SelectNonSmi(rbx, rax, rdx, ¬_smis);
+ // Check if the non-smi operand is a heap number.
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ // If heap number, handle it in the slow case.
+ __ j(equal, &slow);
+ // Return non-equal. ebx (the lower half of rbx) is not zero.
+ __ movq(rax, rbx);
+ __ ret(0);
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- Label* conversion_failure,
- Register heap_number_map) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
+ __ bind(¬_smis);
+ }
- __ JumpIfNotSmi(rdx, &arg1_is_object);
- __ SmiToInteger32(rdx, rdx);
- __ jmp(&load_arg2);
+ // If either operand is a JSObject or an oddball value, then they are not
+ // equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ movl(rdx, Immediate(0));
- __ jmp(&load_arg2);
+ // If the first object is a JS object, we have done pointer comparison.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ Label first_non_object;
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, &first_non_object);
+ // Return non-zero (eax (not rax) is not zero)
+ Label return_not_equal;
+ ASSERT(kHeapObjectTag != 0);
+ __ bind(&return_not_equal);
+ __ ret(0);
- __ bind(&arg1_is_object);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg1);
- // Get the untagged integer version of the edx heap number in rcx.
- IntegerConvert(masm, rdx, rdx);
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(rcx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
- // Here rdx has the untagged integer, rax has a Smi or a heap number.
- __ bind(&load_arg2);
- // Test if arg2 is a Smi.
- __ JumpIfNotSmi(rax, &arg2_is_object);
- __ SmiToInteger32(rax, rax);
- __ movl(rcx, rax);
- __ jmp(&done);
+ __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(above_equal, &return_not_equal);
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ movl(rcx, Immediate(0));
- __ jmp(&done);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(rcx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ // Fall through to the general case.
+ }
+ __ bind(&slow);
+ }
+
+ // Push arguments below the return address to prepare jump to builtin.
+ __ pop(rcx);
+ __ push(rax);
+ __ push(rdx);
+ __ push(rcx);
+
+ // Generate the number comparison code.
+ if (include_number_compare_) {
+ Label non_number_comparison;
+ Label unordered;
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
+ __ xorl(rax, rax);
+ __ xorl(rcx, rcx);
+ __ ucomisd(xmm0, xmm1);
+
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered);
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ setcc(above, rax);
+ __ setcc(below, rcx);
+ __ subq(rax, rcx);
+ __ ret(2 * kPointerSize); // rax, rdx were pushed
- __ bind(&arg2_is_object);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the eax heap number in ecx.
- IntegerConvert(masm, rcx, rax);
- __ bind(&done);
- __ movl(rax, rdx);
-}
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc_ != not_equal);
+ if (cc_ == less || cc_ == less_equal) {
+ __ Set(rax, 1);
+ } else {
+ __ Set(rax, -1);
+ }
+ __ ret(2 * kPointerSize); // rax, rdx were pushed
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
+ }
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
- // Check float operands.
- Label done;
- Label rax_is_smi;
- Label rax_is_object;
- Label rdx_is_object;
+ // Fast negative check for symbol-to-symbol equality.
+ Label check_for_strings;
+ if (cc_ == equal) {
+ BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
+ BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
- __ JumpIfNotSmi(rdx, &rdx_is_object);
- __ SmiToInteger32(rdx, rdx);
- __ JumpIfSmi(rax, &rax_is_smi);
+ // We've already checked for object identity, so if both operands
+ // are symbols they aren't equal. Register eax (not rax) already holds a
+ // non-zero value, which indicates not equal, so just return.
+ __ ret(2 * kPointerSize);
+ }
- __ bind(&rax_is_object);
- IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
- __ jmp(&done);
+ __ bind(&check_for_strings);
- __ bind(&rdx_is_object);
- IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
- __ JumpIfNotSmi(rax, &rax_is_object);
- __ bind(&rax_is_smi);
- __ SmiToInteger32(rcx, rax);
+ __ JumpIfNotBothSequentialAsciiStrings(
+ rdx, rax, rcx, rbx, &check_unequal_objects);
- __ bind(&done);
- __ movl(rax, rdx);
-}
+ // Inline comparison of ascii strings.
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ rdx,
+ rax,
+ rcx,
+ rbx,
+ rdi,
+ r8);
+#ifdef DEBUG
+ __ Abort("Unexpected fall-through from string comparison");
+#endif
-const char* GenericBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int len = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(len);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
+ __ bind(&check_unequal_objects);
+ if (cc_ == equal && !strict_) {
+ // Not strict equality. Objects are unequal if
+ // they are both JSObjects and not undetectable,
+ // and their pointers are different.
+ Label not_both_objects, return_unequal;
+ // At most one is a smi, so we can test for smi by adding the two.
+ // A smi plus a heap object has the low bit set, a heap object plus
+ // a heap object has the low bit clear.
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(V8_UINT64_C(1), kSmiTagMask);
+ __ lea(rcx, Operand(rax, rdx, times_1, 0));
+ __ testb(rcx, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_both_objects);
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
+ __ j(below, &not_both_objects);
+ __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, &not_both_objects);
+ __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(zero, &return_unequal);
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(zero, &return_unequal);
+ // The objects are both undetectable, so they both compare as the value
+ // undefined, and are equal.
+ __ Set(rax, EQUAL);
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in eax,
+ // or return equal if we fell through to here.
+ __ ret(2 * kPointerSize); // rax, rdx were pushed
+ __ bind(&not_both_objects);
}
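+
+ // Worked example of the tag trick used in the block above (editor's
+ // sketch, assuming kSmiTag == 0, kHeapObjectTag == 1 and kSmiTagMask == 1):
+ //   smi:          low bit 0
+ //   heap object:  low bit 1
+ //   smi  + heap:  0 + 1 -> low bit set
+ //   heap + heap:  1 + 1 -> carry, low bit clear
+ // The both-smi case was dispatched long before this point, so a clear
+ // low bit in the sum proves both operands are heap objects.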
- OS::SNPrintF(Vector<char>(name_, len),
- "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
- op_name,
- overwrite_name,
- (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
- args_in_registers_ ? "RegArgs" : "StackArgs",
- args_reversed_ ? "_R" : "",
- static_operands_type_.ToString(),
- BinaryOpIC::GetName(runtime_operands_type_));
- return name_;
-}
-
+ // Must swap the argument order on the stack.
+ __ pop(rcx);
+ __ pop(rdx);
+ __ pop(rax);
+ __ push(rdx);
+ __ push(rax);
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ push(right);
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript builtin;
+ if (cc_ == equal) {
+ builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (!(left.is(left_arg) && right.is(right_arg))) {
- if (left.is(right_arg) && right.is(left_arg)) {
- if (IsOperationCommutative()) {
- SetArgsReversed();
- } else {
- __ xchg(left, right);
- }
- } else if (left.is(left_arg)) {
- __ movq(right_arg, right);
- } else if (right.is(right_arg)) {
- __ movq(left_arg, left);
- } else if (left.is(right_arg)) {
- if (IsOperationCommutative()) {
- __ movq(left_arg, right);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying left argument.
- __ movq(left_arg, left);
- __ movq(right_arg, right);
- }
- } else if (right.is(left_arg)) {
- if (IsOperationCommutative()) {
- __ movq(right_arg, left);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying right argument.
- __ movq(right_arg, right);
- __ movq(left_arg, left);
- }
- } else {
- // Order of moves is not important.
- __ movq(left_arg, left);
- __ movq(right_arg, right);
- }
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ builtin = Builtins::COMPARE;
+ __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
}
- // Call the stub.
- __ CallStub(this);
+ // Restore return address on the stack.
+ __ push(rcx);
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(builtin, JUMP_FUNCTION);
}
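+
+
+// Editor's sketch of the branchless number comparison emitted above: ucomisd
+// reports a NaN operand as "unordered" via the parity flag, which the stub
+// tests first; otherwise setcc(above), setcc(below) and a subtraction yield
+// 1, 0 or -1 directly. The unordered result is chosen so that the pending
+// condition (e.g. "less") always evaluates to false. Illustrative only, not
+// used by the stubs.
+static int CompareDoublesSketch(double a, double b, int unordered_result) {
+  // NaN never compares equal to itself, so this detects unordered operands.
+  if (a != a || b != b) return unordered_result;
+  // Branchless 1 / 0 / -1, mirroring setcc(above) - setcc(below).
+  return (a > b) - (a < b);
+}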
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Smi* right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ Push(right);
- } else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (left.is(left_arg)) {
- __ Move(right_arg, right);
- } else if (left.is(right_arg) && IsOperationCommutative()) {
- __ Move(left_arg, right);
- SetArgsReversed();
- } else {
- // For non-commutative operations, left and right_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite left before moving
- // it to left_arg.
- __ movq(left_arg, left);
- __ Move(right_arg, right);
- }
+void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ JumpIfSmi(object, label);
+ __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzxbq(scratch,
+ FieldOperand(scratch, Map::kInstanceTypeOffset));
+ // Ensure that no non-strings have the symbol bit set.
+ ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+ ASSERT(kSymbolTag != 0);
+ __ testb(scratch, Immediate(kIsSymbolMask));
+ __ j(zero, label);
+}
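+
+
+// Editor's sketch of the test above (illustrative; the real constants live
+// in the object headers): symbols are interned strings, so once the pointer
+// identity check has failed, two symbols cannot be equal. The asserts above
+// guarantee the symbol bit is only ever set for string instance types.
+static bool IsSymbolInstanceTypeSketch(unsigned instance_type,
+                                       unsigned is_symbol_mask) {
+  // A set symbol bit marks an interned (canonicalized) string.
+  return (instance_type & is_symbol_mask) != 0;
+}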
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
- // Call the stub.
- __ CallStub(this);
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ // Because builtins always remove the receiver from the stack, we
+ // have to fake one to avoid underflowing the stack. The receiver
+ // must be inserted below the return address on the stack so we
+ // temporarily store that in a register.
+ __ pop(rax);
+ __ Push(Smi::FromInt(0));
+ __ push(rax);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
}
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Smi* left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ Push(left);
- __ push(right);
- } else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (right.is(right_arg)) {
- __ Move(left_arg, left);
- } else if (right.is(left_arg) && IsOperationCommutative()) {
- __ Move(right_arg, left);
- SetArgsReversed();
- } else {
- // For non-commutative operations, right and left_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite right before moving
- // it to right_arg.
- __ movq(right_arg, right);
- __ Move(left_arg, left);
- }
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ Label slow;
+
+ // If the receiver might be a value (string, number or boolean), check for
+ // this and box it if it is.
+ if (ReceiverMightBeValue()) {
+ // Get the receiver from the stack.
+ // +1 ~ return address
+ Label receiver_is_value, receiver_is_js_object;
+ __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
+
+ // Check if receiver is a smi (which is a number value).
+ __ JumpIfSmi(rax, &receiver_is_value);
- // Call the stub.
- __ CallStub(this);
-}
+ // Check if the receiver is a valid JS object.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
+ __ j(above_equal, &receiver_is_js_object);
+ // Call the runtime to box the value.
+ __ bind(&receiver_is_value);
+ __ EnterInternalFrame();
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ LeaveInternalFrame();
+ __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
-Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
- VirtualFrame* frame,
- Result* left,
- Result* right) {
- if (ArgsInRegistersSupported()) {
- SetArgsInRegisters();
- return frame->CallStub(this, left, right);
- } else {
- frame->Push(left);
- frame->Push(right);
- return frame->CallStub(this, 2);
+ __ bind(&receiver_is_js_object);
}
-}
+ // Get the function to call from the stack.
+ // +2 ~ receiver, return address
+ __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
- // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
- // dividend in rax and rdx free for the division. Use rax, rbx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = rdx;
- Register right = rax;
- if (op_ == Token::DIV || op_ == Token::MOD) {
- left = rax;
- right = rbx;
- if (HasArgsInRegisters()) {
- __ movq(rbx, rax);
- __ movq(rax, rdx);
- }
- }
- if (!HasArgsInRegisters()) {
- __ movq(right, Operand(rsp, 1 * kPointerSize));
- __ movq(left, Operand(rsp, 2 * kPointerSize));
- }
+ // Check that the function really is a JavaScript function.
+ __ JumpIfSmi(rdi, &slow);
+ // Go to the slow case if we do not have a function.
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &slow);
- Label not_smis;
- // 2. Smi check both operands.
- if (static_operands_type_.IsSmi()) {
- // Skip smi check if we know that both arguments are smis.
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
- if (op_ == Token::BIT_OR) {
- // Handle OR here, since we do extra smi-checking in the or code below.
- __ SmiOr(right, right, left);
- GenerateReturn(masm);
- return;
- }
- } else {
- if (op_ != Token::BIT_OR) {
- // Skip the check for OR as it is better combined with the
- // actual operation.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- __ JumpIfNotBothSmi(left, right, &not_smis);
- }
- }
+ // Fast-case: Just invoke the function.
+ ParameterCount actual(argc_);
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
- // 3. Operands are both smis (except for OR), perform the operation leaving
- // the result in rax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op_) {
- case Token::ADD: {
- ASSERT(right.is(rax));
- __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
- break;
- }
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
+ __ Set(rax, argc_);
+ __ Set(rbx, 0);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
+ Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
+}
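+
+
+// Editor's sketch of the stack layout CallFunctionStub::Generate indexes
+// above (illustrative; offsets follow the "+1 ~ return address" comments):
+//   rsp[0]                          : return address
+//   rsp[1 * kPointerSize]           : last argument
+//   ...
+//   rsp[argc_ * kPointerSize]       : first argument
+//   rsp[(argc_ + 1) * kPointerSize] : receiver
+//   rsp[(argc_ + 2) * kPointerSize] : function
+static int ReceiverStackOffsetSketch(int argc, int pointer_size) {
+  return (argc + 1) * pointer_size;  // The "+ 1" skips the return address.
+}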
- case Token::SUB: {
- __ SmiSub(left, left, right, &use_fp_on_smis);
- __ movq(rax, left);
- break;
- }
- case Token::MUL:
- ASSERT(right.is(rax));
- __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
- break;
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+ // Check that the stack contains the next handler, frame pointer, state and
+ // return address in that order.
+ ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
+ StackHandlerConstants::kStateOffset);
+ ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
+ StackHandlerConstants::kPCOffset);
- case Token::DIV:
- ASSERT(left.is(rax));
- __ SmiDiv(left, left, right, &use_fp_on_smis);
- break;
+ ExternalReference handler_address(Top::k_handler_address);
+ __ movq(kScratchRegister, handler_address);
+ __ movq(rsp, Operand(kScratchRegister, 0));
+ // Get the next handler in the chain.
+ __ pop(rcx);
+ __ movq(Operand(kScratchRegister, 0), rcx);
+ __ pop(rbp); // pop frame pointer
+ __ pop(rdx); // remove state
- case Token::MOD:
- ASSERT(left.is(rax));
- __ SmiMod(left, left, right, slow);
- break;
+ // Before returning, we restore the context from the frame pointer if not NULL.
+ // The frame pointer is NULL in the exception handler of a JS entry frame.
+ __ xor_(rsi, rsi); // tentatively set context pointer to NULL
+ Label skip;
+ __ cmpq(rbp, Immediate(0));
+ __ j(equal, &skip);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ bind(&skip);
+ __ ret(0);
+}
- case Token::BIT_OR:
- ASSERT(right.is(rax));
- __ movq(rcx, right); // Save the right operand.
- __ SmiOr(right, right, left); // BIT_OR is commutative.
- __ testb(right, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smis);
- break;
- case Token::BIT_AND:
- ASSERT(right.is(rax));
- __ SmiAnd(right, right, left); // BIT_AND is commutative.
- break;
+void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
+ UNREACHABLE();
+}
- case Token::BIT_XOR:
- ASSERT(right.is(rax));
- __ SmiXor(right, right, left); // BIT_XOR is commutative.
- break;
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- switch (op_) {
- case Token::SAR:
- __ SmiShiftArithmeticRight(left, left, right);
- break;
- case Token::SHR:
- __ SmiShiftLogicalRight(left, left, right, slow);
- break;
- case Token::SHL:
- __ SmiShiftLeft(left, left, right);
- break;
- default:
- UNREACHABLE();
- }
- __ movq(rax, left);
- break;
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+ Label* throw_normal_exception,
+ Label* throw_termination_exception,
+ Label* throw_out_of_memory_exception,
+ bool do_gc,
+ bool always_allocate_scope,
+ int /* alignment_skew */) {
+ // rax: result parameter for PerformGC, if any.
+ // rbx: pointer to C function (C callee-saved).
+ // rbp: frame pointer (restored after C call).
+ // rsp: stack pointer (restored after C call).
+ // r14: number of arguments including receiver (C callee-saved).
+ // r12: pointer to the first argument (C callee-saved).
+ // This pointer is reused in LeaveExitFrame(), so it is stored in a
+ // callee-saved register.
- default:
- UNREACHABLE();
- break;
+ // Simple results are returned in rax (both AMD64 and Win64 calling
+ // conventions). Complex results must be written to the address passed as
+ // the first argument. AMD64 calling convention: a struct of two pointers
+ // is returned in rax and rdx.
+
+ // Check stack alignment.
+ if (FLAG_debug_code) {
+ __ CheckStackAlignment();
}
- // 4. Emit return of result in rax.
- GenerateReturn(masm);
+ if (do_gc) {
+ // Pass the failure code returned from the last attempt as the first
+ // argument to PerformGC. No need to use PrepareCallCFunction/CallCFunction
+ // here as the stack is known to be aligned. This function takes one
+ // argument, which is passed in a register.
+#ifdef _WIN64
+ __ movq(rcx, rax);
+#else // _WIN64
+ __ movq(rdi, rax);
+#endif
+ __ movq(kScratchRegister,
+ FUNCTION_ADDR(Runtime::PerformGC),
+ RelocInfo::RUNTIME_ENTRY);
+ __ call(kScratchRegister);
+ }
- // 5. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- ASSERT(use_fp_on_smis.is_linked());
- __ bind(&use_fp_on_smis);
- if (op_ == Token::DIV) {
- __ movq(rdx, rax);
- __ movq(rax, rbx);
- }
- // left is rdx, right is rax.
- __ AllocateHeapNumber(rbx, rcx, slow);
- FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rbx);
- GenerateReturn(masm);
- }
- default:
- break;
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth();
+ if (always_allocate_scope) {
+ __ movq(kScratchRegister, scope_depth);
+ __ incl(Operand(kScratchRegister, 0));
}
- // 6. Non-smi operands, fall out to the non-smi code with the operands in
- // rdx and rax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
+ // Call C function.
+#ifdef _WIN64
+ // The Windows 64-bit ABI passes arguments in rcx, rdx, r8 and r9.
+ // Store the Arguments object on the stack, below the four Win64 ABI
+ // parameter slots.
+ __ movq(Operand(rsp, 4 * kPointerSize), r14); // argc.
+ __ movq(Operand(rsp, 5 * kPointerSize), r12); // argv.
+ if (result_size_ < 2) {
+ // Pass a pointer to the Arguments object as the first argument.
+ // Return result in single register (rax).
+ __ lea(rcx, Operand(rsp, 4 * kPointerSize));
+ } else {
+ ASSERT_EQ(2, result_size_);
+ // Pass a pointer to the result location as the first argument.
+ __ lea(rcx, Operand(rsp, 6 * kPointerSize));
+ // Pass a pointer to the Arguments object as the second argument.
+ __ lea(rdx, Operand(rsp, 4 * kPointerSize));
+ }
- switch (op_) {
- case Token::DIV:
- case Token::MOD:
- // Operands are in rax, rbx at this point.
- __ movq(rdx, rax);
- __ movq(rax, rbx);
- break;
+#else // _WIN64
+ // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
+ __ movq(rdi, r14); // argc.
+ __ movq(rsi, r12); // argv.
+#endif
+ __ call(rbx);
+ // Result is in rax - do not destroy this register!
- case Token::BIT_OR:
- // Right operand is saved in rcx and rax was destroyed by the smi
- // operation.
- __ movq(rax, rcx);
- break;
+ if (always_allocate_scope) {
+ __ movq(kScratchRegister, scope_depth);
+ __ decl(Operand(kScratchRegister, 0));
+ }
- default:
- break;
+ // Check for failure result.
+ Label failure_returned;
+ ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+#ifdef _WIN64
+ // If return value is on the stack, pop it to registers.
+ if (result_size_ > 1) {
+ ASSERT_EQ(2, result_size_);
+ // Read the result values stored on the stack. The result is stored
+ // above the four argument mirror slots and the two
+ // Arguments object slots.
+ __ movq(rax, Operand(rsp, 6 * kPointerSize));
+ __ movq(rdx, Operand(rsp, 7 * kPointerSize));
}
-}
+#endif
+ __ lea(rcx, Operand(rax, 1));
+ // The lower 2 bits of rcx are 0 iff rax has the failure tag.
+ __ testl(rcx, Immediate(kFailureTagMask));
+ __ j(zero, &failure_returned);
+ // Exit the JavaScript-to-C++ exit frame.
+ __ LeaveExitFrame(mode_, result_size_);
+ __ ret(0);
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
- Label call_runtime;
+ // Handling of failure.
+ __ bind(&failure_returned);
- if (ShouldGenerateSmiCode()) {
- GenerateSmiCode(masm, &call_runtime);
- } else if (op_ != Token::MOD) {
- if (!HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
- }
- // Floating point case.
- if (ShouldGenerateFPCode()) {
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-smi argument occurs
- // (and only if smi code is generated). This is the right moment to
- // patch to HEAP_NUMBERS state. The transition is attempted only for
- // the four basic operations. The stub stays in the DEFAULT state
- // forever for all other operations (also if smi code is skipped).
- GenerateTypeTransition(masm);
- break;
- }
+ Label retry;
+ // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
+ ASSERT(Failure::RETRY_AFTER_GC == 0);
+ __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+ __ j(zero, &retry);
- Label not_floats;
- // rax: y
- // rdx: x
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(rdx);
- __ AbortIfNotNumber(rax);
- }
- FloatingPointHelper::LoadSSE2NumberOperands(masm);
- } else {
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
- }
+ // Special handling of out of memory exceptions.
+ __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
+ __ cmpq(rax, kScratchRegister);
+ __ j(equal, throw_out_of_memory_exception);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- // Allocate a heap number, if needed.
- Label skip_allocation;
- OverwriteMode mode = mode_;
- if (HasArgsReversed()) {
- if (mode == OVERWRITE_RIGHT) {
- mode = OVERWRITE_LEFT;
- } else if (mode == OVERWRITE_LEFT) {
- mode = OVERWRITE_RIGHT;
- }
- }
- switch (mode) {
- case OVERWRITE_LEFT:
- __ JumpIfNotSmi(rdx, &skip_allocation);
- __ AllocateHeapNumber(rbx, rcx, &call_runtime);
- __ movq(rdx, rbx);
- __ bind(&skip_allocation);
- __ movq(rax, rdx);
- break;
- case OVERWRITE_RIGHT:
- // If the argument in rax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep rax and rdx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, &call_runtime);
- __ movq(rax, rbx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- __ bind(¬_floats);
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- !HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-number argument
- // occurs (and only if smi code is skipped from the stub, otherwise
- // the patching has already been done earlier in this case branch).
- // A perfect moment to try patching to STRINGS for ADD operation.
- if (op_ == Token::ADD) {
- GenerateTypeTransition(masm);
- }
- }
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label skip_allocation, non_smi_shr_result;
- Register heap_number_map = r9;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(rdx);
- __ AbortIfNotNumber(rax);
- }
- FloatingPointHelper::LoadNumbersAsIntegers(masm);
- } else {
- FloatingPointHelper::LoadAsIntegers(masm,
- &call_runtime,
- heap_number_map);
- }
- switch (op_) {
- case Token::BIT_OR: __ orl(rax, rcx); break;
- case Token::BIT_AND: __ andl(rax, rcx); break;
- case Token::BIT_XOR: __ xorl(rax, rcx); break;
- case Token::SAR: __ sarl_cl(rax); break;
- case Token::SHL: __ shll_cl(rax); break;
- case Token::SHR: {
- __ shrl_cl(rax);
- // Check if result is negative. This can only happen for a shift
- // by zero.
- __ testl(rax, rax);
- __ j(negative, &non_smi_shr_result);
- break;
- }
- default: UNREACHABLE();
- }
+ // Retrieve the pending exception and clear the variable.
+ ExternalReference pending_exception_address(Top::k_pending_exception_address);
+ __ movq(kScratchRegister, pending_exception_address);
+ __ movq(rax, Operand(kScratchRegister, 0));
+ __ movq(rdx, ExternalReference::the_hole_value_location());
+ __ movq(rdx, Operand(rdx, 0));
+ __ movq(Operand(kScratchRegister, 0), rdx);
+
+ // Special handling of termination exceptions, which are uncatchable
+ // by JavaScript code.
+ __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
+ __ j(equal, throw_termination_exception);
- STATIC_ASSERT(kSmiValueSize == 32);
- // Tag smi result and return.
- __ Integer32ToSmi(rax, rax);
- GenerateReturn(masm);
+ // Handle normal exception.
+ __ jmp(throw_normal_exception);
- // All bit-ops except SHR return a signed int32 that can be
- // returned immediately as a smi.
- // We might need to allocate a HeapNumber if we shift a negative
- // number right by zero (i.e., convert to UInt32).
- if (op_ == Token::SHR) {
- ASSERT(non_smi_shr_result.is_linked());
- __ bind(&non_smi_shr_result);
- // Allocate a heap number if needed.
- __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate heap number in new space.
- // Not using AllocateHeapNumber macro in order to reuse
- // already loaded heap_number_map.
- __ AllocateInNewSpace(HeapNumber::kSize,
- rax,
- rcx,
- no_reg,
- &call_runtime,
- TAG_OBJECT);
- // Set the map.
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
- __ movq(FieldOperand(rax, HeapObject::kMapOffset),
- heap_number_map);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- __ cvtqsi2sd(xmm0, rbx);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- }
+ // Retry.
+ __ bind(&retry);
+}
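+
+
+// Editor's sketch of the failure check emitted above (illustrative). The
+// assert ((kFailureTag + 1) & kFailureTagMask) == 0 is what makes
+// "lea rcx, [rax + 1]; testl rcx, kFailureTagMask" work: adding one clears
+// all the tag bits exactly when they were all set, i.e. when rax carries
+// the failure tag.
+static bool HasFailureTagSketch(uintptr_t value, uintptr_t failure_tag_mask) {
+  // Low tag bits all set => failure object; adding one clears them.
+  return ((value + 1) & failure_tag_mask) == 0;
+}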
- break;
- }
- default: UNREACHABLE(); break;
- }
- }
- // If all else fails, use the runtime system to get the correct
- // result. If arguments was passed in registers now place them on the
- // stack in the correct order below the return address.
- __ bind(&call_runtime);
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+ UncatchableExceptionType type) {
+ // Fetch top stack handler.
+ ExternalReference handler_address(Top::k_handler_address);
+ __ movq(kScratchRegister, handler_address);
+ __ movq(rsp, Operand(kScratchRegister, 0));
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
+ // Unwind the handlers until the ENTRY handler is found.
+ Label loop, done;
+ __ bind(&loop);
+ // Load the type of the current stack handler.
+ const int kStateOffset = StackHandlerConstants::kStateOffset;
+ __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
+ __ j(equal, &done);
+ // Fetch the next handler in the list.
+ const int kNextOffset = StackHandlerConstants::kNextOffset;
+ __ movq(rsp, Operand(rsp, kNextOffset));
+ __ jmp(&loop);
+ __ bind(&done);
+
+ // Set the top handler address to the next handler past the current ENTRY
+ // handler.
+ __ movq(kScratchRegister, handler_address);
+ __ pop(Operand(kScratchRegister, 0));
+
+ if (type == OUT_OF_MEMORY) {
+ // Set external caught exception to false.
+ ExternalReference external_caught(Top::k_external_caught_exception_address);
+ __ movq(rax, Immediate(false));
+ __ store_rax(external_caught);
+
+ // Set pending exception and rax to out of memory exception.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
+ __ store_rax(pending_exception);
}
- switch (op_) {
- case Token::ADD: {
- // Registers containing left and right operands respectively.
- Register lhs, rhs;
+ // Clear the context pointer.
+ __ xor_(rsi, rsi);
- if (HasArgsReversed()) {
- lhs = rax;
- rhs = rdx;
- } else {
- lhs = rdx;
- rhs = rax;
- }
+ // Restore registers from handler.
+ ASSERT_EQ(StackHandlerConstants::kNextOffset + kPointerSize,
+ StackHandlerConstants::kFPOffset);
+ __ pop(rbp); // FP
+ ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
+ StackHandlerConstants::kStateOffset);
+ __ pop(rdx); // State
- // Test for string arguments before calling runtime.
- Label not_strings, both_strings, not_string1, string1, string1_smi2;
+ ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
+ StackHandlerConstants::kPCOffset);
+ __ ret(0);
+}
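+
+
+// Editor's sketch of the stack handler layout implied by the asserts above
+// (an illustrative mirror of StackHandlerConstants; the pop sequence relies
+// on exactly this field order):
+struct StackHandlerLayoutSketch {
+  void* next;      // kNextOffset: link to the next (older) handler.
+  void* fp;        // kFPOffset == kNextOffset + kPointerSize.
+  intptr_t state;  // kStateOffset == kFPOffset + kPointerSize.
+  void* pc;        // kPCOffset == kStateOffset + kPointerSize.
+};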
- // If this stub has already generated FP-specific code then the arguments
- // are already in rdx and rax.
- if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
- Condition is_smi;
- is_smi = masm->CheckSmi(lhs);
- __ j(is_smi, &not_string1);
- __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
- __ j(above_equal, &not_string1);
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // rax: number of arguments including receiver
+ // rbx: pointer to C function (C callee-saved)
+ // rbp: frame pointer of calling JS frame (restored after C call)
+ // rsp: stack pointer (restored after C call)
+ // rsi: current context (restored)
- // First argument is a a string, test second.
- is_smi = masm->CheckSmi(rhs);
- __ j(is_smi, &string1_smi2);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
- __ j(above_equal, &string1);
+ // NOTE: Invocations of builtins may return failure objects
+ // instead of a proper result. The builtin entry handles
+ // this by performing a garbage collection and retrying the
+ // builtin once.
- // First and second argument are strings.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- __ TailCallStub(&string_add_stub);
+ // Enter the exit frame that transitions from JavaScript to C++.
+ __ EnterExitFrame(mode_, result_size_);
+
+ // rax: Holds the context at this point, but should not be used.
+ // On entry to code generated by GenerateCore, it must hold
+ // a failure result if the do_gc argument to GenerateCore
+ // is true. This failure result can be the result of code
+ // generated by a previous call to GenerateCore. The value
+ // of rax is then passed to Runtime::PerformGC.
+ // rbx: pointer to builtin function (C callee-saved).
+ // rbp: frame pointer of exit frame (restored after C call).
+ // rsp: stack pointer (restored after C call).
+ // r14: number of arguments including receiver (C callee-saved).
+ // r12: argv pointer (C callee-saved).
+
+ Label throw_normal_exception;
+ Label throw_termination_exception;
+ Label throw_out_of_memory_exception;
+
+ // Call into the runtime system.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ false,
+ false);
+
+ // Do space-specific GC and retry runtime call.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ false);
+
+ // Do full GC and retry runtime call one final time.
+ Failure* failure = Failure::InternalError();
+ __ movq(rax, failure, RelocInfo::NONE);
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ true);
+
+ __ bind(&throw_out_of_memory_exception);
+ GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+ __ bind(&throw_termination_exception);
+ GenerateThrowUncatchable(masm, TERMINATION);
+
+ __ bind(&throw_normal_exception);
+ GenerateThrowTOS(masm);
+}
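+
+
+// Editor's recap of the retry ladder generated above (illustrative
+// pseudocode; the stub lays out three copies of GenerateCore's body):
+//   result = runtime_call();              // 1st try: do_gc = false
+//   if (failed(result))
+//     result = runtime_call();            // 2nd try: do_gc = true
+//   if (failed(result)) {
+//     rax = Failure::InternalError();
+//     result = runtime_call();            // final: do_gc = true,
+//   }                                     //        always_allocate = true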
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ Label invoke, exit;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ Label not_outermost_js, not_outermost_js_2;
+#endif
+
+ // Setup frame.
+ __ push(rbp);
+ __ movq(rbp, rsp);
+
+ // Push the stack frame type marker twice.
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ // The scratch register is neither callee-saved nor an argument register on
+ // any platform, so it is free to use at this point.
+ // Cannot use smi-register for loading yet.
+ __ movq(kScratchRegister,
+ reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
+ RelocInfo::NONE);
+ __ push(kScratchRegister); // context slot
+ __ push(kScratchRegister); // function slot
+ // Save callee-saved registers (X64/Win64 calling conventions).
+ __ push(r12);
+ __ push(r13);
+ __ push(r14);
+ __ push(r15);
+#ifdef _WIN64
+ __ push(rdi); // Only callee-saved in Win64 ABI; an argument in AMD64 ABI.
+ __ push(rsi); // Only callee-saved in Win64 ABI; an argument in AMD64 ABI.
+#endif
+ __ push(rbx);
+ // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
+ // callee-saved as well.
+
+ // Save copies of the top frame descriptor on the stack.
+ ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+ __ load_rax(c_entry_fp);
+ __ push(rax);
+
+ // Set up the roots and smi constant registers.
+ // Needs to be done before any further smi loads.
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ movq(kRootRegister, roots_address);
+ __ InitializeSmiConstantRegister();
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If this is the outermost JS call, set js_entry_sp value.
+ ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+ __ load_rax(js_entry_sp);
+ __ testq(rax, rax);
+ __ j(not_zero, &not_outermost_js);
+ __ movq(rax, rbp);
+ __ store_rax(js_entry_sp);
+ __ bind(&not_outermost_js);
+#endif
- __ bind(&string1_smi2);
- // First argument is a string, second is a smi. Try to lookup the number
- // string for the smi in the number string cache.
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm, rhs, rbx, rcx, r8, true, &string1);
+ // Call a faked try-block that does the invoke.
+ __ call(&invoke);
- // Replace second argument on stack and tailcall string add stub to make
- // the result.
- __ movq(Operand(rsp, 1 * kPointerSize), rbx);
- __ TailCallStub(&string_add_stub);
+ // Caught exception: Store result (exception) in the pending
+ // exception field in the JSEnv and return a failure sentinel.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ store_rax(pending_exception);
+ __ movq(rax, Failure::Exception(), RelocInfo::NONE);
+ __ jmp(&exit);
- // Only first argument is a string.
- __ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
- // First argument was not a string, test second.
- __ bind(&not_string1);
- is_smi = masm->CheckSmi(rhs);
- __ j(is_smi, &not_strings);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
- __ j(above_equal, &not_strings);
+ // Clear any pending exceptions.
+ __ load_rax(ExternalReference::the_hole_value_location());
+ __ store_rax(pending_exception);
- // Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+ // Fake a receiver (NULL).
+ __ push(Immediate(0)); // receiver
- __ bind(&not_strings);
- // Neither argument is a string.
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- }
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
+ // Invoke the function by calling through the JS entry trampoline
+ // builtin, and pop the faked function when we return. We load the address
+ // from an external reference instead of inlining the call target address
+ // directly in the code, because the builtin stubs may not have been
+ // generated yet at the time this code is generated.
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+ __ load_rax(construct_entry);
+ } else {
+ ExternalReference entry(Builtins::JSEntryTrampoline);
+ __ load_rax(entry);
}
-}
+ __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
+ __ call(kScratchRegister);
+ // Unlink this frame from the handler chain.
+ __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
+ __ pop(Operand(kScratchRegister, 0));
+ // Pop next_sp.
+ __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
-void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
- ASSERT(!HasArgsInRegisters());
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-}
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If the current RBP value is the same as the js_entry_sp value, the
+ // current function is the outermost JS function.
+ __ movq(kScratchRegister, js_entry_sp);
+ __ cmpq(rbp, Operand(kScratchRegister, 0));
+ __ j(not_equal, &not_outermost_js_2);
+ __ movq(Operand(kScratchRegister, 0), Immediate(0));
+ __ bind(&not_outermost_js_2);
+#endif
+ // Restore the top frame descriptor from the stack.
+ __ bind(&exit);
+ __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
+ __ pop(Operand(kScratchRegister, 0));
-void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
- // If arguments are not passed in registers remove them from the stack before
- // returning.
- if (!HasArgsInRegisters()) {
- __ ret(2 * kPointerSize); // Remove both operands
- } else {
- __ ret(0);
- }
+ // Restore callee-saved registers (X64 conventions).
+ __ pop(rbx);
+#ifdef _WIN64
+ // Callee-saved in the Win64 ABI; argument/volatile registers in the
+ // AMD64 ABI.
+ __ pop(rsi);
+ __ pop(rdi);
+#endif
+ __ pop(r15);
+ __ pop(r14);
+ __ pop(r13);
+ __ pop(r12);
+ __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
+
+ // Restore frame pointer and return.
+ __ pop(rbp);
+ __ ret(0);
}
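+
+
+// Editor's sketch of the js_entry_sp bookkeeping above (illustrative; the
+// real slot is the js_entry_sp external reference, and the "frame pointer"
+// is the rbp of the entry frame):
+static void* js_entry_sp_sketch = NULL;
+static void EnterJSSketch(void* frame_pointer) {
+  // Only the outermost entry records its frame pointer.
+  if (js_entry_sp_sketch == NULL) js_entry_sp_sketch = frame_pointer;
+}
+static void LeaveJSSketch(void* frame_pointer) {
+  // Only the frame that set the value clears it on the way out.
+  if (js_entry_sp_sketch == frame_pointer) js_entry_sp_sketch = NULL;
+}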
-void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- ASSERT(HasArgsInRegisters());
- __ pop(rcx);
- if (HasArgsReversed()) {
- __ push(rax);
- __ push(rdx);
- } else {
- __ push(rdx);
- __ push(rax);
- }
- __ push(rcx);
-}
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Implements "value instanceof function" operator.
+ // Expected input state:
+ // rsp[0] : return address
+ // rsp[1] : function pointer
+ // rsp[2] : value
+ // Returns a bitwise zero to indicate that the value
+ // is an instance of the function, and anything else to
+ // indicate that the value is not an instance.
+ // Get the object - go slow case if it's a smi.
+ Label slow;
+ __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ __ JumpIfSmi(rax, &slow);
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
+ // Check that the left-hand side is a JS object. Leave its map in rax.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
+ __ j(below, &slow);
+ __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
+ __ j(above, &slow);
- // Ensure the operands are on the stack.
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
+ // Get the prototype of the function.
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+ // rdx is function, rax is map.
- // Left and right arguments are already on stack.
- __ pop(rcx); // Save the return address.
+ // Look up the function and the map in the instanceof cache.
+ Label miss;
+ __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
+ __ j(not_equal, &miss);
+ __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+ __ j(not_equal, &miss);
+ __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(2 * kPointerSize);
- // Push this stub's key.
- __ Push(Smi::FromInt(MinorKey()));
+ __ bind(&miss);
+ __ TryGetFunctionPrototype(rdx, rbx, &slow);
- // Although the operation and the type info are encoded into the key,
- // the encoding is opaque, so push them too.
- __ Push(Smi::FromInt(op_));
+ // Check that the function prototype is a JS object.
+ __ JumpIfSmi(rbx, &slow);
+ __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ __ j(below, &slow);
+ __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+ __ j(above, &slow);
- __ Push(Smi::FromInt(runtime_operands_type_));
+ // Register mapping:
+ // rax is object map.
+ // rdx is function.
+ // rbx is function prototype.
+ __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
- __ push(rcx); // The return address.
+ __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
- // Perform patching to an appropriate fast case and return the result.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
- 5,
- 1);
-}
+ // Loop through the prototype chain looking for the function prototype.
+ Label loop, is_instance, is_not_instance;
+ __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
+ __ bind(&loop);
+ __ cmpq(rcx, rbx);
+ __ j(equal, &is_instance);
+ __ cmpq(rcx, kScratchRegister);
+ // The code at is_not_instance assumes that kScratchRegister contains a
+ // non-zero GCable value (the null object in this case).
+ __ j(equal, &is_not_instance);
+ __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
+ __ jmp(&loop);
+
+ __ bind(&is_instance);
+ __ xorl(rax, rax);
+ // Store bitwise zero in the cache. This is a Smi in GC terms.
+ ASSERT_EQ(0, kSmiTag);
+ __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(2 * kPointerSize);
+ __ bind(&is_not_instance);
+ // We have to store a non-zero value in the cache.
+ __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(2 * kPointerSize);
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- GenericBinaryOpStub stub(key, type_info);
- return stub.GetCode();
+ // Slow-case: Go through the JavaScript implementation.
+ __ bind(&slow);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
}
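+
+
+// Editor's sketch of the prototype walk emitted above (illustrative,
+// self-contained node type standing in for maps and prototypes): starting
+// from the object's prototype, follow the chain until we reach the
+// function's prototype (an instance) or null (not an instance).
+struct ProtoNodeSketch { ProtoNodeSketch* prototype; };
+static bool IsInstanceSketch(ProtoNodeSketch* object_prototype,
+                             ProtoNodeSketch* function_prototype) {
+  for (ProtoNodeSketch* p = object_prototype; p != NULL; p = p->prototype) {
+    if (p == function_prototype) return true;
+  }
+  return false;
+}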
#undef __
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ masm->RecordWriteHelper(object_, addr_, scratch_);
+ masm->ret(0);
+}
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64