1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are met:
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "bootstrapper.h"
31 #include "codegen-inl.h"
36 #include "regexp-macro-assembler.h"
37 #include "register-allocator-inl.h"
39 #include "virtual-frame-inl.h"
44 #define __ ACCESS_MASM(masm_)
46 // -------------------------------------------------------------------------
47 // Platform-specific DeferredCode functions.
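// A note on the registers_ table consulted below: each entry describes what
// SaveRegisters/RestoreRegisters should do with the corresponding register
// around the deferred call. kIgnore leaves it alone, kPush means it is saved
// on (and later popped from) the stack, and any other value is treated as a
// byte offset from rbp naming the frame slot that holds it, with kSyncedFlag
// set when that slot is already up to date and no store is needed. This is a
// summary of how the two loops below read the table; the constants themselves
// are defined with the shared DeferredCode class, not in this file.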
49 void DeferredCode::SaveRegisters() {
50 for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
51 int action = registers_[i];
52 if (action == kPush) {
53 __ push(RegisterAllocator::ToRegister(i));
54 } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
55 __ movq(Operand(rbp, action), RegisterAllocator::ToRegister(i));
61 void DeferredCode::RestoreRegisters() {
62 // Restore registers in reverse order so that values pushed by SaveRegisters are popped off the stack in matching (LIFO) order.
63 for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
64 int action = registers_[i];
65 if (action == kPush) {
66 __ pop(RegisterAllocator::ToRegister(i));
67 } else if (action != kIgnore) {
68 action &= ~kSyncedFlag;
69 __ movq(RegisterAllocator::ToRegister(i), Operand(rbp, action));
75 // -------------------------------------------------------------------------
76 // CodeGenState implementation.
78 CodeGenState::CodeGenState(CodeGenerator* owner)
82 owner_->set_state(this);
86 CodeGenState::CodeGenState(CodeGenerator* owner,
87 ControlDestination* destination)
89 destination_(destination),
90 previous_(owner->state()) {
91 owner_->set_state(this);
95 CodeGenState::~CodeGenState() {
96 ASSERT(owner_->state() == this);
97 owner_->set_state(previous_);
101 // -------------------------------------------------------------------------
102 // Deferred code objects
104 // These subclasses of DeferredCode add pieces of code to the end of generated
105 // code. They are branched to from the generated code, and
106 // keep some slower code out of the main body of the generated code.
107 // Many of them call a code stub or a runtime function.
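// As a rough sketch of how the fast path hands off to one of these objects
// (illustrative only; the register and helper names here are placeholders,
// not code copied from this file):
//
//   DeferredInlineSmiAdd* deferred =
//       new DeferredInlineSmiAdd(dst, smi_constant, overwrite_mode);
//   // ... emit the speculative inline smi addition into dst ...
//   deferred->Branch(overflow);   // slow path: jump to the deferred code
//   deferred->BindExit();         // join point shared by both paths
//
// Branch and BindExit are used in exactly this way by CheckStack further
// down in this file.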
109 class DeferredInlineSmiAdd: public DeferredCode {
111 DeferredInlineSmiAdd(Register dst,
113 OverwriteMode overwrite_mode)
114 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
115 set_comment("[ DeferredInlineSmiAdd");
118 virtual void Generate();
123 OverwriteMode overwrite_mode_;
127 // The result of value + src is in dst. It either overflowed or was not
128 // smi tagged. Undo the speculative addition and call the appropriate
129 // specialized stub for add. The result is left in dst.
130 class DeferredInlineSmiAddReversed: public DeferredCode {
132 DeferredInlineSmiAddReversed(Register dst,
134 OverwriteMode overwrite_mode)
135 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
136 set_comment("[ DeferredInlineSmiAddReversed");
139 virtual void Generate();
144 OverwriteMode overwrite_mode_;
148 class DeferredInlineSmiSub: public DeferredCode {
150 DeferredInlineSmiSub(Register dst,
152 OverwriteMode overwrite_mode)
153 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
154 set_comment("[ DeferredInlineSmiSub");
157 virtual void Generate();
162 OverwriteMode overwrite_mode_;
166 // Call the appropriate binary operation stub to compute src op value
167 // and leave the result in dst.
168 class DeferredInlineSmiOperation: public DeferredCode {
170 DeferredInlineSmiOperation(Token::Value op,
174 OverwriteMode overwrite_mode)
179 overwrite_mode_(overwrite_mode) {
180 set_comment("[ DeferredInlineSmiOperation");
183 virtual void Generate();
190 OverwriteMode overwrite_mode_;
194 class FloatingPointHelper : public AllStatic {
196 // Code pattern for loading a floating point value. Input value must
197 // be either a smi or a heap number object (fp value). Requirements:
198 // operand on TOS+1. Returns operand as floating point number on FPU stack.
200 static void LoadFloatOperand(MacroAssembler* masm, Register scratch);
202 // Code pattern for loading a floating point value. Input value must
203 // be either a smi or a heap number object (fp value). Requirements:
204 // operand in src register. Returns operand as floating point number
206 static void LoadFloatOperand(MacroAssembler* masm,
210 // Code pattern for loading floating point values. Input values must
211 // be either smi or heap number objects (fp values). Requirements:
212 // operand_1 in rdx, operand_2 in rax; Returns operands as
213 // floating point numbers in XMM registers.
214 static void LoadFloatOperands(MacroAssembler* masm,
218 // Similar to LoadFloatOperands, assumes that the operands are smis.
219 static void LoadFloatOperandsFromSmis(MacroAssembler* masm,
223 // Code pattern for loading floating point values onto the fp stack.
224 // Input values must be either smi or heap number objects (fp values).
226 // Register version: operands in registers lhs and rhs.
227 // Stack version: operands on TOS+1 and TOS+2.
228 // Returns operands as floating point numbers on fp stack.
229 static void LoadFloatOperands(MacroAssembler* masm,
233 // Test if operands are smi or number objects (fp). Requirements:
234 // operand_1 in rax, operand_2 in rdx; falls through on float or smi
235 // operands, jumps to the non_float label otherwise.
236 static void CheckNumberOperands(MacroAssembler* masm,
239 // Takes the operands in rdx and rax and loads them as integers in rax and rcx.
241 static void LoadAsIntegers(MacroAssembler* masm,
243 Label* operand_conversion_failure);
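// In each of the loaders above the incoming value is either a smi, which has
// to be untagged and converted to a double, or a heap number, whose double
// payload is read out of the object. LoadAsIntegers is the exception: it is
// intended for operations that want 32-bit integer inputs (such as the
// bitwise and shift cases), and it bails out to operand_conversion_failure
// when an operand cannot be converted.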
247 // -----------------------------------------------------------------------------
248 // CodeGenerator implementation.
250 CodeGenerator::CodeGenerator(MacroAssembler* masm)
258 function_return_is_shadowed_(false),
259 in_spilled_code_(false) {
263 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
264 // Call the runtime to declare the globals. The inevitable call
265 // will sync frame elements to memory anyway, so we do it eagerly to
266 // allow us to push the arguments directly into place.
267 frame_->SyncRange(0, frame_->element_count() - 1);
269 __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
270 frame_->EmitPush(rsi); // The context is the first argument.
271 frame_->EmitPush(kScratchRegister);
272 frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
273 Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
274 // Return value is ignored.
278 void CodeGenerator::Generate(CompilationInfo* info) {
279 // Record the position for debugging purposes.
280 CodeForFunctionPosition(info->function());
281 Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
285 ASSERT(allocator_ == NULL);
286 RegisterAllocator register_allocator(this);
287 allocator_ = &register_allocator;
288 ASSERT(frame_ == NULL);
289 frame_ = new VirtualFrame();
290 set_in_spilled_code(false);
292 // Adjust for function-level loop nesting.
293 loop_nesting_ += info->loop_nesting();
295 JumpTarget::set_compiling_deferred_code(false);
298 if (strlen(FLAG_stop_at) > 0 &&
299 info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
305 // New scope to get automatic timing calculation.
307 HistogramTimerScope codegen_timer(&Counters::code_generation);
308 CodeGenState state(this);
311 // Stack: receiver, arguments, return address.
312 // rbp: caller's frame pointer
313 // rsp: stack pointer
314 // rdi: called JS function
315 // rsi: callee's context
316 allocator_->Initialize();
318 if (info->mode() == CompilationInfo::PRIMARY) {
321 // Allocate space for locals and initialize them.
322 frame_->AllocateStackSlots();
324 // Allocate the local context if needed.
325 int heap_slots = scope()->num_heap_slots();
326 if (heap_slots > 0) {
327 Comment cmnt(masm_, "[ allocate local context");
328 // Allocate local context.
329 // Get outer context and create a new context based on it.
330 frame_->PushFunction();
332 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
333 FastNewContextStub stub(heap_slots);
334 context = frame_->CallStub(&stub, 1);
336 context = frame_->CallRuntime(Runtime::kNewContext, 1);
339 // Update context local.
340 frame_->SaveContextRegister();
342 // Verify that the runtime call result and rsi agree.
343 if (FLAG_debug_code) {
344 __ cmpq(context.reg(), rsi);
345 __ Assert(equal, "Runtime::NewContext should end up in rsi");
349 // TODO(1241774): Improve this code:
350 // 1) only needed if we have a context
351 // 2) no need to recompute context ptr every single time
352 // 3) don't copy parameter operand code from SlotOperand!
354 Comment cmnt2(masm_, "[ copy context parameters into .context");
355 // Note that iteration order is relevant here! If we have the same
356 // parameter twice (e.g., function (x, y, x)), and that parameter
357 // needs to be copied into the context, it is the value passed for the
358 // last occurrence of that parameter that must end up there. This is a rare
359 // case so we don't check for it, instead we rely on the copying
360 // order: such a parameter is copied repeatedly into the same
361 // context location and thus the last value is what is seen inside
363 for (int i = 0; i < scope()->num_parameters(); i++) {
364 Variable* par = scope()->parameter(i);
365 Slot* slot = par->slot();
366 if (slot != NULL && slot->type() == Slot::CONTEXT) {
367 // The use of SlotOperand below is safe in unspilled code
368 // because the slot is guaranteed to be a context slot.
370 // There are no parameters in the global scope.
371 ASSERT(!scope()->is_global_scope());
372 frame_->PushParameterAt(i);
373 Result value = frame_->Pop();
376 // SlotOperand loads context.reg() with the context object
377 // stored to, used below in RecordWrite.
378 Result context = allocator_->Allocate();
379 ASSERT(context.is_valid());
380 __ movq(SlotOperand(slot, context.reg()), value.reg());
381 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
382 Result scratch = allocator_->Allocate();
383 ASSERT(scratch.is_valid());
384 frame_->Spill(context.reg());
385 frame_->Spill(value.reg());
386 __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
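// RecordWrite above is the write barrier: the movq stored a pointer (the
// parameter value) into a heap object (the context), so the GC has to be
// told about the slot at context + offset. The two Spill calls just before
// it exist because RecordWrite may clobber the registers handed to it.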
391 // Store the arguments object. This must happen after context
392 // initialization because the arguments object may be stored in the context.
394 if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
395 StoreArgumentsObject(true);
398 // Initialize ThisFunction reference if present.
399 if (scope()->is_function_scope() && scope()->function() != NULL) {
400 frame_->Push(Factory::the_hole_value());
401 StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
404 // When used as the secondary compiler for splitting, rbp, rsi,
405 // and rdi have been pushed on the stack. Adjust the virtual
406 // frame to match this state.
408 allocator_->Unuse(rdi);
410 // Bind all the bailout labels to the beginning of the function.
411 List<CompilationInfo::Bailout*>* bailouts = info->bailouts();
412 for (int i = 0; i < bailouts->length(); i++) {
413 __ bind(bailouts->at(i)->label());
417 // Initialize the function return target after the locals are set
418 // up, because it needs the expected frame height from the frame.
419 function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
420 function_return_is_shadowed_ = false;
422 // Generate code to 'execute' declarations and initialize functions
423 // (source elements). In case of an illegal redeclaration we need to
424 // handle that instead of processing the declarations.
425 if (scope()->HasIllegalRedeclaration()) {
426 Comment cmnt(masm_, "[ illegal redeclarations");
427 scope()->VisitIllegalRedeclaration(this);
429 Comment cmnt(masm_, "[ declarations");
430 ProcessDeclarations(scope()->declarations());
431 // Bail out if a stack-overflow exception occurred when processing declarations.
433 if (HasStackOverflow()) return;
437 frame_->CallRuntime(Runtime::kTraceEnter, 0);
438 // Ignore the return value.
442 // Compile the body of the function in a vanilla state. Don't
443 // bother compiling all the code if the scope has an illegal redeclaration.
445 if (!scope()->HasIllegalRedeclaration()) {
446 Comment cmnt(masm_, "[ function body");
448 bool is_builtin = Bootstrapper::IsActive();
450 is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
452 frame_->CallRuntime(Runtime::kDebugTrace, 0);
453 // Ignore the return value.
456 VisitStatements(info->function()->body());
458 // Handle the return from the function.
459 if (has_valid_frame()) {
460 // If there is a valid frame, control flow can fall off the end of
461 // the body. In that case there is an implicit return statement.
462 ASSERT(!function_return_is_shadowed_);
463 CodeForReturnPosition(info->function());
464 frame_->PrepareForReturn();
465 Result undefined(Factory::undefined_value());
466 if (function_return_.is_bound()) {
467 function_return_.Jump(&undefined);
469 function_return_.Bind(&undefined);
470 GenerateReturnSequence(&undefined);
472 } else if (function_return_.is_linked()) {
473 // If the return target has dangling jumps to it, then we have not
474 // yet generated the return sequence. This can happen when (a)
475 // control does not flow off the end of the body so we did not
476 // compile an artificial return statement just above, and (b) there
477 // are return statements in the body but (c) they are all shadowed.
479 function_return_.Bind(&return_value);
480 GenerateReturnSequence(&return_value);
485 // Adjust for function-level loop nesting.
486 loop_nesting_ -= info->loop_nesting();
488 // Code generation state must be reset.
489 ASSERT(state_ == NULL);
490 ASSERT(loop_nesting() == 0);
491 ASSERT(!function_return_is_shadowed_);
492 function_return_.Unuse();
495 // Process any deferred code using the register allocator.
496 if (!HasStackOverflow()) {
497 HistogramTimerScope deferred_timer(&Counters::deferred_code_generation);
498 JumpTarget::set_compiling_deferred_code(true);
500 JumpTarget::set_compiling_deferred_code(false);
503 // There is no need to delete the register allocator, it is a
504 // stack-allocated local.
508 void CodeGenerator::GenerateReturnSequence(Result* return_value) {
509 // The return value is a live (but not currently reference counted)
510 // reference to rax. This is safe because the current frame does not
511 // contain a reference to rax (it is prepared for the return by spilling all registers).
514 frame_->Push(return_value);
515 *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
517 return_value->ToRegister(rax);
519 // Add a label for checking the size of the code used for returning.
521 Label check_exit_codesize;
522 masm_->bind(&check_exit_codesize);
525 // Leave the frame and return popping the arguments and the receiver.
528 masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
529 #ifdef ENABLE_DEBUGGER_SUPPORT
530 // Add padding that will be overwritten by a debugger breakpoint.
531 // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
532 // with length 7 (3 + 1 + 3).
533 const int kPadding = Assembler::kJSReturnSequenceLength - 7;
534 for (int i = 0; i < kPadding; ++i) {
537 // Check that the size of the code used for returning matches what is
538 // expected by the debugger.
539 ASSERT_EQ(Assembler::kJSReturnSequenceLength,
540 masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
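// Size accounting for the return sequence above: the frame exit is
// "movq rsp, rbp" (3 bytes) + "pop rbp" (1 byte) + "ret k" (3 bytes), i.e.
// 7 bytes, and the loop adds kPadding one-byte filler instructions (loop body
// elided here), for a total of kJSReturnSequenceLength bytes. That is the
// region the debugger overwrites with its own call sequence, which is what
// the ASSERT_EQ checks.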
547 bool CodeGenerator::HasValidEntryRegisters() {
548 return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
549 && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
550 && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
551 && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
552 && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
553 && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
554 && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
555 && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
556 && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
557 && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0))
558 && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
563 class DeferredReferenceGetKeyedValue: public DeferredCode {
565 explicit DeferredReferenceGetKeyedValue(Register dst,
569 : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
570 set_comment("[ DeferredReferenceGetKeyedValue");
573 virtual void Generate();
575 Label* patch_site() { return &patch_site_; }
586 void DeferredReferenceGetKeyedValue::Generate() {
587 __ push(receiver_); // First IC argument.
588 __ push(key_); // Second IC argument.
590 // Calculate the delta from the IC call instruction to the map check
591 // movq instruction in the inlined version. This delta is stored in
592 // a test(rax, delta) instruction after the call so that we can find
593 // it in the IC initialization code and patch the movq instruction.
594 // This means that we cannot allow test instructions after calls to
595 // KeyedLoadIC stubs in other places.
596 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
597 RelocInfo::Mode mode = is_global_
598 ? RelocInfo::CODE_TARGET_CONTEXT
599 : RelocInfo::CODE_TARGET;
601 // The delta from the start of the map-compare instruction to the
602 // test instruction. We use masm_-> directly here instead of the __
603 // macro because the macro sometimes uses macro expansion to turn
604 // into something that can't return a value. This is encountered
605 // when doing generated code coverage tests.
606 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
607 // Here we use masm_-> instead of the __ macro because this is the
608 // instruction that gets patched and coverage code gets in the way.
609 // TODO(X64): Consider whether it's worth switching the test to a
610 // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
611 // be generated normally.
612 masm_->testl(rax, Immediate(-delta_to_patch_site));
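// For orientation: the inline fast path emitted elsewhere in this file
// contains a "movq <scratch>, <expected map>" at patch_site_ followed by a
// map comparison that branches here on a mismatch. The deferred code above
// then calls the IC, and the dead testl just emitted encodes, in its
// immediate, the distance back to that movq so the IC initialization code
// can locate it and patch in the real map. (A sketch of the layout, not a
// literal listing of the inline sequence.)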
613 __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
615 if (!dst_.is(rax)) __ movq(dst_, rax);
621 class DeferredReferenceSetKeyedValue: public DeferredCode {
623 DeferredReferenceSetKeyedValue(Register value,
626 : value_(value), key_(key), receiver_(receiver) {
627 set_comment("[ DeferredReferenceSetKeyedValue");
630 virtual void Generate();
632 Label* patch_site() { return &patch_site_; }
642 void DeferredReferenceSetKeyedValue::Generate() {
643 __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
644 // Push receiver and key arguments on the stack.
647 // Move value argument to rax as expected by the IC stub.
648 if (!value_.is(rax)) __ movq(rax, value_);
650 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
651 __ Call(ic, RelocInfo::CODE_TARGET);
652 // The delta from the start of the map-compare instructions (initial movq)
653 // to the test instruction. We use masm_-> directly here instead of the
654 // __ macro because the macro sometimes uses macro expansion to turn
655 // into something that can't return a value. This is encountered
656 // when doing generated code coverage tests.
657 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
658 // Here we use masm_-> instead of the __ macro because this is the
659 // instruction that gets patched and coverage code gets in the way.
660 masm_->testl(rax, Immediate(-delta_to_patch_site));
661 // Restore value (returned from store IC), key and receiver
663 if (!value_.is(rax)) __ movq(value_, rax);
669 void CodeGenerator::CallApplyLazy(Expression* applicand,
670 Expression* receiver,
671 VariableProxy* arguments,
673 // An optimized implementation of expressions of the form
674 // x.apply(y, arguments).
675 // If the arguments object of the scope has not been allocated,
676 // and x.apply is Function.prototype.apply, this optimization
677 // just copies y and the arguments of the current function on the
678 // stack, as receiver and arguments, and calls x.
679 // In the implementation comments, we call x the applicand
680 // and y the receiver.
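// Illustrative JavaScript shape of the pattern this handles (example code,
// not anything read from the AST):
//
//   function outer(a, b) {
//     return f.apply(obj, arguments);   // f is the applicand, obj the receiver
//   }
//
// When 'arguments' was never materialized and f.apply really is
// Function.prototype.apply, the code below pushes obj and the caller's actual
// parameters directly and invokes f, never allocating an arguments object.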
681 ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
682 ASSERT(arguments->IsArguments());
684 // Load applicand.apply onto the stack. This will usually
685 // give us a megamorphic load site. Not super, but it works.
687 Handle<String> name = Factory::LookupAsciiSymbol("apply");
689 Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
691 frame()->Push(&answer);
693 // Load the receiver and the existing arguments object onto the
694 // expression stack. Avoid allocating the arguments object here.
696 LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
698 // Emit the source position information after having loaded the
699 // receiver and the arguments.
700 CodeForSourcePosition(position);
701 // Contents of frame at this point:
702 // Frame[0]: arguments object of the current function or the hole.
703 // Frame[1]: receiver
704 // Frame[2]: applicand.apply
705 // Frame[3]: applicand.
707 // Check if the arguments object has been lazily allocated
708 // already. If so, just use that instead of copying the arguments
709 // from the stack. This also deals with cases where a local variable
710 // named 'arguments' has been introduced.
712 Result probe = frame_->Pop();
713 { VirtualFrame::SpilledScope spilled_scope;
715 bool try_lazy = true;
716 if (probe.is_constant()) {
717 try_lazy = probe.handle()->IsTheHole();
719 __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
721 __ j(not_equal, &slow);
726 // Get rid of the arguments object probe.
727 frame_->Drop(); // Can be called on a spilled frame.
728 // Stack now has 3 elements on it.
729 // Contents of stack at this point:
731 // rsp[1]: applicand.apply
732 // rsp[2]: applicand.
734 // Check that the receiver really is a JavaScript object.
735 __ movq(rax, Operand(rsp, 0));
736 Condition is_smi = masm_->CheckSmi(rax);
737 __ j(is_smi, &build_args);
738 // We allow all JSObjects including JSFunctions. As long as
739 // JS_FUNCTION_TYPE is the last instance type and it is right
740 // after LAST_JS_OBJECT_TYPE, we do not have to check the upper bound.
742 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
743 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
744 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
745 __ j(below, &build_args);
747 // Check that applicand.apply is Function.prototype.apply.
748 __ movq(rax, Operand(rsp, kPointerSize));
749 is_smi = masm_->CheckSmi(rax);
750 __ j(is_smi, &build_args);
751 __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
752 __ j(not_equal, &build_args);
753 __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
754 Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
755 __ Cmp(FieldOperand(rax, SharedFunctionInfo::kCodeOffset), apply_code);
756 __ j(not_equal, &build_args);
758 // Check that applicand is a function.
759 __ movq(rdi, Operand(rsp, 2 * kPointerSize));
760 is_smi = masm_->CheckSmi(rdi);
761 __ j(is_smi, &build_args);
762 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
763 __ j(not_equal, &build_args);
765 // Copy the arguments to this function possibly from the
766 // adaptor frame below it.
767 Label invoke, adapted;
768 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
769 __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
770 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
771 __ j(equal, &adapted);
773 // No arguments adaptor frame. Copy fixed number of arguments.
774 __ movq(rax, Immediate(scope()->num_parameters()));
775 for (int i = 0; i < scope()->num_parameters(); i++) {
776 __ push(frame_->ParameterAt(i));
780 // Arguments adaptor frame present. Copy arguments from there, but
781 // avoid copying too many arguments to avoid stack overflows.
783 static const uint32_t kArgumentsLimit = 1 * KB;
784 __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
785 __ SmiToInteger32(rax, rax);
787 __ cmpq(rax, Immediate(kArgumentsLimit));
788 __ j(above, &build_args);
790 // Loop through the arguments pushing them onto the execution
791 // stack. We don't inform the virtual frame of the push, so we don't
792 // have to worry about getting rid of the elements from the virtual frame.
795 // rcx is a small non-negative integer, due to the test above.
799 __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
801 __ j(not_zero, &loop);
803 // Invoke the function.
805 ParameterCount actual(rax);
806 __ InvokeFunction(rdi, actual, CALL_FUNCTION);
807 // Drop applicand.apply and applicand from the stack, and push
808 // the result of the function call, but leave the spilled frame
809 // unchanged, with 3 elements, so it is correct when we compile the slow case.
811 __ addq(rsp, Immediate(2 * kPointerSize));
813 // Stack now has 1 element:
817 // Slow-case: Allocate the arguments object since we know it isn't
818 // there, and fall-through to the slow-case where we call
820 __ bind(&build_args);
821 // Stack now has 3 elements, because we have jumped from where:
823 // rsp[1]: applicand.apply
824 // rsp[2]: applicand.
826 // StoreArgumentsObject requires a correct frame, and may modify it.
827 Result arguments_object = StoreArgumentsObject(false);
829 arguments_object.ToRegister();
830 frame_->EmitPush(arguments_object.reg());
831 arguments_object.Unuse();
832 // Stack and frame now have 4 elements.
836 // Generic computation of x.apply(y, args) with no special optimization.
837 // Flip applicand.apply and applicand on the stack, so
838 // applicand looks like the receiver of the applicand.apply call.
839 // Then process it as a normal function call.
840 __ movq(rax, Operand(rsp, 3 * kPointerSize));
841 __ movq(rbx, Operand(rsp, 2 * kPointerSize));
842 __ movq(Operand(rsp, 2 * kPointerSize), rax);
843 __ movq(Operand(rsp, 3 * kPointerSize), rbx);
845 CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
846 Result res = frame_->CallStub(&call_function, 3);
847 // The function and its two arguments have been dropped.
848 frame_->Drop(1); // Drop the receiver as well.
850 frame_->EmitPush(res.reg());
851 // Stack now has 1 element:
853 if (try_lazy) __ bind(&done);
854 } // End of spilled scope.
855 // Restore the context register after a call.
856 frame_->RestoreContextRegister();
860 class DeferredStackCheck: public DeferredCode {
862 DeferredStackCheck() {
863 set_comment("[ DeferredStackCheck");
866 virtual void Generate();
870 void DeferredStackCheck::Generate() {
876 void CodeGenerator::CheckStack() {
877 DeferredStackCheck* deferred = new DeferredStackCheck;
878 __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
879 deferred->Branch(below);
880 deferred->BindExit();
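// The fast path is just the comparison above: while rsp is above the stack
// limit, execution falls straight through to BindExit. Only when rsp has
// dropped below the limit (a real overflow, or an interrupt signalled by
// lowering the limit) does control branch to the out-of-line
// DeferredStackCheck, whose Generate body (elided above) makes the slow call
// into the stack guard.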
884 void CodeGenerator::VisitAndSpill(Statement* statement) {
885 // TODO(X64): No architecture specific code. Move to shared location.
886 ASSERT(in_spilled_code());
887 set_in_spilled_code(false);
889 if (frame_ != NULL) {
892 set_in_spilled_code(true);
896 void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
897 ASSERT(in_spilled_code());
898 set_in_spilled_code(false);
899 VisitStatements(statements);
900 if (frame_ != NULL) {
903 set_in_spilled_code(true);
907 void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
908 ASSERT(!in_spilled_code());
909 for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
910 Visit(statements->at(i));
915 void CodeGenerator::VisitBlock(Block* node) {
916 ASSERT(!in_spilled_code());
917 Comment cmnt(masm_, "[ Block");
918 CodeForStatementPosition(node);
919 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
920 VisitStatements(node->statements());
921 if (node->break_target()->is_linked()) {
922 node->break_target()->Bind();
924 node->break_target()->Unuse();
928 void CodeGenerator::VisitDeclaration(Declaration* node) {
929 Comment cmnt(masm_, "[ Declaration");
930 Variable* var = node->proxy()->var();
931 ASSERT(var != NULL); // must have been resolved
932 Slot* slot = var->slot();
934 // If it was not possible to allocate the variable at compile time,
935 // we need to "declare" it at runtime to make sure it actually
936 // exists in the local context.
937 if (slot != NULL && slot->type() == Slot::LOOKUP) {
938 // Variables with a "LOOKUP" slot were introduced as non-locals
939 // during variable resolution and must have mode DYNAMIC.
940 ASSERT(var->is_dynamic());
941 // For now, just do a runtime call. Sync the virtual frame eagerly
942 // so we can simply push the arguments into place.
943 frame_->SyncRange(0, frame_->element_count() - 1);
944 frame_->EmitPush(rsi);
945 __ movq(kScratchRegister, var->name(), RelocInfo::EMBEDDED_OBJECT);
946 frame_->EmitPush(kScratchRegister);
947 // Declaration nodes are always introduced in one of two modes.
948 ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
949 PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
950 frame_->EmitPush(Smi::FromInt(attr));
951 // Push initial value, if any.
952 // Note: For variables we must not push an initial value (such as
953 // 'undefined') because we may have a (legal) redeclaration and we
954 // must not destroy the current value.
955 if (node->mode() == Variable::CONST) {
956 frame_->EmitPush(Heap::kTheHoleValueRootIndex);
957 } else if (node->fun() != NULL) {
960 frame_->EmitPush(Smi::FromInt(0)); // no initial value!
962 Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
963 // Ignore the return value (declarations are statements).
967 ASSERT(!var->is_global());
969 // If we have a function or a constant, we need to initialize the variable.
970 Expression* val = NULL;
971 if (node->mode() == Variable::CONST) {
972 val = new Literal(Factory::the_hole_value());
974 val = node->fun(); // NULL if we don't have a function
979 // Set the initial value.
980 Reference target(this, node->proxy());
982 target.SetValue(NOT_CONST_INIT);
983 // The reference is removed from the stack (preserving TOS) when
984 // it goes out of scope.
986 // Get rid of the assigned value (declarations are statements).
992 void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
993 ASSERT(!in_spilled_code());
994 Comment cmnt(masm_, "[ ExpressionStatement");
995 CodeForStatementPosition(node);
996 Expression* expression = node->expression();
997 expression->MarkAsStatement();
999 // Remove the lingering expression result from the top of stack.
1004 void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
1005 ASSERT(!in_spilled_code());
1006 Comment cmnt(masm_, "// EmptyStatement");
1007 CodeForStatementPosition(node);
1012 void CodeGenerator::VisitIfStatement(IfStatement* node) {
1013 ASSERT(!in_spilled_code());
1014 Comment cmnt(masm_, "[ IfStatement");
1015 // Generate different code depending on which parts of the if statement
1016 // are present or not.
1017 bool has_then_stm = node->HasThenStatement();
1018 bool has_else_stm = node->HasElseStatement();
1020 CodeForStatementPosition(node);
1022 if (has_then_stm && has_else_stm) {
1025 ControlDestination dest(&then, &else_, true);
1026 LoadCondition(node->condition(), &dest, true);
1028 if (dest.false_was_fall_through()) {
1029 // The else target was bound, so we compile the else part first.
1030 Visit(node->else_statement());
1032 // We may have dangling jumps to the then part.
1033 if (then.is_linked()) {
1034 if (has_valid_frame()) exit.Jump();
1036 Visit(node->then_statement());
1039 // The then target was bound, so we compile the then part first.
1040 Visit(node->then_statement());
1042 if (else_.is_linked()) {
1043 if (has_valid_frame()) exit.Jump();
1045 Visit(node->else_statement());
1049 } else if (has_then_stm) {
1050 ASSERT(!has_else_stm);
1052 ControlDestination dest(&then, &exit, true);
1053 LoadCondition(node->condition(), &dest, true);
1055 if (dest.false_was_fall_through()) {
1056 // The exit label was bound. We may have dangling jumps to the then part.
1058 if (then.is_linked()) {
1062 Visit(node->then_statement());
1065 // The then label was bound.
1066 Visit(node->then_statement());
1069 } else if (has_else_stm) {
1070 ASSERT(!has_then_stm);
1072 ControlDestination dest(&exit, &else_, false);
1073 LoadCondition(node->condition(), &dest, true);
1075 if (dest.true_was_fall_through()) {
1076 // The exit label was bound. We may have dangling jumps to the else part.
1078 if (else_.is_linked()) {
1082 Visit(node->else_statement());
1085 // The else label was bound.
1086 Visit(node->else_statement());
1090 ASSERT(!has_then_stm && !has_else_stm);
1091 // We only care about the condition's side effects (not its value
1092 // or control flow effect). LoadCondition is called without
1093 // forcing control flow.
1094 ControlDestination dest(&exit, &exit, true);
1095 LoadCondition(node->condition(), &dest, false);
1096 if (!dest.is_used()) {
1097 // We got a value on the frame rather than (or in addition to) control flow.
1103 if (exit.is_linked()) {
1109 void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
1110 ASSERT(!in_spilled_code());
1111 Comment cmnt(masm_, "[ ContinueStatement");
1112 CodeForStatementPosition(node);
1113 node->target()->continue_target()->Jump();
1117 void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
1118 ASSERT(!in_spilled_code());
1119 Comment cmnt(masm_, "[ BreakStatement");
1120 CodeForStatementPosition(node);
1121 node->target()->break_target()->Jump();
1125 void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
1126 ASSERT(!in_spilled_code());
1127 Comment cmnt(masm_, "[ ReturnStatement");
1129 CodeForStatementPosition(node);
1130 Load(node->expression());
1131 Result return_value = frame_->Pop();
1132 if (function_return_is_shadowed_) {
1133 function_return_.Jump(&return_value);
1135 frame_->PrepareForReturn();
1136 if (function_return_.is_bound()) {
1137 // If the function return label is already bound we reuse the
1138 // code by jumping to the return site.
1139 function_return_.Jump(&return_value);
1141 function_return_.Bind(&return_value);
1142 GenerateReturnSequence(&return_value);
1148 void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
1149 ASSERT(!in_spilled_code());
1150 Comment cmnt(masm_, "[ WithEnterStatement");
1151 CodeForStatementPosition(node);
1152 Load(node->expression());
1154 if (node->is_catch_block()) {
1155 context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
1157 context = frame_->CallRuntime(Runtime::kPushContext, 1);
1160 // Update context local.
1161 frame_->SaveContextRegister();
1163 // Verify that the runtime call result and rsi agree.
1164 if (FLAG_debug_code) {
1165 __ cmpq(context.reg(), rsi);
1166 __ Assert(equal, "Runtime::NewContext should end up in rsi");
1171 void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
1172 ASSERT(!in_spilled_code());
1173 Comment cmnt(masm_, "[ WithExitStatement");
1174 CodeForStatementPosition(node);
1176 __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX));
1177 // Update context local.
1178 frame_->SaveContextRegister();
1182 void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
1183 // TODO(X64): This code is completely generic and should be moved somewhere
1184 // where it can be shared between architectures.
1185 ASSERT(!in_spilled_code());
1186 Comment cmnt(masm_, "[ SwitchStatement");
1187 CodeForStatementPosition(node);
1188 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1190 // Compile the switch value.
1193 ZoneList<CaseClause*>* cases = node->cases();
1194 int length = cases->length();
1195 CaseClause* default_clause = NULL;
1197 JumpTarget next_test;
1198 // Compile the case label expressions and comparisons. Exit early
1199 // if a comparison is unconditionally true. The target next_test is
1200 // bound before the loop in order to indicate control flow to the
1201 // first comparison.
1203 for (int i = 0; i < length && !next_test.is_unused(); i++) {
1204 CaseClause* clause = cases->at(i);
1205 // The default is not a test, but remember it for later.
1206 if (clause->is_default()) {
1207 default_clause = clause;
1211 Comment cmnt(masm_, "[ Case comparison");
1212 // We recycle the same target next_test for each test. Bind it if
1213 // the previous test has not done so and then unuse it for the
1215 if (next_test.is_linked()) {
1220 // Duplicate the switch value.
1223 // Compile the label expression.
1224 Load(clause->label());
1226 // Compare and branch to the body if true or the next test if
1227 // false. Prefer the next test as a fall through.
1228 ControlDestination dest(clause->body_target(), &next_test, false);
1229 Comparison(node, equal, true, &dest);
1231 // If the comparison fell through to the true target, jump to the
1233 if (dest.true_was_fall_through()) {
1234 clause->body_target()->Unuse();
1235 clause->body_target()->Jump();
1239 // If there was control flow to a next test from the last one
1240 // compiled, compile a jump to the default or break target.
1241 if (!next_test.is_unused()) {
1242 if (next_test.is_linked()) {
1245 // Drop the switch value.
1247 if (default_clause != NULL) {
1248 default_clause->body_target()->Jump();
1250 node->break_target()->Jump();
1254 // The last instruction emitted was a jump, either to the default
1255 // clause or the break target, or else to a case body from the loop
1256 // that compiles the tests.
1257 ASSERT(!has_valid_frame());
1258 // Compile case bodies as needed.
1259 for (int i = 0; i < length; i++) {
1260 CaseClause* clause = cases->at(i);
1262 // There are two ways to reach the body: from the corresponding
1263 // test or as the fall through of the previous body.
1264 if (clause->body_target()->is_linked() || has_valid_frame()) {
1265 if (clause->body_target()->is_linked()) {
1266 if (has_valid_frame()) {
1267 // If we have both a jump to the test and a fall through, put
1268 // a jump on the fall through path to avoid the dropping of
1269 // the switch value on the test path. The exception is the
1270 // default which has already had the switch value dropped.
1271 if (clause->is_default()) {
1272 clause->body_target()->Bind();
1276 clause->body_target()->Bind();
1281 // No fall through to worry about.
1282 clause->body_target()->Bind();
1283 if (!clause->is_default()) {
1288 // Otherwise, we have only fall through.
1289 ASSERT(has_valid_frame());
1292 // We are now prepared to compile the body.
1293 Comment cmnt(masm_, "[ Case body");
1294 VisitStatements(clause->statements());
1296 clause->body_target()->Unuse();
1299 // We may not have a valid frame here so bind the break target only if needed.
1301 if (node->break_target()->is_linked()) {
1302 node->break_target()->Bind();
1304 node->break_target()->Unuse();
1308 void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
1309 ASSERT(!in_spilled_code());
1310 Comment cmnt(masm_, "[ DoWhileStatement");
1311 CodeForStatementPosition(node);
1312 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1313 JumpTarget body(JumpTarget::BIDIRECTIONAL);
1314 IncrementLoopNesting();
1316 ConditionAnalysis info = AnalyzeCondition(node->cond());
1317 // Label the top of the loop for the backward jump if necessary.
1320 // Use the continue target.
1321 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1322 node->continue_target()->Bind();
1325 // No need to label it.
1326 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1329 // Continue is the test, so use the backward body target.
1330 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1335 CheckStack(); // TODO(1222600): ignore if body contains calls.
1336 Visit(node->body());
1338 // Compile the test.
1341 // If control flow can fall off the end of the body, jump back
1342 // to the top and bind the break target at the exit.
1343 if (has_valid_frame()) {
1344 node->continue_target()->Jump();
1346 if (node->break_target()->is_linked()) {
1347 node->break_target()->Bind();
1351 // We may have had continues or breaks in the body.
1352 if (node->continue_target()->is_linked()) {
1353 node->continue_target()->Bind();
1355 if (node->break_target()->is_linked()) {
1356 node->break_target()->Bind();
1360 // We have to compile the test expression if it can be reached by
1361 // control flow falling out of the body or via continue.
1362 if (node->continue_target()->is_linked()) {
1363 node->continue_target()->Bind();
1365 if (has_valid_frame()) {
1366 Comment cmnt(masm_, "[ DoWhileCondition");
1367 CodeForDoWhileConditionPosition(node);
1368 ControlDestination dest(&body, node->break_target(), false);
1369 LoadCondition(node->cond(), &dest, true);
1371 if (node->break_target()->is_linked()) {
1372 node->break_target()->Bind();
1377 DecrementLoopNesting();
1378 node->continue_target()->Unuse();
1379 node->break_target()->Unuse();
1383 void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
1384 ASSERT(!in_spilled_code());
1385 Comment cmnt(masm_, "[ WhileStatement");
1386 CodeForStatementPosition(node);
1388 // If the condition is always false and has no side effects, we do not
1389 // need to compile anything.
1390 ConditionAnalysis info = AnalyzeCondition(node->cond());
1391 if (info == ALWAYS_FALSE) return;
1393 // Do not duplicate conditions that may have function literal
1394 // subexpressions. This can cause us to compile the function literal twice.
1396 bool test_at_bottom = !node->may_have_function_literal();
1397 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1398 IncrementLoopNesting();
1400 if (test_at_bottom) {
1401 body.set_direction(JumpTarget::BIDIRECTIONAL);
1404 // Based on the condition analysis, compile the test as necessary.
1407 // We will not compile the test expression. Label the top of the
1408 // loop with the continue target.
1409 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1410 node->continue_target()->Bind();
1413 if (test_at_bottom) {
1414 // Continue is the test at the bottom, no need to label the test
1415 // at the top. The body is a backward target.
1416 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1418 // Label the test at the top as the continue target. The body
1419 // is a forward-only target.
1420 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1421 node->continue_target()->Bind();
1423 // Compile the test with the body as the true target and preferred
1424 // fall-through and with the break target as the false target.
1425 ControlDestination dest(&body, node->break_target(), true);
1426 LoadCondition(node->cond(), &dest, true);
1428 if (dest.false_was_fall_through()) {
1429 // If we got the break target as fall-through, the test may have
1430 // been unconditionally false (if there are no jumps to the body target).
1432 if (!body.is_linked()) {
1433 DecrementLoopNesting();
1437 // Otherwise, jump around the body on the fall through and then
1438 // bind the body target.
1439 node->break_target()->Unuse();
1440 node->break_target()->Jump();
1450 CheckStack(); // TODO(1222600): ignore if body contains calls.
1451 Visit(node->body());
1453 // Based on the condition analysis, compile the backward jump as
1457 // The loop body has been labeled with the continue target.
1458 if (has_valid_frame()) {
1459 node->continue_target()->Jump();
1463 if (test_at_bottom) {
1464 // If we have chosen to recompile the test at the bottom,
1465 // then it is the continue target.
1466 if (node->continue_target()->is_linked()) {
1467 node->continue_target()->Bind();
1469 if (has_valid_frame()) {
1470 // The break target is the fall-through (body is a backward
1471 // jump from here and thus an invalid fall-through).
1472 ControlDestination dest(&body, node->break_target(), false);
1473 LoadCondition(node->cond(), &dest, true);
1476 // If we have chosen not to recompile the test at the
1477 // bottom, jump back to the one at the top.
1478 if (has_valid_frame()) {
1479 node->continue_target()->Jump();
1488 // The break target may be already bound (by the condition), or there
1489 // may not be a valid frame. Bind it only if needed.
1490 if (node->break_target()->is_linked()) {
1491 node->break_target()->Bind();
1493 DecrementLoopNesting();
1497 void CodeGenerator::VisitForStatement(ForStatement* node) {
1498 ASSERT(!in_spilled_code());
1499 Comment cmnt(masm_, "[ ForStatement");
1500 CodeForStatementPosition(node);
1502 // Compile the init expression if present.
1503 if (node->init() != NULL) {
1504 Visit(node->init());
1507 // If the condition is always false and has no side effects, we do not
1508 // need to compile anything else.
1509 ConditionAnalysis info = AnalyzeCondition(node->cond());
1510 if (info == ALWAYS_FALSE) return;
1512 // Do not duplicate conditions that may have function literal
1513 // subexpressions. This can cause us to compile the function literal twice.
1515 bool test_at_bottom = !node->may_have_function_literal();
1516 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1517 IncrementLoopNesting();
1519 // Target for backward edge if no test at the bottom, otherwise
1521 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
1523 // Target for backward edge if there is a test at the bottom,
1524 // otherwise used as target for test at the top.
1526 if (test_at_bottom) {
1527 body.set_direction(JumpTarget::BIDIRECTIONAL);
1530 // Based on the condition analysis, compile the test as necessary.
1533 // We will not compile the test expression. Label the top of the
1535 if (node->next() == NULL) {
1536 // Use the continue target if there is no update expression.
1537 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1538 node->continue_target()->Bind();
1540 // Otherwise use the backward loop target.
1541 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1546 if (test_at_bottom) {
1547 // Continue is either the update expression or the test at the
1548 // bottom, no need to label the test at the top.
1549 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1550 } else if (node->next() == NULL) {
1551 // We are not recompiling the test at the bottom and there is no
1552 // update expression.
1553 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
1554 node->continue_target()->Bind();
1556 // We are not recompiling the test at the bottom and there is an
1557 // update expression.
1558 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1562 // Compile the test with the body as the true target and preferred
1563 // fall-through and with the break target as the false target.
1564 ControlDestination dest(&body, node->break_target(), true);
1565 LoadCondition(node->cond(), &dest, true);
1567 if (dest.false_was_fall_through()) {
1568 // If we got the break target as fall-through, the test may have
1569 // been unconditionally false (if there are no jumps to the body target).
1571 if (!body.is_linked()) {
1572 DecrementLoopNesting();
1576 // Otherwise, jump around the body on the fall through and then
1577 // bind the body target.
1578 node->break_target()->Unuse();
1579 node->break_target()->Jump();
1589 CheckStack(); // TODO(1222600): ignore if body contains calls.
1590 Visit(node->body());
1592 // If there is an update expression, compile it if necessary.
1593 if (node->next() != NULL) {
1594 if (node->continue_target()->is_linked()) {
1595 node->continue_target()->Bind();
1598 // Control can reach the update by falling out of the body or by a
1600 if (has_valid_frame()) {
1601 // Record the source position of the statement as this code which
1602 // is after the code for the body actually belongs to the loop
1603 // statement and not the body.
1604 CodeForStatementPosition(node);
1605 Visit(node->next());
1609 // Based on the condition analysis, compile the backward jump as
1613 if (has_valid_frame()) {
1614 if (node->next() == NULL) {
1615 node->continue_target()->Jump();
1622 if (test_at_bottom) {
1623 if (node->continue_target()->is_linked()) {
1624 // We can have dangling jumps to the continue target if there
1625 // was no update expression.
1626 node->continue_target()->Bind();
1628 // Control can reach the test at the bottom by falling out of
1629 // the body, by a continue in the body, or from the update expression.
1631 if (has_valid_frame()) {
1632 // The break target is the fall-through (body is a backward
1634 ControlDestination dest(&body, node->break_target(), false);
1635 LoadCondition(node->cond(), &dest, true);
1638 // Otherwise, jump back to the test at the top.
1639 if (has_valid_frame()) {
1640 if (node->next() == NULL) {
1641 node->continue_target()->Jump();
1653 // The break target may be already bound (by the condition), or there
1654 // may not be a valid frame. Bind it only if needed.
1655 if (node->break_target()->is_linked()) {
1656 node->break_target()->Bind();
1658 DecrementLoopNesting();
1662 void CodeGenerator::VisitForInStatement(ForInStatement* node) {
1663 ASSERT(!in_spilled_code());
1664 VirtualFrame::SpilledScope spilled_scope;
1665 Comment cmnt(masm_, "[ ForInStatement");
1666 CodeForStatementPosition(node);
1668 JumpTarget primitive;
1669 JumpTarget jsobject;
1670 JumpTarget fixed_array;
1671 JumpTarget entry(JumpTarget::BIDIRECTIONAL);
1672 JumpTarget end_del_check;
1675 // Get the object to enumerate over (converted to JSObject).
1676 LoadAndSpill(node->enumerable());
1678 // Both SpiderMonkey and kjs ignore null and undefined in contrast
1679 // to the specification. 12.6.4 mandates a call to ToObject.
1680 frame_->EmitPop(rax);
1682 // rax: value to be iterated over
1683 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
1685 __ CompareRoot(rax, Heap::kNullValueRootIndex);
1688 // Stack layout in body:
1689 // [iteration counter (smi)] <- slot 0
1690 // [length of array] <- slot 1
1691 // [FixedArray] <- slot 2
1692 // [Map or 0] <- slot 3
1693 // [Object] <- slot 4
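// The loop below treats these five slots as its state: slot 0 is incremented
// (as a smi) each iteration and compared against the length in slot 1; the
// current key is fetched from the FixedArray in slot 2; slot 3 holds the
// receiver's map when the enum cache is usable, so a single map comparison
// can tell whether the cache is still valid, or the smi 0 in the slow case,
// which forces every key through the FILTER_KEY check; slot 4 is the object
// being enumerated.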
1695 // Check if enumerable is already a JSObject
1696 // rax: value to be iterated over
1697 Condition is_smi = masm_->CheckSmi(rax);
1698 primitive.Branch(is_smi);
1699 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
1700 jsobject.Branch(above_equal);
1703 frame_->EmitPush(rax);
1704 frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
1705 // function call returns the value in rax, which is where we want it below
1708 // Get the set of properties (as a FixedArray or Map).
1709 // rax: value to be iterated over
1710 frame_->EmitPush(rax); // Push the object being iterated over.
1713 // Check cache validity in generated code. This is a fast case for
1714 // the JSObject::IsSimpleEnum cache validity checks. If we cannot
1715 // guarantee cache validity, call the runtime system to check cache
1716 // validity or get the property names in a fixed array.
1717 JumpTarget call_runtime;
1718 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
1719 JumpTarget check_prototype;
1720 JumpTarget use_cache;
1723 // Check that there are no elements.
1724 __ movq(rdx, FieldOperand(rcx, JSObject::kElementsOffset));
1725 __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
1726 call_runtime.Branch(not_equal);
1727 // Check that instance descriptors are not empty so that we can
1728 // check for an enum cache. Leave the map in rbx for the subsequent prototype load.
1730 __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
1731 __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
1732 __ CompareRoot(rdx, Heap::kEmptyDescriptorArrayRootIndex);
1733 call_runtime.Branch(equal);
1734 // Check that there is an enum cache in the non-empty instance
1735 // descriptors. This is the case if the next enumeration index
1736 // field does not contain a smi.
1737 __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
1738 is_smi = masm_->CheckSmi(rdx);
1739 call_runtime.Branch(is_smi);
1740 // For all objects but the receiver, check that the cache is empty.
1742 check_prototype.Branch(equal);
1743 __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
1744 __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
1745 call_runtime.Branch(not_equal);
1746 check_prototype.Bind();
1747 // Load the prototype from the map and loop if non-null.
1748 __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
1749 __ CompareRoot(rcx, Heap::kNullValueRootIndex);
1750 loop.Branch(not_equal);
1751 // The enum cache is valid. Load the map of the object being
1752 // iterated over and use the cache for the iteration.
1753 __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
1756 call_runtime.Bind();
1757 // Call the runtime to get the property names for the object.
1758 frame_->EmitPush(rax); // push the Object (slot 4) for the runtime call
1759 frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
1761 // If we got a Map, we can do a fast modification check.
1762 // Otherwise, we got a FixedArray, and we have to do a slow check.
1763 // rax: map or fixed array (result from call to
1764 // Runtime::kGetPropertyNamesFast)
1766 __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
1767 __ CompareRoot(rcx, Heap::kMetaMapRootIndex);
1768 fixed_array.Branch(not_equal);
1772 // rax: map (either the result from a call to
1773 // Runtime::kGetPropertyNamesFast or has been fetched directly from
1776 __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
1777 // Get the bridge array held in the enumeration index field.
1778 __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
1779 // Get the cache from the bridge array.
1780 __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
1782 frame_->EmitPush(rax); // <- slot 3
1783 frame_->EmitPush(rdx); // <- slot 2
1784 __ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
1785 __ Integer32ToSmi(rax, rax);
1786 frame_->EmitPush(rax); // <- slot 1
1787 frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
1791 // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
1792 frame_->EmitPush(Smi::FromInt(0)); // <- slot 3
1793 frame_->EmitPush(rax); // <- slot 2
1795 // Push the length of the array and the initial index onto the stack.
1796 __ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
1797 __ Integer32ToSmi(rax, rax);
1798 frame_->EmitPush(rax); // <- slot 1
1799 frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
1803 // Grab the current frame's height for the break and continue
1804 // targets only after all the state is pushed on the frame.
1805 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1806 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1808 __ movq(rax, frame_->ElementAt(0)); // load the current count
1809 __ SmiCompare(frame_->ElementAt(1), rax); // compare to the array length
1810 node->break_target()->Branch(below_equal);
1812 // Get the i'th entry of the array.
1813 __ movq(rdx, frame_->ElementAt(2));
1814 SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
1816 FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));
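// On x64 a smi keeps its 32-bit payload in the upper half of the word, with
// the low bits serving as the tag, so SmiToIndex above turns the smi loop
// counter from rax into a (register, scale) pair that can be fed directly
// into the FieldOperand addressing mode instead of fully untagging it first.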
1818 // Get the expected map from the stack or a zero map in the
1819 // permanent slow case. rax: current iteration count, rbx: i'th entry
1820 // of the enum cache.
1821 __ movq(rdx, frame_->ElementAt(3));
1822 // Check if the expected map still matches that of the enumerable.
1823 // If not, we have to filter the key.
1824 // rax: current iteration count
1825 // rbx: i'th entry of the enum cache
1826 // rdx: expected map value
1827 __ movq(rcx, frame_->ElementAt(4));
1828 __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
1830 end_del_check.Branch(equal);
1832 // Convert the entry to a string (or null if it isn't a property anymore).
1833 frame_->EmitPush(frame_->ElementAt(4)); // push enumerable
1834 frame_->EmitPush(rbx); // push entry
1835 frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
1838 // If the property has been removed while iterating, we just skip it.
1839 __ CompareRoot(rbx, Heap::kNullValueRootIndex);
1840 node->continue_target()->Branch(equal);
1842 end_del_check.Bind();
1843 // Store the entry in the 'each' expression and take another spin in the
1844 // loop. rbx: i'th entry of the enum cache (or string thereof)
1845 frame_->EmitPush(rbx);
1846 { Reference each(this, node->each());
1847 // Loading a reference may leave the frame in an unspilled state.
1849 if (!each.is_illegal()) {
1850 if (each.size() > 0) {
1851 frame_->EmitPush(frame_->ElementAt(each.size()));
1852 each.SetValue(NOT_CONST_INIT);
1853 frame_->Drop(2); // Drop the original and the copy of the element.
1855 // If the reference has size zero then we can use the value below
1856 // the reference as if it were above the reference, instead of pushing
1857 // a new copy of it above the reference.
1858 each.SetValue(NOT_CONST_INIT);
1859 frame_->Drop(); // Drop the original of the element.
1863 // Unloading a reference may leave the frame in an unspilled state.
1867 CheckStack(); // TODO(1222600): ignore if body contains calls.
1868 VisitAndSpill(node->body());
1870 // Next. Reestablish a spilled frame in case we are coming here via
1871 // a continue in the body.
1872 node->continue_target()->Bind();
1874 frame_->EmitPop(rax);
1875 __ SmiAddConstant(rax, rax, Smi::FromInt(1));
1876 frame_->EmitPush(rax);
1879 // Cleanup. No need to spill because VirtualFrame::Drop is safe for any frame.
1881 node->break_target()->Bind();
1887 node->continue_target()->Unuse();
1888 node->break_target()->Unuse();
1891 void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
1892 ASSERT(!in_spilled_code());
1893 VirtualFrame::SpilledScope spilled_scope;
1894 Comment cmnt(masm_, "[ TryCatchStatement");
1895 CodeForStatementPosition(node);
1897 JumpTarget try_block;
1901 // --- Catch block ---
1902 frame_->EmitPush(rax);
1904 // Store the caught exception in the catch variable.
1905 Variable* catch_var = node->catch_var()->var();
1906 ASSERT(catch_var != NULL && catch_var->slot() != NULL);
1907 StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
1909 // Remove the exception from the stack.
1912 VisitStatementsAndSpill(node->catch_block()->statements());
1913 if (has_valid_frame()) {
1918 // --- Try block ---
1921 frame_->PushTryHandler(TRY_CATCH_HANDLER);
1922 int handler_height = frame_->height();
1924 // Shadow the jump targets for all escapes from the try block, including
1925 // returns. During shadowing, the original target is hidden as the
1926 // ShadowTarget and operations on the original actually affect the
1927 // shadowing target.
1929 // We should probably try to unify the escaping targets and the return target.
1931 int nof_escapes = node->escaping_targets()->length();
1932 List<ShadowTarget*> shadows(1 + nof_escapes);
1934 // Add the shadow target for the function return.
1935 static const int kReturnShadowIndex = 0;
1936 shadows.Add(new ShadowTarget(&function_return_));
1937 bool function_return_was_shadowed = function_return_is_shadowed_;
1938 function_return_is_shadowed_ = true;
1939 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
1941 // Add the remaining shadow targets.
1942 for (int i = 0; i < nof_escapes; i++) {
1943 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
1946 // Generate code for the statements in the try block.
1947 VisitStatementsAndSpill(node->try_block()->statements());
1949 // Stop the introduced shadowing and count the number of required unlinks.
1950 // After shadowing stops, the original targets are unshadowed and the
1951 // ShadowTargets represent the formerly shadowing targets.
1952 bool has_unlinks = false;
1953 for (int i = 0; i < shadows.length(); i++) {
1954 shadows[i]->StopShadowing();
1955 has_unlinks = has_unlinks || shadows[i]->is_linked();
1957 function_return_is_shadowed_ = function_return_was_shadowed;
1959 // Get an external reference to the handler address.
1960 ExternalReference handler_address(Top::k_handler_address);
1962 // Make sure that there's nothing left on the stack above the
1963 // handler structure.
1964 if (FLAG_debug_code) {
1965 __ movq(kScratchRegister, handler_address);
1966 __ cmpq(rsp, Operand(kScratchRegister, 0));
1967 __ Assert(equal, "stack pointer should point to top handler");
1970 // If we can fall off the end of the try block, unlink from try chain.
1971 if (has_valid_frame()) {
1972 // The next handler address is on top of the frame. Unlink from
1973 // the handler list and drop the rest of this handler from the frame.
1975 ASSERT(StackHandlerConstants::kNextOffset == 0);
1976 __ movq(kScratchRegister, handler_address);
1977 frame_->EmitPop(Operand(kScratchRegister, 0));
1978 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
1984 // Generate unlink code for the (formerly) shadowing targets that
1985 // have been jumped to. Deallocate each shadow target.
1986 Result return_value;
1987 for (int i = 0; i < shadows.length(); i++) {
1988 if (shadows[i]->is_linked()) {
1989 // Unlink from try chain; be careful not to destroy the TOS if there is one.
1991 if (i == kReturnShadowIndex) {
1992 shadows[i]->Bind(&return_value);
1993 return_value.ToRegister(rax);
1997 // Because we can be jumping here (to spilled code) from
1998 // unspilled code, we need to reestablish a spilled frame at this block.
2002 // Reload sp from the top handler, because some statements that we
2003 // break from (e.g., for...in) may have left stuff on the stack.
2004 __ movq(kScratchRegister, handler_address);
2005 __ movq(rsp, Operand(kScratchRegister, 0));
2006 frame_->Forget(frame_->height() - handler_height);
2008 ASSERT(StackHandlerConstants::kNextOffset == 0);
2009 __ movq(kScratchRegister, handler_address);
2010 frame_->EmitPop(Operand(kScratchRegister, 0));
2011 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2013 if (i == kReturnShadowIndex) {
2014 if (!function_return_is_shadowed_) frame_->PrepareForReturn();
2015 shadows[i]->other_target()->Jump(&return_value);
2017 shadows[i]->other_target()->Jump();
2026 void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
2027 ASSERT(!in_spilled_code());
2028 VirtualFrame::SpilledScope spilled_scope;
2029 Comment cmnt(masm_, "[ TryFinallyStatement");
2030 CodeForStatementPosition(node);
2032 // State: Used to keep track of reason for entering the finally
2033 // block. Should probably be extended to hold information for
2034 // break/continue from within the try block.
2035 enum { FALLING, THROWING, JUMPING };
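// The state is passed to the finally block in rcx as a smi: FALLING when
// execution falls off the end of the try block, THROWING when an exception
// was thrown, and JUMPING + i when control left the try block through the
// i'th shadowed jump target (see the comparisons against
// Smi::FromInt(JUMPING + i) further down).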
2037 JumpTarget try_block;
2038 JumpTarget finally_block;
2042 frame_->EmitPush(rax);
2043 // In case of thrown exceptions, this is where we continue.
2044 __ Move(rcx, Smi::FromInt(THROWING));
2045 finally_block.Jump();
2047 // --- Try block ---
2050 frame_->PushTryHandler(TRY_FINALLY_HANDLER);
2051 int handler_height = frame_->height();
2053 // Shadow the jump targets for all escapes from the try block, including
2054 // returns. During shadowing, the original target is hidden as the
2055 // ShadowTarget and operations on the original actually affect the
2056 // shadowing target.
2058 // We should probably try to unify the escaping targets and the return target.
2060 int nof_escapes = node->escaping_targets()->length();
2061 List<ShadowTarget*> shadows(1 + nof_escapes);
2063 // Add the shadow target for the function return.
2064 static const int kReturnShadowIndex = 0;
2065 shadows.Add(new ShadowTarget(&function_return_));
2066 bool function_return_was_shadowed = function_return_is_shadowed_;
2067 function_return_is_shadowed_ = true;
2068 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2070 // Add the remaining shadow targets.
2071 for (int i = 0; i < nof_escapes; i++) {
2072 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2075 // Generate code for the statements in the try block.
2076 VisitStatementsAndSpill(node->try_block()->statements());
2078 // Stop the introduced shadowing and count the number of required unlinks.
2079 // After shadowing stops, the original targets are unshadowed and the
2080 // ShadowTargets represent the formerly shadowing targets.
2081 int nof_unlinks = 0;
2082 for (int i = 0; i < shadows.length(); i++) {
2083 shadows[i]->StopShadowing();
2084 if (shadows[i]->is_linked()) nof_unlinks++;
2086 function_return_is_shadowed_ = function_return_was_shadowed;
2088 // Get an external reference to the handler address.
2089 ExternalReference handler_address(Top::k_handler_address);
2091 // If we can fall off the end of the try block, unlink from the try
2092 // chain and set the state on the frame to FALLING.
2093 if (has_valid_frame()) {
2094 // The next handler address is on top of the frame.
2095 ASSERT(StackHandlerConstants::kNextOffset == 0);
2096 __ movq(kScratchRegister, handler_address);
2097 frame_->EmitPop(Operand(kScratchRegister, 0));
2098 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2100 // Fake a top of stack value (unneeded when FALLING) and set the
2101 // state in rcx, then jump around the unlink blocks if any.
2102 frame_->EmitPush(Heap::kUndefinedValueRootIndex);
2103 __ Move(rcx, Smi::FromInt(FALLING));
2104 if (nof_unlinks > 0) {
2105 finally_block.Jump();
2109 // Generate code to unlink and set the state for the (formerly)
2110 // shadowing targets that have been jumped to.
2111 for (int i = 0; i < shadows.length(); i++) {
2112 if (shadows[i]->is_linked()) {
2113 // If we have come from the shadowed return, the return value is
2114 // on the virtual frame. We must preserve it until it is pushed.
2116 if (i == kReturnShadowIndex) {
2117 Result return_value;
2118 shadows[i]->Bind(&return_value);
2119 return_value.ToRegister(rax);
2123 // Because we can be jumping here (to spilled code) from
2124 // unspilled code, we need to reestablish a spilled frame at this block.
2128 // Reload sp from the top handler, because some statements that
2129 // we break from (e.g., for...in) may have left stuff on the stack.
2131 __ movq(kScratchRegister, handler_address);
2132 __ movq(rsp, Operand(kScratchRegister, 0));
2133 frame_->Forget(frame_->height() - handler_height);
2135 // Unlink this handler and drop it from the frame.
2136 ASSERT(StackHandlerConstants::kNextOffset == 0);
2137 __ movq(kScratchRegister, handler_address);
2138 frame_->EmitPop(Operand(kScratchRegister, 0));
2139 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2141 if (i == kReturnShadowIndex) {
2142 // If this target shadowed the function return, materialize
2143 // the return value on the stack.
2144 frame_->EmitPush(rax);
2146 // Fake TOS for targets that shadowed breaks and continues.
2147 frame_->EmitPush(Heap::kUndefinedValueRootIndex);
2149 __ Move(rcx, Smi::FromInt(JUMPING + i));
2150 if (--nof_unlinks > 0) {
2151 // If this is not the last unlink block, jump around the next.
2152 finally_block.Jump();
2157 // --- Finally block ---
2158 finally_block.Bind();
2160 // Push the state on the stack.
2161 frame_->EmitPush(rcx);
2163 // We keep two elements on the stack - the (possibly faked) result
2164 // and the state - while evaluating the finally block.
2166 // Generate code for the statements in the finally block.
2167 VisitStatementsAndSpill(node->finally_block()->statements());
2169 if (has_valid_frame()) {
2170 // Restore state and return value or faked TOS.
2171 frame_->EmitPop(rcx);
2172 frame_->EmitPop(rax);
2175 // Generate code to jump to the right destination for all used
2176 // formerly shadowing targets. Deallocate each shadow target.
2177 for (int i = 0; i < shadows.length(); i++) {
2178 if (has_valid_frame() && shadows[i]->is_bound()) {
2179 BreakTarget* original = shadows[i]->other_target();
2180 __ SmiCompare(rcx, Smi::FromInt(JUMPING + i));
2181 if (i == kReturnShadowIndex) {
2182 // The return value is (already) in rax.
2183 Result return_value = allocator_->Allocate(rax);
2184 ASSERT(return_value.is_valid());
2185 if (function_return_is_shadowed_) {
2186 original->Branch(equal, &return_value);
2188 // Branch around the preparation for return, which may emit code.
2191 skip.Branch(not_equal);
2192 frame_->PrepareForReturn();
2193 original->Jump(&return_value);
2197 original->Branch(equal);
2202 if (has_valid_frame()) {
2203 // Check if we need to rethrow the exception.
2205 __ SmiCompare(rcx, Smi::FromInt(THROWING));
2206 exit.Branch(not_equal);
2208 // Rethrow exception.
2209 frame_->EmitPush(rax); // undo pop from above
2210 frame_->CallRuntime(Runtime::kReThrow, 1);
2218 void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
2219 ASSERT(!in_spilled_code());
2220 Comment cmnt(masm_, "[ DebuggerStatement");
2221 CodeForStatementPosition(node);
2222 #ifdef ENABLE_DEBUGGER_SUPPORT
2223 // Spill everything, even constants, to the frame.
2226 frame_->DebugBreak();
2227 // Ignore the return value.
2232 void CodeGenerator::InstantiateFunction(
2233 Handle<SharedFunctionInfo> function_info) {
2234 // The inevitable call will sync frame elements to memory anyway, so
2235 // we do it eagerly to allow us to push the arguments directly into place.
2237 frame_->SyncRange(0, frame_->element_count() - 1);
2239 // Use the fast case closure allocation code that allocates in new
2240 // space for nested functions that don't need literals cloning.
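// Illustrative JavaScript example:
//   function outer() { return function inner(a) { return a + 1; }; }
// 'inner' is created while compiling 'outer' (a function scope) and has no
// literals of its own, so it is allocated by FastNewClosureStub below; a
// nested function that does contain literals (e.g. an object or regexp
// literal) takes the runtime path instead.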
2241 if (scope()->is_function_scope() && function_info->num_literals() == 0) {
2242 FastNewClosureStub stub;
2243 frame_->Push(function_info);
2244 Result answer = frame_->CallStub(&stub, 1);
2245 frame_->Push(&answer);
2247 // Call the runtime to instantiate the function boilerplate
2249 frame_->EmitPush(rsi);
2250 frame_->EmitPush(function_info);
2251 Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
2252 frame_->Push(&result);
2257 void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
2258 Comment cmnt(masm_, "[ FunctionLiteral");
2260 // Build the function info and instantiate it.
2261 Handle<SharedFunctionInfo> function_info =
2262 Compiler::BuildFunctionInfo(node, script(), this);
2263 // Check for stack-overflow exception.
2264 if (HasStackOverflow()) return;
2265 InstantiateFunction(function_info);
2269 void CodeGenerator::VisitSharedFunctionInfoLiteral(
2270 SharedFunctionInfoLiteral* node) {
2271 Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
2272 InstantiateFunction(node->shared_function_info());
2276 void CodeGenerator::VisitConditional(Conditional* node) {
2277 Comment cmnt(masm_, "[ Conditional");
2281 ControlDestination dest(&then, &else_, true);
2282 LoadCondition(node->condition(), &dest, true);
2284 if (dest.false_was_fall_through()) {
2285 // The else target was bound, so we compile the else part first.
2286 Load(node->else_expression());
2288 if (then.is_linked()) {
2291 Load(node->then_expression());
2294 // The then target was bound, so we compile the then part first.
2295 Load(node->then_expression());
2297 if (else_.is_linked()) {
2300 Load(node->else_expression());
2308 void CodeGenerator::VisitSlot(Slot* node) {
2309 Comment cmnt(masm_, "[ Slot");
2310 LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
2314 void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
2315 Comment cmnt(masm_, "[ VariableProxy");
2316 Variable* var = node->var();
2317 Expression* expr = var->rewrite();
2321 ASSERT(var->is_global());
2322 Reference ref(this, node);
2328 void CodeGenerator::VisitLiteral(Literal* node) {
2329 Comment cmnt(masm_, "[ Literal");
2330 frame_->Push(node->handle());
2334 // Materialize the regexp literal 'node' in the literals array
2335 // 'literals' of the function. Leave the regexp boilerplate in 'boilerplate'.
2337 class DeferredRegExpLiteral: public DeferredCode {
2339 DeferredRegExpLiteral(Register boilerplate,
2341 RegExpLiteral* node)
2342 : boilerplate_(boilerplate), literals_(literals), node_(node) {
2343 set_comment("[ DeferredRegExpLiteral");
2349 Register boilerplate_;
2351 RegExpLiteral* node_;
2355 void DeferredRegExpLiteral::Generate() {
2356 // Since the entry is undefined we call the runtime system to
2357 // compute the literal.
2358 // Literal array (0).
2360 // Literal index (1).
2361 __ Push(Smi::FromInt(node_->literal_index()));
2362 // RegExp pattern (2).
2363 __ Push(node_->pattern());
2364 // RegExp flags (3).
2365 __ Push(node_->flags());
2366 __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
2367 if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
2371 void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
2372 Comment cmnt(masm_, "[ RegExp Literal");
2374 // Retrieve the literals array and check the allocated entry. Begin
2375 // with a writable copy of the function of this activation in a register.
2377 frame_->PushFunction();
2378 Result literals = frame_->Pop();
2379 literals.ToRegister();
2380 frame_->Spill(literals.reg());
2382 // Load the literals array of the function.
2383 __ movq(literals.reg(),
2384 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
2386 // Load the literal at the ast saved index.
2387 Result boilerplate = allocator_->Allocate();
2388 ASSERT(boilerplate.is_valid());
2389 int literal_offset =
2390 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2391 __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
2393 // Check whether we need to materialize the RegExp object. If so,
2394 // jump to the deferred code passing the literals array.
2395 DeferredRegExpLiteral* deferred =
2396 new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
2397 __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
2398 deferred->Branch(equal);
2399 deferred->BindExit();
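// On the first evaluation the literals array entry is still undefined, so
// the deferred code above asks the runtime to materialize the RegExp
// object; on later evaluations the cached boilerplate is reused directly.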
2402 // Push the boilerplate object.
2403 frame_->Push(&boilerplate);
2407 void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
2408 Comment cmnt(masm_, "[ ObjectLiteral");
2410 // Load a writable copy of the function of this activation in a register.
2412 frame_->PushFunction();
2413 Result literals = frame_->Pop();
2414 literals.ToRegister();
2415 frame_->Spill(literals.reg());
2417 // Load the literals array of the function.
2418 __ movq(literals.reg(),
2419 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
2421 frame_->Push(&literals);
2423 frame_->Push(Smi::FromInt(node->literal_index()));
2424 // Constant properties.
2425 frame_->Push(node->constant_properties());
2426 // Should the object literal have fast elements?
2427 frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
2429 if (node->depth() > 1) {
2430 clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
2432 clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
2434 frame_->Push(&clone);
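// Illustrative JavaScript example:
//   { a: 1, b: f(), get c() { return 0; }, __proto__: p }
// exercises the CONSTANT, COMPUTED, GETTER and PROTOTYPE cases of the
// switch below; constant properties are already present in the cloned
// boilerplate and need no code here.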
2436 for (int i = 0; i < node->properties()->length(); i++) {
2437 ObjectLiteral::Property* property = node->properties()->at(i);
2438 switch (property->kind()) {
2439 case ObjectLiteral::Property::CONSTANT:
2441 case ObjectLiteral::Property::MATERIALIZED_LITERAL:
2442 if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
2443 // else fall through.
2444 case ObjectLiteral::Property::COMPUTED: {
2445 Handle<Object> key(property->key()->handle());
2446 if (key->IsSymbol()) {
2447 // Duplicate the object as the IC receiver.
2449 Load(property->value());
2451 Result ignored = frame_->CallStoreIC();
2456 case ObjectLiteral::Property::PROTOTYPE: {
2457 // Duplicate the object as an argument to the runtime call.
2459 Load(property->key());
2460 Load(property->value());
2461 Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
2462 // Ignore the result.
2465 case ObjectLiteral::Property::SETTER: {
2466 // Duplicate the object as an argument to the runtime call.
2468 Load(property->key());
2469 frame_->Push(Smi::FromInt(1));
2470 Load(property->value());
2471 Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
2472 // Ignore the result.
2475 case ObjectLiteral::Property::GETTER: {
2476 // Duplicate the object as an argument to the runtime call.
2478 Load(property->key());
2479 frame_->Push(Smi::FromInt(0));
2480 Load(property->value());
2481 Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
2482 // Ignore the result.
2485 default: UNREACHABLE();
2491 void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
2492 Comment cmnt(masm_, "[ ArrayLiteral");
2494 // Load a writable copy of the function of this activation in a register.
2496 frame_->PushFunction();
2497 Result literals = frame_->Pop();
2498 literals.ToRegister();
2499 frame_->Spill(literals.reg());
2501 // Load the literals array of the function.
2502 __ movq(literals.reg(),
2503 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
2505 frame_->Push(&literals);
2506 frame_->Push(Smi::FromInt(node->literal_index()));
2507 frame_->Push(node->constant_elements());
2508 int length = node->values()->length();
2510 if (node->depth() > 1) {
2511 clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
2512 } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
2513 clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
2515 FastCloneShallowArrayStub stub(length);
2516 clone = frame_->CallStub(&stub, 3);
2518 frame_->Push(&clone);
2520 // Generate code to set the elements in the array that are not literals or compile-time constants.
2522 for (int i = 0; i < node->values()->length(); i++) {
2523 Expression* value = node->values()->at(i);
2525 // If value is a literal the property value is already set in the
2526 // boilerplate object.
2527 if (value->AsLiteral() != NULL) continue;
2528 // If value is a materialized literal the property value is already set
2529 // in the boilerplate object if it is simple.
2530 if (CompileTimeValue::IsCompileTimeValue(value)) continue;
2532 // The property must be set by generated code.
2535 // Get the property value off the stack.
2536 Result prop_value = frame_->Pop();
2537 prop_value.ToRegister();
2539 // Fetch the array literal while leaving a copy on the stack and
2540 // use it to get the elements array.
2542 Result elements = frame_->Pop();
2543 elements.ToRegister();
2544 frame_->Spill(elements.reg());
2545 // Get the elements FixedArray.
2546 __ movq(elements.reg(),
2547 FieldOperand(elements.reg(), JSObject::kElementsOffset));
2549 // Write to the indexed properties array.
2550 int offset = i * kPointerSize + FixedArray::kHeaderSize;
2551 __ movq(FieldOperand(elements.reg(), offset), prop_value.reg());
2553 // Update the write barrier for the array address.
2554 frame_->Spill(prop_value.reg()); // Overwritten by the write barrier.
2555 Result scratch = allocator_->Allocate();
2556 ASSERT(scratch.is_valid());
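// A write barrier is needed because the elements array may be in old
// space while the stored value may be a new-space object; RecordWrite
// tells the garbage collector about the store.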
2557 __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
2562 void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
2563 ASSERT(!in_spilled_code());
2564 // Call runtime routine to allocate the catch extension object and
2565 // assign the exception value to the catch variable.
2566 Comment cmnt(masm_, "[ CatchExtensionObject");
2568 Load(node->value());
2570 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
2571 frame_->Push(&result);
2575 void CodeGenerator::VisitAssignment(Assignment* node) {
2576 Comment cmnt(masm_, "[ Assignment");
2578 { Reference target(this, node->target(), node->is_compound());
2579 if (target.is_illegal()) {
2580 // Fool the virtual frame into thinking that we left the assignment's
2581 // value on the frame.
2582 frame_->Push(Smi::FromInt(0));
2585 Variable* var = node->target()->AsVariableProxy()->AsVariable();
2587 if (node->starts_initialization_block()) {
2588 ASSERT(target.type() == Reference::NAMED ||
2589 target.type() == Reference::KEYED);
2590 // Change to slow case in the beginning of an initialization
2591 // block to avoid the quadratic behavior of repeatedly adding fast properties.
2594 // The receiver is the argument to the runtime call. It is the
2595 // first value pushed when the reference was loaded to the frame.
2597 frame_->PushElementAt(target.size() - 1);
2598 Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
2600 if (node->ends_initialization_block()) {
2601 // Add an extra copy of the receiver to the frame, so that it can be
2602 // converted back to fast case after the assignment.
2603 ASSERT(target.type() == Reference::NAMED ||
2604 target.type() == Reference::KEYED);
2605 if (target.type() == Reference::NAMED) {
2607 // Dup target receiver on stack.
2609 ASSERT(target.type() == Reference::KEYED);
2610 Result temp = frame_->Pop();
2612 frame_->Push(&temp);
2615 if (node->op() == Token::ASSIGN ||
2616 node->op() == Token::INIT_VAR ||
2617 node->op() == Token::INIT_CONST) {
2618 Load(node->value());
2620 } else { // Assignment is a compound assignment.
2621 Literal* literal = node->value()->AsLiteral();
2622 bool overwrite_value =
2623 (node->value()->AsBinaryOperation() != NULL &&
2624 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
2625 Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
2626 // There are two cases where the target is not read in the right hand
2627 // side, that are easy to test for: the right hand side is a literal,
2628 // or the right hand side is a different variable. TakeValue invalidates
2629 // the target, with an implicit promise that it will be written to again
2630 // before it is read.
2631 if (literal != NULL || (right_var != NULL && right_var != var)) {
2636 Load(node->value());
2637 GenericBinaryOperation(node->binary_op(),
2639 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
2643 var->mode() == Variable::CONST &&
2644 node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
2645 // Assignment ignored - leave the value on the stack.
2646 UnloadReference(&target);
2648 CodeForSourcePosition(node->position());
2649 if (node->op() == Token::INIT_CONST) {
2650 // Dynamic constant initializations must use the function context
2651 // and initialize the actual constant declared. Dynamic variable
2652 // initializations are simply assignments and use SetValue.
2653 target.SetValue(CONST_INIT);
2655 target.SetValue(NOT_CONST_INIT);
2657 if (node->ends_initialization_block()) {
2658 ASSERT(target.type() == Reference::UNLOADED);
2659 // End of initialization block. Revert to fast case. The
2660 // argument to the runtime call is the extra copy of the receiver,
2661 // which is below the value of the assignment.
2662 // Swap the receiver and the value of the assignment expression.
2663 Result lhs = frame_->Pop();
2664 Result receiver = frame_->Pop();
2666 frame_->Push(&receiver);
2667 Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
2674 void CodeGenerator::VisitThrow(Throw* node) {
2675 Comment cmnt(masm_, "[ Throw");
2676 Load(node->exception());
2677 Result result = frame_->CallRuntime(Runtime::kThrow, 1);
2678 frame_->Push(&result);
2682 void CodeGenerator::VisitProperty(Property* node) {
2683 Comment cmnt(masm_, "[ Property");
2684 Reference property(this, node);
2685 property.GetValue();
2689 void CodeGenerator::VisitCall(Call* node) {
2690 Comment cmnt(masm_, "[ Call");
2692 ZoneList<Expression*>* args = node->arguments();
2694 // Check if the function is a variable or a property.
2695 Expression* function = node->expression();
2696 Variable* var = function->AsVariableProxy()->AsVariable();
2697 Property* property = function->AsProperty();
2699 // ------------------------------------------------------------------------
2700 // Fast-case: Use inline caching.
2702 // According to ECMA-262, section 11.2.3, page 44, the function to call
2703 // must be resolved after the arguments have been evaluated. The IC code
2704 // automatically handles this by loading the arguments before the function
2705 // is resolved in cache misses (this also holds for megamorphic calls).
2706 // ------------------------------------------------------------------------
2708 if (var != NULL && var->is_possibly_eval()) {
2709 // ----------------------------------
2710 // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
2711 // ----------------------------------
2713 // In a call to eval, we first call %ResolvePossiblyDirectEval to
2714 // resolve the function we need to call and the receiver of the
2715 // call. Then we call the resolved function using the given arguments.
2718 // Prepare the stack for the call to the resolved function.
2721 // Allocate a frame slot for the receiver.
2722 frame_->Push(Factory::undefined_value());
2723 int arg_count = args->length();
2724 for (int i = 0; i < arg_count; i++) {
2728 // Prepare the stack for the call to ResolvePossiblyDirectEval.
2729 frame_->PushElementAt(arg_count + 1);
2730 if (arg_count > 0) {
2731 frame_->PushElementAt(arg_count);
2733 frame_->Push(Factory::undefined_value());
2736 // Push the receiver.
2737 frame_->PushParameterAt(-1);
2739 // Resolve the call.
2741 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
2743 // The runtime call returns a pair of values in rax (function) and
2744 // rdx (receiver). Touch up the stack with the right values.
2745 Result receiver = allocator_->Allocate(rdx);
2746 frame_->SetElementAt(arg_count + 1, &result);
2747 frame_->SetElementAt(arg_count, &receiver);
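// The frame now holds, from the bottom of this region up, the resolved
// function, the resolved receiver, and the evaluated arguments, which is
// the layout the CallFunctionStub below expects.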
2750 // Call the function.
2751 CodeForSourcePosition(node->position());
2752 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
2753 CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
2754 result = frame_->CallStub(&call_function, arg_count + 1);
2756 // Restore the context and overwrite the function on the stack with the result.
2758 frame_->RestoreContextRegister();
2759 frame_->SetElementAt(0, &result);
2761 } else if (var != NULL && !var->is_this() && var->is_global()) {
2762 // ----------------------------------
2763 // JavaScript example: 'foo(1, 2, 3)' // foo is global
2764 // ----------------------------------
2766 // Pass the global object as the receiver and let the IC stub
2767 // patch the stack to use the global proxy as 'this' in the
2768 // invoked function.
2771 // Load the arguments.
2772 int arg_count = args->length();
2773 for (int i = 0; i < arg_count; i++) {
2777 // Push the name of the function on the frame.
2778 frame_->Push(var->name());
2780 // Call the IC initialization code.
2781 CodeForSourcePosition(node->position());
2782 Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
2785 frame_->RestoreContextRegister();
2786 // Replace the function on the stack with the result.
2787 frame_->Push(&result);
2789 } else if (var != NULL && var->slot() != NULL &&
2790 var->slot()->type() == Slot::LOOKUP) {
2791 // ----------------------------------
2792 // JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
2793 // ----------------------------------
2795 // Load the function from the context. Sync the frame so we can
2796 // push the arguments directly into place.
2797 frame_->SyncRange(0, frame_->element_count() - 1);
2798 frame_->EmitPush(rsi);
2799 frame_->EmitPush(var->name());
2800 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
2801 // The runtime call returns a pair of values in rax and rdx. The
2802 // looked-up function is in rax and the receiver is in rdx. These
2803 // register references are not ref counted here. We spill them
2804 // eagerly since they are arguments to an inevitable call (and are
2805 // not sharable by the arguments).
2806 ASSERT(!allocator()->is_used(rax));
2807 frame_->EmitPush(rax);
2809 // Load the receiver.
2810 ASSERT(!allocator()->is_used(rdx));
2811 frame_->EmitPush(rdx);
2813 // Call the function.
2814 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
2816 } else if (property != NULL) {
2817 // Check if the key is a literal string.
2818 Literal* literal = property->key()->AsLiteral();
2820 if (literal != NULL && literal->handle()->IsSymbol()) {
2821 // ------------------------------------------------------------------
2822 // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
2823 // ------------------------------------------------------------------
2825 Handle<String> name = Handle<String>::cast(literal->handle());
2827 if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
2828 name->IsEqualTo(CStrVector("apply")) &&
2829 args->length() == 2 &&
2830 args->at(1)->AsVariableProxy() != NULL &&
2831 args->at(1)->AsVariableProxy()->IsArguments()) {
2832 // Use the optimized Function.prototype.apply that avoids
2833 // allocating lazily allocated arguments objects.
2834 CallApplyLazy(property->obj(),
2836 args->at(1)->AsVariableProxy(),
2840 // Push the receiver onto the frame.
2841 Load(property->obj());
2843 // Load the arguments.
2844 int arg_count = args->length();
2845 for (int i = 0; i < arg_count; i++) {
2849 // Push the name of the function onto the frame.
2852 // Call the IC initialization code.
2853 CodeForSourcePosition(node->position());
2854 Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
2857 frame_->RestoreContextRegister();
2858 frame_->Push(&result);
2862 // -------------------------------------------
2863 // JavaScript example: 'array[index](1, 2, 3)'
2864 // -------------------------------------------
2866 // Load the function to call from the property through a reference.
2867 if (property->is_synthetic()) {
2868 Reference ref(this, property, false);
2870 // Use global object as receiver.
2871 LoadGlobalReceiver();
2873 Reference ref(this, property, false);
2874 ASSERT(ref.size() == 2);
2875 Result key = frame_->Pop();
2876 frame_->Dup(); // Duplicate the receiver.
2879 // Top of frame contains function to call, with duplicate copy of
2880 // receiver below it. Swap them.
2881 Result function = frame_->Pop();
2882 Result receiver = frame_->Pop();
2883 frame_->Push(&function);
2884 frame_->Push(&receiver);
2887 // Call the function.
2888 CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
2892 // ----------------------------------
2893 // JavaScript example: 'foo(1, 2, 3)' // foo is not global
2894 // ----------------------------------
2896 // Load the function.
2899 // Pass the global proxy as the receiver.
2900 LoadGlobalReceiver();
2902 // Call the function.
2903 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
2908 void CodeGenerator::VisitCallNew(CallNew* node) {
2909 Comment cmnt(masm_, "[ CallNew");
2911 // According to ECMA-262, section 11.2.2, page 44, the function
2912 // expression in new calls must be evaluated before the
2913 // arguments. This is different from ordinary calls, where the
2914 // actual function to call is resolved after the arguments have been evaluated.
2917 // Compute function to call and use the global object as the
2918 // receiver. There is no need to use the global proxy here because
2919 // it will always be replaced with a newly allocated object.
2920 Load(node->expression());
2923 // Push the arguments ("left-to-right") on the stack.
2924 ZoneList<Expression*>* args = node->arguments();
2925 int arg_count = args->length();
2926 for (int i = 0; i < arg_count; i++) {
2930 // Call the construct call builtin that handles allocation and
2931 // constructor invocation.
2932 CodeForSourcePosition(node->position());
2933 Result result = frame_->CallConstructor(arg_count);
2934 // Replace the function on the stack with the result.
2935 frame_->SetElementAt(0, &result);
2939 void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
2940 if (CheckForInlineRuntimeCall(node)) {
2944 ZoneList<Expression*>* args = node->arguments();
2945 Comment cmnt(masm_, "[ CallRuntime");
2946 Runtime::Function* function = node->function();
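// Three cases are distinguished here: calls that were already generated
// inline by CheckForInlineRuntimeCall above, calls to C++ runtime
// functions (function != NULL), and calls to JS builtins looked up on the
// builtins object (function == NULL).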
2948 if (function == NULL) {
2949 // Push the builtins object found in the current global object.
2950 Result temp = allocator()->Allocate();
2951 ASSERT(temp.is_valid());
2952 __ movq(temp.reg(), GlobalObject());
2954 FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
2955 frame_->Push(&temp);
2958 // Push the arguments ("left-to-right").
2959 int arg_count = args->length();
2960 for (int i = 0; i < arg_count; i++) {
2964 if (function == NULL) {
2965 // Call the JS runtime function.
2966 frame_->Push(node->name());
2967 Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
2970 frame_->RestoreContextRegister();
2971 frame_->Push(&answer);
2973 // Call the C runtime function.
2974 Result answer = frame_->CallRuntime(function, arg_count);
2975 frame_->Push(&answer);
2980 void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
2981 Comment cmnt(masm_, "[ UnaryOperation");
2983 Token::Value op = node->op();
2985 if (op == Token::NOT) {
2986 // Swap the true and false targets but keep the same actual label
2987 // as the fall through.
2988 destination()->Invert();
2989 LoadCondition(node->expression(), destination(), true);
2990 // Swap the labels back.
2991 destination()->Invert();
2993 } else if (op == Token::DELETE) {
2994 Property* property = node->expression()->AsProperty();
2995 if (property != NULL) {
2996 Load(property->obj());
2997 Load(property->key());
2998 Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
2999 frame_->Push(&answer);
3003 Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
3004 if (variable != NULL) {
3005 Slot* slot = variable->slot();
3006 if (variable->is_global()) {
3008 frame_->Push(variable->name());
3009 Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
3011 frame_->Push(&answer);
3014 } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
3015 // Call the runtime to look up the context holding the named
3016 // variable. Sync the virtual frame eagerly so we can push the
3017 // arguments directly into place.
3018 frame_->SyncRange(0, frame_->element_count() - 1);
3019 frame_->EmitPush(rsi);
3020 frame_->EmitPush(variable->name());
3021 Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
3022 ASSERT(context.is_register());
3023 frame_->EmitPush(context.reg());
3025 frame_->EmitPush(variable->name());
3026 Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
3028 frame_->Push(&answer);
3032 // Default: Result of deleting non-global, not dynamically
3033 // introduced variables is false.
3034 frame_->Push(Factory::false_value());
3037 // Default: Result of deleting expressions is true.
3038 Load(node->expression()); // may have side-effects
3039 frame_->SetElementAt(0, Factory::true_value());
3042 } else if (op == Token::TYPEOF) {
3043 // Special case for loading the typeof expression; see comment on
3044 // LoadTypeofExpression().
3045 LoadTypeofExpression(node->expression());
3046 Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
3047 frame_->Push(&answer);
3049 } else if (op == Token::VOID) {
3050 Expression* expression = node->expression();
3051 if (expression && expression->AsLiteral() && (
3052 expression->AsLiteral()->IsTrue() ||
3053 expression->AsLiteral()->IsFalse() ||
3054 expression->AsLiteral()->handle()->IsNumber() ||
3055 expression->AsLiteral()->handle()->IsString() ||
3056 expression->AsLiteral()->handle()->IsJSRegExp() ||
3057 expression->AsLiteral()->IsNull())) {
3058 // Omit evaluating the value of the primitive literal.
3059 // It will be discarded anyway, and can have no side effect.
3060 frame_->Push(Factory::undefined_value());
3062 Load(node->expression());
3063 frame_->SetElementAt(0, Factory::undefined_value());
3068 (node->expression()->AsBinaryOperation() != NULL &&
3069 node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
3070 Load(node->expression());
3075 UNREACHABLE(); // handled above
3079 GenericUnaryOpStub stub(Token::SUB, overwrite);
3080 Result operand = frame_->Pop();
3081 Result answer = frame_->CallStub(&stub, &operand);
3082 frame_->Push(&answer);
3086 case Token::BIT_NOT: {
3088 JumpTarget smi_label;
3089 JumpTarget continue_label;
3090 Result operand = frame_->Pop();
3091 operand.ToRegister();
3093 Condition is_smi = masm_->CheckSmi(operand.reg());
3094 smi_label.Branch(is_smi, &operand);
3096 GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
3097 Result answer = frame_->CallStub(&stub, &operand);
3098 continue_label.Jump(&answer);
3100 smi_label.Bind(&answer);
3101 answer.ToRegister();
3102 frame_->Spill(answer.reg());
3103 __ SmiNot(answer.reg(), answer.reg());
3104 continue_label.Bind(&answer);
3105 frame_->Push(&answer);
3111 JumpTarget continue_label;
3112 Result operand = frame_->Pop();
3113 operand.ToRegister();
3114 Condition is_smi = masm_->CheckSmi(operand.reg());
3115 continue_label.Branch(is_smi, &operand);
3116 frame_->Push(&operand);
3117 Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
3120 continue_label.Bind(&answer);
3121 frame_->Push(&answer);
3132 // The value in dst was optimistically incremented or decremented. The
3133 // result overflowed or was not smi tagged. Undo the operation, call
3134 // into the runtime to convert the argument to a number, and call the
3135 // specialized add or subtract stub. The result is left in dst.
3136 class DeferredPrefixCountOperation: public DeferredCode {
3138 DeferredPrefixCountOperation(Register dst, bool is_increment)
3139 : dst_(dst), is_increment_(is_increment) {
3140 set_comment("[ DeferredCountOperation");
3143 virtual void Generate();
3151 void DeferredPrefixCountOperation::Generate() {
3153 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
3155 __ Push(Smi::FromInt(1));
3156 if (is_increment_) {
3157 __ CallRuntime(Runtime::kNumberAdd, 2);
3159 __ CallRuntime(Runtime::kNumberSub, 2);
3161 if (!dst_.is(rax)) __ movq(dst_, rax);
3165 // The value in dst was optimistically incremented or decremented. The
3166 // result overflowed or was not smi tagged. Undo the operation and call
3167 // into the runtime to convert the argument to a number. Update the
3168 // original value in old. Call the specialized add or subtract stub.
3169 // The result is left in dst.
3170 class DeferredPostfixCountOperation: public DeferredCode {
3172 DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
3173 : dst_(dst), old_(old), is_increment_(is_increment) {
3174 set_comment("[ DeferredCountOperation");
3177 virtual void Generate();
3186 void DeferredPostfixCountOperation::Generate() {
3188 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
3190 // Save the result of ToNumber to use as the old value.
3193 // Call the runtime for the addition or subtraction.
3195 __ Push(Smi::FromInt(1));
3196 if (is_increment_) {
3197 __ CallRuntime(Runtime::kNumberAdd, 2);
3199 __ CallRuntime(Runtime::kNumberSub, 2);
3201 if (!dst_.is(rax)) __ movq(dst_, rax);
3206 void CodeGenerator::VisitCountOperation(CountOperation* node) {
3207 Comment cmnt(masm_, "[ CountOperation");
3209 bool is_postfix = node->is_postfix();
3210 bool is_increment = node->op() == Token::INC;
3212 Variable* var = node->expression()->AsVariableProxy()->AsVariable();
3213 bool is_const = (var != NULL && var->mode() == Variable::CONST);
3215 // Postfix operations need a stack slot under the reference to hold
3216 // the old value while the new value is being stored. This is so that
3217 // in the case that storing the new value requires a call, the old
3218 // value will be in the frame to be spilled.
3219 if (is_postfix) frame_->Push(Smi::FromInt(0));
3221 // A constant reference is not saved to, so the reference is not a
3222 // compound assignment reference.
3223 { Reference target(this, node->expression(), !is_const);
3224 if (target.is_illegal()) {
3225 // Spoof the virtual frame to have the expected height (one higher than on entry).
3227 if (!is_postfix) frame_->Push(Smi::FromInt(0));
3232 Result new_value = frame_->Pop();
3233 new_value.ToRegister();
3235 Result old_value; // Only allocated in the postfix case.
3237 // Allocate a temporary to preserve the old value.
3238 old_value = allocator_->Allocate();
3239 ASSERT(old_value.is_valid());
3240 __ movq(old_value.reg(), new_value.reg());
3242 // Ensure the new value is writable.
3243 frame_->Spill(new_value.reg());
3245 DeferredCode* deferred = NULL;
3247 deferred = new DeferredPostfixCountOperation(new_value.reg(),
3251 deferred = new DeferredPrefixCountOperation(new_value.reg(),
3255 __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
3257 __ SmiAddConstant(kScratchRegister,
3260 deferred->entry_label());
3262 __ SmiSubConstant(kScratchRegister,
3265 deferred->entry_label());
3267 __ movq(new_value.reg(), kScratchRegister);
3268 deferred->BindExit();
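// The increment or decrement above is performed optimistically on a
// scratch register: if the operand is not a smi, or the smi operation
// overflows, control goes to the deferred code, which converts the
// operand to a number via the runtime and redoes the operation there.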
3270 // Postfix: store the old value in the allocated slot under the reference.
3272 if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
3274 frame_->Push(&new_value);
3275 // Non-constant: update the reference.
3276 if (!is_const) target.SetValue(NOT_CONST_INIT);
3279 // Postfix: drop the new value and use the old.
3280 if (is_postfix) frame_->Drop();
3284 void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
3285 // According to ECMA-262 section 11.11, page 58, the binary logical
3286 // operators must yield the result of one of the two expressions
3287 // before any ToBoolean() conversions. This means that the value
3288 // produced by a && or || operator is not necessarily a boolean.
3290 // NOTE: If the left hand side produces a materialized value (not
3291 // control flow), we force the right hand side to do the same. This
3292 // is necessary because we assume that if we get control flow on the
3293 // last path out of an expression we got it on all paths.
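// For example, '0 && f()' yields 0 (the left operand itself) and never
// evaluates f(), while 'x || y' yields x whenever x converts to true and
// yields y otherwise.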
3294 if (node->op() == Token::AND) {
3296 ControlDestination dest(&is_true, destination()->false_target(), true);
3297 LoadCondition(node->left(), &dest, false);
3299 if (dest.false_was_fall_through()) {
3300 // The current false target was used as the fall-through. If
3301 // there are no dangling jumps to is_true then the left
3302 // subexpression was unconditionally false. Otherwise we have
3303 // paths where we do have to evaluate the right subexpression.
3304 if (is_true.is_linked()) {
3305 // We need to compile the right subexpression. If the jump to
3306 // the current false target was a forward jump then we have a
3307 // valid frame, we have just bound the false target, and we
3308 // have to jump around the code for the right subexpression.
3309 if (has_valid_frame()) {
3310 destination()->false_target()->Unuse();
3311 destination()->false_target()->Jump();
3314 // The left subexpression compiled to control flow, so the
3315 // right one is free to do so as well.
3316 LoadCondition(node->right(), destination(), false);
3318 // We have actually just jumped to or bound the current false
3319 // target but the current control destination is not marked as used.
3321 destination()->Use(false);
3324 } else if (dest.is_used()) {
3325 // The left subexpression compiled to control flow (and is_true
3326 // was just bound), so the right is free to do so as well.
3327 LoadCondition(node->right(), destination(), false);
3330 // We have a materialized value on the frame, so we exit with
3331 // one on all paths. There are possibly also jumps to is_true
3332 // from nested subexpressions.
3333 JumpTarget pop_and_continue;
3336 // Avoid popping the result if it converts to 'false' using the
3337 // standard ToBoolean() conversion as described in ECMA-262,
3338 // section 9.2, page 30.
3340 // Duplicate the TOS value. The duplicate will be popped by ToBoolean.
3343 ControlDestination dest(&pop_and_continue, &exit, true);
3346 // Pop the result of evaluating the first part.
3349 // Compile right side expression.
3351 Load(node->right());
3353 // Exit (always with a materialized value).
3358 ASSERT(node->op() == Token::OR);
3359 JumpTarget is_false;
3360 ControlDestination dest(destination()->true_target(), &is_false, false);
3361 LoadCondition(node->left(), &dest, false);
3363 if (dest.true_was_fall_through()) {
3364 // The current true target was used as the fall-through. If
3365 // there are no dangling jumps to is_false then the left
3366 // subexpression was unconditionally true. Otherwise we have
3367 // paths where we do have to evaluate the right subexpression.
3368 if (is_false.is_linked()) {
3369 // We need to compile the right subexpression. If the jump to
3370 // the current true target was a forward jump then we have a
3371 // valid frame, we have just bound the true target, and we
3372 // have to jump around the code for the right subexpression.
3373 if (has_valid_frame()) {
3374 destination()->true_target()->Unuse();
3375 destination()->true_target()->Jump();
3378 // The left subexpression compiled to control flow, so the
3379 // right one is free to do so as well.
3380 LoadCondition(node->right(), destination(), false);
3382 // We have just jumped to or bound the current true target but
3383 // the current control destination is not marked as used.
3384 destination()->Use(true);
3387 } else if (dest.is_used()) {
3388 // The left subexpression compiled to control flow (and is_false
3389 // was just bound), so the right is free to do so as well.
3390 LoadCondition(node->right(), destination(), false);
3393 // We have a materialized value on the frame, so we exit with
3394 // one on all paths. There are possibly also jumps to is_false
3395 // from nested subexpressions.
3396 JumpTarget pop_and_continue;
3399 // Avoid popping the result if it converts to 'true' using the
3400 // standard ToBoolean() conversion as described in ECMA-262,
3401 // section 9.2, page 30.
3403 // Duplicate the TOS value. The duplicate will be popped by ToBoolean.
3406 ControlDestination dest(&exit, &pop_and_continue, false);
3409 // Pop the result of evaluating the first part.
3412 // Compile right side expression.
3414 Load(node->right());
3416 // Exit (always with a materialized value).
3422 void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
3423 Comment cmnt(masm_, "[ BinaryOperation");
3425 if (node->op() == Token::AND || node->op() == Token::OR) {
3426 GenerateLogicalBooleanOperation(node);
3428 // NOTE: The code below assumes that the slow cases (calls to runtime)
3429 // never return a constant/immutable object.
3430 OverwriteMode overwrite_mode = NO_OVERWRITE;
3431 if (node->left()->AsBinaryOperation() != NULL &&
3432 node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
3433 overwrite_mode = OVERWRITE_LEFT;
3434 } else if (node->right()->AsBinaryOperation() != NULL &&
3435 node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
3436 overwrite_mode = OVERWRITE_RIGHT;
3440 Load(node->right());
3441 GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
3447 void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
3448 Comment cmnt(masm_, "[ CompareOperation");
3450 // Get the expressions from the node.
3451 Expression* left = node->left();
3452 Expression* right = node->right();
3453 Token::Value op = node->op();
3454 // To make typeof testing for natives implemented in JavaScript really
3455 // efficient, we generate special code for expressions of the form:
3456 // 'typeof <expression> == <string>'.
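// For example, 'typeof x == "number"' is compiled below into a smi check
// plus a heap number map check on x, without ever materializing the
// result of the typeof expression as a string.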
3457 UnaryOperation* operation = left->AsUnaryOperation();
3458 if ((op == Token::EQ || op == Token::EQ_STRICT) &&
3459 (operation != NULL && operation->op() == Token::TYPEOF) &&
3460 (right->AsLiteral() != NULL &&
3461 right->AsLiteral()->handle()->IsString())) {
3462 Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
3464 // Load the operand and move it to a register.
3465 LoadTypeofExpression(operation->expression());
3466 Result answer = frame_->Pop();
3467 answer.ToRegister();
3469 if (check->Equals(Heap::number_symbol())) {
3470 Condition is_smi = masm_->CheckSmi(answer.reg());
3471 destination()->true_target()->Branch(is_smi);
3472 frame_->Spill(answer.reg());
3473 __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
3474 __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
3476 destination()->Split(equal);
3478 } else if (check->Equals(Heap::string_symbol())) {
3479 Condition is_smi = masm_->CheckSmi(answer.reg());
3480 destination()->false_target()->Branch(is_smi);
3482 // It can be an undetectable string object.
3483 __ movq(kScratchRegister,
3484 FieldOperand(answer.reg(), HeapObject::kMapOffset));
3485 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3486 Immediate(1 << Map::kIsUndetectable));
3487 destination()->false_target()->Branch(not_zero);
3488 __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
3490 destination()->Split(below); // Unsigned byte comparison needed.
3492 } else if (check->Equals(Heap::boolean_symbol())) {
3493 __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
3494 destination()->true_target()->Branch(equal);
3495 __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
3497 destination()->Split(equal);
3499 } else if (check->Equals(Heap::undefined_symbol())) {
3500 __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
3501 destination()->true_target()->Branch(equal);
3503 Condition is_smi = masm_->CheckSmi(answer.reg());
3504 destination()->false_target()->Branch(is_smi);
3506 // It can be an undetectable object.
3507 __ movq(kScratchRegister,
3508 FieldOperand(answer.reg(), HeapObject::kMapOffset));
3509 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3510 Immediate(1 << Map::kIsUndetectable));
3512 destination()->Split(not_zero);
3514 } else if (check->Equals(Heap::function_symbol())) {
3515 Condition is_smi = masm_->CheckSmi(answer.reg());
3516 destination()->false_target()->Branch(is_smi);
3517 frame_->Spill(answer.reg());
3518 __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
3519 destination()->true_target()->Branch(equal);
3520 // Regular expressions are callable so typeof == 'function'.
3521 __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
3523 destination()->Split(equal);
3525 } else if (check->Equals(Heap::object_symbol())) {
3526 Condition is_smi = masm_->CheckSmi(answer.reg());
3527 destination()->false_target()->Branch(is_smi);
3528 __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
3529 destination()->true_target()->Branch(equal);
3531 // Regular expressions are typeof == 'function', not 'object'.
3532 __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
3533 destination()->false_target()->Branch(equal);
3535 // It can be an undetectable object.
3536 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3537 Immediate(1 << Map::kIsUndetectable));
3538 destination()->false_target()->Branch(not_zero);
3539 __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
3540 destination()->false_target()->Branch(below);
3541 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
3543 destination()->Split(below_equal);
3545 // Uncommon case: typeof testing against a string literal that is
3546 // never returned from the typeof operator.
3548 destination()->Goto(false);
3553 Condition cc = no_condition;
3554 bool strict = false;
3556 case Token::EQ_STRICT:
3577 Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
3578 frame_->Push(&answer); // push the result
3581 case Token::INSTANCEOF: {
3584 InstanceofStub stub;
3585 Result answer = frame_->CallStub(&stub, 2);
3586 answer.ToRegister();
3587 __ testq(answer.reg(), answer.reg());
3589 destination()->Split(zero);
3597 Comparison(node, cc, strict, destination());
3601 void CodeGenerator::VisitThisFunction(ThisFunction* node) {
3602 frame_->PushFunction();
3606 void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
3607 ASSERT(args->length() == 1);
3609 // ArgumentsAccessStub expects the key in rdx and the formal
3610 // parameter count in rax.
3612 Result key = frame_->Pop();
3613 // Explicitly create a constant result.
3614 Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
3615 // Call the shared stub to get to arguments[key].
3616 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
3617 Result result = frame_->CallStub(&stub, &key, &count);
3618 frame_->Push(&result);
3622 void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
3623 ASSERT(args->length() == 1);
3625 Result value = frame_->Pop();
3627 ASSERT(value.is_valid());
3628 Condition is_smi = masm_->CheckSmi(value.reg());
3629 destination()->false_target()->Branch(is_smi);
3630 // It is a heap object - get map.
3631 // Check if the object is a JS array or not.
3632 __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
3634 destination()->Split(equal);
3638 void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
3639 ASSERT(args->length() == 1);
3641 Result value = frame_->Pop();
3643 ASSERT(value.is_valid());
3644 Condition is_smi = masm_->CheckSmi(value.reg());
3645 destination()->false_target()->Branch(is_smi);
3646 // It is a heap object - get map.
3647 // Check if the object is a regexp.
3648 __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister);
3650 destination()->Split(equal);
3654 void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
3655 // This generates a fast version of:
3656 // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
3657 ASSERT(args->length() == 1);
3659 Result obj = frame_->Pop();
3661 Condition is_smi = masm_->CheckSmi(obj.reg());
3662 destination()->false_target()->Branch(is_smi);
3664 __ Move(kScratchRegister, Factory::null_value());
3665 __ cmpq(obj.reg(), kScratchRegister);
3666 destination()->true_target()->Branch(equal);
3668 __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
3669 // Undetectable objects behave like undefined when tested with typeof.
3670 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3671 Immediate(1 << Map::kIsUndetectable));
3672 destination()->false_target()->Branch(not_zero);
3673 __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
3674 destination()->false_target()->Branch(less);
3675 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
3677 destination()->Split(less_equal);
3681 void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
3682 // This generates a fast version of:
3683 // (%_ClassOf(arg) === 'Function')
3684 ASSERT(args->length() == 1);
3686 Result obj = frame_->Pop();
3688 Condition is_smi = masm_->CheckSmi(obj.reg());
3689 destination()->false_target()->Branch(is_smi);
3690 __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
3692 destination()->Split(equal);
3696 void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
3697 ASSERT(args->length() == 1);
3699 Result obj = frame_->Pop();
3701 Condition is_smi = masm_->CheckSmi(obj.reg());
3702 destination()->false_target()->Branch(is_smi);
3703 __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
3704 __ movzxbl(kScratchRegister,
3705 FieldOperand(kScratchRegister, Map::kBitFieldOffset));
3706 __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
3708 destination()->Split(not_zero);
3712 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
3713 ASSERT(args->length() == 0);
3715 // Get the frame pointer for the calling frame.
3716 Result fp = allocator()->Allocate();
3717 __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3719 // Skip the arguments adaptor frame if it exists.
3720 Label check_frame_marker;
3721 __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
3722 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3723 __ j(not_equal, &check_frame_marker);
3724 __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
3726 // Check the marker in the calling frame.
3727 __ bind(&check_frame_marker);
3728 __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
3729 Smi::FromInt(StackFrame::CONSTRUCT));
3731 destination()->Split(equal);
3735 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
3736 ASSERT(args->length() == 0);
3737 // ArgumentsAccessStub takes the parameter count as an input argument
3738 // in register rax. Create a constant result for it.
3739 Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
3740 // Call the shared stub to get to the arguments.length.
3741 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
3742 Result result = frame_->CallStub(&stub, &count);
3743 frame_->Push(&result);
3747 void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
3748 Comment(masm_, "[ GenerateFastCharCodeAt");
3749 ASSERT(args->length() == 2);
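// Only the fast cases are handled inline: a smi, in-range index into a flat
// ASCII or two-byte string, plus a cons string whose second part is the empty
// string (handled by retrying on the first part). Every other case loads
// undefined into the result so that the caller takes the slow path.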
3753 Label not_a_flat_string;
3754 Label try_again_with_new_string;
3756 Label got_char_code;
3760 Result index = frame_->Pop();
3761 Result object = frame_->Pop();
3763 // Get register rcx to use as shift amount later.
3764 Result shift_amount;
3765 if (object.is_register() && object.reg().is(rcx)) {
3766 Result fresh = allocator_->Allocate();
3767 shift_amount = object;
3768 object = fresh;
3769 __ movq(object.reg(), rcx);
3770 }
3771 if (index.is_register() && index.reg().is(rcx)) {
3772 Result fresh = allocator_->Allocate();
3773 shift_amount = index;
3774 index = fresh;
3775 __ movq(index.reg(), rcx);
3776 }
3777 // There could be references to rcx in the frame. Allocating will
3778 // spill them; otherwise spill explicitly.
3779 if (shift_amount.is_valid()) {
3780 frame_->Spill(rcx);
3781 } else {
3782 shift_amount = allocator()->Allocate(rcx);
3783 }
3784 ASSERT(shift_amount.is_register());
3785 ASSERT(shift_amount.reg().is(rcx));
3786 ASSERT(allocator_->count(rcx) == 1);
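// At this point rcx is exclusively ours: any frame references to it have been
// spilled or moved to fresh registers above, so the code below may clobber it
// (it is used to hold the receiver's instance type).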
3788 // We will mutate the index register and possibly the object register.
3789 // The case where they are somehow the same register is handled
3790 // because we only mutate them in the case where the receiver is a
3791 // heap object and the index is not.
3792 object.ToRegister();
3794 frame_->Spill(object.reg());
3795 frame_->Spill(index.reg());
3797 // We need a single extra temporary register.
3798 Result temp = allocator()->Allocate();
3799 ASSERT(temp.is_valid());
3801 // There is no virtual frame effect from here up to the final result push.
3804 // If the receiver is a smi trigger the slow case.
3805 __ JumpIfSmi(object.reg(), &slow_case);
3807 // If the index is negative or non-smi trigger the slow case.
3808 __ JumpIfNotPositiveSmi(index.reg(), &slow_case);
3811 __ SmiToInteger32(index.reg(), index.reg());
3813 __ bind(&try_again_with_new_string);
3814 // Fetch the instance type of the receiver into rcx.
3815 __ movq(rcx, FieldOperand(object.reg(), HeapObject::kMapOffset));
3816 __ movzxbl(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
3817 // If the receiver is not a string trigger the slow case.
3818 __ testb(rcx, Immediate(kIsNotStringMask));
3819 __ j(not_zero, &slow_case);
3821 // Check for index out of range.
3822 __ cmpl(index.reg(), FieldOperand(object.reg(), String::kLengthOffset));
3823 __ j(greater_equal, &slow_case);
3824 // Reload the instance type (into the temp register this time).
3825 __ movq(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
3826 __ movzxbl(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
3828 // We need special handling for non-flat strings.
3829 ASSERT_EQ(0, kSeqStringTag);
3830 __ testb(temp.reg(), Immediate(kStringRepresentationMask));
3831 __ j(not_zero, &not_a_flat_string);
3832 // Check for 1-byte or 2-byte string.
3833 ASSERT_EQ(0, kTwoByteStringTag);
3834 __ testb(temp.reg(), Immediate(kStringEncodingMask));
3835 __ j(not_zero, &ascii_string);
3838 // Load the 2-byte character code into the temp register.
3839 __ movzxwl(temp.reg(), FieldOperand(object.reg(),
3840 index.reg(),
3841 times_2,
3842 SeqTwoByteString::kHeaderSize));
3843 __ jmp(&got_char_code);
3846 __ bind(&ascii_string);
3847 // Load the byte into the temp register.
3848 __ movzxbl(temp.reg(), FieldOperand(object.reg(),
3849 index.reg(),
3850 times_1,
3851 SeqAsciiString::kHeaderSize));
3852 __ bind(&got_char_code);
3853 __ Integer32ToSmi(temp.reg(), temp.reg());
3856 // Handle non-flat strings.
3857 __ bind(&not_a_flat_string);
3858 __ and_(temp.reg(), Immediate(kStringRepresentationMask));
3859 __ cmpb(temp.reg(), Immediate(kConsStringTag));
3860 __ j(not_equal, &slow_case);
3863 // Check that the right-hand side is the empty string (i.e., that this is
3864 // really a flat string in a cons-string wrapper). If that is not the case we
3865 // would rather go to the runtime system now, to flatten the string.
3866 __ movq(temp.reg(), FieldOperand(object.reg(), ConsString::kSecondOffset));
3867 __ CompareRoot(temp.reg(), Heap::kEmptyStringRootIndex);
3868 __ j(not_equal, &slow_case);
3869 // Get the first of the two strings.
3870 __ movq(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
3871 __ jmp(&try_again_with_new_string);
3873 __ bind(&slow_case);
3874 // Move the undefined value into the result register, which will
3875 // trigger the slow case.
3876 __ LoadRoot(temp.reg(), Heap::kUndefinedValueRootIndex);
3879 frame_->Push(&temp);
3883 void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
3884 Comment(masm_, "[ GenerateCharFromCode");
3885 ASSERT(args->length() == 1);
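// Only ASCII char codes are handled inline, via the single character string
// cache; a non-smi code, a code above String::kMaxAsciiCharCode, or a cache
// miss (an undefined cache entry) goes to Runtime::kCharFromCode instead.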
3888 Result code = frame_->Pop();
3890 ASSERT(code.is_valid());
3892 Result temp = allocator()->Allocate();
3893 ASSERT(temp.is_valid());
3895 JumpTarget slow_case;
3898 // Fast case of Heap::LookupSingleCharacterStringFromCode.
3899 Condition is_smi = __ CheckSmi(code.reg());
3900 slow_case.Branch(NegateCondition(is_smi), &code, not_taken);
3902 __ SmiToInteger32(kScratchRegister, code.reg());
3903 __ cmpl(kScratchRegister, Immediate(String::kMaxAsciiCharCode));
3904 slow_case.Branch(above, &code, not_taken);
3906 __ Move(temp.reg(), Factory::single_character_string_cache());
3907 __ movq(temp.reg(), FieldOperand(temp.reg(),
3908 kScratchRegister, times_pointer_size,
3909 FixedArray::kHeaderSize));
3910 __ CompareRoot(temp.reg(), Heap::kUndefinedValueRootIndex);
3911 slow_case.Branch(equal, &code, not_taken);
3914 frame_->Push(&temp);
3917 slow_case.Bind(&code);
3918 frame_->Push(&code);
3919 Result result = frame_->CallRuntime(Runtime::kCharFromCode, 1);
3920 frame_->Push(&result);
3926 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
3927 ASSERT(args->length() == 1);
3929 Result value = frame_->Pop();
3931 ASSERT(value.is_valid());
3932 Condition positive_smi = masm_->CheckPositiveSmi(value.reg());
3934 destination()->Split(positive_smi);
3938 // Generates the Math.pow method - currently just calls runtime.
3939 void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
3940 ASSERT(args->length() == 2);
3943 Result res = frame_->CallRuntime(Runtime::kMath_pow, 2);
3948 // Generates the Math.sqrt method - currently just calls runtime.
3949 void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
3950 ASSERT(args->length() == 1);
3952 Result res = frame_->CallRuntime(Runtime::kMath_sqrt, 1);
3957 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
3958 ASSERT(args->length() == 1);
3960 Result value = frame_->Pop();
3962 ASSERT(value.is_valid());
3963 Condition is_smi = masm_->CheckSmi(value.reg());
3965 destination()->Split(is_smi);
3969 void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
3970 // Conditionally generate a log call.
3971 // Args:
3972 // 0 (literal string): The type of logging (corresponds to the flags).
3973 // This is used to determine whether or not to generate the log call.
3974 // 1 (string): Format string. Access the string at argument index 2
3975 // with '%2s' (see Logger::LogRuntime for all the formats).
3976 // 2 (array): Arguments to the format string.
3977 ASSERT_EQ(args->length(), 3);
3978 #ifdef ENABLE_LOGGING_AND_PROFILING
3979 if (ShouldGenerateLog(args->at(0))) {
3982 frame_->CallRuntime(Runtime::kLog, 2);
3985 // Finally, we're expected to leave a value on the top of the stack.
3986 frame_->Push(Factory::undefined_value());
3990 void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
3991 ASSERT(args->length() == 2);
3993 // Load the two objects into registers and perform the comparison.
3996 Result right = frame_->Pop();
3997 Result left = frame_->Pop();
4000 __ cmpq(right.reg(), left.reg());
4003 destination()->Split(equal);
4007 void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
4008 ASSERT(args->length() == 0);
4009 // The rbp value is aligned, so it is tagged like a smi (without necessarily
4010 // being padded like a real smi, so it must not be treated as a smi value).
4011 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
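// Because the low bit of rbp is clear, the GC treats the pushed value as a
// smi and will not try to follow or relocate it.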
4012 Result rbp_as_smi = allocator_->Allocate();
4013 ASSERT(rbp_as_smi.is_valid());
4014 __ movq(rbp_as_smi.reg(), rbp);
4015 frame_->Push(&rbp_as_smi);
4019 void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
4020 ASSERT(args->length() == 0);
4024 static const int num_arguments = 0;
4025 __ PrepareCallCFunction(num_arguments);
4027 // Call V8::RandomPositiveSmi().
4028 __ CallCFunction(ExternalReference::random_positive_smi_function(),
4029 num_arguments);
4032 Result result = allocator_->Allocate(rax);
4033 frame_->Push(&result);
4037 void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
4038 ASSERT_EQ(args->length(), 4);
4040 // Load the arguments on the stack and call the runtime system.
4045 RegExpExecStub stub;
4046 Result result = frame_->CallStub(&stub, 4);
4047 frame_->Push(&result);
4051 void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
4052 ASSERT_EQ(args->length(), 1);
4054 // Load the argument on the stack and jump to the runtime.
4057 NumberToStringStub stub;
4058 Result result = frame_->CallStub(&stub, 1);
4059 frame_->Push(&result);
4063 void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
4064 ASSERT_EQ(args->length(), 1);
4065 // Load the argument on the stack and jump to the runtime.
4067 Result answer = frame_->CallRuntime(Runtime::kMath_sin, 1);
4068 frame_->Push(&answer);
4072 void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
4073 ASSERT_EQ(args->length(), 1);
4074 // Load the argument on the stack and jump to the runtime.
4076 Result answer = frame_->CallRuntime(Runtime::kMath_cos, 1);
4077 frame_->Push(&answer);
4081 void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
4082 ASSERT_EQ(2, args->length());
4087 StringAddStub stub(NO_STRING_ADD_FLAGS);
4088 Result answer = frame_->CallStub(&stub, 2);
4089 frame_->Push(&answer);
4093 void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
4094 ASSERT_EQ(3, args->length());
4101 Result answer = frame_->CallStub(&stub, 3);
4102 frame_->Push(&answer);
4106 void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
4107 ASSERT_EQ(2, args->length());
4112 StringCompareStub stub;
4113 Result answer = frame_->CallStub(&stub, 2);
4114 frame_->Push(&answer);
4118 void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
4119 ASSERT(args->length() == 1);
4120 JumpTarget leave, null, function, non_function_constructor;
4121 Load(args->at(0)); // Load the object.
4122 Result obj = frame_->Pop();
4124 frame_->Spill(obj.reg());
4126 // If the object is a smi, we return null.
4127 Condition is_smi = masm_->CheckSmi(obj.reg());
4128 null.Branch(is_smi);
4130 // Check that the object is a JS object but take special care of JS
4131 // functions to make sure they have 'Function' as their class.
4133 __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
4136 // As long as JS_FUNCTION_TYPE is the last instance type and it is
4137 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
4138 // LAST_JS_OBJECT_TYPE.
4139 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
4140 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
4141 __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
4142 function.Branch(equal);
4144 // Check if the constructor in the map is a function.
4145 __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
4146 __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
4147 non_function_constructor.Branch(not_equal);
4149 // The obj register now contains the constructor function. Grab the
4150 // instance class name from there.
4151 __ movq(obj.reg(),
4152 FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
4153 __ movq(obj.reg(),
4154 FieldOperand(obj.reg(),
4155 SharedFunctionInfo::kInstanceClassNameOffset));
4159 // Functions have class 'Function'.
4160 function.Bind();
4161 frame_->Push(Factory::function_class_symbol());
4164 // Objects with a non-function constructor have class 'Object'.
4165 non_function_constructor.Bind();
4166 frame_->Push(Factory::Object_symbol());
4169 // Non-JS objects have class null.
4170 null.Bind();
4171 frame_->Push(Factory::null_value());
4178 void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
4179 ASSERT(args->length() == 2);
4181 Load(args->at(0)); // Load the object.
4182 Load(args->at(1)); // Load the value.
4183 Result value = frame_->Pop();
4184 Result object = frame_->Pop();
4186 object.ToRegister();
4188 // if (object->IsSmi()) return value.
4189 Condition is_smi = masm_->CheckSmi(object.reg());
4190 leave.Branch(is_smi, &value);
4192 // It is a heap object - get its map.
4193 Result scratch = allocator_->Allocate();
4194 ASSERT(scratch.is_valid());
4195 // if (!object->IsJSValue()) return value.
4196 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
4197 leave.Branch(not_equal, &value);
4200 __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
4201 // Update the write barrier. Save the value as it will be
4202 // overwritten by the write barrier code and is needed afterward.
4203 Result duplicate_value = allocator_->Allocate();
4204 ASSERT(duplicate_value.is_valid());
4205 __ movq(duplicate_value.reg(), value.reg());
4206 // The object register is also overwritten by the write barrier and
4207 // possibly aliased in the frame.
4208 frame_->Spill(object.reg());
4209 __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
4213 duplicate_value.Unuse();
4217 frame_->Push(&value);
4221 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
4222 ASSERT(args->length() == 1);
4224 Load(args->at(0)); // Load the object.
4226 Result object = frame_->Pop();
4227 object.ToRegister();
4228 ASSERT(object.is_valid());
4229 // if (object->IsSmi()) return object.
4230 Condition is_smi = masm_->CheckSmi(object.reg());
4231 leave.Branch(is_smi);
4232 // It is a heap object - get map.
4233 Result temp = allocator()->Allocate();
4234 ASSERT(temp.is_valid());
4235 // if (!object->IsJSValue()) return object.
4236 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
4237 leave.Branch(not_equal);
4238 __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
4240 frame_->SetElementAt(0, &temp);
4245 // -----------------------------------------------------------------------------
4246 // CodeGenerator implementation of Expressions
4248 void CodeGenerator::LoadAndSpill(Expression* expression) {
4249 // TODO(x64): No architecture specific code. Move to shared location.
4250 ASSERT(in_spilled_code());
4251 set_in_spilled_code(false);
4254 set_in_spilled_code(true);
4258 void CodeGenerator::Load(Expression* expr) {
4260 int original_height = frame_->height();
4262 ASSERT(!in_spilled_code());
4263 JumpTarget true_target;
4264 JumpTarget false_target;
4265 ControlDestination dest(&true_target, &false_target, true);
4266 LoadCondition(expr, &dest, false);
4268 if (dest.false_was_fall_through()) {
4269 // The false target was just bound.
4271 frame_->Push(Factory::false_value());
4272 // There may be dangling jumps to the true target.
4273 if (true_target.is_linked()) {
4276 frame_->Push(Factory::true_value());
4280 } else if (dest.is_used()) {
4281 // There is true, and possibly false, control flow (with true as
4282 // the fall through).
4284 frame_->Push(Factory::true_value());
4285 if (false_target.is_linked()) {
4287 false_target.Bind();
4288 frame_->Push(Factory::false_value());
4293 // We have a valid value on top of the frame, but we still may
4294 // have dangling jumps to the true and false targets from nested
4295 // subexpressions (eg, the left subexpressions of the
4296 // short-circuited boolean operators).
4297 ASSERT(has_valid_frame());
4298 if (true_target.is_linked() || false_target.is_linked()) {
4300 loaded.Jump(); // Don't lose the current TOS.
4301 if (true_target.is_linked()) {
4303 frame_->Push(Factory::true_value());
4304 if (false_target.is_linked()) {
4308 if (false_target.is_linked()) {
4309 false_target.Bind();
4310 frame_->Push(Factory::false_value());
4316 ASSERT(has_valid_frame());
4317 ASSERT(frame_->height() == original_height + 1);
4321 // Emit code to load the value of an expression to the top of the
4322 // frame. If the expression is boolean-valued it may be compiled (or
4323 // partially compiled) into control flow to the control destination.
4324 // If force_control is true, control flow is forced.
4325 void CodeGenerator::LoadCondition(Expression* x,
4326 ControlDestination* dest,
4327 bool force_control) {
4328 ASSERT(!in_spilled_code());
4329 int original_height = frame_->height();
4331 { CodeGenState new_state(this, dest);
4334 // If we hit a stack overflow, we may not have actually visited
4335 // the expression. In that case, we ensure that we have a
4336 // valid-looking frame state because we will continue to generate
4337 // code as we unwind the C++ stack.
4339 // It's possible to have both a stack overflow and a valid frame
4340 // state (eg, a subexpression overflowed, visiting it returned
4341 // with a dummied frame state, and visiting this expression
4342 // returned with a normal-looking state).
4343 if (HasStackOverflow() &&
4345 frame_->height() == original_height) {
4350 if (force_control && !dest->is_used()) {
4351 // Convert the TOS value into flow to the control destination.
4352 // TODO(X64): Make control flow to control destinations work.
4356 ASSERT(!(force_control && !dest->is_used()));
4357 ASSERT(dest->is_used() || frame_->height() == original_height + 1);
4361 // ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
4362 // convert it to a boolean in the condition code register or jump to
4363 // 'false_target'/'true_target' as appropriate.
4364 void CodeGenerator::ToBoolean(ControlDestination* dest) {
4365 Comment cmnt(masm_, "[ ToBoolean");
4367 // The value to convert should be popped from the frame.
4368 Result value = frame_->Pop();
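// The first branch handles values statically known to be numbers; the
// general path checks 'false', 'true', 'undefined' and smis inline and leaves
// every other value to the stub call at the end.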
4371 if (value.is_number()) {
4372 Comment cmnt(masm_, "ONLY_NUMBER");
4373 // Fast case if TypeInfo indicates only numbers.
4374 if (FLAG_debug_code) {
4375 __ AbortIfNotNumber(value.reg(), "ToBoolean operand is not a number.");
4377 // Smi => false iff zero.
4378 __ SmiCompare(value.reg(), Smi::FromInt(0));
4379 dest->false_target()->Branch(equal);
4380 Condition is_smi = masm_->CheckSmi(value.reg());
4381 dest->true_target()->Branch(is_smi);
4383 __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
4386 dest->Split(not_zero);
4388 // Fast case checks.
4389 // 'false' => false.
4390 __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
4391 dest->false_target()->Branch(equal);
4394 __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
4395 dest->true_target()->Branch(equal);
4397 // 'undefined' => false.
4398 __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
4399 dest->false_target()->Branch(equal);
4401 // Smi => false iff zero.
4402 __ SmiCompare(value.reg(), Smi::FromInt(0));
4403 dest->false_target()->Branch(equal);
4404 Condition is_smi = masm_->CheckSmi(value.reg());
4405 dest->true_target()->Branch(is_smi);
4407 // Call the stub for all other cases.
4408 frame_->Push(&value); // Undo the Pop() from above.
4409 ToBooleanStub stub;
4410 Result temp = frame_->CallStub(&stub, 1);
4411 // Convert the result to a condition code.
4412 __ testq(temp.reg(), temp.reg());
4414 dest->Split(not_equal);
4419 void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
4421 // TODO(X64): Implement security policy for loads of smis.
4425 bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
4429 //------------------------------------------------------------------------------
4430 // CodeGenerator implementation of variables, lookups, and stores.
4432 Reference::Reference(CodeGenerator* cgen,
4433 Expression* expression,
4434 bool persist_after_get)
4436 expression_(expression),
4438 persist_after_get_(persist_after_get) {
4439 cgen->LoadReference(this);
4443 Reference::~Reference() {
4444 ASSERT(is_unloaded() || is_illegal());
4448 void CodeGenerator::LoadReference(Reference* ref) {
4449 // References are loaded from both spilled and unspilled code. Set the
4450 // state to unspilled to allow that (and explicitly spill after
4451 // construction at the construction sites).
4452 bool was_in_spilled_code = in_spilled_code_;
4453 in_spilled_code_ = false;
4455 Comment cmnt(masm_, "[ LoadReference");
4456 Expression* e = ref->expression();
4457 Property* property = e->AsProperty();
4458 Variable* var = e->AsVariableProxy()->AsVariable();
4460 if (property != NULL) {
4461 // The expression is either a property or a variable proxy that rewrites
4462 // to a property.
4463 Load(property->obj());
4464 if (property->key()->IsPropertyName()) {
4465 ref->set_type(Reference::NAMED);
4467 Load(property->key());
4468 ref->set_type(Reference::KEYED);
4470 } else if (var != NULL) {
4471 // The expression is a variable proxy that does not rewrite to a
4472 // property. Global variables are treated as named property references.
4473 if (var->is_global()) {
4475 ref->set_type(Reference::NAMED);
4477 ASSERT(var->slot() != NULL);
4478 ref->set_type(Reference::SLOT);
4481 // Anything else is a runtime error.
4483 frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
4486 in_spilled_code_ = was_in_spilled_code;
4490 void CodeGenerator::UnloadReference(Reference* ref) {
4491 // Pop a reference from the stack while preserving TOS.
4492 Comment cmnt(masm_, "[ UnloadReference");
4493 frame_->Nip(ref->size());
4494 ref->set_unloaded();
4498 Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
4499 // Currently, this assertion will fail if we try to assign to
4500 // a constant variable that is constant because it is read-only
4501 // (such as the variable referring to a named function expression).
4502 // We need to implement assignments to read-only variables.
4503 // Ideally, we should do this during AST generation (by converting
4504 // such assignments into expression statements); however, in general
4505 // we may not be able to make the decision until past AST generation,
4506 // that is when the entire program is known.
4507 ASSERT(slot != NULL);
4508 int index = slot->index();
4509 switch (slot->type()) {
4510 case Slot::PARAMETER:
4511 return frame_->ParameterAt(index);
4513 case Slot::LOCAL:
4514 return frame_->LocalAt(index);
4516 case Slot::CONTEXT: {
4517 // Follow the context chain if necessary.
4518 ASSERT(!tmp.is(rsi)); // do not overwrite context register
4519 Register context = rsi;
4520 int chain_length = scope()->ContextChainLength(slot->var()->scope());
4521 for (int i = 0; i < chain_length; i++) {
4522 // Load the closure.
4523 // (All contexts, even 'with' contexts, have a closure,
4524 // and it is the same for all contexts inside a function.
4525 // There is no need to go to the function context first.)
4526 __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
4527 // Load the function context (which is the incoming, outer context).
4528 __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
4531 // We may have a 'with' context now. Get the function context.
4532 // (In fact this mov may never be needed, since the scope analysis
4533 // may not permit a direct context access in this case and thus we are
4534 // always at a function context. However it is safe to dereference
4535 // because the function context of a function context is itself. Before
4536 // deleting this mov we should try to create a counter-example first.)
4538 __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
4539 return ContextOperand(tmp, index);
4544 return Operand(rsp, 0);
4549 Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
4552 ASSERT(slot->type() == Slot::CONTEXT);
4553 ASSERT(tmp.is_register());
4554 Register context = rsi;
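// Walk the context chain from the current scope out to the slot's scope,
// branching to the slow case if any intermediate context that may have been
// extended by eval has a non-NULL extension object.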
4556 for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
4557 if (s->num_heap_slots() > 0) {
4558 if (s->calls_eval()) {
4559 // Check that extension is NULL.
4560 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
4562 slow->Branch(not_equal, not_taken);
4564 __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
4565 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
4566 context = tmp.reg();
4569 // Check that last extension is NULL.
4570 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
4571 slow->Branch(not_equal, not_taken);
4572 __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
4573 return ContextOperand(tmp.reg(), slot->index());
4577 void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
4578 if (slot->type() == Slot::LOOKUP) {
4579 ASSERT(slot->var()->is_dynamic());
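// Variables in LOOKUP slots are dynamically scoped. The code below tries the
// fast paths for DYNAMIC_GLOBAL and DYNAMIC_LOCAL variables first and only
// falls back to a runtime call when they do not apply or when an extension
// object is found along the way.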
4585 // Generate fast-case code for variables that might be shadowed by
4586 // eval-introduced variables. Eval is used a lot without
4587 // introducing variables. In those cases, we do not want to
4588 // perform a runtime call for all variables in the scope
4589 // containing the eval.
4590 if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
4591 value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
4592 // If there was no control flow to slow, we can exit early.
4593 if (!slow.is_linked()) {
4594 frame_->Push(&value);
4600 } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
4601 Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
4602 // Only generate the fast case for locals that rewrite to slots.
4603 // This rules out argument loads.
4604 if (potential_slot != NULL) {
4605 // Allocate a fresh register to use as a temp in
4606 // ContextSlotOperandCheckExtensions and to hold the result
4607 // value.
4608 value = allocator_->Allocate();
4609 ASSERT(value.is_valid());
4610 __ movq(value.reg(),
4611 ContextSlotOperandCheckExtensions(potential_slot,
4614 if (potential_slot->var()->mode() == Variable::CONST) {
4615 __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
4616 done.Branch(not_equal, &value);
4617 __ LoadRoot(value.reg(), Heap::kUndefinedValueRootIndex);
4619 // There is always control flow to slow from
4620 // ContextSlotOperandCheckExtensions so we have to jump around
4621 // it.
4627 // A runtime call is inevitable. We eagerly sync frame elements
4628 // to memory so that we can push the arguments directly into place
4629 // on top of the frame.
4630 frame_->SyncRange(0, frame_->element_count() - 1);
4631 frame_->EmitPush(rsi);
4632 __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
4633 frame_->EmitPush(kScratchRegister);
4634 if (typeof_state == INSIDE_TYPEOF) {
4636 frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
4638 value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
4642 frame_->Push(&value);
4644 } else if (slot->var()->mode() == Variable::CONST) {
4645 // Const slots may contain 'the hole' value (the constant hasn't been
4646 // initialized yet) which needs to be converted into the 'undefined'
4647 // value.
4649 // We currently spill the virtual frame because constants use the
4650 // potentially unsafe direct-frame access of SlotOperand.
4651 VirtualFrame::SpilledScope spilled_scope;
4652 Comment cmnt(masm_, "[ Load const");
4654 __ movq(rcx, SlotOperand(slot, rcx));
4655 __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
4656 exit.Branch(not_equal);
4657 __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
4659 frame_->EmitPush(rcx);
4661 } else if (slot->type() == Slot::PARAMETER) {
4662 frame_->PushParameterAt(slot->index());
4664 } else if (slot->type() == Slot::LOCAL) {
4665 frame_->PushLocalAt(slot->index());
4668 // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
4671 // The use of SlotOperand below is safe for an unspilled frame
4672 // because it will always be a context slot.
4673 ASSERT(slot->type() == Slot::CONTEXT);
4674 Result temp = allocator_->Allocate();
4675 ASSERT(temp.is_valid());
4676 __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
4677 frame_->Push(&temp);
4682 void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
4683 TypeofState state) {
4684 LoadFromSlot(slot, state);
4686 // Bail out quickly if we're not using lazy arguments allocation.
4687 if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
4689 // ... or if the slot isn't a non-parameter arguments slot.
4690 if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
4692 // Pop the loaded value from the stack.
4693 Result value = frame_->Pop();
4695 // If the loaded value is a constant, we know whether the arguments
4696 // object has been lazily allocated yet.
4697 if (value.is_constant()) {
4698 if (value.handle()->IsTheHole()) {
4699 Result arguments = StoreArgumentsObject(false);
4700 frame_->Push(&arguments);
4702 frame_->Push(&value);
4707 // The loaded value is in a register. If it is the sentinel that
4708 // indicates that we haven't loaded the arguments object yet, we
4709 // need to do it now.
4711 __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
4712 frame_->Push(&value);
4713 exit.Branch(not_equal);
4714 Result arguments = StoreArgumentsObject(false);
4715 frame_->SetElementAt(0, &arguments);
4720 void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
4721 if (slot->type() == Slot::LOOKUP) {
4722 ASSERT(slot->var()->is_dynamic());
4724 // For now, just do a runtime call. Since the call is inevitable,
4725 // we eagerly sync the virtual frame so we can directly push the
4726 // arguments into place.
4727 frame_->SyncRange(0, frame_->element_count() - 1);
4729 frame_->EmitPush(rsi);
4730 frame_->EmitPush(slot->var()->name());
4733 if (init_state == CONST_INIT) {
4734 // Same as the case for a normal store, but ignores attribute
4735 // (e.g. READ_ONLY) of context slot so that we can initialize const
4736 // properties (introduced via eval("const foo = (some expr);")). Also,
4737 // uses the current function context instead of the top context.
4739 // Note that we must declare the foo upon entry of eval(), via a
4740 // context slot declaration, but we cannot initialize it at the same
4741 // time, because the const declaration may be at the end of the eval
4742 // code (sigh...) and the const variable may have been used before
4743 // (where its value is 'undefined'). Thus, we can only do the
4744 // initialization when we actually encounter the expression and when
4745 // the expression operands are defined and valid, and thus we need the
4746 // split into 2 operations: declaration of the context slot followed
4747 // by initialization.
4748 value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
4750 value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
4752 // Storing a variable must keep the (new) value on the expression
4753 // stack. This is necessary for compiling chained assignment
4754 // expressions.
4755 frame_->Push(&value);
4757 ASSERT(!slot->var()->is_dynamic());
4760 if (init_state == CONST_INIT) {
4761 ASSERT(slot->var()->mode() == Variable::CONST);
4762 // Only the first const initialization must be executed (the slot
4763 // still contains 'the hole' value). When the assignment is executed,
4764 // the code is identical to a normal store (see below).
4766 // We spill the frame in the code below because the direct-frame
4767 // access of SlotOperand is potentially unsafe with an unspilled
4769 VirtualFrame::SpilledScope spilled_scope;
4770 Comment cmnt(masm_, "[ Init const");
4771 __ movq(rcx, SlotOperand(slot, rcx));
4772 __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
4773 exit.Branch(not_equal);
4776 // We must execute the store. Storing a variable must keep the (new)
4777 // value on the stack. This is necessary for compiling assignment
4778 // expressions.
4780 // Note: We will reach here even with slot->var()->mode() ==
4781 // Variable::CONST because of const declarations which will initialize
4782 // consts to 'the hole' value and by doing so, end up calling this code.
4783 if (slot->type() == Slot::PARAMETER) {
4784 frame_->StoreToParameterAt(slot->index());
4785 } else if (slot->type() == Slot::LOCAL) {
4786 frame_->StoreToLocalAt(slot->index());
4788 // The other slot types (LOOKUP and GLOBAL) cannot reach here.
4790 // The use of SlotOperand below is safe for an unspilled frame
4791 // because the slot is a context slot.
4792 ASSERT(slot->type() == Slot::CONTEXT);
4794 Result value = frame_->Pop();
4796 Result start = allocator_->Allocate();
4797 ASSERT(start.is_valid());
4798 __ movq(SlotOperand(slot, start.reg()), value.reg());
4799 // RecordWrite may destroy the value registers.
4801 // TODO(204): Avoid actually spilling when the value is not
4802 // needed (probably the common case).
4803 frame_->Spill(value.reg());
4804 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
4805 Result temp = allocator_->Allocate();
4806 ASSERT(temp.is_valid());
4807 __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
4808 // The results start, value, and temp are unused by going out of
4809 // scope.
4817 Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
4819 TypeofState typeof_state,
4821 // Check that no extension objects have been created by calls to
4822 // eval from the current scope to the global scope.
4823 Register context = rsi;
4824 Result tmp = allocator_->Allocate();
4825 ASSERT(tmp.is_valid()); // All non-reserved registers were available.
4829 if (s->num_heap_slots() > 0) {
4830 if (s->calls_eval()) {
4831 // Check that extension is NULL.
4832 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
4834 slow->Branch(not_equal, not_taken);
4836 // Load next context in chain.
4837 __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
4838 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
4839 context = tmp.reg();
4841 // If no outer scope calls eval, we do not need to check more
4842 // context extensions. If we have reached an eval scope, we check
4843 // all extensions from this point.
4844 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
4845 s = s->outer_scope();
4848 if (s->is_eval_scope()) {
4849 // Loop up the context chain. There is no frame effect so it is
4850 // safe to use raw labels here.
4852 if (!context.is(tmp.reg())) {
4853 __ movq(tmp.reg(), context);
4855 // Load map for comparison into register, outside loop.
4856 __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
4858 // Terminate at global context.
4859 __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
4861 // Check that extension is NULL.
4862 __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
4863 slow->Branch(not_equal);
4864 // Load next context in chain.
4865 __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
4866 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
4872 // All extension objects were empty and it is safe to use a global
4873 // load IC call.
4875 frame_->Push(slot->var()->name());
4876 RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
4877 ? RelocInfo::CODE_TARGET
4878 : RelocInfo::CODE_TARGET_CONTEXT;
4879 Result answer = frame_->CallLoadIC(mode);
4880 // A test rax instruction following the call signals that the inobject
4881 // property case was inlined. Ensure that there is not a test rax
4882 // instruction here.
4884 // Discard the global object. The result is in answer.
4890 void CodeGenerator::LoadGlobal() {
4891 if (in_spilled_code()) {
4892 frame_->EmitPush(GlobalObject());
4894 Result temp = allocator_->Allocate();
4895 __ movq(temp.reg(), GlobalObject());
4896 frame_->Push(&temp);
4901 void CodeGenerator::LoadGlobalReceiver() {
4902 Result temp = allocator_->Allocate();
4903 Register reg = temp.reg();
4904 __ movq(reg, GlobalObject());
4905 __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
4906 frame_->Push(&temp);
4910 ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
4911 if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
4912 ASSERT(scope()->arguments_shadow() != NULL);
4913 // We don't want to do lazy arguments allocation for functions that
4914 // have heap-allocated contexts, because it interferes with the
4915 // uninitialized const tracking in the context objects.
4916 return (scope()->num_heap_slots() > 0)
4917 ? EAGER_ARGUMENTS_ALLOCATION
4918 : LAZY_ARGUMENTS_ALLOCATION;
4922 Result CodeGenerator::StoreArgumentsObject(bool initial) {
4923 ArgumentsAllocationMode mode = ArgumentsMode();
4924 ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
4926 Comment cmnt(masm_, "[ store arguments object");
4927 if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
4928 // When using lazy arguments allocation, we store the hole value
4929 // as a sentinel indicating that the arguments object hasn't been
4930 // allocated yet.
4931 frame_->Push(Factory::the_hole_value());
4933 ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
4934 frame_->PushFunction();
4935 frame_->PushReceiverSlotAddress();
4936 frame_->Push(Smi::FromInt(scope()->num_parameters()));
4937 Result result = frame_->CallStub(&stub, 3);
4938 frame_->Push(&result);
4942 Variable* arguments = scope()->arguments()->var();
4943 Variable* shadow = scope()->arguments_shadow()->var();
4944 ASSERT(arguments != NULL && arguments->slot() != NULL);
4945 ASSERT(shadow != NULL && shadow->slot() != NULL);
4947 bool skip_arguments = false;
4948 if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
4949 // We have to skip storing into the arguments slot if it has
4950 // already been written to. This can happen if a function
4951 // has a local variable named 'arguments'.
4952 LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
4953 Result probe = frame_->Pop();
4954 if (probe.is_constant()) {
4955 // We have to skip updating the arguments object if it has been
4956 // assigned a proper value.
4957 skip_arguments = !probe.handle()->IsTheHole();
4959 __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
4961 done.Branch(not_equal);
4964 if (!skip_arguments) {
4965 StoreToSlot(arguments->slot(), NOT_CONST_INIT);
4966 if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
4968 StoreToSlot(shadow->slot(), NOT_CONST_INIT);
4969 return frame_->Pop();
4973 void CodeGenerator::LoadTypeofExpression(Expression* expr) {
4974 // Special handling of identifiers as subexpressions of typeof.
4975 Variable* variable = expr->AsVariableProxy()->AsVariable();
4976 if (variable != NULL && !variable->is_this() && variable->is_global()) {
4977 // For a global variable we build the property reference
4978 // <global>.<variable> and perform a (regular non-contextual) property
4979 // load to make sure we do not get reference errors.
4980 Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
4981 Literal key(variable->name());
4982 Property property(&global, &key, RelocInfo::kNoPosition);
4983 Reference ref(this, &property);
4985 } else if (variable != NULL && variable->slot() != NULL) {
4986 // For a variable that rewrites to a slot, we signal it is the immediate
4987 // subexpression of a typeof.
4988 LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
4990 // Anything else can be handled normally.
4996 void CodeGenerator::Comparison(AstNode* node,
4999 ControlDestination* dest) {
5000 // Strict only makes sense for equality comparisons.
5001 ASSERT(!strict || cc == equal);
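// The cases below go from most to least specialized: comparison against a
// constant smi is partially inlined, equality against the constant 'null' is
// inlined as a null/undefined/undetectable check, and the general case only
// inlines the smi-smi comparison before calling the CompareStub.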
5005 // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
5006 if (cc == greater || cc == less_equal) {
5007 cc = ReverseCondition(cc);
5008 left_side = frame_->Pop();
5009 right_side = frame_->Pop();
5011 right_side = frame_->Pop();
5012 left_side = frame_->Pop();
5014 ASSERT(cc == less || cc == equal || cc == greater_equal);
5016 // If either side is a constant smi, optimize the comparison.
5017 bool left_side_constant_smi =
5018 left_side.is_constant() && left_side.handle()->IsSmi();
5019 bool right_side_constant_smi =
5020 right_side.is_constant() && right_side.handle()->IsSmi();
5021 bool left_side_constant_null =
5022 left_side.is_constant() && left_side.handle()->IsNull();
5023 bool right_side_constant_null =
5024 right_side.is_constant() && right_side.handle()->IsNull();
5026 if (left_side_constant_smi || right_side_constant_smi) {
5027 if (left_side_constant_smi && right_side_constant_smi) {
5028 // Trivial case, comparing two constants.
5029 int left_value = Smi::cast(*left_side.handle())->value();
5030 int right_value = Smi::cast(*right_side.handle())->value();
5033 dest->Goto(left_value < right_value);
5036 dest->Goto(left_value == right_value);
5039 dest->Goto(left_value >= right_value);
5045 // Only one side is a constant Smi.
5046 // If left side is a constant Smi, reverse the operands.
5047 // Since one side is a constant Smi, conversion order does not matter.
5048 if (left_side_constant_smi) {
5049 Result temp = left_side;
5050 left_side = right_side;
5052 cc = ReverseCondition(cc);
5053 // This may reintroduce greater or less_equal as the value of cc.
5054 // CompareStub and the inline code both support all values of cc.
5056 // Implement comparison against a constant Smi, inlining the case
5057 // where both sides are Smis.
5058 left_side.ToRegister();
5059 Register left_reg = left_side.reg();
5060 Handle<Object> right_val = right_side.handle();
5062 // Here we split control flow to the stub call and inlined cases
5063 // before finally splitting it to the control destination. We use
5064 // a jump target and branching to duplicate the virtual frame at
5065 // the first split. We manually handle the off-frame references
5066 // by reconstituting them on the non-fall-through path.
5069 Condition left_is_smi = masm_->CheckSmi(left_side.reg());
5070 is_smi.Branch(left_is_smi);
5072 bool is_loop_condition = (node->AsExpression() != NULL) &&
5073 node->AsExpression()->is_loop_condition();
5074 if (!is_loop_condition && right_val->IsSmi()) {
5075 // Right side is a constant smi and left side has been checked
5076 // not to be a smi.
5077 JumpTarget not_number;
5078 __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
5079 Factory::heap_number_map());
5080 not_number.Branch(not_equal, &left_side);
5081 __ movsd(xmm1,
5082 FieldOperand(left_reg, HeapNumber::kValueOffset));
5083 int value = Smi::cast(*right_val)->value();
5085 __ xorpd(xmm0, xmm0);
5087 Result temp = allocator()->Allocate();
5088 __ movl(temp.reg(), Immediate(value));
5089 __ cvtlsi2sd(xmm0, temp.reg());
5092 __ ucomisd(xmm1, xmm0);
5093 // Jump to builtin for NaN.
5094 not_number.Branch(parity_even, &left_side);
5096 Condition double_cc = cc;
5097 switch (cc) {
5098 case less: double_cc = below; break;
5099 case equal: double_cc = equal; break;
5100 case less_equal: double_cc = below_equal; break;
5101 case greater: double_cc = above; break;
5102 case greater_equal: double_cc = above_equal; break;
5103 default: UNREACHABLE();
5104 }
5105 dest->true_target()->Branch(double_cc);
5106 dest->false_target()->Jump();
5107 not_number.Bind(&left_side);
5110 // Set up and call the compare stub.
5111 CompareStub stub(cc, strict);
5112 Result result = frame_->CallStub(&stub, &left_side, &right_side);
5113 result.ToRegister();
5114 __ testq(result.reg(), result.reg());
5116 dest->true_target()->Branch(cc);
5117 dest->false_target()->Jump();
5120 left_side = Result(left_reg);
5121 right_side = Result(right_val);
5122 // Test smi equality and comparison by signed int comparison.
5123 // Both sides are smis, so we can use an Immediate.
5124 __ SmiCompare(left_side.reg(), Smi::cast(*right_side.handle()));
5129 } else if (cc == equal &&
5130 (left_side_constant_null || right_side_constant_null)) {
5131 // To make null checks efficient, we check if either the left side or
5132 // the right side is the constant 'null'.
5133 // If so, we optimize the code by inlining a null check instead of
5134 // calling the (very) general runtime routine for checking equality.
5135 Result operand = left_side_constant_null ? right_side : left_side;
5138 operand.ToRegister();
5139 __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
5144 // The 'null' value is only equal to 'undefined' if using non-strict
5145 // comparisons.
5146 dest->true_target()->Branch(equal);
5147 __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
5148 dest->true_target()->Branch(equal);
5149 Condition is_smi = masm_->CheckSmi(operand.reg());
5150 dest->false_target()->Branch(is_smi);
5152 // It can be an undetectable object.
5153 // Use a scratch register in preference to spilling operand.reg().
5154 Result temp = allocator()->Allocate();
5155 ASSERT(temp.is_valid());
5156 __ movq(temp.reg(),
5157 FieldOperand(operand.reg(), HeapObject::kMapOffset));
5158 __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
5159 Immediate(1 << Map::kIsUndetectable));
5162 dest->Split(not_zero);
5164 } else { // Neither side is a constant Smi or null.
5165 // If either side is a non-smi constant, skip the smi check.
5166 bool known_non_smi =
5167 (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
5168 (right_side.is_constant() && !right_side.handle()->IsSmi());
5169 left_side.ToRegister();
5170 right_side.ToRegister();
5172 if (known_non_smi) {
5173 // When non-smi, call out to the compare stub.
5174 CompareStub stub(cc, strict);
5175 Result answer = frame_->CallStub(&stub, &left_side, &right_side);
5176 // The result is a Smi, which is negative, zero, or positive.
5177 __ SmiTest(answer.reg()); // Sets both zero and sign flag.
5181 // Here we split control flow to the stub call and inlined cases
5182 // before finally splitting it to the control destination. We use
5183 // a jump target and branching to duplicate the virtual frame at
5184 // the first split. We manually handle the off-frame references
5185 // by reconstituting them on the non-fall-through path.
5187 Register left_reg = left_side.reg();
5188 Register right_reg = right_side.reg();
5190 Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
5191 is_smi.Branch(both_smi);
5192 // When non-smi, call out to the compare stub.
5193 CompareStub stub(cc, strict);
5194 Result answer = frame_->CallStub(&stub, &left_side, &right_side);
5195 __ SmiTest(answer.reg()); // Sets both zero and sign flags.
5197 dest->true_target()->Branch(cc);
5198 dest->false_target()->Jump();
5201 left_side = Result(left_reg);
5202 right_side = Result(right_reg);
5203 __ SmiCompare(left_side.reg(), right_side.reg());
5212 class DeferredInlineBinaryOperation: public DeferredCode {
5214 DeferredInlineBinaryOperation(Token::Value op,
5219 : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
5220 set_comment("[ DeferredInlineBinaryOperation");
5223 virtual void Generate();
5230 OverwriteMode mode_;
5234 void DeferredInlineBinaryOperation::Generate() {
5235 GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
5236 stub.GenerateCall(masm_, left_, right_);
5237 if (!dst_.is(rax)) __ movq(dst_, rax);
5241 void CodeGenerator::GenericBinaryOperation(Token::Value op,
5243 OverwriteMode overwrite_mode) {
5244 Comment cmnt(masm_, "[ BinaryOperation");
5245 Comment cmnt_token(masm_, Token::String(op));
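// Dispatch on the operator and on what is statically known about the
// operands: COMMA simply discards the left value, ADD with a constant string
// operand uses the string add runtime/builtins, two constant smis are folded
// at compile time, a single constant smi operand uses
// ConstantSmiBinaryOperation, and everything else goes through
// LikelySmiBinaryOperation or the generic stub.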
5247 if (op == Token::COMMA) {
5248 // Simply discard left value.
5253 Result right = frame_->Pop();
5254 Result left = frame_->Pop();
5256 if (op == Token::ADD) {
5257 bool left_is_string = left.is_constant() && left.handle()->IsString();
5258 bool right_is_string = right.is_constant() && right.handle()->IsString();
5259 if (left_is_string || right_is_string) {
5260 frame_->Push(&left);
5261 frame_->Push(&right);
5263 if (left_is_string) {
5264 if (right_is_string) {
5265 // TODO(lrn): if both are constant strings
5266 // -- do a compile time cons, if allocation during codegen is allowed.
5267 answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
5270 frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
5272 } else if (right_is_string) {
5274 frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
5276 frame_->Push(&answer);
5279 // Neither operand is known to be a string.
5282 bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
5283 bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
5284 bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
5285 bool right_is_non_smi_constant =
5286 right.is_constant() && !right.handle()->IsSmi();
5288 if (left_is_smi_constant && right_is_smi_constant) {
5289 // Compute the constant result at compile time, and leave it on the frame.
5290 int left_int = Smi::cast(*left.handle())->value();
5291 int right_int = Smi::cast(*right.handle())->value();
5292 if (FoldConstantSmis(op, left_int, right_int)) return;
5295 // Get number type of left and right sub-expressions.
5296 TypeInfo operands_type =
5297 TypeInfo::Combine(left.type_info(), right.type_info());
5300 if (left_is_non_smi_constant || right_is_non_smi_constant) {
5301 GenericBinaryOpStub stub(op,
5303 NO_SMI_CODE_IN_STUB,
5305 answer = stub.GenerateCall(masm_, frame_, &left, &right);
5306 } else if (right_is_smi_constant) {
5307 answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
5308 type, false, overwrite_mode);
5309 } else if (left_is_smi_constant) {
5310 answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
5311 type, true, overwrite_mode);
5313 // Set the flags based on the operation, type and loop nesting level.
5314 // Bit operations always assume they likely operate on Smis. Still only
5315 // generate the inline Smi check code if this operation is part of a loop.
5316 // For all other operations only inline the Smi check code for likely smis
5317 // if the operation is part of a loop.
5318 if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
5319 answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
5321 GenericBinaryOpStub stub(op,
5323 NO_GENERIC_BINARY_FLAGS,
5325 answer = stub.GenerateCall(masm_, frame_, &left, &right);
5329 // Set TypeInfo of result according to the operation performed.
5330 // We rely on the fact that smis have a 32 bit payload on x64.
5331 ASSERT(kSmiValueSize == 32);
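// Any signed 32-bit result therefore fits in a smi, which is why the bitwise
// and arithmetic shift cases below can be typed as smis; x >>> 0 may yield an
// unsigned value outside that range, hence the special case for SHR.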
5332 TypeInfo result_type = TypeInfo::Unknown();
5335 result_type = right.type_info();
5339 // Result type can be either of the two input types.
5340 result_type = operands_type;
5343 case Token::BIT_XOR:
5344 case Token::BIT_AND:
5345 // Result is always a smi.
5346 result_type = TypeInfo::Smi();
5350 // Result is always a smi.
5351 result_type = TypeInfo::Smi();
5354 // Result of x >>> y is always a smi if y >= 1, otherwise a number.
5355 result_type = (right.is_constant() && right.handle()->IsSmi()
5356 && Smi::cast(*right.handle())->value() >= 1)
5358 : TypeInfo::Number();
5361 // Result could be a string or a number. Check types of inputs.
5362 result_type = operands_type.IsNumber()
5363 ? TypeInfo::Number()
5364 : TypeInfo::Unknown();
5370 // Result is always a number.
5371 result_type = TypeInfo::Number();
5376 answer.set_type_info(result_type);
5377 frame_->Push(&answer);
5381 // Emit a LoadIC call to get the value from receiver and leave it in
5382 // dst. The receiver register is restored after the call.
5383 class DeferredReferenceGetNamedValue: public DeferredCode {
5385 DeferredReferenceGetNamedValue(Register dst,
5387 Handle<String> name)
5388 : dst_(dst), receiver_(receiver), name_(name) {
5389 set_comment("[ DeferredReferenceGetNamedValue");
5392 virtual void Generate();
5394 Label* patch_site() { return &patch_site_; }
5400 Handle<String> name_;
5404 void DeferredReferenceGetNamedValue::Generate() {
5406 __ Move(rcx, name_);
5407 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
5408 __ Call(ic, RelocInfo::CODE_TARGET);
5409 // The call must be followed by a test rax instruction to indicate
5410 // that the inobject property case was inlined.
5412 // Store the delta to the map check instruction here in the test
5413 // instruction. Use masm_-> instead of the __ macro since the
5414 // latter can't return a value.
5415 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
5416 // Here we use masm_-> instead of the __ macro because this is the
5417 // instruction that gets patched and coverage code gets in the way.
5418 masm_->testl(rax, Immediate(-delta_to_patch_site));
5419 __ IncrementCounter(&Counters::named_load_inline_miss, 1);
5421 if (!dst_.is(rax)) __ movq(dst_, rax);
5426 void DeferredInlineSmiAdd::Generate() {
5427 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
5428 igostub.GenerateCall(masm_, dst_, value_);
5429 if (!dst_.is(rax)) __ movq(dst_, rax);
5433 void DeferredInlineSmiAddReversed::Generate() {
5434 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
5435 igostub.GenerateCall(masm_, value_, dst_);
5436 if (!dst_.is(rax)) __ movq(dst_, rax);
5440 void DeferredInlineSmiSub::Generate() {
5441 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
5442 igostub.GenerateCall(masm_, dst_, value_);
5443 if (!dst_.is(rax)) __ movq(dst_, rax);
5447 void DeferredInlineSmiOperation::Generate() {
5448 // For mod we don't generate all the Smi code inline.
5449 GenericBinaryOpStub stub(
5452 (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
5453 stub.GenerateCall(masm_, src_, value_);
5454 if (!dst_.is(rax)) __ movq(dst_, rax);
5458 Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
5460 Handle<Object> value,
5463 OverwriteMode overwrite_mode) {
5464 // NOTE: This is an attempt to inline (a bit) more of the code for
5465 // some possible smi operations (like + and -) when (at least) one
5466 // of the operands is a constant smi.
5467 // Consumes the argument "operand".
5469 // TODO(199): Optimize some special cases of operations involving a
5470 // smi literal (multiply by 2, shift by 0, etc.).
5471 if (IsUnsafeSmi(value)) {
5472 Result unsafe_operand(value);
5474 return LikelySmiBinaryOperation(op, &unsafe_operand, operand,
5477 return LikelySmiBinaryOperation(op, operand, &unsafe_operand,
5482 // Get the literal value.
5483 Smi* smi_value = Smi::cast(*value);
5484 int int_value = smi_value->value();
5489 operand->ToRegister();
5490 frame_->Spill(operand->reg());
5491 DeferredCode* deferred = NULL;
5493 deferred = new DeferredInlineSmiAddReversed(operand->reg(),
5497 deferred = new DeferredInlineSmiAdd(operand->reg(),
5501 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5502 __ SmiAddConstant(operand->reg(),
5505 deferred->entry_label());
5506 deferred->BindExit();
5513 Result constant_operand(value);
5514 answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
5517 operand->ToRegister();
5518 frame_->Spill(operand->reg());
5519 DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
5522 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5523 // A smi currently fits in a 32-bit Immediate.
5524 __ SmiSubConstant(operand->reg(),
5527 deferred->entry_label());
5528 deferred->BindExit();
5536 Result constant_operand(value);
5537 answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
5540 // Only the least significant 5 bits of the shift value are used.
5541 // In the slow case, this masking is done inside the runtime call.
5542 int shift_value = int_value & 0x1f;
5543 operand->ToRegister();
5544 frame_->Spill(operand->reg());
5545 DeferredInlineSmiOperation* deferred =
5546 new DeferredInlineSmiOperation(op,
5551 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5552 __ SmiShiftArithmeticRightConstant(operand->reg(),
5555 deferred->BindExit();
5562 Result constant_operand(value);
5563 answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
5566 // Only the least significant 5 bits of the shift value are used.
5567 // In the slow case, this masking is done inside the runtime call.
5568 int shift_value = int_value & 0x1f;
5569 operand->ToRegister();
5570 answer = allocator()->Allocate();
5571 ASSERT(answer.is_valid());
5572 DeferredInlineSmiOperation* deferred =
5573 new DeferredInlineSmiOperation(op,
5578 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5579 __ SmiShiftLogicalRightConstant(answer.reg(),
5582 deferred->entry_label());
5583 deferred->BindExit();
5590 Result constant_operand(value);
5591 answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
5594 // Only the least significant 5 bits of the shift value are used.
5595 // In the slow case, this masking is done inside the runtime call.
5596 int shift_value = int_value & 0x1f;
5597 operand->ToRegister();
5598 if (shift_value == 0) {
5599 // Spill operand so it can be overwritten in the slow case.
5600 frame_->Spill(operand->reg());
5601 DeferredInlineSmiOperation* deferred =
5602 new DeferredInlineSmiOperation(op,
5607 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5608 deferred->BindExit();
5611 // Use a fresh temporary for nonzero shift values.
5612 answer = allocator()->Allocate();
5613 ASSERT(answer.is_valid());
5614 DeferredInlineSmiOperation* deferred =
5615 new DeferredInlineSmiOperation(op,
5620 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5621 __ SmiShiftLeftConstant(answer.reg(),
5624 deferred->entry_label());
5625 deferred->BindExit();
5632 case Token::BIT_XOR:
5633 case Token::BIT_AND: {
5634 operand->ToRegister();
5635 frame_->Spill(operand->reg());
5637 // Bit operations with a constant smi are commutative.
5638 // We can swap left and right operands with no problem.
5639 // Swap left and right overwrite modes. 0->0, 1->2, 2->1.
5640 overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
5642 DeferredCode* deferred = new DeferredInlineSmiOperation(op,
5647 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5648 if (op == Token::BIT_AND) {
5649 __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
5650 } else if (op == Token::BIT_XOR) {
5651 if (int_value != 0) {
5652 __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
5655 ASSERT(op == Token::BIT_OR);
5656 if (int_value != 0) {
5657 __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
5660 deferred->BindExit();
5665 // Generate inline code for mod of powers of 2 and negative powers of 2.
5669 (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
5670 operand->ToRegister();
5671 frame_->Spill(operand->reg());
5672 DeferredCode* deferred =
5673 new DeferredInlineSmiOperation(op,
5678 // Check for negative or non-Smi left hand side.
5679 __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
5680 if (int_value < 0) int_value = -int_value;
5681 if (int_value == 1) {
5682 __ Move(operand->reg(), Smi::FromInt(0));
5684 __ SmiAndConstant(operand->reg(),
5686 Smi::FromInt(int_value - 1));
5688 deferred->BindExit();
5690 break; // This break only applies if we generated code for MOD.
5692 // Fall through if we did not find a power of 2 on the right hand side!
5693 // The next case must be the default.
5696 Result constant_operand(value);
5698 answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
5701 answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
5707 ASSERT(answer.is_valid());
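// Illustrative sketch, not part of the original file: the inline MOD case
// above relies on the identity x % 2^k == x & (2^k - 1) for x >= 0, and on
// the fact that for a non-negative dividend the sign of the divisor does not
// change the result (in JavaScript the result takes the dividend's sign).
// A minimal C++ rendering of that reasoning:
namespace mod_mask_sketch {
inline int ModPowerOfTwo(int x, int divisor) {
  // Mirrors the guards in the generated code: x is a non-negative smi and
  // |divisor| is a power of two.
  if (divisor < 0) divisor = -divisor;
  return x & (divisor - 1);
}
}  // namespace mod_mask_sketch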
5711 Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
5714 OverwriteMode overwrite_mode) {
5716 // Special handling of div and mod because they use fixed registers.
5717 if (op == Token::DIV || op == Token::MOD) {
5718 // We need rax as the quotient register, rdx as the remainder
5719 // register, neither left nor right in rax or rdx, and left copied to rax.
5723 bool left_is_in_rax = false;
5724 // Step 1: get rax for quotient.
5725 if ((left->is_register() && left->reg().is(rax)) ||
5726 (right->is_register() && right->reg().is(rax))) {
5727 // One or both is in rax. Use a fresh non-rdx register for
5729 Result fresh = allocator_->Allocate();
5730 ASSERT(fresh.is_valid());
5731 if (fresh.reg().is(rdx)) {
5733 fresh = allocator_->Allocate();
5734 ASSERT(fresh.is_valid());
5736 if (left->is_register() && left->reg().is(rax)) {
5739 left_is_in_rax = true;
5741 if (right->is_register() && right->reg().is(rax)) {
5745 __ movq(fresh.reg(), rax);
5747 // Neither left nor right is in rax.
5748 quotient = allocator_->Allocate(rax);
5750 ASSERT(quotient.is_register() && quotient.reg().is(rax));
5751 ASSERT(!(left->is_register() && left->reg().is(rax)));
5752 ASSERT(!(right->is_register() && right->reg().is(rax)));
5754 // Step 2: get rdx for remainder if necessary.
5755 if (!remainder.is_valid()) {
5756 if ((left->is_register() && left->reg().is(rdx)) ||
5757 (right->is_register() && right->reg().is(rdx))) {
5758 Result fresh = allocator_->Allocate();
5759 ASSERT(fresh.is_valid());
5760 if (left->is_register() && left->reg().is(rdx)) {
5764 if (right->is_register() && right->reg().is(rdx)) {
5768 __ movq(fresh.reg(), rdx);
5770 // Neither left nor right is in rdx.
5771 remainder = allocator_->Allocate(rdx);
5774 ASSERT(remainder.is_register() && remainder.reg().is(rdx));
5775 ASSERT(!(left->is_register() && left->reg().is(rdx)));
5776 ASSERT(!(right->is_register() && right->reg().is(rdx)));
5779 right->ToRegister();
5783 // Check that left and right are smi tagged.
5784 DeferredInlineBinaryOperation* deferred =
5785 new DeferredInlineBinaryOperation(op,
5786 (op == Token::DIV) ? rax : rdx,
5790 __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
5792 if (op == Token::DIV) {
5793 __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
5794 deferred->BindExit();
5799 ASSERT(op == Token::MOD);
5800 __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
5801 deferred->BindExit();
5806 ASSERT(answer.is_valid());
5810 // Special handling of shift operations because they use fixed registers.
5812 if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
5813 // Move left out of rcx if necessary.
5814 if (left->is_register() && left->reg().is(rcx)) {
5815 *left = allocator_->Allocate();
5816 ASSERT(left->is_valid());
5817 __ movq(left->reg(), rcx);
5819 right->ToRegister(rcx);
5821 ASSERT(left->is_register() && !left->reg().is(rcx));
5822 ASSERT(right->is_register() && right->reg().is(rcx));
5824 // We will modify right, it must be spilled.
5827 // Use a fresh answer register to avoid spilling the left operand.
5828 answer = allocator_->Allocate();
5829 ASSERT(answer.is_valid());
5830 // Check that both operands are smis using the answer register as a
5832 DeferredInlineBinaryOperation* deferred =
5833 new DeferredInlineBinaryOperation(op,
5838 __ movq(answer.reg(), left->reg());
5839 __ or_(answer.reg(), rcx);
5840 __ JumpIfNotSmi(answer.reg(), deferred->entry_label());
5842 // Perform the operation.
5845 __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
5848 __ SmiShiftLogicalRight(answer.reg(),
5851 deferred->entry_label());
5855 __ SmiShiftLeft(answer.reg(),
5858 deferred->entry_label());
5864 deferred->BindExit();
5867 ASSERT(answer.is_valid());
5871 // Handle the other binary operations.
5873 right->ToRegister();
5874 // A newly allocated register answer is used to hold the answer. The
5875 // registers containing left and right are not modified so they don't
5876 // need to be spilled in the fast case.
5877 answer = allocator_->Allocate();
5878 ASSERT(answer.is_valid());
5880 // Perform the smi tag check.
5881 DeferredInlineBinaryOperation* deferred =
5882 new DeferredInlineBinaryOperation(op,
5887 __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
5891 __ SmiAdd(answer.reg(),
5894 deferred->entry_label());
5898 __ SmiSub(answer.reg(),
5901 deferred->entry_label());
5905 __ SmiMul(answer.reg(),
5908 deferred->entry_label());
5913 __ SmiOr(answer.reg(), left->reg(), right->reg());
5916 case Token::BIT_AND:
5917 __ SmiAnd(answer.reg(), left->reg(), right->reg());
5920 case Token::BIT_XOR:
5921 __ SmiXor(answer.reg(), left->reg(), right->reg());
5928 deferred->BindExit();
5931 ASSERT(answer.is_valid());
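// Illustrative sketch, not part of the original file: the shift case above
// checks both operands with a single branch by or'ing them into the answer
// register first. A smi has a zero tag bit while a heap object pointer has a
// nonzero tag bit, so the or of the two words has a zero tag bit exactly
// when both operands are smis:
namespace smi_or_check_sketch {
inline bool BothAreSmis(unsigned long long left, unsigned long long right) {
  const unsigned long long kTagMask = 1;  // assumed: tag lives in the low bit
  return ((left | right) & kTagMask) == 0;
}
}  // namespace smi_or_check_sketch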
5936 Result CodeGenerator::EmitKeyedLoad(bool is_global) {
5937 Comment cmnt(masm_, "[ Load from keyed Property");
5938 // Inline array load code if inside of a loop. We do not know
5939 // the receiver map yet, so we initially generate the code with
5940 // a check against an invalid map. In the inline cache code, we
5941 // patch the map check if appropriate.
5942 if (loop_nesting() > 0) {
5943 Comment cmnt(masm_, "[ Inlined load from keyed Property");
5945 Result key = frame_->Pop();
5946 Result receiver = frame_->Pop();
5948 receiver.ToRegister();
5950 // Use a fresh temporary to load the elements without destroying
5951 // the receiver which is needed for the deferred slow case.
5952 Result elements = allocator()->Allocate();
5953 ASSERT(elements.is_valid());
5955 // Use a fresh temporary for the index and later the loaded
5957 Result index = allocator()->Allocate();
5958 ASSERT(index.is_valid());
5960 DeferredReferenceGetKeyedValue* deferred =
5961 new DeferredReferenceGetKeyedValue(index.reg(),
5966 // Check that the receiver is not a smi (only needed if this
5967 // is not a load from the global context) and that it has the
5970 __ JumpIfSmi(receiver.reg(), deferred->entry_label());
5973 // Initially, use an invalid map. The map is patched in the IC
5974 // initialization code.
5975 __ bind(deferred->patch_site());
5976 // Use masm-> here instead of the double underscore macro since extra
5977 // coverage code can interfere with the patching. Do not use
5978 // root array to load null_value, since it must be patched with
5979 // the expected receiver map.
5980 masm_->movq(kScratchRegister, Factory::null_value(),
5981 RelocInfo::EMBEDDED_OBJECT);
5982 masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
5984 deferred->Branch(not_equal);
5986 // Check that the key is a non-negative smi.
5987 __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
5989 // Get the elements array from the receiver and check that it
5990 // is not a dictionary.
5991 __ movq(elements.reg(),
5992 FieldOperand(receiver.reg(), JSObject::kElementsOffset));
5993 __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
5994 Factory::fixed_array_map());
5995 deferred->Branch(not_equal);
5997 // Shift the key to get the actual index value and check that
5998 // it is within bounds.
5999 __ SmiToInteger32(index.reg(), key.reg());
6000 __ cmpl(index.reg(),
6001 FieldOperand(elements.reg(), FixedArray::kLengthOffset));
6002 deferred->Branch(above_equal);
6004 // The index register holds the un-smi-tagged key. It has been
6005 // zero-extended to 64-bits, so it can be used directly as an index in the operand below.
6007 // Load and check that the result is not the hole. We could
6008 // reuse the index or elements register for the value.
6010 // TODO(206): Consider whether it makes sense to try some
6011 // heuristic about which register to reuse. For example, if
6012 // one is rax, then we can reuse that one because the value
6013 // coming from the deferred code will be in rax.
6014 Result value = index;
6015 __ movq(value.reg(),
6016 Operand(elements.reg(),
6019 FixedArray::kHeaderSize - kHeapObjectTag));
6022 __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
6023 deferred->Branch(equal);
6024 __ IncrementCounter(&Counters::keyed_load_inline, 1);
6026 deferred->BindExit();
6027 // Restore the receiver and key to the frame and push the
6028 // result on top of it.
6029 frame_->Push(&receiver);
6034 Comment cmnt(masm_, "[ Load from keyed Property");
6035 RelocInfo::Mode mode = is_global
6036 ? RelocInfo::CODE_TARGET_CONTEXT
6037 : RelocInfo::CODE_TARGET;
6038 Result answer = frame_->CallKeyedLoadIC(mode);
6039 // Make sure that we do not have a test instruction after the
6040 // call. A test instruction after the call is used to
6041 // indicate that we have generated an inline version of the
6042 // keyed load. The explicit nop instruction is here because
6043 // the push that follows might be peep-hole optimized away.
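// Illustrative sketch, not part of the original file: the inlined element
// load above addresses the backing store through the still-tagged elements
// pointer, so the displacement folds in both the FixedArray header and the
// -kHeapObjectTag adjustment. With stand-in constants (the real values come
// from the object layout headers):
namespace keyed_load_sketch {
const int kPointerSizeSketch = 8;
const int kHeapObjectTagSketch = 1;                          // assumed
const int kFixedArrayHeaderSketch = 2 * kPointerSizeSketch;  // map + length, assumed
inline long long ElementAddress(long long tagged_elements, int index) {
  return tagged_elements + (kFixedArrayHeaderSketch - kHeapObjectTagSketch) +
         static_cast<long long>(index) * kPointerSizeSketch;
}
}  // namespace keyed_load_sketch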
6051 #define __ ACCESS_MASM(masm)
6054 Handle<String> Reference::GetName() {
6055 ASSERT(type_ == NAMED);
6056 Property* property = expression_->AsProperty();
6057 if (property == NULL) {
6058 // Global variable reference treated as a named property reference.
6059 VariableProxy* proxy = expression_->AsVariableProxy();
6060 ASSERT(proxy->AsVariable() != NULL);
6061 ASSERT(proxy->AsVariable()->is_global());
6062 return proxy->name();
6064 Literal* raw_name = property->key()->AsLiteral();
6065 ASSERT(raw_name != NULL);
6066 return Handle<String>(String::cast(*raw_name->handle()));
6071 void Reference::GetValue() {
6072 ASSERT(!cgen_->in_spilled_code());
6073 ASSERT(cgen_->HasValidEntryRegisters());
6074 ASSERT(!is_illegal());
6075 MacroAssembler* masm = cgen_->masm();
6077 // Record the source position for the property load.
6078 Property* property = expression_->AsProperty();
6079 if (property != NULL) {
6080 cgen_->CodeForSourcePosition(property->position());
6085 Comment cmnt(masm, "[ Load from Slot");
6086 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6087 ASSERT(slot != NULL);
6088 cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
6093 Variable* var = expression_->AsVariableProxy()->AsVariable();
6094 bool is_global = var != NULL;
6095 ASSERT(!is_global || var->is_global());
6097 // Do not inline the inobject property case for loads from the global
6098 // object. Also do not inline for unoptimized code. This saves time
6099 // in the code generator. Unoptimized code is toplevel code or code
6100 // that is not in a loop.
6102 cgen_->scope()->is_global_scope() ||
6103 cgen_->loop_nesting() == 0) {
6104 Comment cmnt(masm, "[ Load from named Property");
6105 cgen_->frame()->Push(GetName());
6107 RelocInfo::Mode mode = is_global
6108 ? RelocInfo::CODE_TARGET_CONTEXT
6109 : RelocInfo::CODE_TARGET;
6110 Result answer = cgen_->frame()->CallLoadIC(mode);
6111 // A test rax instruction following the call signals that the
6112 // inobject property case was inlined. Ensure that there is not
6113 // a test rax instruction here.
6115 cgen_->frame()->Push(&answer);
6117 // Inline the inobject property case.
6118 Comment cmnt(masm, "[ Inlined named property load");
6119 Result receiver = cgen_->frame()->Pop();
6120 receiver.ToRegister();
6121 Result value = cgen_->allocator()->Allocate();
6122 ASSERT(value.is_valid());
6123 // Cannot use r12 for receiver, because that changes
6124 // the distance between a call and a fixup location,
6125 // due to a special encoding of r12 as r/m in a ModR/M byte.
6126 if (receiver.reg().is(r12)) {
6127 // Swap receiver and value.
6128 __ movq(value.reg(), receiver.reg());
6129 Result temp = receiver;
6132 cgen_->frame()->Spill(value.reg()); // r12 may have been shared.
6135 DeferredReferenceGetNamedValue* deferred =
6136 new DeferredReferenceGetNamedValue(value.reg(),
6140 // Check that the receiver is a heap object.
6141 __ JumpIfSmi(receiver.reg(), deferred->entry_label());
6143 __ bind(deferred->patch_site());
6144 // This is the map check instruction that will be patched (so we can't
6145 // use the double underscore macro that may insert instructions).
6146 // Initially use an invalid map to force a failure.
6147 masm->Move(kScratchRegister, Factory::null_value());
6148 masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
6150 // This branch is always a forwards branch so it's always a fixed
6151 // size which allows the assert below to succeed and patching to work.
6152 // Don't use deferred->Branch(...), since that might add coverage code.
6153 masm->j(not_equal, deferred->entry_label());
6155 // The delta from the patch label to the load offset must be
6156 // statically known.
6157 ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
6158 LoadIC::kOffsetToLoadInstruction);
6159 // The initial (invalid) offset has to be large enough to force
6160 // a 32-bit instruction encoding to allow patching with an
6161 // arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
6162 int offset = kMaxInt;
6163 masm->movq(value.reg(), FieldOperand(receiver.reg(), offset));
6165 __ IncrementCounter(&Counters::named_load_inline, 1);
6166 deferred->BindExit();
6167 cgen_->frame()->Push(&receiver);
6168 cgen_->frame()->Push(&value);
6174 Comment cmnt(masm, "[ Load from keyed Property");
6175 Variable* var = expression_->AsVariableProxy()->AsVariable();
6176 bool is_global = var != NULL;
6177 ASSERT(!is_global || var->is_global());
6179 Result value = cgen_->EmitKeyedLoad(is_global);
6180 cgen_->frame()->Push(&value);
6188 if (!persist_after_get_) {
6189 cgen_->UnloadReference(this);
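// Illustrative sketch, not part of the original file: why the inlined named
// load above starts out with a kMaxInt displacement. An x86-64 memory
// operand encodes a small displacement in a single signed byte (disp8) and
// anything larger in four bytes (disp32); patching can only rewrite bytes in
// place, so the initial bogus offset must already select the four-byte form:
namespace disp_size_sketch {
inline int DisplacementBytes(int displacement) {
  return (displacement >= -128 && displacement <= 127) ? 1 : 4;
}
// DisplacementBytes(kMaxInt) == 4, so any real in-object field offset can
// later be patched into the instruction without changing its length.
}  // namespace disp_size_sketch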
6194 void Reference::TakeValue() {
6195 // TODO(X64): This function is completely architecture independent. Move
6196 // it somewhere shared.
6198 // For non-constant frame-allocated slots, we invalidate the value in the
6199 // slot. For all others, we fall back on GetValue.
6200 ASSERT(!cgen_->in_spilled_code());
6201 ASSERT(!is_illegal());
6202 if (type_ != SLOT) {
6207 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6208 ASSERT(slot != NULL);
6209 if (slot->type() == Slot::LOOKUP ||
6210 slot->type() == Slot::CONTEXT ||
6211 slot->var()->mode() == Variable::CONST ||
6212 slot->is_arguments()) {
6217 // Only non-constant, frame-allocated parameters and locals can reach
6218 // here. Be careful not to use the optimizations for arguments
6219 // object access since it may not have been initialized yet.
6220 ASSERT(!slot->is_arguments());
6221 if (slot->type() == Slot::PARAMETER) {
6222 cgen_->frame()->TakeParameterAt(slot->index());
6224 ASSERT(slot->type() == Slot::LOCAL);
6225 cgen_->frame()->TakeLocalAt(slot->index());
6228 ASSERT(persist_after_get_);
6229 // Do not unload the reference, because it is used in SetValue.
6233 void Reference::SetValue(InitState init_state) {
6234 ASSERT(cgen_->HasValidEntryRegisters());
6235 ASSERT(!is_illegal());
6236 MacroAssembler* masm = cgen_->masm();
6239 Comment cmnt(masm, "[ Store to Slot");
6240 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6241 ASSERT(slot != NULL);
6242 cgen_->StoreToSlot(slot, init_state);
6243 cgen_->UnloadReference(this);
6248 Comment cmnt(masm, "[ Store to named Property");
6249 cgen_->frame()->Push(GetName());
6250 Result answer = cgen_->frame()->CallStoreIC();
6251 cgen_->frame()->Push(&answer);
6257 Comment cmnt(masm, "[ Store to keyed Property");
6259 // Generate inlined version of the keyed store if the code is in
6260 // a loop and the key is likely to be a smi.
6261 Property* property = expression()->AsProperty();
6262 ASSERT(property != NULL);
6263 StaticType* key_smi_analysis = property->key()->type();
6265 if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
6266 Comment cmnt(masm, "[ Inlined store to keyed Property");
6268 // Get the receiver, key and value into registers.
6269 Result value = cgen_->frame()->Pop();
6270 Result key = cgen_->frame()->Pop();
6271 Result receiver = cgen_->frame()->Pop();
6273 Result tmp = cgen_->allocator_->Allocate();
6274 ASSERT(tmp.is_valid());
6276 // Determine whether the value is a constant before putting it
6278 bool value_is_constant = value.is_constant();
6280 // Make sure that value, key and receiver are in registers.
6283 receiver.ToRegister();
6285 DeferredReferenceSetKeyedValue* deferred =
6286 new DeferredReferenceSetKeyedValue(value.reg(),
6290 // Check that the value is a smi if it is not a constant.
6291 // We can skip the write barrier for smis and constants.
6292 if (!value_is_constant) {
6293 __ JumpIfNotSmi(value.reg(), deferred->entry_label());
6296 // Check that the key is a non-negative smi.
6297 __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
6299 // Check that the receiver is not a smi.
6300 __ JumpIfSmi(receiver.reg(), deferred->entry_label());
6302 // Check that the receiver is a JSArray.
6303 __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
6304 deferred->Branch(not_equal);
6306 // Check that the key is within bounds. Both the key and the
6307 // length of the JSArray are smis.
6308 __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
6310 deferred->Branch(less_equal);
6312 // Get the elements array from the receiver and check that it
6313 // is a flat array (not a dictionary).
6315 FieldOperand(receiver.reg(), JSObject::kElementsOffset));
6316 // Bind the deferred code patch site to be able to locate the
6317 // fixed array map comparison. When debugging, we patch this
6318 // comparison to always fail so that we will hit the IC call
6319 // in the deferred code which will allow the debugger to
6320 // break for fast case stores.
6321 __ bind(deferred->patch_site());
6322 // Avoid using __ to ensure the distance from patch_site
6323 // to the map address is always the same.
6324 masm->movq(kScratchRegister, Factory::fixed_array_map(),
6325 RelocInfo::EMBEDDED_OBJECT);
6326 __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
6328 deferred->Branch(not_equal);
6332 masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
6333 __ movq(Operand(tmp.reg(),
6336 FixedArray::kHeaderSize - kHeapObjectTag),
6338 __ IncrementCounter(&Counters::keyed_store_inline, 1);
6340 deferred->BindExit();
6342 cgen_->frame()->Push(&receiver);
6343 cgen_->frame()->Push(&key);
6344 cgen_->frame()->Push(&value);
6346 Result answer = cgen_->frame()->CallKeyedStoreIC();
6347 // Make sure that we do not have a test instruction after the
6348 // call. A test instruction after the call is used to
7349 // indicate that we have generated an inline version of the keyed store.
6352 cgen_->frame()->Push(&answer);
6354 cgen_->UnloadReference(this);
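// Illustrative sketch, not part of the original file: the bounds check above
// compares the JSArray length and the key while both are still tagged smis.
// That is sound because smi tagging is a left shift of the 32-bit value with
// a zero tag, and a left shift preserves signed order. Assuming the x64 smi
// layout of the time (value in the upper 32 bits):
namespace smi_compare_sketch {
inline long long TagSmi(int value) {
  return static_cast<long long>(value) << 32;  // assumed kSmiShift of 32
}
// TagSmi(a) < TagSmi(b) exactly when a < b, so SmiCompare can work on the
// tagged words directly.
}  // namespace smi_compare_sketch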
6364 void FastNewClosureStub::Generate(MacroAssembler* masm) {
6365 // Create a new closure from the given function info in new
6366 // space. Set the context to the current context in rsi.
6368 __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
6370 // Get the function info from the stack.
6371 __ movq(rdx, Operand(rsp, 1 * kPointerSize));
6373 // Compute the function map in the current global context and set that
6374 // as the map of the allocated object.
6375 __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
6376 __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
6377 __ movq(rcx, Operand(rcx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
6378 __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
6380 // Initialize the rest of the function. We don't have to update the
6381 // write barrier because the allocated object is in new space.
6382 __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
6383 __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
6384 __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
6385 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
6386 __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
6387 __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
6388 __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
6389 __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
6391 // Return and remove the on-stack parameter.
6392 __ ret(1 * kPointerSize);
6394 // Create a new closure through the slower runtime call.
6396 __ pop(rcx); // Temporarily remove return address.
6400 __ push(rcx); // Restore return address.
6401 __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
6405 void FastNewContextStub::Generate(MacroAssembler* masm) {
6406 // Try to allocate the context in new space.
6408 int length = slots_ + Context::MIN_CONTEXT_SLOTS;
6409 __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
6410 rax, rbx, rcx, &gc, TAG_OBJECT);
6412 // Get the function from the stack.
6413 __ movq(rcx, Operand(rsp, 1 * kPointerSize));
6415 // Setup the object header.
6416 __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
6417 __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
6418 __ movl(FieldOperand(rax, Array::kLengthOffset), Immediate(length));
6420 // Setup the fixed slots.
6421 __ xor_(rbx, rbx); // Set to NULL.
6422 __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
6423 __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
6424 __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
6425 __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
6427 // Copy the global object from the surrounding context.
6428 __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
6429 __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
6431 // Initialize the rest of the slots to undefined.
6432 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
6433 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
6434 __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
6437 // Return and remove the on-stack parameter.
6439 __ ret(1 * kPointerSize);
6441 // Need to collect. Call into runtime system.
6443 __ TailCallRuntime(Runtime::kNewContext, 1, 1);
6447 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
6448 // Stack layout on entry:
6450 // [rsp + kPointerSize]: constant elements.
6451 // [rsp + (2 * kPointerSize)]: literal index.
6452 // [rsp + (3 * kPointerSize)]: literals array.
6454 // All sizes here are multiples of kPointerSize.
6455 int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
6456 int size = JSArray::kSize + elements_size;
6458 // Load boilerplate object into rcx and check if we need to create a
6461 __ movq(rcx, Operand(rsp, 3 * kPointerSize));
6462 __ movq(rax, Operand(rsp, 2 * kPointerSize));
6463 SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
6465 FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
6466 __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
6467 __ j(equal, &slow_case);
6469 // Allocate both the JS array and the elements array in one big
6470 // allocation. This avoids multiple limit checks.
6471 __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
6473 // Copy the JS array part.
6474 for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
6475 if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
6476 __ movq(rbx, FieldOperand(rcx, i));
6477 __ movq(FieldOperand(rax, i), rbx);
6482 // Get hold of the elements array of the boilerplate and setup the
6483 // elements pointer in the resulting object.
6484 __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
6485 __ lea(rdx, Operand(rax, JSArray::kSize));
6486 __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
6488 // Copy the elements array.
6489 for (int i = 0; i < elements_size; i += kPointerSize) {
6490 __ movq(rbx, FieldOperand(rcx, i));
6491 __ movq(FieldOperand(rdx, i), rbx);
6495 // Return and remove the on-stack parameters.
6496 __ ret(3 * kPointerSize);
6498 __ bind(&slow_case);
6499 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
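// Illustrative sketch, not part of the original file: the stub above performs
// the clone as one allocation holding the JSArray followed by its elements,
// then two plain word-copy loops, with the clone's elements field redirected
// to the freshly copied backing store. The copy itself is nothing more than:
namespace shallow_clone_sketch {
inline void CopyWords(long long* dst, const long long* src, int word_count) {
  for (int i = 0; i < word_count; i++) dst[i] = src[i];
}
}  // namespace shallow_clone_sketch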
6503 void ToBooleanStub::Generate(MacroAssembler* masm) {
6504 Label false_result, true_result, not_string;
6505 __ movq(rax, Operand(rsp, 1 * kPointerSize));
6508 __ CompareRoot(rax, Heap::kNullValueRootIndex);
6509 __ j(equal, &false_result);
6511 // Get the map and type of the heap object.
6512 // We don't use CmpObjectType because we manipulate the type field.
6513 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
6514 __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
6516 // Undetectable => false.
6517 __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
6518 __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
6519 __ j(not_zero, &false_result);
6521 // JavaScript object => true.
6522 __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
6523 __ j(above_equal, &true_result);
6525 // String value => false iff empty.
6526 __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
6527 __ j(above_equal, &not_string);
6528 __ movl(rdx, FieldOperand(rax, String::kLengthOffset));
6530 __ j(zero, &false_result);
6531 __ jmp(&true_result);
6533 __ bind(&not_string);
6534 // HeapNumber => false iff +0, -0, or NaN.
6535 // These three cases set C3 when compared to zero in the FPU.
6536 __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
6537 __ j(not_equal, &true_result);
6538 __ fldz(); // Load zero onto fp stack
6539 // Load heap-number double value onto fp stack
6540 __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
6542 __ j(zero, &false_result);
6543 // Fall through to |true_result|.
6545 // Return 1/0 for true/false in rax.
6546 __ bind(&true_result);
6547 __ movq(rax, Immediate(1));
6548 __ ret(1 * kPointerSize);
6549 __ bind(&false_result);
6551 __ ret(1 * kPointerSize);
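// Illustrative sketch, not part of the original file: the stub above encodes
// the ECMAScript ToBoolean rules (some cases, e.g. smis and the oddballs
// true/false/undefined, are handled in parts of the stub not shown here).
// Undefined, null, false, +0, -0, NaN and the empty string convert to false;
// everything else converts to true. The numeric and string cases in C++:
namespace to_boolean_sketch {
inline bool NumberToBoolean(double value) {
  // NaN compares unequal to everything, so this yields false for NaN, and
  // +0/-0 both compare equal to zero.
  return value < 0 || value > 0;
}
inline bool StringToBoolean(int length) { return length != 0; }
}  // namespace to_boolean_sketch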
6555 bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
6556 Object* answer_object = Heap::undefined_value();
6559 // Use intptr_t to detect overflow of 32-bit int.
6560 if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
6561 answer_object = Smi::FromInt(left + right);
6565 // Use intptr_t to detect overflow of 32-bit int.
6566 if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
6567 answer_object = Smi::FromInt(left - right);
6571 double answer = static_cast<double>(left) * right;
6572 if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
6573 // If the product is zero and the non-zero factor is negative,
6574 // the spec requires us to return floating point negative zero.
6575 if (answer != 0 || (left + right) >= 0) {
6576 answer_object = Smi::FromInt(static_cast<int>(answer));
6585 answer_object = Smi::FromInt(left | right);
6587 case Token::BIT_AND:
6588 answer_object = Smi::FromInt(left & right);
6590 case Token::BIT_XOR:
6591 answer_object = Smi::FromInt(left ^ right);
6595 int shift_amount = right & 0x1F;
6596 if (Smi::IsValid(left << shift_amount)) {
6597 answer_object = Smi::FromInt(left << shift_amount);
6602 int shift_amount = right & 0x1F;
6603 unsigned int unsigned_left = left;
6604 unsigned_left >>= shift_amount;
6605 if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
6606 answer_object = Smi::FromInt(unsigned_left);
6611 int shift_amount = right & 0x1F;
6612 unsigned int unsigned_left = left;
6614 // Perform an arithmetic shift of a negative number by
6615 // complementing the number, shifting logically, and complementing again.
6616 unsigned_left = ~unsigned_left;
6617 unsigned_left >>= shift_amount;
6618 unsigned_left = ~unsigned_left;
6620 unsigned_left >>= shift_amount;
6622 ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
6623 answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
6630 if (answer_object == Heap::undefined_value()) {
6633 frame_->Push(Handle<Object>(answer_object));
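// Illustrative sketch, not part of the original file: the folding above must
// reject any result that does not fit in a smi. Doing the addition in a
// 64-bit type first makes that range check exact even when the 32-bit sum
// would already have wrapped:
namespace fold_smi_sketch {
inline bool FoldAdd(int left, int right, long long smi_min, long long smi_max,
                    int* result) {
  long long wide = static_cast<long long>(left) + right;
  if (wide < smi_min || wide > smi_max) return false;  // leave it to runtime
  *result = static_cast<int>(wide);
  return true;
}
}  // namespace fold_smi_sketch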
6638 // End of CodeGenerator implementation.
6640 // Get the integer part of a heap number. Surprisingly, all this bit twiddling
6641 // is faster than using the built-in instructions on floating point registers.
6642 // Trashes rdi and rbx. Dest is rcx. Source cannot be rcx or one of the
6643 // trashed registers.
6644 void IntegerConvert(MacroAssembler* masm,
6647 Label* conversion_failure) {
6648 ASSERT(!source.is(rcx) && !source.is(rdi) && !source.is(rbx));
6649 Label done, right_exponent, normal_exponent;
6650 Register scratch = rbx;
6651 Register scratch2 = rdi;
6652 // Get exponent word.
6653 __ movl(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
6654 // Get exponent alone in scratch2.
6655 __ movl(scratch2, scratch);
6656 __ and_(scratch2, Immediate(HeapNumber::kExponentMask));
6658 CpuFeatures::Scope scope(SSE3);
6659 // Check whether the exponent is too big for a 64 bit signed integer.
6660 static const uint32_t kTooBigExponent =
6661 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
6662 __ cmpl(scratch2, Immediate(kTooBigExponent));
6663 __ j(greater_equal, conversion_failure);
6664 // Load x87 register with heap number.
6665 __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
6666 // Reserve space for 64 bit answer.
6667 __ subq(rsp, Immediate(sizeof(uint64_t))); // Nolint.
6668 // Do conversion, which cannot fail because we checked the exponent.
6669 __ fisttp_d(Operand(rsp, 0));
6670 __ movl(rcx, Operand(rsp, 0)); // Load low word of answer into rcx.
6671 __ addq(rsp, Immediate(sizeof(uint64_t))); // Nolint.
6673 // Load rcx with zero. We use this either for the final shift or
6676 // Check whether the exponent matches a 32 bit signed int that cannot be
6677 // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
6678 // exponent is 30 (biased). This is the exponent that we are fastest at and
6679 // also the highest exponent we can handle here.
6680 const uint32_t non_smi_exponent =
6681 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
6682 __ cmpl(scratch2, Immediate(non_smi_exponent));
6683 // If we have a match of the int32-but-not-Smi exponent then skip some
6685 __ j(equal, &right_exponent);
6686 // If the exponent is higher than that then go to slow case. This catches
6687 // numbers that don't fit in a signed int32, infinities and NaNs.
6688 __ j(less, &normal_exponent);
6691 // Handle a big exponent. The only reason we have this code is that the
6692 // >>> operator has a tendency to generate numbers with an exponent of 31.
6693 const uint32_t big_non_smi_exponent =
6694 (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
6695 __ cmpl(scratch2, Immediate(big_non_smi_exponent));
6696 __ j(not_equal, conversion_failure);
6697 // We have the big exponent, typically from >>>. This means the number is
6698 // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
6699 __ movl(scratch2, scratch);
6700 __ and_(scratch2, Immediate(HeapNumber::kMantissaMask));
6701 // Put back the implicit 1.
6702 __ or_(scratch2, Immediate(1 << HeapNumber::kExponentShift));
6703 // Shift up the mantissa bits to take up the space the exponent used to
6704 // take. We just or'ed in the implicit bit so that took care of one and
6705 // we want to use the full unsigned range so we subtract 1 bit from the
6707 const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
6708 __ shl(scratch2, Immediate(big_shift_distance));
6709 // Get the second half of the double.
6710 __ movl(rcx, FieldOperand(source, HeapNumber::kMantissaOffset));
6711 // Shift down 21 bits to get the most significant 11 bits or the low
6713 __ shr(rcx, Immediate(32 - big_shift_distance));
6714 __ or_(rcx, scratch2);
6715 // We have the answer in rcx, but we may need to negate it.
6716 __ testl(scratch, scratch);
6717 __ j(positive, &done);
6722 __ bind(&normal_exponent);
6723 // Exponent word in scratch, exponent part of exponent word in scratch2.
6725 // We know the exponent is smaller than 30 (biased). If it is less than
6726 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
6727 // it rounds to zero.
6728 const uint32_t zero_exponent =
6729 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
6730 __ subl(scratch2, Immediate(zero_exponent));
6731 // rcx already has a Smi zero.
6734 // We have a shifted exponent between 0 and 30 in scratch2.
6735 __ shr(scratch2, Immediate(HeapNumber::kExponentShift));
6736 __ movl(rcx, Immediate(30));
6737 __ subl(rcx, scratch2);
6739 __ bind(&right_exponent);
6740 // Here rcx is the shift, scratch is the exponent word.
6741 // Get the top bits of the mantissa.
6742 __ and_(scratch, Immediate(HeapNumber::kMantissaMask));
6743 // Put back the implicit 1.
6744 __ or_(scratch, Immediate(1 << HeapNumber::kExponentShift));
6745 // Shift up the mantissa bits to take up the space the exponent used to
6746 // take. We have kExponentShift + 1 significant bits in the low end of the
6747 // word. Shift them to the top bits.
6748 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
6749 __ shl(scratch, Immediate(shift_distance));
6750 // Get the second half of the double. For some exponents we don't
6751 // actually need this because the bits get shifted out again, but
6752 // it's probably slower to test than just to do it.
6753 __ movl(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
6754 // Shift down 22 bits to get the most significant 10 bits or the low
6756 __ shr(scratch2, Immediate(32 - shift_distance));
6757 __ or_(scratch2, scratch);
6758 // Move down according to the exponent.
6759 __ shr_cl(scratch2);
6760 // Now the unsigned answer is in scratch2. We need to move it to rcx and
6761 // we may need to fix the sign.
6764 __ cmpl(rcx, FieldOperand(source, HeapNumber::kExponentOffset));
6765 __ j(greater, &negative);
6766 __ movl(rcx, scratch2);
6769 __ subl(rcx, scratch2);
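// Illustrative sketch, not part of the original file: the same extraction
// written against the raw IEEE-754 bits in C++. It mirrors the path above for
// doubles whose magnitude is below 2^31: read the biased exponent, put back
// the implicit mantissa bit, shift the integer part into place and fix the
// sign (memcpy, i.e. <string.h>, is assumed to be available here):
namespace integer_convert_sketch {
inline int DoubleToInt32Bits(double value) {
  unsigned long long bits;
  memcpy(&bits, &value, sizeof(bits));
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  if (exponent < 0) return 0;  // |value| < 1 truncates to zero.
  unsigned long long mantissa =
      (bits & ((1ULL << 52) - 1)) | (1ULL << 52);  // implicit 1 restored
  long long magnitude = static_cast<long long>(mantissa >> (52 - exponent));
  return static_cast<int>((bits >> 63) ? -magnitude : magnitude);
}
}  // namespace integer_convert_sketch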
6775 void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
6778 if (op_ == Token::SUB) {
6779 // Check whether the value is a smi.
6781 __ JumpIfNotSmi(rax, &try_float);
6783 // Enter runtime system if the value of the smi is zero
6784 // to make sure that we switch between 0 and -0.
6785 // Also enter it if the value of the smi is Smi::kMinValue.
6786 __ SmiNeg(rax, rax, &done);
6788 // Either zero or Smi::kMinValue, neither of which becomes a smi when negated.
6790 __ SmiCompare(rax, Smi::FromInt(0));
6791 __ j(not_equal, &slow);
6792 __ Move(rax, Factory::minus_zero_value());
6795 // Try floating point case.
6796 __ bind(&try_float);
6797 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
6798 __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
6799 __ j(not_equal, &slow);
6800 // Operand is a float, negate its value by flipping sign bit.
6801 __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
6802 __ movq(kScratchRegister, Immediate(0x01));
6803 __ shl(kScratchRegister, Immediate(63));
6804 __ xor_(rdx, kScratchRegister); // Flip sign.
6805 // rdx is value to store.
6807 __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
6809 __ AllocateHeapNumber(rcx, rbx, &slow);
6810 // rcx: allocated 'empty' number
6811 __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
6814 } else if (op_ == Token::BIT_NOT) {
6815 // Check if the operand is a heap number.
6816 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
6817 __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
6818 __ j(not_equal, &slow);
6820 // Convert the heap number in rax to an untagged integer in rcx.
6821 IntegerConvert(masm, rax, CpuFeatures::IsSupported(SSE3), &slow);
6823 // Do the bitwise operation and check if the result fits in a smi.
6826 // Tag the result as a smi and we're done.
6827 ASSERT(kSmiTagSize == 1);
6828 __ Integer32ToSmi(rax, rcx);
6831 // Return from the stub.
6835 // Handle the slow case by jumping to the JavaScript builtin.
6837 __ pop(rcx); // pop return address
6839 __ push(rcx); // push return address
6842 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
6844 case Token::BIT_NOT:
6845 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
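// Illustrative sketch, not part of the original file: the SUB case above
// negates a heap number without touching the FPU by flipping the sign bit of
// its IEEE-754 representation. The same operation on an ordinary double
// (memcpy, i.e. <string.h>, assumed available):
namespace negate_double_sketch {
inline double NegateViaSignBit(double value) {
  unsigned long long bits;
  memcpy(&bits, &value, sizeof(bits));
  bits ^= 1ULL << 63;  // flip only the sign; exponent and mantissa unchanged
  memcpy(&value, &bits, sizeof(bits));
  return value;  // also turns +0 into -0 and preserves NaN payloads
}
}  // namespace negate_double_sketch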
6853 void RegExpExecStub::Generate(MacroAssembler* masm) {
6854 // Just jump directly to runtime if native RegExp is not selected at compile
6855 // time, or if regexp entry in generated code is turned off by a runtime switch or at compilation.
6857 #ifndef V8_NATIVE_REGEXP
6858 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
6859 #else // V8_NATIVE_REGEXP
6860 if (!FLAG_regexp_entry_native) {
6861 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
6865 // Stack frame on entry.
6866 // esp[0]: return address
6867 // esp[8]: last_match_info (expected JSArray)
6868 // esp[16]: previous index
6869 // esp[24]: subject string
6870 // esp[32]: JSRegExp object
6872 static const int kLastMatchInfoOffset = 1 * kPointerSize;
6873 static const int kPreviousIndexOffset = 2 * kPointerSize;
6874 static const int kSubjectOffset = 3 * kPointerSize;
6875 static const int kJSRegExpOffset = 4 * kPointerSize;
6879 // Ensure that a RegExp stack is allocated.
6880 ExternalReference address_of_regexp_stack_memory_address =
6881 ExternalReference::address_of_regexp_stack_memory_address();
6882 ExternalReference address_of_regexp_stack_memory_size =
6883 ExternalReference::address_of_regexp_stack_memory_size();
6884 __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
6885 __ movq(kScratchRegister, Operand(kScratchRegister, 0));
6886 __ testq(kScratchRegister, kScratchRegister);
6887 __ j(zero, &runtime);
6890 // Check that the first argument is a JSRegExp object.
6891 __ movq(rax, Operand(rsp, kJSRegExpOffset));
6892 __ JumpIfSmi(rax, &runtime);
6893 __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
6894 __ j(not_equal, &runtime);
6895 // Check that the RegExp has been compiled (data contains a fixed array).
6896 __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
6897 if (FLAG_debug_code) {
6898 Condition is_smi = masm->CheckSmi(rcx);
6899 __ Check(NegateCondition(is_smi),
6900 "Unexpected type for RegExp data, FixedArray expected");
6901 __ CmpObjectType(rcx, FIXED_ARRAY_TYPE, kScratchRegister);
6902 __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
6905 // rcx: RegExp data (FixedArray)
6906 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
6907 __ movq(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
6908 __ SmiCompare(rbx, Smi::FromInt(JSRegExp::IRREGEXP));
6909 __ j(not_equal, &runtime);
6911 // rcx: RegExp data (FixedArray)
6912 // Check that the number of captures fit in the static offsets vector buffer.
6913 __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
6914 // Calculate number of capture registers (number_of_captures + 1) * 2.
6915 __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
6916 __ addq(rdx, Immediate(2)); // rdx was number_of_captures * 2.
6917 // Check that the static offsets vector buffer is large enough.
6918 __ cmpq(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
6919 __ j(above, &runtime);
6921 // rcx: RegExp data (FixedArray)
6922 // rdx: Number of capture registers
6923 // Check that the second argument is a string.
6924 __ movq(rax, Operand(rsp, kSubjectOffset));
6925 __ JumpIfSmi(rax, &runtime);
6926 Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
6927 __ j(NegateCondition(is_string), &runtime);
6928 // Get the length of the string to rbx.
6929 __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
6931 // rbx: Length of subject string
6932 // rcx: RegExp data (FixedArray)
6933 // rdx: Number of capture registers
6934 // Check that the third argument is a positive smi less than the string
6935 // length. A negative value will be greater (unsigned comparison).
6936 __ movq(rax, Operand(rsp, kPreviousIndexOffset));
6937 __ SmiToInteger32(rax, rax);
6939 __ j(above, &runtime);
6941 // rcx: RegExp data (FixedArray)
6942 // rdx: Number of capture registers
6943 // Check that the fourth object is a JSArray object.
6944 __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
6945 __ JumpIfSmi(rax, &runtime);
6946 __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister);
6947 __ j(not_equal, &runtime);
6948 // Check that the JSArray is in fast case.
6949 __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
6950 __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
6951 __ Cmp(rax, Factory::fixed_array_map());
6952 __ j(not_equal, &runtime);
6953 // Check that the last match info has space for the capture registers and the
6954 // additional information. Ensure no overflow in add.
6955 ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
6956 __ movl(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
6957 __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
6959 __ j(greater, &runtime);
6961 // ecx: RegExp data (FixedArray)
6962 // Check the representation and encoding of the subject string.
6963 Label seq_string, seq_two_byte_string, check_code;
6964 const int kStringRepresentationEncodingMask =
6965 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
6966 __ movq(rax, Operand(rsp, kSubjectOffset));
6967 __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
6968 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
6969 __ andb(rbx, Immediate(kStringRepresentationEncodingMask));
6970 // First check for sequential string.
6971 ASSERT_EQ(0, kStringTag);
6972 ASSERT_EQ(0, kSeqStringTag);
6973 __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
6974 __ j(zero, &seq_string);
6976 // Check for flat cons string.
6977 // A flat cons string is a cons string where the second part is the empty
6978 // string. In that case the subject string is just the first part of the cons
6979 // string. Also in this case the first part of the cons string is known to be
6980 // a sequential string or an external string.
6982 __ andb(rdx, Immediate(kStringRepresentationMask));
6983 __ cmpb(rdx, Immediate(kConsStringTag));
6984 __ j(not_equal, &runtime);
6985 __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
6986 __ Cmp(rdx, Factory::empty_string());
6987 __ j(not_equal, &runtime);
6988 __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
6989 __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
6990 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
6991 ASSERT_EQ(0, kSeqStringTag);
6992 __ testb(rbx, Immediate(kStringRepresentationMask));
6993 __ j(not_zero, &runtime);
6994 __ andb(rbx, Immediate(kStringRepresentationEncodingMask));
6996 __ bind(&seq_string);
6997 // rax: subject string (sequential, either ascii or two byte)
6998 // rbx: subject string type & kStringRepresentationEncodingMask
6999 // rcx: RegExp data (FixedArray)
7000 // Check that the irregexp code has been generated for an ascii string. If
7001 // it has, the field contains a code object otherwise it contains the hole.
7002 __ cmpb(rbx, Immediate(kStringTag | kSeqStringTag | kTwoByteStringTag));
7003 __ j(equal, &seq_two_byte_string);
7004 if (FLAG_debug_code) {
7005 __ cmpb(rbx, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
7006 __ Check(equal, "Expected sequential ascii string");
7008 __ movq(r12, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
7009 __ Set(rdi, 1); // Type is ascii.
7010 __ jmp(&check_code);
7012 __ bind(&seq_two_byte_string);
7013 // rax: subject string
7014 // rcx: RegExp data (FixedArray)
7015 __ movq(r12, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
7016 __ Set(rdi, 0); // Type is two byte.
7018 __ bind(&check_code);
7019 // Check that the irregexp code has been generated for the actual string
7020 // encoding. If it has, the field contains a code object, otherwise it contains the hole.
7022 __ CmpObjectType(r12, CODE_TYPE, kScratchRegister);
7023 __ j(not_equal, &runtime);
7025 // rax: subject string
7026 // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
7028 // Load used arguments before starting to push arguments for call to native
7029 // RegExp code to avoid handling changing stack height.
7030 __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
7031 __ SmiToInteger64(rbx, rbx); // Previous index from smi.
7033 // rax: subject string
7034 // rbx: previous index
7035 // rdi: encoding of subject string (1 if ascii 0 if two_byte);
7037 // All checks done. Now push arguments for native regexp code.
7038 __ IncrementCounter(&Counters::regexp_entry_native, 1);
7040 // rsi is caller save on Windows and used to pass parameter on Linux.
7043 static const int kRegExpExecuteArguments = 7;
7044 __ PrepareCallCFunction(kRegExpExecuteArguments);
7045 int argument_slots_on_stack =
7046 masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
7048 // Argument 7: Indicate that this is a direct call from JavaScript.
7049 __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
7052 // Argument 6: Start (high end) of backtracking stack memory area.
7053 __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
7054 __ movq(r9, Operand(kScratchRegister, 0));
7055 __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
7056 __ addq(r9, Operand(kScratchRegister, 0));
7057 // Argument 6 passed in r9 on Linux and on the stack on Windows.
7059 __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), r9);
7062 // Argument 5: static offsets vector buffer.
7063 __ movq(r8, ExternalReference::address_of_static_offsets_vector());
7064 // Argument 5 passed in r8 on Linux and on the stack on Windows.
7066 __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r8);
7069 // First four arguments are passed in registers on both Linux and Windows.
7073 Register arg2 = rdx;
7074 Register arg1 = rcx;
7076 Register arg4 = rcx;
7077 Register arg3 = rdx;
7078 Register arg2 = rsi;
7079 Register arg1 = rdi;
7082 // Keep track of aliasing between argX defined above and the registers used.
7083 // rax: subject string
7084 // rbx: previous index
7085 // rdi: encoding of subject string (1 if ascii 0 if two_byte);
7088 // Argument 4: End of string data
7089 // Argument 3: Start of string data
7090 Label setup_two_byte, setup_rest;
7092 __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
7093 __ j(zero, &setup_two_byte);
7094 __ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
7095 __ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
7096 __ jmp(&setup_rest);
7097 __ bind(&setup_two_byte);
7098 __ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
7099 __ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
7101 __ bind(&setup_rest);
7102 // Argument 2: Previous index.
7105 // Argument 1: Subject string.
7108 // Locate the code entry and call it.
7109 __ addq(r12, Immediate(Code::kHeaderSize - kHeapObjectTag));
7110 __ CallCFunction(r12, kRegExpExecuteArguments);
7112 // rsi is caller save, as it is used to pass parameter.
7115 // Check the result.
7117 __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
7118 __ j(equal, &success);
7120 __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
7121 __ j(equal, &failure);
7122 __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
7123 // If not exception it can only be retry. Handle that in the runtime system.
7124 __ j(not_equal, &runtime);
7125 // Result must now be exception. If there is no pending exception already, a
7126 // stack overflow (on the backtrack stack) was detected in RegExp code, but
7127 // the exception has not been created yet. Handle that in the runtime system.
7128 // TODO(592) Rerunning the RegExp to get the stack overflow exception.
7129 ExternalReference pending_exception_address(Top::k_pending_exception_address);
7130 __ movq(kScratchRegister, pending_exception_address);
7131 __ Cmp(kScratchRegister, Factory::the_hole_value());
7132 __ j(equal, &runtime);
7134 // For failure and exception return null.
7135 __ Move(rax, Factory::null_value());
7136 __ ret(4 * kPointerSize);
7138 // Load RegExp data.
7140 __ movq(rax, Operand(rsp, kJSRegExpOffset));
7141 __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
7142 __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
7143 // Calculate number of capture registers (number_of_captures + 1) * 2.
7144 __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
7145 __ addq(rdx, Immediate(2)); // rdx was number_of_captures * 2.
7147 // rdx: Number of capture registers
7148 // Load last_match_info which is still known to be a fast case JSArray.
7149 __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
7150 __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
7152 // rbx: last_match_info backing store (FixedArray)
7153 // rdx: number of capture registers
7154 // Store the capture count.
7155 __ Integer32ToSmi(kScratchRegister, rdx);
7156 __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
7158 // Store last subject and last input.
7159 __ movq(rax, Operand(rsp, kSubjectOffset));
7160 __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
7162 __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
7163 __ movq(rax, Operand(rsp, kSubjectOffset));
7164 __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
7166 __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
7168 // Get the static offsets vector filled by the native regexp code.
7169 __ movq(rcx, ExternalReference::address_of_static_offsets_vector());
7171 // rbx: last_match_info backing store (FixedArray)
7172 // rcx: offsets vector
7173 // rdx: number of capture registers
7174 Label next_capture, done;
7175 __ movq(rax, Operand(rsp, kPreviousIndexOffset));
7176 // Capture register counter starts from number of capture registers and
7177 // counts down until wrapping after zero.
7178 __ bind(&next_capture);
7179 __ subq(rdx, Immediate(1));
7180 __ j(negative, &done);
7181 // Read the value from the static offsets vector buffer and make it a smi.
7182 __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
7183 __ Integer32ToSmi(rdi, rdi, &runtime);
7184 // Store the smi value in the last match info.
7185 __ movq(FieldOperand(rbx,
7188 RegExpImpl::kFirstCaptureOffset),
7190 __ jmp(&next_capture);
7193 // Return last match info.
7194 __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
7195 __ ret(4 * kPointerSize);
7197 // Do the runtime call to execute the regexp.
7199 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
7200 #endif // V8_NATIVE_REGEXP
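// Illustrative sketch, not part of the original file: both places above that
// size the offsets vector use the same arithmetic - every capture needs a
// start and an end register, plus one implicit capture for the whole match:
namespace capture_count_sketch {
inline int NumCaptureRegisters(int number_of_captures) {
  return (number_of_captures + 1) * 2;
}
}  // namespace capture_count_sketch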
7204 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
7211 // Currently we only do lookups for smis. Check for a smi if the object is not known to be a smi.
7213 if (!object_is_smi) {
7214 __ JumpIfNotSmi(object, not_found);
7217 // Use of registers. Register result is used as a temporary.
7218 Register number_string_cache = result;
7219 Register mask = scratch1;
7220 Register scratch = scratch2;
7222 // Load the number string cache.
7223 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
7225 // Make the hash mask from the length of the number string cache. It
7226 // contains two elements (number and string) for each cache entry.
7227 __ movl(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
7228 __ shrl(mask, Immediate(1)); // Divide length by two (length is not a smi).
7229 __ subl(mask, Immediate(1)); // Make mask.
7231 // Calculate the entry in the number string cache. The hash value in the
7232 // number string cache for smis is just the smi value.
7233 __ movq(scratch, object);
7234 __ SmiToInteger32(scratch, scratch);
7235 __ andl(scratch, mask);
7237 // Each entry in the number string cache consists of two pointer-sized fields,
7238 // but a times_twice_pointer_size (multiplication by 16) scale factor
7239 // is not supported by the addressing modes on x64.
7240 // So we have to premultiply the entry index before the lookup.
7241 __ shl(scratch, Immediate(kPointerSizeLog2 + 1));
7242 // Check if the entry is the smi we are looking for.
7244 FieldOperand(number_string_cache,
7247 FixedArray::kHeaderSize));
7248 __ j(not_equal, not_found);
7250 // Get the result from the cache.
7252 FieldOperand(number_string_cache,
7255 FixedArray::kHeaderSize + kPointerSize));
7256 __ IncrementCounter(&Counters::number_to_string_native, 1);
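// Illustrative sketch, not part of the original file: the cache probed above
// is a flat FixedArray of (number, string) pairs. The mask is half the array
// length minus one, the hash of a smi is the smi value itself, and because
// x64 addressing has no times-16 scale factor the entry index is
// premultiplied before the compare. In plain C++ terms:
namespace number_string_cache_sketch {
inline int NumberSlotIndex(int smi_value, int cache_length) {
  int mask = (cache_length / 2) - 1;  // entry count is a power of two
  int entry = smi_value & mask;       // hash of a smi is the value itself
  return entry * 2;                   // element index of the number slot;
                                      // the matching string sits at index + 1
}
}  // namespace number_string_cache_sketch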
7260 void NumberToStringStub::Generate(MacroAssembler* masm) {
7263 __ movq(rbx, Operand(rsp, kPointerSize));
7265 // Generate code to lookup number in the number string cache.
7266 GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
7267 __ ret(1 * kPointerSize);
7270 // Handle number to string in the runtime system if not found in the cache.
7271 __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
7275 void CompareStub::Generate(MacroAssembler* masm) {
7276 Label call_builtin, done;
7278 // NOTICE! This code is only reached after a smi-fast-case check, so
7279 // it is certain that at least one operand isn't a smi.
7281 if (cc_ == equal) { // Both strict and non-strict.
7282 Label slow; // Fallthrough label.
7283 // Equality is almost reflexive (everything but NaN), so start by testing
7284 // for "identity and not NaN".
7286 Label not_identical;
7288 __ j(not_equal, &not_identical);
7289 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
7290 // so we do the second best thing - test it ourselves.
7292 if (never_nan_nan_) {
7298 // If it's not a heap number, then return equal.
7299 __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
7300 Factory::heap_number_map());
7301 __ j(equal, &heap_number);
7302 __ bind(&return_equal);
7306 __ bind(&heap_number);
7307 // It is a heap number, so return non-equal if it's NaN and equal if it's not NaN.
7309 // The representation of NaN values has all exponent bits (52..62) set,
7310 // and not all mantissa bits (0..51) clear.
7311 // We only allow QNaNs, which have bit 51 set (which also rules out
7312 // the value being Infinity).
7314 // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
7315 // all bits in the mask are set. We only need to check the word
7316 // that contains the exponent and high bit of the mantissa.
7317 ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
7318 __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
7320 __ addl(rdx, rdx); // Shift value and mask so mask applies to top bits.
7321 __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
7322 __ setcc(above_equal, rax);
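// Illustrative sketch of the NaN test above (not generated code; 'high_word'
// is an exposition-only name for bits 32..63 of the double):
//
//   bool is_quiet_nan = ((high_word << 1) >= (kQuietNaNHighBitsMask << 1));
//   // comparison result: not-equal if is_quiet_nan, equal otherwise
//
// Doubling both sides discards the sign bit before the unsigned compare.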
7326 __ bind(&not_identical);
7329 // If we're doing a strict equality comparison, we don't have to do
7330 // type conversion, so we generate code to do fast comparison for objects
7331 // and oddballs. Non-smi numbers and strings still go through the usual
7334 // If either is a Smi (we know that not both are), then they can only
7335 // be equal if the other is a HeapNumber. If so, use the slow case.
7338 __ SelectNonSmi(rbx, rax, rdx, &not_smis);
7340 // Check if the non-smi operand is a heap number.
7341 __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
7342 Factory::heap_number_map());
7343 // If heap number, handle it in the slow case.
7345 // Return non-equal. ebx (the lower half of rbx) is not zero.
7352 // If either operand is a JSObject or an oddball value, then they are not
7353 // equal since their pointers are different.
7354 // There is no test for undetectability in strict equality.
7356 // If the first object is a JS object, we have done pointer comparison.
7357 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
7358 Label first_non_object;
7359 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
7360 __ j(below, &first_non_object);
7361 // Return non-zero (eax (not rax) is not zero)
7362 Label return_not_equal;
7363 ASSERT(kHeapObjectTag != 0);
7364 __ bind(&return_not_equal);
7367 __ bind(&first_non_object);
7368 // Check for oddballs: true, false, null, undefined.
7369 __ CmpInstanceType(rcx, ODDBALL_TYPE);
7370 __ j(equal, &return_not_equal);
7372 __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
7373 __ j(above_equal, &return_not_equal);
7375 // Check for oddballs: true, false, null, undefined.
7376 __ CmpInstanceType(rcx, ODDBALL_TYPE);
7377 __ j(equal, &return_not_equal);
7379 // Fall through to the general case.
7384 // Push arguments below the return address to prepare jump to builtin.
7390 // Inlined floating point compare.
7391 // Call builtin if operands are not floating point or smi.
7392 Label check_for_symbols;
7393 // Push arguments on stack, for helper functions.
7394 FloatingPointHelper::CheckNumberOperands(masm, &check_for_symbols);
7395 FloatingPointHelper::LoadFloatOperands(masm, rax, rdx);
7398 // Jump to builtin for NaN.
7399 __ j(parity_even, &call_builtin);
7401 // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
7402 Label below_lbl, above_lbl;
7403 // use rdx, rax to convert unsigned to signed comparison
7404 __ j(below, &below_lbl);
7405 __ j(above, &above_lbl);
7407 __ xor_(rax, rax); // equal
7408 __ ret(2 * kPointerSize);
7410 __ bind(&below_lbl);
7411 __ movq(rax, Immediate(-1));
7412 __ ret(2 * kPointerSize);
7414 __ bind(&above_lbl);
7415 __ movq(rax, Immediate(1));
7416 __ ret(2 * kPointerSize); // rax, rdx were pushed
7418 // Fast negative check for symbol-to-symbol equality.
7419 __ bind(&check_for_symbols);
7420 Label check_for_strings;
7422 BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
7423 BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
7425 // We've already checked for object identity, so if both operands
7426 // are symbols they aren't equal. Register eax (not rax) already holds a
7427 // non-zero value, which indicates not equal, so just return.
7428 __ ret(2 * kPointerSize);
7431 __ bind(&check_for_strings);
7433 __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &call_builtin);
7435 // Inline comparison of ascii strings.
7436 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
7445 __ Abort("Unexpected fall-through from string comparison");
7448 __ bind(&call_builtin);
7449 // must swap argument order
7456 // Figure out which native to call and setup the arguments.
7457 Builtins::JavaScript builtin;
7459 builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
7461 builtin = Builtins::COMPARE;
7462 int ncr; // NaN compare result
7463 if (cc_ == less || cc_ == less_equal) {
7466 ASSERT(cc_ == greater || cc_ == greater_equal); // remaining cases
7469 __ Push(Smi::FromInt(ncr));
7472 // Restore return address on the stack.
7475 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
7476 // tagged as a small integer.
7477 __ InvokeBuiltin(builtin, JUMP_FUNCTION);
7481 void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
7485 __ JumpIfSmi(object, label);
7486 __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
7488 FieldOperand(scratch, Map::kInstanceTypeOffset));
7489 // Ensure that no non-strings have the symbol bit set.
7490 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
7491 ASSERT(kSymbolTag != 0);
7492 __ testb(scratch, Immediate(kIsSymbolMask));
7497 // Call the function just below TOS on the stack with the given
7498 // arguments. The receiver is the TOS.
7499 void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
7500 CallFunctionFlags flags,
7502 // Push the arguments ("left-to-right") on the stack.
7503 int arg_count = args->length();
7504 for (int i = 0; i < arg_count; i++) {
7508 // Record the position for debugging purposes.
7509 CodeForSourcePosition(position);
7511 // Use the shared code stub to call the function.
7512 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
7513 CallFunctionStub call_function(arg_count, in_loop, flags);
7514 Result answer = frame_->CallStub(&call_function, arg_count + 1);
7515 // Restore context and replace function on the stack with the
7516 // result of the stub invocation.
7517 frame_->RestoreContextRegister();
7518 frame_->SetElementAt(0, &answer);
7522 void InstanceofStub::Generate(MacroAssembler* masm) {
7523 // Implements "value instanceof function" operator.
7524 // Expected input state:
7525 // rsp[0] : return address
7526 // rsp[1] : function pointer
7529 // Get the object - go slow case if it's a smi.
7531 __ movq(rax, Operand(rsp, 2 * kPointerSize));
7532 __ JumpIfSmi(rax, &slow);
7534 // Check that the left hand is a JS object. Leave its map in rax.
7535 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
7537 __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
7540 // Get the prototype of the function.
7541 __ movq(rdx, Operand(rsp, 1 * kPointerSize));
7542 __ TryGetFunctionPrototype(rdx, rbx, &slow);
7544 // Check that the function prototype is a JS object.
7545 __ JumpIfSmi(rbx, &slow);
7546 __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
7548 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
7551 // Register mapping: rax is object map and rbx is function prototype.
7552 __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
7554 // Loop through the prototype chain looking for the function prototype.
7555 Label loop, is_instance, is_not_instance;
7556 __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
7559 __ j(equal, &is_instance);
7560 __ cmpq(rcx, kScratchRegister);
7561 __ j(equal, &is_not_instance);
7562 __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
7563 __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
7566 __ bind(&is_instance);
7568 __ ret(2 * kPointerSize);
7570 __ bind(&is_not_instance);
7571 __ movl(rax, Immediate(1));
7572 __ ret(2 * kPointerSize);
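// Illustrative sketch of the prototype-chain walk above (not generated code;
// names are exposition only):
//
//   Object* p = object_map->prototype();
//   while (true) {
//     if (p == function_prototype) { /* is an instance */ break; }
//     if (p == null_value)         { /* is not an instance */ break; }
//     p = p->map()->prototype();
//   }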
7574 // Slow-case: Go through the JavaScript implementation.
7576 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
7580 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
7581 // rsp[0] : return address
7582 // rsp[8] : number of parameters
7583 // rsp[16] : receiver displacement
7584 // rsp[24] : function
7586 // The displacement is used for skipping the return address and the
7587 // frame pointer on the stack. It is the offset of the last
7588 // parameter (if any) relative to the frame pointer.
7589 static const int kDisplacement = 2 * kPointerSize;
7591 // Check if the calling frame is an arguments adaptor frame.
7592 Label adaptor_frame, try_allocate, runtime;
7593 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
7594 __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
7595 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
7596 __ j(equal, &adaptor_frame);
7598 // Get the length from the frame.
7599 __ movq(rcx, Operand(rsp, 1 * kPointerSize));
7600 __ jmp(&try_allocate);
7602 // Patch the arguments.length and the parameters pointer.
7603 __ bind(&adaptor_frame);
7604 __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
7605 __ movq(Operand(rsp, 1 * kPointerSize), rcx);
7606 // Do not clobber the length index for the indexing operation since
7607 // it is used to compute the size for the allocation later.
7608 SmiIndex index = masm->SmiToIndex(rbx, rcx, kPointerSizeLog2);
7609 __ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
7610 __ movq(Operand(rsp, 2 * kPointerSize), rdx);
7612 // Try the new space allocation. Start out with computing the size of
7613 // the arguments object and the elements array.
7614 Label add_arguments_object;
7615 __ bind(&try_allocate);
7617 __ j(zero, &add_arguments_object);
7618 index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
7619 __ lea(rcx, Operand(index.reg, index.scale, FixedArray::kHeaderSize));
7620 __ bind(&add_arguments_object);
7621 __ addq(rcx, Immediate(Heap::kArgumentsObjectSize));
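// Size-computation sketch (illustrative, not generated code):
//
//   int size = Heap::kArgumentsObjectSize;                          // JSObject part
//   if (argc != 0) size += FixedArray::kHeaderSize + argc * kPointerSize;
//
// so the arguments object and its elements array can be allocated together.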
7623 // Do the allocation of both objects in one go.
7624 __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
7626 // Get the arguments boilerplate from the current (global) context.
7627 int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
7628 __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
7629 __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
7630 __ movq(rdi, Operand(rdi, offset));
7632 // Copy the JS object part.
7633 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
7634 __ movq(kScratchRegister, FieldOperand(rdi, i));
7635 __ movq(FieldOperand(rax, i), kScratchRegister);
7638 // Setup the callee in-object property.
7639 ASSERT(Heap::arguments_callee_index == 0);
7640 __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
7641 __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister);
7643 // Get the length (smi tagged) and set that as an in-object property too.
7644 ASSERT(Heap::arguments_length_index == 1);
7645 __ movq(rcx, Operand(rsp, 1 * kPointerSize));
7646 __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);
7648 // If there are no actual arguments, we're done.
7653 // Get the parameters pointer from the stack and untag the length.
7654 __ movq(rdx, Operand(rsp, 2 * kPointerSize));
7655 __ SmiToInteger32(rcx, rcx);
7657 // Setup the elements pointer in the allocated arguments object and
7658 // initialize the header in the elements fixed array.
7659 __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
7660 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
7661 __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
7662 __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
7663 __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
7665 // Copy the fixed array slots.
7668 __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
7669 __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
7670 __ addq(rdi, Immediate(kPointerSize));
7671 __ subq(rdx, Immediate(kPointerSize));
7673 __ j(not_zero, &loop);
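// Copy-loop sketch (illustrative, not generated code; 'parameters' is the
// pointer set up above, and the first slot skipped is the receiver):
//
//   for (int i = 0; i < argc; i++) {
//     elements[i] = parameters[-(i + 1)];   // walk down the stack
//   }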
7675 // Return and remove the on-stack parameters.
7677 __ ret(3 * kPointerSize);
7679 // Do the runtime call to allocate the arguments object.
7681 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
7685 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
7686 // The key is in rdx and the parameter count is in rax.
7688 // The displacement is used for skipping the frame pointer on the
7689 // stack. It is the offset of the last parameter (if any) relative
7690 // to the frame pointer.
7691 static const int kDisplacement = 1 * kPointerSize;
7693 // Check that the key is a smi.
7695 __ JumpIfNotSmi(rdx, &slow);
7697 // Check if the calling frame is an arguments adaptor frame.
7699 __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
7700 __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
7701 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
7702 __ j(equal, &adaptor);
7704 // Check index against formal parameters count limit passed in
7705 // through register rax. Use unsigned comparison to get the negative check for free.
7708 __ j(above_equal, &slow);
7710 // Read the argument from the stack and return it.
7711 SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
7712 __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
7713 index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
7714 __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
7717 // Arguments adaptor case: Check index against actual arguments
7718 // limit found in the arguments adaptor frame. Use unsigned
7719 // comparison to get negative check for free.
7721 __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
7723 __ j(above_equal, &slow);
7725 // Read the argument from the stack and return it.
7726 index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
7727 __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
7728 index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
7729 __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
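// Indexing sketch (illustrative, not generated code): with 'fp' being the
// relevant frame pointer and 'argc' its argument count, argument 'key' is
// read as
//
//   value = Memory[fp + (argc - key) * kPointerSize + kDisplacement];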
7732 // Slow-case: Handle non-smi or out-of-bounds access to arguments
7733 // by calling the runtime system.
7735 __ pop(rbx); // Return address.
7738 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
7742 void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
7743 // Check if the calling frame is an arguments adaptor frame.
7745 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
7746 __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
7747 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
7749 // Arguments adaptor case: Read the arguments length from the
7750 // adaptor frame and return it.
7751 // Otherwise nothing to do: The number of formal parameters has already been
7752 // passed in register rax by the calling function. Just return it.
7753 __ cmovq(equal, rax,
7754 Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
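// Illustrative sketch (not generated code):
//
//   result = in_adaptor_frame ? adaptor_frame_argc : formal_parameter_count;
//
// The conditional move keeps the formal parameter count already in rax
// unless the caller frame is an arguments adaptor frame.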
7759 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
7760 // Check that the stack contains the next handler, frame pointer, state and
7761 // return address, in that order.
7762 ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
7763 StackHandlerConstants::kStateOffset);
7764 ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
7765 StackHandlerConstants::kPCOffset);
7767 ExternalReference handler_address(Top::k_handler_address);
7768 __ movq(kScratchRegister, handler_address);
7769 __ movq(rsp, Operand(kScratchRegister, 0));
7770 // get next in chain
7772 __ movq(Operand(kScratchRegister, 0), rcx);
7773 __ pop(rbp); // pop frame pointer
7774 __ pop(rdx); // remove state
7776 // Before returning we restore the context from the frame pointer if not NULL.
7777 // The frame pointer is NULL in the exception handler of a JS entry frame.
7778 __ xor_(rsi, rsi); // tentatively set context pointer to NULL
7780 __ cmpq(rbp, Immediate(0));
7782 __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
7788 void CEntryStub::GenerateCore(MacroAssembler* masm,
7789 Label* throw_normal_exception,
7790 Label* throw_termination_exception,
7791 Label* throw_out_of_memory_exception,
7793 bool always_allocate_scope) {
7794 // rax: result parameter for PerformGC, if any.
7795 // rbx: pointer to C function (C callee-saved).
7796 // rbp: frame pointer (restored after C call).
7797 // rsp: stack pointer (restored after C call).
7798 // r14: number of arguments including receiver (C callee-saved).
7799 // r15: pointer to the first argument (C callee-saved).
7800 // This pointer is reused in LeaveExitFrame(), so it is stored in a
7801 // callee-saved register.
7803 // Simple results returned in rax (both AMD64 and Win64 calling conventions).
7804 // Complex results must be written to address passed as first argument.
7805 // AMD64 calling convention: a struct of two pointers in rax+rdx
7808 // Pass failure code returned from last attempt as first argument to GC.
7811 #else // ! defined(_WIN64)
7814 __ movq(kScratchRegister,
7815 FUNCTION_ADDR(Runtime::PerformGC),
7816 RelocInfo::RUNTIME_ENTRY);
7817 __ call(kScratchRegister);
7820 ExternalReference scope_depth =
7821 ExternalReference::heap_always_allocate_scope_depth();
7822 if (always_allocate_scope) {
7823 __ movq(kScratchRegister, scope_depth);
7824 __ incl(Operand(kScratchRegister, 0));
7829 // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
7830 // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
7831 __ movq(Operand(rsp, 4 * kPointerSize), r14); // argc.
7832 __ movq(Operand(rsp, 5 * kPointerSize), r15); // argv.
7833 if (result_size_ < 2) {
7834 // Pass a pointer to the Arguments object as the first argument.
7835 // Return result in single register (rax).
7836 __ lea(rcx, Operand(rsp, 4 * kPointerSize));
7838 ASSERT_EQ(2, result_size_);
7839 // Pass a pointer to the result location as the first argument.
7840 __ lea(rcx, Operand(rsp, 6 * kPointerSize));
7841 // Pass a pointer to the Arguments object as the second argument.
7842 __ lea(rdx, Operand(rsp, 4 * kPointerSize));
7845 #else // ! defined(_WIN64)
7846 // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
7847 __ movq(rdi, r14); // argc.
7848 __ movq(rsi, r15); // argv.
7851 // Result is in rax - do not destroy this register!
7853 if (always_allocate_scope) {
7854 __ movq(kScratchRegister, scope_depth);
7855 __ decl(Operand(kScratchRegister, 0));
7858 // Check for failure result.
7859 Label failure_returned;
7860 ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
7862 // If return value is on the stack, pop it to registers.
7863 if (result_size_ > 1) {
7864 ASSERT_EQ(2, result_size_);
7865 // Read result values stored on stack. Result is stored
7866 // above the four argument mirror slots and the two
7867 // Arguments object slots.
7868 __ movq(rax, Operand(rsp, 6 * kPointerSize));
7869 __ movq(rdx, Operand(rsp, 7 * kPointerSize));
7872 __ lea(rcx, Operand(rax, 1));
7873 // Lower 2 bits of rcx are 0 iff rax has failure tag.
7874 __ testl(rcx, Immediate(kFailureTagMask));
7875 __ j(zero, &failure_returned);
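// Failure-check sketch (illustrative, not generated code): failure objects
// have both low tag bits set, so
//
//   bool is_failure = (((result + 1) & kFailureTagMask) == 0);
//
// The lea/test pair computes this without clobbering the result in rax.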
7877 // Exit the JavaScript to C++ exit frame.
7878 __ LeaveExitFrame(mode_, result_size_);
7881 // Handling of failure.
7882 __ bind(&failure_returned);
7885 // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
7886 ASSERT(Failure::RETRY_AFTER_GC == 0);
7887 __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
7890 // Special handling of out of memory exceptions.
7891 __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
7892 __ cmpq(rax, kScratchRegister);
7893 __ j(equal, throw_out_of_memory_exception);
7895 // Retrieve the pending exception and clear the variable.
7896 ExternalReference pending_exception_address(Top::k_pending_exception_address);
7897 __ movq(kScratchRegister, pending_exception_address);
7898 __ movq(rax, Operand(kScratchRegister, 0));
7899 __ movq(rdx, ExternalReference::the_hole_value_location());
7900 __ movq(rdx, Operand(rdx, 0));
7901 __ movq(Operand(kScratchRegister, 0), rdx);
7903 // Special handling of termination exceptions which are uncatchable
7904 // by JavaScript code.
7905 __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
7906 __ j(equal, throw_termination_exception);
7908 // Handle normal exception.
7909 __ jmp(throw_normal_exception);
7916 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
7917 UncatchableExceptionType type) {
7918 // Fetch top stack handler.
7919 ExternalReference handler_address(Top::k_handler_address);
7920 __ movq(kScratchRegister, handler_address);
7921 __ movq(rsp, Operand(kScratchRegister, 0));
7923 // Unwind the handlers until the ENTRY handler is found.
7926 // Load the type of the current stack handler.
7927 const int kStateOffset = StackHandlerConstants::kStateOffset;
7928 __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
7930 // Fetch the next handler in the list.
7931 const int kNextOffset = StackHandlerConstants::kNextOffset;
7932 __ movq(rsp, Operand(rsp, kNextOffset));
7936 // Set the top handler address to next handler past the current ENTRY handler.
7937 __ movq(kScratchRegister, handler_address);
7938 __ pop(Operand(kScratchRegister, 0));
7940 if (type == OUT_OF_MEMORY) {
7941 // Set external caught exception to false.
7942 ExternalReference external_caught(Top::k_external_caught_exception_address);
7943 __ movq(rax, Immediate(false));
7944 __ store_rax(external_caught);
7946 // Set pending exception and rax to out of memory exception.
7947 ExternalReference pending_exception(Top::k_pending_exception_address);
7948 __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
7949 __ store_rax(pending_exception);
7952 // Clear the context pointer.
7955 // Restore registers from handler.
7956 ASSERT_EQ(StackHandlerConstants::kNextOffset + kPointerSize,
7957 StackHandlerConstants::kFPOffset);
7959 ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
7960 StackHandlerConstants::kStateOffset);
7961 __ pop(rdx); // State
7963 ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
7964 StackHandlerConstants::kPCOffset);
7969 void CallFunctionStub::Generate(MacroAssembler* masm) {
7972 // If the receiver might be a value (string, number or boolean) check for this
7973 // and box it if it is.
7974 if (ReceiverMightBeValue()) {
7975 // Get the receiver from the stack.
7976 // +1 ~ return address
7977 Label receiver_is_value, receiver_is_js_object;
7978 __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
7980 // Check if receiver is a smi (which is a number value).
7981 __ JumpIfSmi(rax, &receiver_is_value);
7983 // Check if the receiver is a valid JS object.
7984 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
7985 __ j(above_equal, &receiver_is_js_object);
7987 // Call the runtime to box the value.
7988 __ bind(&receiver_is_value);
7989 __ EnterInternalFrame();
7991 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
7992 __ LeaveInternalFrame();
7993 __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
7995 __ bind(&receiver_is_js_object);
7998 // Get the function to call from the stack.
7999 // +2 ~ receiver, return address
8000 __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
8002 // Check that the function really is a JavaScript function.
8003 __ JumpIfSmi(rdi, &slow);
8004 // Goto slow case if we do not have a function.
8005 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
8006 __ j(not_equal, &slow);
8008 // Fast-case: Just invoke the function.
8009 ParameterCount actual(argc_);
8010 __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
8012 // Slow-case: Non-function called.
8014 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
8015 // of the original receiver from the call site).
8016 __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
8019 __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
8020 Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
8021 __ Jump(adaptor, RelocInfo::CODE_TARGET);
8025 void CEntryStub::Generate(MacroAssembler* masm) {
8026 // rax: number of arguments including receiver
8027 // rbx: pointer to C function (C callee-saved)
8028 // rbp: frame pointer of calling JS frame (restored after C call)
8029 // rsp: stack pointer (restored after C call)
8030 // rsi: current context (restored)
8032 // NOTE: Invocations of builtins may return failure objects
8033 // instead of a proper result. The builtin entry handles
8034 // this by performing a garbage collection and retrying the builtin once.
8037 // Enter the exit frame that transitions from JavaScript to C++.
8038 __ EnterExitFrame(mode_, result_size_);
8040 // rax: Holds the context at this point, but should not be used.
8041 // On entry to code generated by GenerateCore, it must hold
8042 // a failure result if the collect_garbage argument to GenerateCore
8043 // is true. This failure result can be the result of code
8044 // generated by a previous call to GenerateCore. The value
8045 // of rax is then passed to Runtime::PerformGC.
8046 // rbx: pointer to builtin function (C callee-saved).
8047 // rbp: frame pointer of exit frame (restored after C call).
8048 // rsp: stack pointer (restored after C call).
8049 // r14: number of arguments including receiver (C callee-saved).
8050 // r15: argv pointer (C callee-saved).
8052 Label throw_normal_exception;
8053 Label throw_termination_exception;
8054 Label throw_out_of_memory_exception;
8056 // Call into the runtime system.
8058 &throw_normal_exception,
8059 &throw_termination_exception,
8060 &throw_out_of_memory_exception,
8064 // Do space-specific GC and retry runtime call.
8066 &throw_normal_exception,
8067 &throw_termination_exception,
8068 &throw_out_of_memory_exception,
8072 // Do full GC and retry runtime call one final time.
8073 Failure* failure = Failure::InternalError();
8074 __ movq(rax, failure, RelocInfo::NONE);
8076 &throw_normal_exception,
8077 &throw_termination_exception,
8078 &throw_out_of_memory_exception,
8082 __ bind(&throw_out_of_memory_exception);
8083 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
8085 __ bind(&throw_termination_exception);
8086 GenerateThrowUncatchable(masm, TERMINATION);
8088 __ bind(&throw_normal_exception);
8089 GenerateThrowTOS(masm);
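// Retry-structure sketch (illustrative, not generated code): the three
// GenerateCore calls above amount to
//
//   result = RunBuiltin();                                        // no GC
//   if (failed_with_retry) { CollectGarbage(space); result = RunBuiltin(); }
//   if (failed_with_retry) { CollectAllGarbage();   result = RunBuiltin(); }
//   if (still_failed) /* throw the pending exception */;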
8093 void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
8098 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
8100 #ifdef ENABLE_LOGGING_AND_PROFILING
8101 Label not_outermost_js, not_outermost_js_2;
8108 // Push the stack frame type marker twice.
8109 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
8110 __ Push(Smi::FromInt(marker)); // context slot
8111 __ Push(Smi::FromInt(marker)); // function slot
8112 // Save callee-saved registers (X64 calling conventions).
8120 // TODO(X64): Push XMM6-XMM15 (low 64 bits) as well, or make them
8121 // callee-save in JS code as well.
8123 // Save copies of the top frame descriptor on the stack.
8124 ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
8125 __ load_rax(c_entry_fp);
8128 #ifdef ENABLE_LOGGING_AND_PROFILING
8129 // If this is the outermost JS call, set js_entry_sp value.
8130 ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
8131 __ load_rax(js_entry_sp);
8133 __ j(not_zero, &not_outermost_js);
8135 __ store_rax(js_entry_sp);
8136 __ bind(&not_outermost_js);
8139 // Call a faked try-block that does the invoke.
8142 // Caught exception: Store result (exception) in the pending
8143 // exception field in the JSEnv and return a failure sentinel.
8144 ExternalReference pending_exception(Top::k_pending_exception_address);
8145 __ store_rax(pending_exception);
8146 __ movq(rax, Failure::Exception(), RelocInfo::NONE);
8149 // Invoke: Link this frame into the handler chain.
8151 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
8153 // Clear any pending exceptions.
8154 __ load_rax(ExternalReference::the_hole_value_location());
8155 __ store_rax(pending_exception);
8157 // Fake a receiver (NULL).
8158 __ push(Immediate(0)); // receiver
8160 // Invoke the function by calling through JS entry trampoline
8161 // builtin and pop the faked function when we return. We load the address
8162 // from an external reference instead of inlining the call target address
8163 // directly in the code, because the builtin stubs may not have been
8164 // generated yet at the time this code is generated.
8166 ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
8167 __ load_rax(construct_entry);
8169 ExternalReference entry(Builtins::JSEntryTrampoline);
8172 __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
8173 __ call(kScratchRegister);
8175 // Unlink this frame from the handler chain.
8176 __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
8177 __ pop(Operand(kScratchRegister, 0));
8179 __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
8181 #ifdef ENABLE_LOGGING_AND_PROFILING
8182 // If the current rbp value is the same as the js_entry_sp value, it means that
8183 // the current function is the outermost.
8184 __ movq(kScratchRegister, js_entry_sp);
8185 __ cmpq(rbp, Operand(kScratchRegister, 0));
8186 __ j(not_equal, &not_outermost_js_2);
8187 __ movq(Operand(kScratchRegister, 0), Immediate(0));
8188 __ bind(&not_outermost_js_2);
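// Bookkeeping sketch for js_entry_sp (illustrative, not generated code):
//
//   on entry:  if (js_entry_sp == 0)   js_entry_sp = rbp;  // outermost JS call
//   on exit:   if (js_entry_sp == rbp) js_entry_sp = 0;    // leaving it again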
8191 // Restore the top frame descriptor from the stack.
8193 __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
8194 __ pop(Operand(kScratchRegister, 0));
8196 // Restore callee-saved registers (X64 conventions).
8204 __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
8206 // Restore frame pointer and return.
8212 // -----------------------------------------------------------------------------
8213 // Implementation of stubs.
8215 // Stub classes have public member named masm, not masm_.
8217 void StackCheckStub::Generate(MacroAssembler* masm) {
8218 // Because builtins always remove the receiver from the stack, we
8219 // have to fake one to avoid underflowing the stack. The receiver
8220 // must be inserted below the return address on the stack so we
8221 // temporarily store that in a register.
8223 __ Push(Smi::FromInt(0));
8226 // Do tail-call to runtime routine.
8227 __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
8231 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
8233 Label load_smi, done;
8235 __ JumpIfSmi(number, &load_smi);
8236 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
8240 __ SmiToInteger32(number, number);
8242 __ fild_s(Operand(rsp, 0));
8249 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
8252 Label load_smi, done;
8254 __ JumpIfSmi(src, &load_smi);
8255 __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
8259 __ SmiToInteger32(src, src);
8260 __ cvtlsi2sd(dst, src);
8266 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
8269 __ movq(kScratchRegister, rdx);
8270 LoadFloatOperand(masm, kScratchRegister, dst1);
8271 __ movq(kScratchRegister, rax);
8272 LoadFloatOperand(masm, kScratchRegister, dst2);
8276 void FloatingPointHelper::LoadFloatOperandsFromSmis(MacroAssembler* masm,
8279 __ SmiToInteger32(kScratchRegister, rdx);
8280 __ cvtlsi2sd(dst1, kScratchRegister);
8281 __ SmiToInteger32(kScratchRegister, rax);
8282 __ cvtlsi2sd(dst2, kScratchRegister);
8286 // Input: rdx, rax are the left and right objects of a bit op.
8287 // Output: rax, rcx are left and right integers for a bit op.
8288 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
8290 Label* conversion_failure) {
8291 // Check float operands.
8292 Label arg1_is_object, check_undefined_arg1;
8293 Label arg2_is_object, check_undefined_arg2;
8294 Label load_arg2, done;
8296 __ JumpIfNotSmi(rdx, &arg1_is_object);
8297 __ SmiToInteger32(rdx, rdx);
8300 // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
8301 __ bind(&check_undefined_arg1);
8302 __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
8303 __ j(not_equal, conversion_failure);
8304 __ movl(rdx, Immediate(0));
8307 __ bind(&arg1_is_object);
8308 __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
8309 __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
8310 __ j(not_equal, &check_undefined_arg1);
8311 // Get the untagged integer version of the rdx heap number in rcx.
8312 IntegerConvert(masm, rdx, use_sse3, conversion_failure);
8315 // Here rdx has the untagged integer, rax has a Smi or a heap number.
8316 __ bind(&load_arg2);
8317 // Test if arg2 is a Smi.
8318 __ JumpIfNotSmi(rax, &arg2_is_object);
8319 __ SmiToInteger32(rax, rax);
8323 // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
8324 __ bind(&check_undefined_arg2);
8325 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
8326 __ j(not_equal, conversion_failure);
8327 __ movl(rcx, Immediate(0));
8330 __ bind(&arg2_is_object);
8331 __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
8332 __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
8333 __ j(not_equal, &check_undefined_arg2);
8334 // Get the untagged integer version of the rax heap number in rcx.
8335 IntegerConvert(masm, rax, use_sse3, conversion_failure);
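// Per-operand conversion sketch (illustrative, not generated code; names are
// exposition only):
//
//   if (IsSmi(arg))             value = SmiToInt32(arg);
//   else if (IsHeapNumber(arg)) value = TruncateDoubleToInt32(arg);  // IntegerConvert
//   else if (IsUndefined(arg))  value = 0;                           // ECMA-262 9.5
//   else                        goto conversion_failure;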
8341 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
8344 Label load_smi_lhs, load_smi_rhs, done_load_lhs, done;
8345 __ JumpIfSmi(lhs, &load_smi_lhs);
8346 __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset));
8347 __ bind(&done_load_lhs);
8349 __ JumpIfSmi(rhs, &load_smi_rhs);
8350 __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset));
8353 __ bind(&load_smi_lhs);
8354 __ SmiToInteger64(kScratchRegister, lhs);
8355 __ push(kScratchRegister);
8356 __ fild_d(Operand(rsp, 0));
8357 __ pop(kScratchRegister);
8358 __ jmp(&done_load_lhs);
8360 __ bind(&load_smi_rhs);
8361 __ SmiToInteger64(kScratchRegister, rhs);
8362 __ push(kScratchRegister);
8363 __ fild_d(Operand(rsp, 0));
8364 __ pop(kScratchRegister);
8370 void FloatingPointHelper::CheckNumberOperands(MacroAssembler* masm,
8372 Label test_other, done;
8373 // Test if both operands are numbers (heap_numbers or smis).
8374 // If not, jump to label non_float.
8375 __ JumpIfSmi(rdx, &test_other); // argument in rdx is OK
8376 __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
8377 __ j(not_equal, non_float); // The argument in rdx is not a number.
8379 __ bind(&test_other);
8380 __ JumpIfSmi(rax, &done); // argument in rax is OK
8381 __ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map());
8382 __ j(not_equal, non_float); // The argument in rax is not a number.
8384 // Fall-through: Both operands are numbers.
8389 const char* GenericBinaryOpStub::GetName() {
8390 if (name_ != NULL) return name_;
8391 const int len = 100;
8392 name_ = Bootstrapper::AllocateAutoDeletedArray(len);
8393 if (name_ == NULL) return "OOM";
8394 const char* op_name = Token::Name(op_);
8395 const char* overwrite_name;
8397 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
8398 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
8399 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
8400 default: overwrite_name = "UnknownOverwrite"; break;
8403 OS::SNPrintF(Vector<char>(name_, len),
8404 "GenericBinaryOpStub_%s_%s%s_%s%s_%s%s_%s",
8407 (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
8408 args_in_registers_ ? "RegArgs" : "StackArgs",
8409 args_reversed_ ? "_R" : "",
8410 use_sse3_ ? "SSE3" : "SSE2",
8411 static_operands_type_.ToString(),
8412 BinaryOpIC::GetName(runtime_operands_type_));
8417 void GenericBinaryOpStub::GenerateCall(
8418 MacroAssembler* masm,
8421 if (!ArgsInRegistersSupported()) {
8422 // Pass arguments on the stack.
8426 // The calling convention with registers is left in rdx and right in rax.
8427 Register left_arg = rdx;
8428 Register right_arg = rax;
8429 if (!(left.is(left_arg) && right.is(right_arg))) {
8430 if (left.is(right_arg) && right.is(left_arg)) {
8431 if (IsOperationCommutative()) {
8434 __ xchg(left, right);
8436 } else if (left.is(left_arg)) {
8437 __ movq(right_arg, right);
8438 } else if (right.is(right_arg)) {
8439 __ movq(left_arg, left);
8440 } else if (left.is(right_arg)) {
8441 if (IsOperationCommutative()) {
8442 __ movq(left_arg, right);
8445 // Order of moves important to avoid destroying left argument.
8446 __ movq(left_arg, left);
8447 __ movq(right_arg, right);
8449 } else if (right.is(left_arg)) {
8450 if (IsOperationCommutative()) {
8451 __ movq(right_arg, left);
8454 // Order of moves important to avoid destroying right argument.
8455 __ movq(right_arg, right);
8456 __ movq(left_arg, left);
8459 // Order of moves is not important.
8460 __ movq(left_arg, left);
8461 __ movq(right_arg, right);
8465 // Update flags to indicate that arguments are in registers.
8466 SetArgsInRegisters();
8467 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
8475 void GenericBinaryOpStub::GenerateCall(
8476 MacroAssembler* masm,
8479 if (!ArgsInRegistersSupported()) {
8480 // Pass arguments on the stack.
8484 // The calling convention with registers is left in rdx and right in rax.
8485 Register left_arg = rdx;
8486 Register right_arg = rax;
8487 if (left.is(left_arg)) {
8488 __ Move(right_arg, right);
8489 } else if (left.is(right_arg) && IsOperationCommutative()) {
8490 __ Move(left_arg, right);
8493 // For non-commutative operations, left and right_arg might be
8494 // the same register. Therefore, the order of the moves is
8495 // important here in order to not overwrite left before moving it to left_arg.
8497 __ movq(left_arg, left);
8498 __ Move(right_arg, right);
8501 // Update flags to indicate that arguments are in registers.
8502 SetArgsInRegisters();
8503 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
8511 void GenericBinaryOpStub::GenerateCall(
8512 MacroAssembler* masm,
8515 if (!ArgsInRegistersSupported()) {
8516 // Pass arguments on the stack.
8520 // The calling convention with registers is left in rdx and right in rax.
8521 Register left_arg = rdx;
8522 Register right_arg = rax;
8523 if (right.is(right_arg)) {
8524 __ Move(left_arg, left);
8525 } else if (right.is(left_arg) && IsOperationCommutative()) {
8526 __ Move(right_arg, left);
8529 // For non-commutative operations, right and left_arg might be
8530 // the same register. Therefore, the order of the moves is
8531 // important here in order to not overwrite right before moving it to right_arg.
8533 __ movq(right_arg, right);
8534 __ Move(left_arg, left);
8536 // Update flags to indicate that arguments are in registers.
8537 SetArgsInRegisters();
8538 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
8546 Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
8547 VirtualFrame* frame,
8550 if (ArgsInRegistersSupported()) {
8551 SetArgsInRegisters();
8552 return frame->CallStub(this, left, right);
8556 return frame->CallStub(this, 2);
8561 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
8562 // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
8563 // dividend in rax and rdx free for the division. Use rax, rbx for those.
8564 Comment load_comment(masm, "-- Load arguments");
8565 Register left = rdx;
8566 Register right = rax;
8567 if (op_ == Token::DIV || op_ == Token::MOD) {
8570 if (HasArgsInRegisters()) {
8575 if (!HasArgsInRegisters()) {
8576 __ movq(right, Operand(rsp, 1 * kPointerSize));
8577 __ movq(left, Operand(rsp, 2 * kPointerSize));
8580 // 2. Smi check both operands. Skip the check for OR as it is better combined
8581 // with the actual operation.
8583 if (op_ != Token::BIT_OR) {
8584 Comment smi_check_comment(masm, "-- Smi check arguments");
8585 __ JumpIfNotBothSmi(left, right, &not_smis);
8588 // 3. Operands are both smis (except for OR), perform the operation leaving
8589 // the result in rax and check the result if necessary.
8590 Comment perform_smi(masm, "-- Perform smi operation");
8591 Label use_fp_on_smis;
8594 ASSERT(right.is(rax));
8595 __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
8600 __ SmiSub(left, left, right, &use_fp_on_smis);
8606 ASSERT(right.is(rax));
8607 __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
8611 ASSERT(left.is(rax));
8612 __ SmiDiv(left, left, right, &use_fp_on_smis);
8616 ASSERT(left.is(rax));
8617 __ SmiMod(left, left, right, slow);
8621 ASSERT(right.is(rax));
8622 __ movq(rcx, right); // Save the right operand.
8623 __ SmiOr(right, right, left); // BIT_OR is commutative.
8624 __ testb(right, Immediate(kSmiTagMask));
8625 __ j(not_zero, &not_smis);
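// Illustrative sketch (not generated code): smis have a zero tag bit, so
// OR-ing the operands first checks both at once while already producing the
// desired result when they are smis:
//
//   result = left | right;
//   if ((result & kSmiTagMask) != 0) goto not_smis;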
8628 case Token::BIT_AND:
8629 ASSERT(right.is(rax));
8630 __ SmiAnd(right, right, left); // BIT_AND is commutative.
8633 case Token::BIT_XOR:
8634 ASSERT(right.is(rax));
8635 __ SmiXor(right, right, left); // BIT_XOR is commutative.
8643 __ SmiShiftArithmeticRight(left, left, right);
8646 __ SmiShiftLogicalRight(left, left, right, slow);
8649 __ SmiShiftLeft(left, left, right, slow);
8662 // 4. Emit return of result in rax.
8663 GenerateReturn(masm);
8665 // 5. For some operations emit inline code to perform floating point
8666 // operations on known smis (e.g., if the result of the operation
8667 // overflowed the smi range).
8673 __ bind(&use_fp_on_smis);
8674 if (op_ == Token::DIV) {
8678 // left is rdx, right is rax.
8679 __ AllocateHeapNumber(rbx, rcx, slow);
8680 FloatingPointHelper::LoadFloatOperandsFromSmis(masm, xmm4, xmm5);
8682 case Token::ADD: __ addsd(xmm4, xmm5); break;
8683 case Token::SUB: __ subsd(xmm4, xmm5); break;
8684 case Token::MUL: __ mulsd(xmm4, xmm5); break;
8685 case Token::DIV: __ divsd(xmm4, xmm5); break;
8686 default: UNREACHABLE();
8688 __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm4);
8690 GenerateReturn(masm);
8696 // 6. Non-smi operands, fall out to the non-smi code with the operands in rdx and rax.
8698 Comment done_comment(masm, "-- Enter non-smi code");
8704 // Operands are in rax, rbx at this point.
8710 // Right operand is saved in rcx and rax was destroyed by the smi operation.
8721 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
8724 if (ShouldGenerateSmiCode()) {
8725 GenerateSmiCode(masm, &call_runtime);
8726 } else if (op_ != Token::MOD) {
8727 if (!HasArgsInRegisters()) {
8728 GenerateLoadArguments(masm);
8731 // Floating point case.
8732 if (ShouldGenerateFPCode()) {
8738 if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
8739 HasSmiCodeInStub()) {
8740 // Execution reaches this point when the first non-smi argument occurs
8741 // (and only if smi code is generated). This is the right moment to
8742 // patch to HEAP_NUMBERS state. The transition is attempted only for
8743 // the four basic operations. The stub stays in the DEFAULT state
8744 // forever for all other operations (also if smi code is skipped).
8745 GenerateTypeTransition(masm);
8751 if (static_operands_type_.IsNumber()) {
8752 if (FLAG_debug_code) {
8753 // Assert at runtime that inputs are only numbers.
8754 __ AbortIfNotNumber(rdx, "GenericBinaryOpStub operand not a number.");
8755 __ AbortIfNotNumber(rax, "GenericBinaryOpStub operand not a number.");
8758 FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
8760 // Fast-case: Both operands are numbers.
8761 // xmm4 and xmm5 are volatile XMM registers.
8762 FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
8765 case Token::ADD: __ addsd(xmm4, xmm5); break;
8766 case Token::SUB: __ subsd(xmm4, xmm5); break;
8767 case Token::MUL: __ mulsd(xmm4, xmm5); break;
8768 case Token::DIV: __ divsd(xmm4, xmm5); break;
8769 default: UNREACHABLE();
8771 // Allocate a heap number, if needed.
8772 Label skip_allocation;
8773 OverwriteMode mode = mode_;
8774 if (HasArgsReversed()) {
8775 if (mode == OVERWRITE_RIGHT) {
8776 mode = OVERWRITE_LEFT;
8777 } else if (mode == OVERWRITE_LEFT) {
8778 mode = OVERWRITE_RIGHT;
8782 case OVERWRITE_LEFT:
8783 __ JumpIfNotSmi(rdx, &skip_allocation);
8784 __ AllocateHeapNumber(rbx, rcx, &call_runtime);
8786 __ bind(&skip_allocation);
8789 case OVERWRITE_RIGHT:
8790 // If the argument in rax is already an object, we skip the
8791 // allocation of a heap number.
8792 __ JumpIfNotSmi(rax, &skip_allocation);
8795 // Allocate a heap number for the result. Keep rax and rdx intact
8796 // for the possible runtime call.
8797 __ AllocateHeapNumber(rbx, rcx, &call_runtime);
8799 __ bind(&skip_allocation);
8801 default: UNREACHABLE();
8803 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
8804 GenerateReturn(masm);
8805 __ bind(&not_floats);
8806 if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
8807 !HasSmiCodeInStub()) {
8808 // Execution reaches this point when the first non-number argument
8809 // occurs (and only if smi code is skipped from the stub, otherwise
8810 // the patching has already been done earlier in this case branch).
8811 // A perfect moment to try patching to STRINGS for ADD operation.
8812 if (op_ == Token::ADD) {
8813 GenerateTypeTransition(masm);
8819 // For MOD we go directly to runtime in the non-smi case.
8823 case Token::BIT_AND:
8824 case Token::BIT_XOR:
8828 Label skip_allocation, non_smi_result;
8829 FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
8831 case Token::BIT_OR: __ orl(rax, rcx); break;
8832 case Token::BIT_AND: __ andl(rax, rcx); break;
8833 case Token::BIT_XOR: __ xorl(rax, rcx); break;
8834 case Token::SAR: __ sarl_cl(rax); break;
8835 case Token::SHL: __ shll_cl(rax); break;
8836 case Token::SHR: __ shrl_cl(rax); break;
8837 default: UNREACHABLE();
8839 if (op_ == Token::SHR) {
8840 // Check if result is non-negative. This can only happen for a shift
8841 // by zero, which also doesn't update the sign flag.
8843 __ j(negative, &non_smi_result);
8845 __ JumpIfNotValidSmiValue(rax, &non_smi_result);
8846 // Tag smi result, if possible, and return.
8847 __ Integer32ToSmi(rax, rax);
8848 GenerateReturn(masm);
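// Fit-check sketch (illustrative, not generated code): the SHR result is a
// uint32, and a value with the top bit set cannot be tagged as a
// non-negative smi:
//
//   uint32_t result = static_cast<uint32_t>(left) >> (right & 0x1f);
//   if (static_cast<int32_t>(result) < 0) goto non_smi_result;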
8850 // All ops except SHR return a signed int32 that we load in a HeapNumber.
8852 if (op_ != Token::SHR && non_smi_result.is_linked()) {
8853 __ bind(&non_smi_result);
8854 // Allocate a heap number if needed.
8855 __ movsxlq(rbx, rax); // rbx: sign extended 32-bit result
8857 case OVERWRITE_LEFT:
8858 case OVERWRITE_RIGHT:
8859 // If the operand was an object, we skip the
8860 // allocation of a heap number.
8861 __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
8862 1 * kPointerSize : 2 * kPointerSize));
8863 __ JumpIfNotSmi(rax, &skip_allocation);
8866 __ AllocateHeapNumber(rax, rcx, &call_runtime);
8867 __ bind(&skip_allocation);
8869 default: UNREACHABLE();
8871 // Store the result in the HeapNumber and return.
8872 __ movq(Operand(rsp, 1 * kPointerSize), rbx);
8873 __ fild_s(Operand(rsp, 1 * kPointerSize));
8874 __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
8875 GenerateReturn(masm);
8878 // SHR should return uint32 - go to runtime for non-smi/negative result.
8879 if (op_ == Token::SHR) {
8880 __ bind(&non_smi_result);
8884 default: UNREACHABLE(); break;
8888 // If all else fails, use the runtime system to get the correct
8889 // result. If the arguments were passed in registers, place them on the
8890 // stack in the correct order below the return address.
8891 __ bind(&call_runtime);
8893 if (HasArgsInRegisters()) {
8894 GenerateRegisterArgsPush(masm);
8899 // Registers containing left and right operands respectively.
8902 if (HasArgsReversed()) {
8910 // Test for string arguments before calling runtime.
8911 Label not_strings, both_strings, not_string1, string1, string1_smi2;
8913 // If this stub has already generated FP-specific code then the arguments
8914 // are already in rdx and rax.
8915 if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
8916 GenerateLoadArguments(masm);
8920 is_smi = masm->CheckSmi(lhs);
8921 __ j(is_smi, &not_string1);
8922 __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
8923 __ j(above_equal, &not_string1);
8925 // First argument is a string, test second.
8926 is_smi = masm->CheckSmi(rhs);
8927 __ j(is_smi, &string1_smi2);
8928 __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
8929 __ j(above_equal, &string1);
8931 // First and second argument are strings.
8932 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
8933 __ TailCallStub(&string_add_stub);
8935 __ bind(&string1_smi2);
8936 // First argument is a string, second is a smi. Try to look up the number
8937 // string for the smi in the number string cache.
8938 NumberToStringStub::GenerateLookupNumberStringCache(
8939 masm, rhs, rbx, rcx, r8, true, &string1);
8941 // Replace second argument on stack and tailcall string add stub to make the result.
8943 __ movq(Operand(rsp, 1 * kPointerSize), rbx);
8944 __ TailCallStub(&string_add_stub);
8946 // Only first argument is a string.
8948 __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
8950 // First argument was not a string, test second.
8951 __ bind(&not_string1);
8952 is_smi = masm->CheckSmi(rhs);
8953 __ j(is_smi, &not_strings);
8954 __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
8955 __ j(above_equal, &not_strings);
8957 // Only second argument is a string.
8958 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
8960 __ bind(&not_strings);
8961 // Neither argument is a string.
8962 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
8966 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
8969 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
8972 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
8975 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
8978 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
8980 case Token::BIT_AND:
8981 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
8983 case Token::BIT_XOR:
8984 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
8987 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
8990 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
8993 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
8999 // TODO(kaznacheev) Remove this (along with clearing) if it does not harm performance.
9001 // Generate an unreachable reference to the DEFAULT stub so that it can be
9002 // found at the end of this stub when clearing ICs at GC.
9003 if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
9004 GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
9005 __ TailCallStub(&uninit);
9010 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
9011 ASSERT(!HasArgsInRegisters());
9012 __ movq(rax, Operand(rsp, 1 * kPointerSize));
9013 __ movq(rdx, Operand(rsp, 2 * kPointerSize));
9017 void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
9018 // If arguments are not passed in registers, remove them from the stack before returning.
9020 if (!HasArgsInRegisters()) {
9021 __ ret(2 * kPointerSize); // Remove both operands
9028 void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
9029 ASSERT(HasArgsInRegisters());
9031 if (HasArgsReversed()) {
9042 void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
9045 // Keep a copy of operands on the stack and make sure they are also in registers.
9047 if (HasArgsInRegisters()) {
9048 GenerateRegisterArgsPush(masm);
9050 GenerateLoadArguments(masm);
9053 // Internal frame is necessary to handle exceptions properly.
9054 __ EnterInternalFrame();
9056 // Push arguments on stack if the stub expects them there.
9057 if (!HasArgsInRegisters()) {
9061 // Call the stub proper to get the result in rax.
9062 __ call(&get_result);
9063 __ LeaveInternalFrame();
9065 // Left and right arguments are already on stack.
9067 // Push the operation result. The tail call to BinaryOp_Patch will
9068 // return it to the original caller.
9071 // Push this stub's key.
9072 __ movq(rax, Immediate(MinorKey()));
9073 __ Integer32ToSmi(rax, rax);
9076 // Although the operation and the type info are encoded into the key,
9077 // the encoding is opaque, so push them too.
9078 __ movq(rax, Immediate(op_));
9079 __ Integer32ToSmi(rax, rax);
9082 __ movq(rax, Immediate(runtime_operands_type_));
9083 __ Integer32ToSmi(rax, rax);
9088 // Perform patching to an appropriate fast case and return the result.
9089 __ TailCallExternalReference(
9090 ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
9094 // The entry point for the result calculation is assumed to be immediately
9095 // after this sequence.
9096 __ bind(&get_result);
9100 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
9101 GenericBinaryOpStub stub(key, type_info);
9102 return stub.GetCode();
9106 int CompareStub::MinorKey() {
9107 // Encode the parameters in a unique 16 bit value. To avoid duplicate
9108 // stubs, the never-NaN-NaN condition is only taken into account if the
9109 // condition is equal.
9110 ASSERT(static_cast<unsigned>(cc_) < (1 << 13));
9111 return ConditionField::encode(static_cast<unsigned>(cc_))
9112 | StrictField::encode(strict_)
9113 | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
9114 | IncludeNumberCompareField::encode(include_number_compare_);
9118 // Unfortunately you have to run without snapshots to see most of these
9119 // names in the profile since most compare stubs end up in the snapshot.
9120 const char* CompareStub::GetName() {
9121 if (name_ != NULL) return name_;
9122 const int kMaxNameLength = 100;
9123 name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
9124 if (name_ == NULL) return "OOM";
9126 const char* cc_name;
9128 case less: cc_name = "LT"; break;
9129 case greater: cc_name = "GT"; break;
9130 case less_equal: cc_name = "LE"; break;
9131 case greater_equal: cc_name = "GE"; break;
9132 case equal: cc_name = "EQ"; break;
9133 case not_equal: cc_name = "NE"; break;
9134 default: cc_name = "UnknownCondition"; break;
9137 const char* strict_name = "";
9138 if (strict_ && (cc_ == equal || cc_ == not_equal)) {
9139 strict_name = "_STRICT";
9142 const char* never_nan_nan_name = "";
9143 if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
9144 never_nan_nan_name = "_NO_NAN";
9147 const char* include_number_compare_name = "";
9148 if (!include_number_compare_) {
9149 include_number_compare_name = "_NO_NUMBER";
9152 OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
9153 "CompareStub_%s%s%s%s",
9157 include_number_compare_name);
9162 void StringAddStub::Generate(MacroAssembler* masm) {
9163 Label string_add_runtime;
9165 // Load the two arguments.
9166 __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument.
9167 __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument.
9169 // Make sure that both arguments are strings if not known in advance.
9170 if (string_check_) {
9172 is_smi = masm->CheckSmi(rax);
9173 __ j(is_smi, &string_add_runtime);
9174 __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
9175 __ j(above_equal, &string_add_runtime);
9177 // First argument is a string, test second.
9178 is_smi = masm->CheckSmi(rdx);
9179 __ j(is_smi, &string_add_runtime);
9180 __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
9181 __ j(above_equal, &string_add_runtime);
9184 // Both arguments are strings.
9185 // rax: first string
9186 // rdx: second string
9187 // Check if either of the strings is empty. In that case return the other.
9188 Label second_not_zero_length, both_not_zero_length;
9189 __ movl(rcx, FieldOperand(rdx, String::kLengthOffset));
9191 __ j(not_zero, &second_not_zero_length);
9192 // Second string is empty, result is first string which is already in rax.
9193 __ IncrementCounter(&Counters::string_add_native, 1);
9194 __ ret(2 * kPointerSize);
9195 __ bind(&second_not_zero_length);
9196 __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
9198 __ j(not_zero, &both_not_zero_length);
9199 // First string is empty, result is second string which is in rdx.
9201 __ IncrementCounter(&Counters::string_add_native, 1);
9202 __ ret(2 * kPointerSize);
9204 // Both strings are non-empty.
9205 // rax: first string
9206 // rbx: length of first string
9207 // rcx: length of second string
9208 // rdx: second string
9209 // r8: map of first string if string check was performed above
9210 // r9: map of second string if string check was performed above
9211 Label string_add_flat_result, longer_than_two;
9212 __ bind(&both_not_zero_length);
9214 // If the arguments were known to be strings, the maps were not loaded into
9215 // r8 and r9 by the code above.
9216 if (!string_check_) {
9217 __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
9218 __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
9220 // Get the instance types of the two strings as they will be needed soon.
9221 __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
9222 __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
9224 // Look at the length of the result of adding the two strings.
9226 // When adding two one-character strings the result may already exist in the
9227 // symbol table, so probe it before allocating a new string.
9228 __ cmpl(rbx, Immediate(2));
9229 __ j(not_equal, &longer_than_two);
9231 // Check that both strings are non-external ascii strings.
9232 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
9233 &string_add_runtime);
9235 // Get the two characters forming the new string.
9236 __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
9237 __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
9239 // Try to look up the two character string in the symbol table. If it is not
9240 // found, just allocate a new one.
9241 Label make_two_character_string, make_flat_ascii_string;
9242 GenerateTwoCharacterSymbolTableProbe(masm, rbx, rcx, r14, r12, rdi, r15,
9243 &make_two_character_string);
9244 __ IncrementCounter(&Counters::string_add_native, 1);
9245 __ ret(2 * kPointerSize);
9247 __ bind(&make_two_character_string);
9249 __ jmp(&make_flat_ascii_string);
9251 __ bind(&longer_than_two);
9252 // Check if resulting string will be flat.
9253 __ cmpl(rbx, Immediate(String::kMinNonFlatLength));
9254 __ j(below, &string_add_flat_result);
9255 // Handle exceptionally long strings in the runtime system.
9256 ASSERT((String::kMaxLength & 0x80000000) == 0);
9257 __ cmpl(rbx, Immediate(String::kMaxLength));
9258 __ j(above, &string_add_runtime);
9260 // If result is not supposed to be flat, allocate a cons string object. If
9261 // both strings are ascii the result is an ascii cons string.
9262 // rax: first string
9263 // rbx: length of resulting flat string
9264 // rdx: second string
9265 // r8: instance type of first string
9266 // r9: instance type of second string
9267 Label non_ascii, allocated;
9270 ASSERT(kStringEncodingMask == kAsciiStringTag);
9271 __ testl(rcx, Immediate(kAsciiStringTag));
9272 __ j(zero, &non_ascii);
9273 // Allocate an ascii cons string.
9274 __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
9275 __ bind(&allocated);
9276 // Fill the fields of the cons string.
9277 __ movl(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
9278 __ movl(FieldOperand(rcx, ConsString::kHashFieldOffset),
9279 Immediate(String::kEmptyHashField));
9280 __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
9281 __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
9283 __ IncrementCounter(&Counters::string_add_native, 1);
9284 __ ret(2 * kPointerSize);
9285 __ bind(&non_ascii);
9286 // Allocate a two byte cons string.
9287 __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
9290 // Handle creating a flat result. First check that both strings are not
9291 // external strings.
9292 // rax: first string
9293 // rbx: length of resulting flat string
9294 // rdx: second string
9295 // r8: instance type of first string
9296 // r9: instance type of second string
9297 __ bind(&string_add_flat_result);
9299 __ and_(rcx, Immediate(kStringRepresentationMask));
9300 __ cmpl(rcx, Immediate(kExternalStringTag));
9301 __ j(equal, &string_add_runtime);
9303 __ and_(rcx, Immediate(kStringRepresentationMask));
9304 __ cmpl(rcx, Immediate(kExternalStringTag));
9305 __ j(equal, &string_add_runtime);
9306 // Now check if both strings are ascii strings.
9307 // rax: first string
9308 // rbx: length of resulting flat string
9309 // rdx: second string
9310 // r8: instance type of first string
9311 // r9: instance type of second string
9312 Label non_ascii_string_add_flat_result;
9313 ASSERT(kStringEncodingMask == kAsciiStringTag);
9314 __ testl(r8, Immediate(kAsciiStringTag));
9315 __ j(zero, &non_ascii_string_add_flat_result);
9316 __ testl(r9, Immediate(kAsciiStringTag));
9317 __ j(zero, &string_add_runtime);
9319 __ bind(&make_flat_ascii_string);
9320 // Both strings are ascii strings. As they are short they are both flat.
9321 __ AllocateAsciiString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
9322 // rcx: result string
9324 // Locate first character of result.
9325 __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
9326 // Locate first character of first argument
9327 __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
9328 __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
9329 // rax: first char of first argument
9330 // rbx: result string
9331 // rcx: first character of result
9332 // rdx: second string
9333 // rdi: length of first argument
9334 GenerateCopyCharacters(masm, rcx, rax, rdi, true);
9335 // Locate first character of second argument.
9336 __ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
9337 __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
9338 // rbx: result string
9339 // rcx: next character of result
9340 // rdx: first char of second argument
9341 // rdi: length of second argument
9342 GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
9344 __ IncrementCounter(&Counters::string_add_native, 1);
9345 __ ret(2 * kPointerSize);
9347 // Handle creating a flat two byte result.
9348 // rax: first string - known to be two byte
9349 // rbx: length of resulting flat string
9350 // rdx: second string
9351 // r8: instance type of first string
9352 // r9: instance type of second string
9353 __ bind(&non_ascii_string_add_flat_result);
9354 __ and_(r9, Immediate(kAsciiStringTag));
9355 __ j(not_zero, &string_add_runtime);
9356 // Both strings are two byte strings. As they are short they are both flat.
9358 __ AllocateTwoByteString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
9359 // rcx: result string
9361 // Locate first character of result.
9362 __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
9363 // Locate first character of first argument.
9364 __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
9365 __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
9366 // rax: first char of first argument
9367 // rbx: result string
9368 // rcx: first character of result
9369 // rdx: second argument
9370 // rdi: length of first argument
9371 GenerateCopyCharacters(masm, rcx, rax, rdi, false);
9372 // Locate first character of second argument.
9373 __ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
9374 __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
9375 // rbx: result string
9376 // rcx: next character of result
9377 // rdx: first char of second argument
9378 // rdi: length of second argument
9379 GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
9381 __ IncrementCounter(&Counters::string_add_native, 1);
9382 __ ret(2 * kPointerSize);
9384 // Just jump to runtime to add the two strings.
9385 __ bind(&string_add_runtime);
9386 __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
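  // To summarize the fast paths above: an empty argument returns the other
  // string unchanged; a combined length of two is looked up in the symbol
  // table (or built as a flat ascii string); results shorter than
  // String::kMinNonFlatLength are built as flat sequential strings with both
  // halves copied in; longer results up to String::kMaxLength become cons
  // strings; non-string arguments, external strings and overlong results fall
  // back to Runtime::kStringAdd.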
9390 void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
9397 // This loop copies one character at a time, as it is only used for very short strings.
9400 __ movb(kScratchRegister, Operand(src, 0));
9401 __ movb(Operand(dest, 0), kScratchRegister);
9402 __ addq(src, Immediate(1));
9403 __ addq(dest, Immediate(1));
9405 __ movzxwl(kScratchRegister, Operand(src, 0));
9406 __ movw(Operand(dest, 0), kScratchRegister);
9407 __ addq(src, Immediate(2));
9408 __ addq(dest, Immediate(2));
9410 __ subl(count, Immediate(1));
9411 __ j(not_zero, &loop);
9415 void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
9420 // Copy characters using rep movs of quadwords. Copy any remaining bytes one
9421 // at a time after the block copy.
9423 ASSERT(dest.is(rdi)); // rep movs destination
9424 ASSERT(src.is(rsi)); // rep movs source
9425 ASSERT(count.is(rcx)); // rep movs count
9427 // Nothing to do for zero characters.
9429 __ testq(count, count);
9432 // Make count the number of bytes to copy.
9434 ASSERT_EQ(2, sizeof(uc16)); // NOLINT
9435 __ addq(count, count);
9438 // Don't enter the rep movs if there are fewer than 8 bytes to copy.
9440 __ testq(count, Immediate(~7));
9441 __ j(zero, &last_bytes);
9443 // Copy from rsi to rdi using the rep movs instruction.
9444 __ movq(kScratchRegister, count);
9445 __ sar(count, Immediate(3)); // Number of quadwords to copy.
9448 // Find number of bytes left.
9449 __ movq(count, kScratchRegister);
9450 __ and_(count, Immediate(7));
9452 // Check if there are more bytes to copy.
9453 __ bind(&last_bytes);
9454 __ testq(count, count);
9457 // Copy remaining characters.
9460 __ movb(kScratchRegister, Operand(src, 0));
9461 __ movb(Operand(dest, 0), kScratchRegister);
9462 __ addq(src, Immediate(1));
9463 __ addq(dest, Immediate(1));
9464 __ subq(count, Immediate(1));
9465 __ j(not_zero, &loop);
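
// The sequence above amounts to a byte copy done eight bytes at a time with a
// scalar tail. A plain C++ sketch of the same strategy (illustrative only; the
// generated code uses rep movs for the block part):
#if 0
static void CopyQuadwordsThenBytes(byte* dest, const byte* src,
                                   int byte_count) {
  int block_bytes = (byte_count >> 3) << 3;    // Whole 8-byte blocks.
  for (int i = 0; i < block_bytes; i += 8) {   // Block copy (rep movs part).
    for (int j = 0; j < 8; j++) dest[i + j] = src[i + j];
  }
  for (int i = block_bytes; i < byte_count; i++) {  // Remaining 0..7 bytes.
    dest[i] = src[i];
  }
}
#endif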
9470 void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
9478 // Register scratch3 is the general scratch register in this function.
9479 Register scratch = scratch3;
9481 // Make sure that both characters are not digits, as such strings have a
9482 // different hash algorithm. Don't try to look for these in the symbol table.
9483 Label not_array_index;
9484 __ movq(scratch, c1);
9485 __ subq(scratch, Immediate(static_cast<int>('0')));
9486 __ cmpq(scratch, Immediate(static_cast<int>('9' - '0')));
9487 __ j(above, ¬_array_index);
9488 __ movq(scratch, c2);
9489 __ subq(scratch, Immediate(static_cast<int>('0')));
9490 __ cmpq(scratch, Immediate(static_cast<int>('9' - '0')));
9491 __ j(below_equal, not_found);
9493 __ bind(¬_array_index);
9494 // Calculate the two character string hash.
9495 Register hash = scratch1;
9496 GenerateHashInit(masm, hash, c1, scratch);
9497 GenerateHashAddCharacter(masm, hash, c2, scratch);
9498 GenerateHashGetHash(masm, hash, scratch);
9500 // Collect the two characters in a register.
9501 Register chars = c1;
9502 __ shl(c2, Immediate(kBitsPerByte));
9505 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
9506 // hash: hash of two character string.
9508 // Load the symbol table.
9509 Register symbol_table = c2;
9510 __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
9512 // Calculate capacity mask from the symbol table capacity.
9513 Register mask = scratch2;
9514 __ movq(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
9515 __ SmiToInteger32(mask, mask);
9518 Register undefined = scratch4;
9519 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
9522 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
9523 // hash: hash of two character string (32-bit int)
9524 // symbol_table: symbol table
9525 // mask: capacity mask (32-bit int)
9526 // undefined: undefined value
9529 // Perform a number of probes in the symbol table.
9530 static const int kProbes = 4;
9531 Label found_in_symbol_table;
9532 Label next_probe[kProbes];
9533 for (int i = 0; i < kProbes; i++) {
9534 // Calculate entry in symbol table.
9535 __ movl(scratch, hash);
9537 __ addl(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
9539 __ andl(scratch, mask);
9541 // Load the entry from the symbol table.
9542 Register candidate = scratch; // Scratch register contains candidate.
9543 ASSERT_EQ(1, SymbolTable::kEntrySize);
9545 FieldOperand(symbol_table,
9548 SymbolTable::kElementsStartOffset));
9550 // If entry is undefined no string with this hash can be found.
9551 __ cmpq(candidate, undefined);
9552 __ j(equal, not_found);
9554 // If length is not 2 the string is not a candidate.
9555 __ cmpl(FieldOperand(candidate, String::kLengthOffset), Immediate(2));
9556 __ j(not_equal, &next_probe[i]);
9558 // We use kScratchRegister as a temporary register on the assumption that
9559 // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly.
9560 Register temp = kScratchRegister;
9562 // Check that the candidate is a non-external ascii string.
9563 __ movq(temp, FieldOperand(candidate, HeapObject::kMapOffset));
9564 __ movzxbl(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
9565 __ JumpIfInstanceTypeIsNotSequentialAscii(
9566 temp, temp, &next_probe[i]);
9568 // Check if the two characters match.
9569 __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
9570 __ andl(temp, Immediate(0x0000ffff));
9571 __ cmpl(chars, temp);
9572 __ j(equal, &found_in_symbol_table);
9573 __ bind(&next_probe[i]);
9576 // No matching 2 character string found by probing.
9579 // Scratch register contains result when we fall through to here.
9580 Register result = scratch;
9581 __ bind(&found_in_symbol_table);
9582 if (!result.is(rax)) {
9583 __ movq(rax, result);
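  // The probe loop above is a bounded open-addressing lookup: starting from
  // hash & mask, it inspects kProbes entries (each offset by
  // SymbolTable::GetProbeOffset(i)), accepts the first sequential ascii
  // candidate of length 2 whose two characters match, and treats an undefined
  // entry as "not in the table".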
9588 void StringStubBase::GenerateHashInit(MacroAssembler* masm,
9592 // hash = character + (character << 10);
9593 __ movl(hash, character);
9594 __ shll(hash, Immediate(10));
9595 __ addl(hash, character);
9596 // hash ^= hash >> 6;
9597 __ movl(scratch, hash);
9598 __ sarl(scratch, Immediate(6));
9599 __ xorl(hash, scratch);
9603 void StringStubBase::GenerateHashAddCharacter(MacroAssembler* masm,
9607 // hash += character;
9608 __ addl(hash, character);
9609 // hash += hash << 10;
9610 __ movl(scratch, hash);
9611 __ shll(scratch, Immediate(10));
9612 __ addl(hash, scratch);
9613 // hash ^= hash >> 6;
9614 __ movl(scratch, hash);
9615 __ sarl(scratch, Immediate(6));
9616 __ xorl(hash, scratch);
9620 void StringStubBase::GenerateHashGetHash(MacroAssembler* masm,
9623 // hash += hash << 3;
9624 __ movl(scratch, hash);
9625 __ shll(scratch, Immediate(3));
9626 __ addl(hash, scratch);
9627 // hash ^= hash >> 11;
9628 __ movl(scratch, hash);
9629 __ sarl(scratch, Immediate(11));
9630 __ xorl(hash, scratch);
9631 // hash += hash << 15;
9632 __ movl(scratch, hash);
9633 __ shll(scratch, Immediate(15));
9634 __ addl(hash, scratch);
9636 // if (hash == 0) hash = 27;
9637 Label hash_not_zero;
9638 __ testl(hash, hash);
9639 __ j(not_zero, &hash_not_zero);
9640 __ movl(hash, Immediate(27));
9641 __ bind(&hash_not_zero);
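
// Taken together, GenerateHashInit, GenerateHashAddCharacter and
// GenerateHashGetHash compute the mixing function sketched below in plain C++
// for a two character string (written with unsigned 32-bit arithmetic as an
// illustration of the algorithm, not a bit-exact copy of the generated code):
#if 0
static uint32_t TwoCharacterHashSketch(uint32_t c1, uint32_t c2) {
  uint32_t hash = c1 + (c1 << 10);   // GenerateHashInit
  hash ^= hash >> 6;
  hash += c2;                        // GenerateHashAddCharacter
  hash += hash << 10;
  hash ^= hash >> 6;
  hash += hash << 3;                 // GenerateHashGetHash
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash == 0 ? 27 : hash;      // Zero is replaced by 27, as above.
}
#endif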
9644 void SubStringStub::Generate(MacroAssembler* masm) {
9647 // Stack frame on entry.
9648 // rsp[0]: return address
9653 const int kToOffset = 1 * kPointerSize;
9654 const int kFromOffset = kToOffset + kPointerSize;
9655 const int kStringOffset = kFromOffset + kPointerSize;
9656 const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
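  // kArgumentsSize covers the three arguments (string, from and to), i.e.
  // 3 * kPointerSize, which is what the ret instructions below pop.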
9658 // Make sure first argument is a string.
9659 __ movq(rax, Operand(rsp, kStringOffset));
9660 ASSERT_EQ(0, kSmiTag);
9661 __ testl(rax, Immediate(kSmiTagMask));
9662 __ j(zero, &runtime);
9663 Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
9664 __ j(NegateCondition(is_string), &runtime);
9667 // rbx: instance type
9668 // Calculate length of sub string using the smi values.
9669 Label result_longer_than_two;
9670 __ movq(rcx, Operand(rsp, kToOffset));
9671 __ movq(rdx, Operand(rsp, kFromOffset));
9672 __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
9674 __ SmiSub(rcx, rcx, rdx, NULL); // Overflow doesn't happen.
9675 __ j(negative, &runtime);
9676 // Special handling of sub-strings of length 1 and 2. One character strings
9677 // are handled in the runtime system (looked up in the single character
9678 // cache). Two character strings are looked up in the symbol table.
9679 __ SmiToInteger32(rcx, rcx);
9680 __ cmpl(rcx, Immediate(2));
9681 __ j(greater, &result_longer_than_two);
9682 __ j(less, &runtime);
9684 // Sub string of length 2 requested.
9686 // rbx: instance type
9687 // rcx: sub string length (value is 2)
9688 // rdx: from index (smi)
9689 __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &runtime);
9691 // Get the two characters forming the sub string.
9692 __ SmiToInteger32(rdx, rdx); // From index is no longer smi.
9693 __ movzxbq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
9695 FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize + 1));
9697 // Try to look up the two character string in the symbol table.
9698 Label make_two_character_string;
9699 GenerateTwoCharacterSymbolTableProbe(masm, rbx, rcx, rax, rdx, rdi, r14,
9700 &make_two_character_string);
9701 __ ret(3 * kPointerSize);
9703 __ bind(&make_two_character_string);
9704 // Setup registers for allocating the two character string.
9705 __ movq(rax, Operand(rsp, kStringOffset));
9706 __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
9707 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
9710 __ bind(&result_longer_than_two);
9713 // rbx: instance type
9714 // rcx: result string length
9715 // Check for flat ascii string
9716 Label non_ascii_flat;
9717 __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &non_ascii_flat);
9719 // Allocate the result.
9720 __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
9722 // rax: result string
9723 // rcx: result string length
9724 __ movq(rdx, rsi); // rsi is used by the following code.
9725 // Locate first character of result.
9726 __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
9727 // Load string argument and locate character of sub string start.
9728 __ movq(rsi, Operand(rsp, kStringOffset));
9729 __ movq(rbx, Operand(rsp, kFromOffset));
9731 SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1);
9732 __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
9733 SeqAsciiString::kHeaderSize - kHeapObjectTag));
9736 // rax: result string
9737 // rcx: result length
9738 // rdx: original value of rsi
9739 // rdi: first character of result
9740 // rsi: character of sub string start
9741 GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
9742 __ movq(rsi, rdx); // Restore rsi.
9743 __ IncrementCounter(&Counters::sub_string_native, 1);
9744 __ ret(kArgumentsSize);
9746 __ bind(&non_ascii_flat);
9748 // rbx: instance type & kStringRepresentationMask | kStringEncodingMask
9749 // rcx: result string length
9750 // Check for sequential two byte string
9751 __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag));
9752 __ j(not_equal, &runtime);
9754 // Allocate the result.
9755 __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime);
9757 // rax: result string
9758 // rcx: result string length
9759 __ movq(rdx, rsi); // rsi is used by the following code.
9760 // Locate first character of result.
9761 __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
9762 // Load string argument and locate character of sub string start.
9763 __ movq(rsi, Operand(rsp, kStringOffset));
9764 __ movq(rbx, Operand(rsp, kFromOffset));
9766 SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2);
9767 __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
9768 SeqAsciiString::kHeaderSize - kHeapObjectTag));
9771 // rax: result string
9772 // rcx: result length
9773 // rdx: original value of rsi
9774 // rdi: first character of result
9775 // rsi: character of sub string start
9776 GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
9777 __ movq(rsi, rdx); // Restore rsi.
9778 __ IncrementCounter(&Counters::sub_string_native, 1);
9779 __ ret(kArgumentsSize);
9781 // Just jump to runtime to create the sub string.
9783 __ TailCallRuntime(Runtime::kSubString, 3, 1);
9787 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
9793 Register scratch4) {
9794 // Ensure that you can always subtract a string length from a non-negative
9795 // number (e.g. another length).
9796 ASSERT(String::kMaxLength < 0x7fffffff);
9798 // Find minimum length and length difference.
9799 __ movl(scratch1, FieldOperand(left, String::kLengthOffset));
9800 __ movl(scratch4, scratch1);
9801 __ subl(scratch4, FieldOperand(right, String::kLengthOffset));
9802 // Register scratch4 now holds left.length - right.length.
9803 const Register length_difference = scratch4;
9805 __ j(less, &left_shorter);
9806 // The right string isn't longer than the left one.
9807 // Get the right string's length by subtracting the (non-negative) difference
9808 // from the left string's length.
9809 __ subl(scratch1, length_difference);
9810 __ bind(&left_shorter);
9811 // Register scratch1 now holds Min(left.length, right.length).
9812 const Register min_length = scratch1;
9814 Label compare_lengths;
9815 // If min-length is zero, go directly to comparing lengths.
9816 __ testl(min_length, min_length);
9817 __ j(zero, &compare_lengths);
9819 // Registers scratch2 and scratch3 are free.
9820 Label result_not_equal;
9823 // Check characters 0 .. min_length - 1 in a loop.
9824 // Use scratch3 as loop index, min_length as limit and scratch2 as a temporary for the current character.
9826 const Register index = scratch3;
9827 __ movl(index, Immediate(0)); // Index into strings.
9829 // Compare characters.
9830 // TODO(lrn): Could we load more than one character at a time?
9831 __ movb(scratch2, FieldOperand(left,
9834 SeqAsciiString::kHeaderSize));
9835 // Increment index before the second load and use a -1 displacement there to
9836 // compensate, giving the first load extra time to complete.
9837 __ addl(index, Immediate(1));
9838 __ cmpb(scratch2, FieldOperand(right,
9841 SeqAsciiString::kHeaderSize - 1));
9842 __ j(not_equal, &result_not_equal);
9843 __ cmpl(index, min_length);
9844 __ j(not_equal, &loop);
9846 // Completed loop without finding different characters.
9847 // Compare lengths (precomputed).
9848 __ bind(&compare_lengths);
9849 __ testl(length_difference, length_difference);
9850 __ j(not_zero, &result_not_equal);
9853 __ Move(rax, Smi::FromInt(EQUAL));
9854 __ ret(2 * kPointerSize);
9856 Label result_greater;
9857 __ bind(&result_not_equal);
9858 // Unequal comparison of left to right, either character or length.
9859 __ j(greater, &result_greater);
9862 __ Move(rax, Smi::FromInt(LESS));
9863 __ ret(2 * kPointerSize);
9865 // Result is GREATER.
9866 __ bind(&result_greater);
9867 __ Move(rax, Smi::FromInt(GREATER));
9868 __ ret(2 * kPointerSize);
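
// The code above implements the usual lexicographic comparison: compare the
// first min(left.length, right.length) characters and fall back to the length
// difference when they are all equal. Plain C++ sketch (illustrative only;
// the characters are ascii, so signedness does not matter here):
#if 0
static int CompareFlatAsciiSketch(const char* left, int left_length,
                                  const char* right, int right_length) {
  int min_length = left_length < right_length ? left_length : right_length;
  for (int i = 0; i < min_length; i++) {
    if (left[i] != right[i]) {
      return left[i] < right[i] ? LESS : GREATER;
    }
  }
  if (left_length == right_length) return EQUAL;
  return left_length < right_length ? LESS : GREATER;
}
#endif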
9872 void StringCompareStub::Generate(MacroAssembler* masm) {
9875 // Stack frame on entry.
9876 // rsp[0]: return address
9877 // rsp[8]: right string
9878 // rsp[16]: left string
9880 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
9881 __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
9883 // Check for identity.
9886 __ j(not_equal, ¬_same);
9887 __ Move(rax, Smi::FromInt(EQUAL));
9888 __ IncrementCounter(&Counters::string_compare_native, 1);
9889 __ ret(2 * kPointerSize);
9893 // Check that both are sequential ASCII strings.
9894 __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
9896 // Inline comparison of ascii strings.
9897 __ IncrementCounter(&Counters::string_compare_native, 1);
9898 GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
9900 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
9901 // tagged as a small integer.
9903 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
9911 typedef double (*ModuloFunction)(double, double);
9912 // Define custom fmod implementation.
9913 ModuloFunction CreateModuloFunction() {
9915 byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
9919 Assembler masm(buffer, static_cast<int>(actual_size));
9920 // Generated code is put into a fixed, unmovable, buffer, and not into
9921 // the V8 heap. We can't, and don't, refer to any relocatable addresses
9922 // (e.g. the JavaScript nan-object).
9924 // Windows 64 ABI passes double arguments in xmm0, xmm1 and
9925 // returns result in xmm0.
9926 // Argument backing space is allocated on the stack above
9927 // the return address.
9930 // Load y and x (use argument backing store as temporary storage).
9931 __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
9932 __ movsd(Operand(rsp, kPointerSize), xmm0);
9933 __ fld_d(Operand(rsp, kPointerSize * 2));
9934 __ fld_d(Operand(rsp, kPointerSize));
9936 // Clear exception flags before operation.
9938 Label no_exceptions;
9941 // Clear if Invalid Operand or Zero Division exceptions are set.
9942 __ testb(rax, Immediate(5));
9943 __ j(zero, &no_exceptions);
9945 __ bind(&no_exceptions);
9948 // Compute st(0) % st(1)
9950 Label partial_remainder_loop;
9951 __ bind(&partial_remainder_loop);
9955 __ testl(rax, Immediate(0x400 /* C2 */));
9956 // If C2 is set, the computation only has a partial result. Loop to
9957 // continue the computation.
9958 __ j(not_zero, &partial_remainder_loop);
9962 Label return_result;
9963 // If Invalid Operand or Zero Division exceptions are set, return NaN.
9965 __ testb(rax, Immediate(5));
9966 __ j(zero, &valid_result);
9967 __ fstp(0); // Drop result in st(0).
9968 int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
9969 __ movq(rcx, kNaNValue, RelocInfo::NONE);
9970 __ movq(Operand(rsp, kPointerSize), rcx);
9971 __ movsd(xmm0, Operand(rsp, kPointerSize));
9972 __ jmp(&return_result);
9974 // If result is valid, return that.
9975 __ bind(&valid_result);
9976 __ fstp_d(Operand(rsp, kPointerSize));
9977 __ movsd(xmm0, Operand(rsp, kPointerSize));
9979 // Clean up the FPU stack and exceptions and return xmm0.
9980 __ bind(&return_result);
9981 __ fstp(0); // Unload y.
9983 Label clear_exceptions;
9984 __ testb(rax, Immediate(0x3f /* Any Exception*/));
9985 __ j(not_zero, &clear_exceptions);
9987 __ bind(&clear_exceptions);
9992 masm.GetCode(&desc);
9993 // Call the function from C++.
9994 return FUNCTION_CAST<ModuloFunction>(buffer);
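
// Usage sketch: the generated function behaves like fmod() on two doubles
// (illustrative only, assuming the build where this helper is compiled in):
#if 0
static double Remainder(double x, double y) {
  static ModuloFunction modulo = CreateModuloFunction();
  return modulo(x, y);  // e.g. Remainder(5.5, 2.0) == 1.5, like fmod(5.5, 2.0).
}
#endif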
10002 } } // namespace v8::internal