// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "bootstrapper.h"
#include "codegen-inl.h"
#include "regexp-macro-assembler.h"
#include "register-allocator-inl.h"
#include "virtual-frame-inl.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm_)
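
// The __ shorthand emits through the code generator's MacroAssembler.
// Later comments in this file use masm_-> directly where the macro's
// possible expansion (e.g. under generated code coverage) would get in
// the way of patching or of returning a value.
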
// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.

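// Each entry of registers_ encodes one of three actions for the
// corresponding register: kIgnore (nothing to do), kPush (save and
// restore via the stack), or a frame offset relative to rbp, possibly
// tagged with kSyncedFlag when the value is already in memory.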
void DeferredCode::SaveRegisters() {
  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
    int action = registers_[i];
    if (action == kPush) {
      __ push(RegisterAllocator::ToRegister(i));
    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
      __ movq(Operand(rbp, action), RegisterAllocator::ToRegister(i));
    }
  }
}


void DeferredCode::RestoreRegisters() {
  // Restore registers in reverse order due to the stack.
  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
    int action = registers_[i];
    if (action == kPush) {
      __ pop(RegisterAllocator::ToRegister(i));
    } else if (action != kIgnore) {
      action &= ~kSyncedFlag;
      __ movq(RegisterAllocator::ToRegister(i), Operand(rbp, action));
    }
  }
}


// -------------------------------------------------------------------------
// CodeGenState implementation.

CodeGenState::CodeGenState(CodeGenerator* owner)
    : owner_(owner),
      destination_(NULL),
      previous_(NULL) {
  owner_->set_state(this);
}


CodeGenState::CodeGenState(CodeGenerator* owner,
                           ControlDestination* destination)
    : owner_(owner),
      destination_(destination),
      previous_(owner->state()) {
  owner_->set_state(this);
}


CodeGenState::~CodeGenState() {
  ASSERT(owner_->state() == this);
  owner_->set_state(previous_);
}


// -------------------------------------------------------------------------
// Deferred code objects
//
// These subclasses of DeferredCode add pieces of code to the end of
// generated code. They are branched to from the generated code, and
// keep some slower code out of the main body of the generated code.
// Many of them call a code stub or a runtime function.

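// A typical use site looks roughly like this (illustrative sketch only;
// DeferredFoo stands for any of the subclasses below):
//
//   DeferredFoo* deferred = new DeferredFoo(dst);
//   deferred->Branch(overflow);   // Slow case: jump to code at the end.
//   ...                           // Fast case: emitted inline.
//   deferred->BindExit();         // Deferred code rejoins here.
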
class DeferredInlineSmiAdd: public DeferredCode {
 public:
  DeferredInlineSmiAdd(Register dst,
                       Smi* value,
                       OverwriteMode overwrite_mode)
      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiAdd");
  }
  virtual void Generate();
 private:
  Register dst_;
  Smi* value_;
  OverwriteMode overwrite_mode_;
};


// The result of value + src is in dst. It either overflowed or was not
// smi tagged. Undo the speculative addition and call the appropriate
// specialized stub for add. The result is left in dst.
class DeferredInlineSmiAddReversed: public DeferredCode {
 public:
  DeferredInlineSmiAddReversed(Register dst,
                               Smi* value,
                               OverwriteMode overwrite_mode)
      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiAddReversed");
  }
  virtual void Generate();
 private:
  Register dst_;
  Smi* value_;
  OverwriteMode overwrite_mode_;
};


class DeferredInlineSmiSub: public DeferredCode {
 public:
  DeferredInlineSmiSub(Register dst,
                       Smi* value,
                       OverwriteMode overwrite_mode)
      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiSub");
  }
  virtual void Generate();
 private:
  Register dst_;
  Smi* value_;
  OverwriteMode overwrite_mode_;
};


// Call the appropriate binary operation stub to compute src op value
// and leave the result in dst.
class DeferredInlineSmiOperation: public DeferredCode {
 public:
  DeferredInlineSmiOperation(Token::Value op, Register dst, Register src,
                             Smi* value, OverwriteMode overwrite_mode)
      : op_(op), dst_(dst), src_(src), value_(value),
        overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiOperation");
  }
  virtual void Generate();
 private:
  Token::Value op_;
  Register dst_;
  Register src_;
  Smi* value_;
  OverwriteMode overwrite_mode_;
};

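
// These deferred objects back the speculative inline smi code: the fast
// path performs the smi operation inline and transfers to the deferred
// code when an operand was not a smi or the operation overflowed; the
// deferred code undoes any speculative change (see above) and calls the
// generic stub, leaving the result in dst.
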
class FloatingPointHelper : public AllStatic {
 public:
  // Code pattern for loading a floating point value. Input value must
  // be either a smi or a heap number object (fp value). Requirements:
  // operand on TOS+1. Returns operand as floating point number on FPU
  // stack.
  static void LoadFloatOperand(MacroAssembler* masm, Register scratch);

  // Code pattern for loading a floating point value. Input value must
  // be either a smi or a heap number object (fp value). Requirements:
  // operand in src register. Returns operand as floating point number
  // in XMM register. May destroy src register.
  static void LoadFloatOperand(MacroAssembler* masm,
                               Register src,
                               XMMRegister dst);

  // Code pattern for loading a possible number into a XMM register.
  // If the contents of src is not a number, control branches to
  // the Label not_number. If contents of src is a smi or a heap number
  // object (fp value), it is loaded into the XMM register as a double.
  // The register src is not changed, and src may not be kScratchRegister.
  static void LoadFloatOperand(MacroAssembler* masm,
                               Register src,
                               XMMRegister dst,
                               Label* not_number);

  // Code pattern for loading floating point values. Input values must
  // be either smi or heap number objects (fp values). Requirements:
  // operand_1 in rdx, operand_2 in rax; Returns operands as
  // floating point numbers in XMM registers.
  static void LoadFloatOperands(MacroAssembler* masm,
                                XMMRegister dst1,
                                XMMRegister dst2);

  // Similar to LoadFloatOperands, assumes that the operands are smis.
  static void LoadFloatOperandsFromSmis(MacroAssembler* masm,
                                        XMMRegister dst1,
                                        XMMRegister dst2);

  // Code pattern for loading floating point values onto the fp stack.
  // Input values must be either smi or heap number objects (fp values).
  // Requirements:
  // Register version: operands in registers lhs and rhs.
  // Stack version: operands on TOS+1 and TOS+2.
  // Returns operands as floating point numbers on fp stack.
  static void LoadFloatOperands(MacroAssembler* masm,
                                Register lhs,
                                Register rhs);

  // Test if operands are smi or number objects (fp). Requirements:
  // operand_1 in rax, operand_2 in rdx; falls through on float or smi
  // operands, jumps to the non_float label otherwise.
  static void CheckNumberOperands(MacroAssembler* masm,
                                  Label* non_float);

  // Takes the operands in rdx and rax and loads them as integers in rax
  // and rcx.
  static void LoadAsIntegers(MacroAssembler* masm,
                             Label* operand_conversion_failure);
};

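
// A generic binary fp operation typically combines these helpers roughly
// as follows (illustrative sketch only, not code from this file):
//
//   Label non_float;
//   FloatingPointHelper::CheckNumberOperands(masm, &non_float);
//   FloatingPointHelper::LoadFloatOperands(masm, xmm0, xmm1);
//   __ addsd(xmm0, xmm1);  // e.g. for Token::ADD
//   ...
//   __ bind(&non_float);   // Fall back to the runtime.
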
// -----------------------------------------------------------------------------
// CodeGenerator implementation.

CodeGenerator::CodeGenerator(MacroAssembler* masm)
    : masm_(masm),
      frame_(NULL),
      allocator_(NULL),
      state_(NULL),
      loop_nesting_(0),
      function_return_is_shadowed_(false),
      in_spilled_code_(false) {
}


void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
  // Call the runtime to declare the globals. The inevitable call
  // will sync frame elements to memory anyway, so we do it eagerly to
  // allow us to push the arguments directly into place.
  frame_->SyncRange(0, frame_->element_count() - 1);

  __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
  frame_->EmitPush(rsi);  // The context is the first argument.
  frame_->EmitPush(kScratchRegister);
  frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
  Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
  // Return value is ignored.
}


void CodeGenerator::Generate(CompilationInfo* info) {
  // Record the position for debugging purposes.
  CodeForFunctionPosition(info->function());
  Comment cmnt(masm_, "[ function compiled by virtual frame code generator");

  // Initialize state.
  info_ = info;
  ASSERT(allocator_ == NULL);
  RegisterAllocator register_allocator(this);
  allocator_ = &register_allocator;
  ASSERT(frame_ == NULL);
  frame_ = new VirtualFrame();
  set_in_spilled_code(false);

  // Adjust for function-level loop nesting.
  ASSERT_EQ(0, loop_nesting_);
  loop_nesting_ += info->loop_nesting();

  JumpTarget::set_compiling_deferred_code(false);

#ifdef DEBUG
  if (strlen(FLAG_stop_at) > 0 &&
      info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    frame_->SpillAll();
    __ int3();
  }
#endif

  // New scope to get automatic timing calculation.
  { HistogramTimerScope codegen_timer(&Counters::code_generation);
    CodeGenState state(this);

    // Stack: receiver, arguments, return address.
    // rbp: caller's frame pointer
    // rsp: stack pointer
    // rdi: called JS function
    // rsi: callee's context
    allocator_->Initialize();

    if (info->mode() == CompilationInfo::PRIMARY) {
      frame_->Enter();

      // Allocate space for locals and initialize them.
      frame_->AllocateStackSlots();

      // Allocate the local context if needed.
      int heap_slots = scope()->num_heap_slots();
      if (heap_slots > 0) {
        Comment cmnt(masm_, "[ allocate local context");
        // Allocate local context.
        // Get outer context and create a new context based on it.
        frame_->PushFunction();
        Result context;
        if (heap_slots <= FastNewContextStub::kMaximumSlots) {
          FastNewContextStub stub(heap_slots);
          context = frame_->CallStub(&stub, 1);
        } else {
          context = frame_->CallRuntime(Runtime::kNewContext, 1);
        }

        // Update context local.
        frame_->SaveContextRegister();

        // Verify that the runtime call result and rsi agree.
        if (FLAG_debug_code) {
          __ cmpq(context.reg(), rsi);
          __ Assert(equal, "Runtime::NewContext should end up in rsi");
        }
      }

      // TODO(1241774): Improve this code:
      // 1) only needed if we have a context
      // 2) no need to recompute context ptr every single time
      // 3) don't copy parameter operand code from SlotOperand!
      {
        Comment cmnt2(masm_, "[ copy context parameters into .context");
        // Note that iteration order is relevant here! If we have the same
        // parameter twice (e.g., function (x, y, x)), and that parameter
        // needs to be copied into the context, it must be the last argument
        // passed to the parameter that needs to be copied. This is a rare
        // case so we don't check for it, instead we rely on the copying
        // order: such a parameter is copied repeatedly into the same
        // context location and thus the last value is what is seen inside
        // the function.
        for (int i = 0; i < scope()->num_parameters(); i++) {
          Variable* par = scope()->parameter(i);
          Slot* slot = par->slot();
          if (slot != NULL && slot->type() == Slot::CONTEXT) {
            // The use of SlotOperand below is safe in unspilled code
            // because the slot is guaranteed to be a context slot.
            //
            // There are no parameters in the global scope.
            ASSERT(!scope()->is_global_scope());
            frame_->PushParameterAt(i);
            Result value = frame_->Pop();
            value.ToRegister();

            // SlotOperand loads context.reg() with the context object
            // stored to, used below in RecordWrite.
            Result context = allocator_->Allocate();
            ASSERT(context.is_valid());
            __ movq(SlotOperand(slot, context.reg()), value.reg());
            int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
            Result scratch = allocator_->Allocate();
            ASSERT(scratch.is_valid());
            frame_->Spill(context.reg());
            frame_->Spill(value.reg());
            __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
          }
        }
      }

      // Store the arguments object. This must happen after context
      // initialization because the arguments object may be stored in
      // the context.
      if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
        StoreArgumentsObject(true);
      }

      // Initialize ThisFunction reference if present.
      if (scope()->is_function_scope() && scope()->function() != NULL) {
        frame_->Push(Factory::the_hole_value());
        StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
      }
    } else {
      // When used as the secondary compiler for splitting, rbp, rsi,
      // and rdi have been pushed on the stack. Adjust the virtual
      // frame to match this state.
      frame_->Adjust(3);
      allocator_->Unuse(rdi);

      // Bind all the bailout labels to the beginning of the function.
      List<CompilationInfo::Bailout*>* bailouts = info->bailouts();
      for (int i = 0; i < bailouts->length(); i++) {
        __ bind(bailouts->at(i)->label());
      }
    }

    // Initialize the function return target after the locals are set
    // up, because it needs the expected frame height from the frame.
    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
    function_return_is_shadowed_ = false;

    // Generate code to 'execute' declarations and initialize functions
    // (source elements). In case of an illegal redeclaration we need to
    // handle that instead of processing the declarations.
    if (scope()->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ illegal redeclarations");
      scope()->VisitIllegalRedeclaration(this);
    } else {
      Comment cmnt(masm_, "[ declarations");
      ProcessDeclarations(scope()->declarations());
      // Bail out if a stack-overflow exception occurred when processing
      // declarations.
      if (HasStackOverflow()) return;
    }

    if (FLAG_trace) {
      frame_->CallRuntime(Runtime::kTraceEnter, 0);
      // Ignore the return value.
    }
    CheckStack();

    // Compile the body of the function in a vanilla state. Don't
    // bother compiling all the code if the scope has an illegal
    // redeclaration.
    if (!scope()->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
      bool is_builtin = Bootstrapper::IsActive();
      bool should_trace =
          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
      if (should_trace) {
        frame_->CallRuntime(Runtime::kDebugTrace, 0);
        // Ignore the return value.
      }
#endif
      VisitStatements(info->function()->body());

      // Handle the return from the function.
      if (has_valid_frame()) {
        // If there is a valid frame, control flow can fall off the end of
        // the body. In that case there is an implicit return statement.
        ASSERT(!function_return_is_shadowed_);
        CodeForReturnPosition(info->function());
        frame_->PrepareForReturn();
        Result undefined(Factory::undefined_value());
        if (function_return_.is_bound()) {
          function_return_.Jump(&undefined);
        } else {
          function_return_.Bind(&undefined);
          GenerateReturnSequence(&undefined);
        }
      } else if (function_return_.is_linked()) {
        // If the return target has dangling jumps to it, then we have not
        // yet generated the return sequence. This can happen when (a)
        // control does not flow off the end of the body so we did not
        // compile an artificial return statement just above, and (b) there
        // are return statements in the body but (c) they are all shadowed.
        Result return_value;
        function_return_.Bind(&return_value);
        GenerateReturnSequence(&return_value);
      }
    }

    // Adjust for function-level loop nesting.
    ASSERT_EQ(loop_nesting_, info->loop_nesting());
    loop_nesting_ = 0;
  }

  // Code generation state must be reset.
  ASSERT(state_ == NULL);
  ASSERT(!function_return_is_shadowed_);
  function_return_.Unuse();
  DeleteFrame();

  // Process any deferred code using the register allocator.
  if (!HasStackOverflow()) {
    HistogramTimerScope deferred_timer(&Counters::deferred_code_generation);
    JumpTarget::set_compiling_deferred_code(true);
    ProcessDeferred();
    JumpTarget::set_compiling_deferred_code(false);
  }

  // There is no need to delete the register allocator, it is a
  // stack-allocated local.
  allocator_ = NULL;
}


void CodeGenerator::GenerateReturnSequence(Result* return_value) {
  // The return value is a live (but not currently reference counted)
  // reference to rax. This is safe because the current frame does not
  // contain a reference to rax (it is prepared for the return by spilling
  // all registers).
  if (FLAG_trace) {
    frame_->Push(return_value);
    *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
  }
  return_value->ToRegister(rax);

  // Add a label for checking the size of the code used for returning.
#ifdef DEBUG
  Label check_exit_codesize;
  masm_->bind(&check_exit_codesize);
#endif

  // Leave the frame and return popping the arguments and the
  // receiver.
  frame_->Exit();
  masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
#ifdef ENABLE_DEBUGGER_SUPPORT
  // Add padding that will be overwritten by a debugger breakpoint.
  // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
  // with length 7 (3 + 1 + 3).
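  // For reference (x64 encoding facts, not from the original source):
  // "movq rsp, rbp" is 3 bytes, "pop rbp" is 1 byte, and "ret k" is
  // 3 bytes (opcode 0xC2 plus a 16-bit immediate), which accounts for
  // the length 7.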
  const int kPadding = Assembler::kJSReturnSequenceLength - 7;
  for (int i = 0; i < kPadding; ++i) {
    masm_->int3();
  }
  // Check that the size of the code used for returning matches what is
  // expected by the debugger.
  ASSERT_EQ(Assembler::kJSReturnSequenceLength,
            masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
  DeleteFrame();
}


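// Debug-only sanity check: each allocatable register's reference count
// in the allocator must agree with whether the virtual frame uses it.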
bool CodeGenerator::HasValidEntryRegisters() {
  return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
      && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
      && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
      && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
      && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
      && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
      && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
      && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
      && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
      && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0))
      && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
}


class DeferredReferenceGetKeyedValue: public DeferredCode {
 public:
  explicit DeferredReferenceGetKeyedValue(Register dst,
                                          Register receiver,
                                          Register key,
                                          bool is_global)
      : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
    set_comment("[ DeferredReferenceGetKeyedValue");
  }

  virtual void Generate();

  Label* patch_site() { return &patch_site_; }

 private:
  Label patch_site_;
  Register dst_;
  Register receiver_;
  Register key_;
  bool is_global_;
};


void DeferredReferenceGetKeyedValue::Generate() {
  __ push(receiver_);  // First IC argument.
  __ push(key_);       // Second IC argument.

  // Calculate the delta from the IC call instruction to the map check
  // movq instruction in the inlined version. This delta is stored in
  // a test(rax, delta) instruction after the call so that we can find
  // it in the IC initialization code and patch the movq instruction.
  // This means that we cannot allow test instructions after calls to
  // KeyedLoadIC stubs in other places.
  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
  RelocInfo::Mode mode = is_global_
      ? RelocInfo::CODE_TARGET_CONTEXT
      : RelocInfo::CODE_TARGET;
  __ Call(ic, mode);
  // The delta from the start of the map-compare instruction to the
  // test instruction. We use masm_-> directly here instead of the __
  // macro because the macro sometimes uses macro expansion to turn
  // into something that can't return a value. This is encountered
  // when doing generated code coverage tests.
  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
  // Here we use masm_-> instead of the __ macro because this is the
  // instruction that gets patched and coverage code gets in the way.
  // TODO(X64): Consider whether it's worth switching the test to a
  // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
  // be generated normally.
  masm_->testl(rax, Immediate(-delta_to_patch_site));
  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);

  if (!dst_.is(rax)) __ movq(dst_, rax);
  __ pop(key_);
  __ pop(receiver_);
}


class DeferredReferenceSetKeyedValue: public DeferredCode {
 public:
  DeferredReferenceSetKeyedValue(Register value,
                                 Register key,
                                 Register receiver)
      : value_(value), key_(key), receiver_(receiver) {
    set_comment("[ DeferredReferenceSetKeyedValue");
  }

  virtual void Generate();

  Label* patch_site() { return &patch_site_; }

 private:
  Register value_;
  Register key_;
  Register receiver_;
  Label patch_site_;
};


void DeferredReferenceSetKeyedValue::Generate() {
  __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
  // Push receiver and key arguments on the stack.
  __ push(receiver_);
  __ push(key_);
  // Move value argument to rax as expected by the IC stub.
  if (!value_.is(rax)) __ movq(rax, value_);
  // Call the IC stub.
  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
  __ Call(ic, RelocInfo::CODE_TARGET);
  // The delta from the start of the map-compare instructions (initial movq)
  // to the test instruction. We use masm_-> directly here instead of the
  // __ macro because the macro sometimes uses macro expansion to turn
  // into something that can't return a value. This is encountered
  // when doing generated code coverage tests.
  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
  // Here we use masm_-> instead of the __ macro because this is the
  // instruction that gets patched and coverage code gets in the way.
  masm_->testl(rax, Immediate(-delta_to_patch_site));
  // Restore value (returned from store IC), key and receiver
  // registers.
  if (!value_.is(rax)) __ movq(value_, rax);
  __ pop(key_);
  __ pop(receiver_);
}


void CodeGenerator::CallApplyLazy(Expression* applicand,
                                  Expression* receiver,
                                  VariableProxy* arguments,
                                  int position) {
  // An optimized implementation of expressions of the form
  // x.apply(y, arguments).
  // If the arguments object of the scope has not been allocated,
  // and x.apply is Function.prototype.apply, this optimization
  // just copies y and the arguments of the current function on the
  // stack, as receiver and arguments, and calls x.
  // In the implementation comments, we call x the applicand
  // and y the receiver.
  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
  ASSERT(arguments->IsArguments());
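  // For example (illustrative): for f.apply(obj, arguments) inside a
  // function invoked as g(1, 2), the optimized path pushes obj, 1, and 2
  // and invokes f directly, never materializing the arguments object.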

  // Load applicand.apply onto the stack. This will usually
  // give us a megamorphic load site. Not super, but it works.
  Load(applicand);
  Handle<String> name = Factory::LookupAsciiSymbol("apply");
  frame()->Push(name);
  Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
  __ nop();
  frame()->Push(&answer);

  // Load the receiver and the existing arguments object onto the
  // expression stack. Avoid allocating the arguments object here.
  Load(receiver);
  LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);

  // Emit the source position information after having loaded the
  // receiver and the arguments.
  CodeForSourcePosition(position);
  // Contents of frame at this point:
  // Frame[0]: arguments object of the current function or the hole.
  // Frame[1]: receiver
  // Frame[2]: applicand.apply
  // Frame[3]: applicand.

  // Check if the arguments object has been lazily allocated
  // already. If so, just use that instead of copying the arguments
  // from the stack. This also deals with cases where a local variable
  // named 'arguments' has been introduced.
  frame_->Dup();
  Result probe = frame_->Pop();
  { VirtualFrame::SpilledScope spilled_scope;
    Label slow, done;
    bool try_lazy = true;
    if (probe.is_constant()) {
      try_lazy = probe.handle()->IsTheHole();
    } else {
      __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
      probe.Unuse();
      __ j(not_equal, &slow);
    }

    if (try_lazy) {
      Label build_args;
      // Get rid of the arguments object probe.
      frame_->Drop();  // Can be called on a spilled frame.
      // Stack now has 3 elements on it.
      // Contents of stack at this point:
      // rsp[0]: receiver
      // rsp[1]: applicand.apply
      // rsp[2]: applicand.

      // Check that the receiver really is a JavaScript object.
      __ movq(rax, Operand(rsp, 0));
      Condition is_smi = masm_->CheckSmi(rax);
      __ j(is_smi, &build_args);
      // We allow all JSObjects including JSFunctions. As long as
      // JS_FUNCTION_TYPE is the last instance type and it is right
      // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
      // bound.
      ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
      ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
      __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
      __ j(below, &build_args);

      // Check that applicand.apply is Function.prototype.apply.
      __ movq(rax, Operand(rsp, kPointerSize));
      is_smi = masm_->CheckSmi(rax);
      __ j(is_smi, &build_args);
      __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
      __ j(not_equal, &build_args);
      __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
      Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
      __ Cmp(FieldOperand(rax, SharedFunctionInfo::kCodeOffset), apply_code);
      __ j(not_equal, &build_args);

      // Check that applicand is a function.
      __ movq(rdi, Operand(rsp, 2 * kPointerSize));
      is_smi = masm_->CheckSmi(rdi);
      __ j(is_smi, &build_args);
      __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
      __ j(not_equal, &build_args);

      // Copy the arguments to this function possibly from the
      // adaptor frame below it.
      Label invoke, adapted;
      __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
      __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
                    Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
      __ j(equal, &adapted);

      // No arguments adaptor frame. Copy fixed number of arguments.
      __ movq(rax, Immediate(scope()->num_parameters()));
      for (int i = 0; i < scope()->num_parameters(); i++) {
        __ push(frame_->ParameterAt(i));
      }
      __ jmp(&invoke);

      // Arguments adaptor frame present. Copy arguments from there, but
      // avoid copying too many arguments to avoid stack overflows.
      __ bind(&adapted);
      static const uint32_t kArgumentsLimit = 1 * KB;
      __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
      __ SmiToInteger32(rax, rax);
      __ movq(rcx, rax);
      __ cmpq(rax, Immediate(kArgumentsLimit));
      __ j(above, &build_args);

      // Loop through the arguments pushing them onto the execution
      // stack. We don't inform the virtual frame of the push, so we don't
      // have to worry about getting rid of the elements from the virtual
      // frame.
      Label loop;
      // rcx is a small non-negative integer, due to the test above.
      __ testl(rcx, rcx);
      __ j(zero, &invoke);
      __ bind(&loop);
      __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
      __ decl(rcx);
      __ j(not_zero, &loop);

      // Invoke the function.
      __ bind(&invoke);
      ParameterCount actual(rax);
      __ InvokeFunction(rdi, actual, CALL_FUNCTION);
      // Drop applicand.apply and applicand from the stack, and push
      // the result of the function call, but leave the spilled frame
      // unchanged, with 3 elements, so it is correct when we compile the
      // rest of this function.
      __ addq(rsp, Immediate(2 * kPointerSize));
      __ push(rax);
      // Stack now has 1 element:
      // rsp[0]: result
      __ jmp(&done);

      // Slow-case: Allocate the arguments object since we know it isn't
      // there, and fall-through to the slow-case where we call
      // applicand.apply.
      __ bind(&build_args);
      // Stack now has 3 elements, because we have jumped here from a
      // point where:
      // rsp[0]: receiver
      // rsp[1]: applicand.apply
      // rsp[2]: applicand.

      // StoreArgumentsObject requires a correct frame, and may modify it.
      Result arguments_object = StoreArgumentsObject(false);
      frame_->SpillAll();
      arguments_object.ToRegister();
      frame_->EmitPush(arguments_object.reg());
      arguments_object.Unuse();
      // Stack and frame now have 4 elements.
      __ bind(&slow);
    }

    // Generic computation of x.apply(y, args) with no special optimization.
    // Flip applicand.apply and applicand on the stack, so
    // applicand looks like the receiver of the applicand.apply call.
    // Then process it as a normal function call.
    __ movq(rax, Operand(rsp, 3 * kPointerSize));
    __ movq(rbx, Operand(rsp, 2 * kPointerSize));
    __ movq(Operand(rsp, 2 * kPointerSize), rax);
    __ movq(Operand(rsp, 3 * kPointerSize), rbx);

    CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
    Result res = frame_->CallStub(&call_function, 3);
    // The function and its two arguments have been dropped.
    frame_->Drop(1);  // Drop the receiver as well.
    res.ToRegister();
    frame_->EmitPush(res.reg());
    // Stack now has 1 element:
    // rsp[0]: result
    if (try_lazy) __ bind(&done);
  }  // End of spilled scope.
  // Restore the context register after a call.
  frame_->RestoreContextRegister();
}


class DeferredStackCheck: public DeferredCode {
 public:
  DeferredStackCheck() {
    set_comment("[ DeferredStackCheck");
  }

  virtual void Generate();
};


void DeferredStackCheck::Generate() {
  StackCheckStub stub;
  __ CallStub(&stub);
}


void CodeGenerator::CheckStack() {
  DeferredStackCheck* deferred = new DeferredStackCheck;
  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
  deferred->Branch(below);
  deferred->BindExit();
}


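// Note: CheckStack above emits just the CompareRoot/Branch pair inline;
// the stub call that handles the stack guard lives out of line in the
// deferred code, so the common case costs one compare and one untaken
// branch.
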
void CodeGenerator::VisitAndSpill(Statement* statement) {
  // TODO(X64): No architecture specific code. Move to shared location.
  ASSERT(in_spilled_code());
  set_in_spilled_code(false);
  Visit(statement);
  if (frame_ != NULL) {
    frame_->SpillAll();
  }
  set_in_spilled_code(true);
}


void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
  ASSERT(in_spilled_code());
  set_in_spilled_code(false);
  VisitStatements(statements);
  if (frame_ != NULL) {
    frame_->SpillAll();
  }
  set_in_spilled_code(true);
}


void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
  ASSERT(!in_spilled_code());
  for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
    Visit(statements->at(i));
  }
}


void CodeGenerator::VisitBlock(Block* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ Block");
  CodeForStatementPosition(node);
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
  VisitStatements(node->statements());
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  node->break_target()->Unuse();
}


void CodeGenerator::VisitDeclaration(Declaration* node) {
  Comment cmnt(masm_, "[ Declaration");
  Variable* var = node->proxy()->var();
  ASSERT(var != NULL);  // must have been resolved
  Slot* slot = var->slot();

  // If it was not possible to allocate the variable at compile time,
  // we need to "declare" it at runtime to make sure it actually
  // exists in the local context.
  if (slot != NULL && slot->type() == Slot::LOOKUP) {
    // Variables with a "LOOKUP" slot were introduced as non-locals
    // during variable resolution and must have mode DYNAMIC.
    ASSERT(var->is_dynamic());
    // For now, just do a runtime call. Sync the virtual frame eagerly
    // so we can simply push the arguments into place.
    frame_->SyncRange(0, frame_->element_count() - 1);
    frame_->EmitPush(rsi);
    __ movq(kScratchRegister, var->name(), RelocInfo::EMBEDDED_OBJECT);
    frame_->EmitPush(kScratchRegister);
    // Declaration nodes are always introduced in one of two modes.
    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
    frame_->EmitPush(Smi::FromInt(attr));
    // Push initial value, if any.
    // Note: For variables we must not push an initial value (such as
    // 'undefined') because we may have a (legal) redeclaration and we
    // must not destroy the current value.
    if (node->mode() == Variable::CONST) {
      frame_->EmitPush(Heap::kTheHoleValueRootIndex);
    } else if (node->fun() != NULL) {
      Load(node->fun());
    } else {
      frame_->EmitPush(Smi::FromInt(0));  // no initial value!
    }
    Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
    // Ignore the return value (declarations are statements).
    return;
  }

  ASSERT(!var->is_global());

  // If we have a function or a constant, we need to initialize the variable.
  Expression* val = NULL;
  if (node->mode() == Variable::CONST) {
    val = new Literal(Factory::the_hole_value());
  } else {
    val = node->fun();  // NULL if we don't have a function
  }

  if (val != NULL) {
    {
      // Set the initial value.
      Reference target(this, node->proxy());
      Load(val);
      target.SetValue(NOT_CONST_INIT);
      // The reference is removed from the stack (preserving TOS) when
      // it goes out of scope.
    }
    // Get rid of the assigned value (declarations are statements).
    frame_->Drop();
  }
}


void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ ExpressionStatement");
  CodeForStatementPosition(node);
  Expression* expression = node->expression();
  expression->MarkAsStatement();
  Load(expression);
  // Remove the lingering expression result from the top of stack.
  frame_->Drop();
}


void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "// EmptyStatement");
  CodeForStatementPosition(node);
  // nothing to do
}


void CodeGenerator::VisitIfStatement(IfStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ IfStatement");
  // Generate different code depending on which parts of the if statement
  // are present or not.
  bool has_then_stm = node->HasThenStatement();
  bool has_else_stm = node->HasElseStatement();

  CodeForStatementPosition(node);
  JumpTarget exit;
  if (has_then_stm && has_else_stm) {
    JumpTarget then;
    JumpTarget else_;
    ControlDestination dest(&then, &else_, true);
    LoadCondition(node->condition(), &dest, true);

    if (dest.false_was_fall_through()) {
      // The else target was bound, so we compile the else part first.
      Visit(node->else_statement());

      // We may have dangling jumps to the then part.
      if (then.is_linked()) {
        if (has_valid_frame()) exit.Jump();
        then.Bind();
        Visit(node->then_statement());
      }
    } else {
      // The then target was bound, so we compile the then part first.
      Visit(node->then_statement());

      if (else_.is_linked()) {
        if (has_valid_frame()) exit.Jump();
        else_.Bind();
        Visit(node->else_statement());
      }
    }
  } else if (has_then_stm) {
    ASSERT(!has_else_stm);
    JumpTarget then;
    ControlDestination dest(&then, &exit, true);
    LoadCondition(node->condition(), &dest, true);

    if (dest.false_was_fall_through()) {
      // The exit label was bound. We may have dangling jumps to the
      // then part.
      if (then.is_linked()) {
        exit.Unuse();
        then.Bind();
        Visit(node->then_statement());
        if (has_valid_frame()) exit.Jump();
      }
    } else {
      // The then label was bound.
      Visit(node->then_statement());
    }
  } else if (has_else_stm) {
    ASSERT(!has_then_stm);
    JumpTarget else_;
    ControlDestination dest(&exit, &else_, false);
    LoadCondition(node->condition(), &dest, true);

    if (dest.true_was_fall_through()) {
      // The exit label was bound. We may have dangling jumps to the
      // else part.
      if (else_.is_linked()) {
        exit.Unuse();
        else_.Bind();
        Visit(node->else_statement());
        if (has_valid_frame()) exit.Jump();
      }
    } else {
      // The else label was bound.
      Visit(node->else_statement());
    }
  } else {
    ASSERT(!has_then_stm && !has_else_stm);
    // We only care about the condition's side effects (not its value
    // or control flow effect). LoadCondition is called without
    // forcing control flow.
    ControlDestination dest(&exit, &exit, true);
    LoadCondition(node->condition(), &dest, false);
    if (!dest.is_used()) {
      // We got a value on the frame rather than (or in addition to)
      // control flow.
      frame_->Drop();
    }
  }

  if (exit.is_linked()) {
    exit.Bind();
  }
}


void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ ContinueStatement");
  CodeForStatementPosition(node);
  node->target()->continue_target()->Jump();
}


void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ BreakStatement");
  CodeForStatementPosition(node);
  node->target()->break_target()->Jump();
}


void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ ReturnStatement");

  CodeForStatementPosition(node);
  Load(node->expression());
  Result return_value = frame_->Pop();
  if (function_return_is_shadowed_) {
    function_return_.Jump(&return_value);
  } else {
    frame_->PrepareForReturn();
    if (function_return_.is_bound()) {
      // If the function return label is already bound we reuse the
      // code by jumping to the return site.
      function_return_.Jump(&return_value);
    } else {
      function_return_.Bind(&return_value);
      GenerateReturnSequence(&return_value);
    }
  }
}


void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ WithEnterStatement");
  CodeForStatementPosition(node);
  Load(node->expression());
  Result context;
  if (node->is_catch_block()) {
    context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
  } else {
    context = frame_->CallRuntime(Runtime::kPushContext, 1);
  }

  // Update context local.
  frame_->SaveContextRegister();

  // Verify that the runtime call result and rsi agree.
  if (FLAG_debug_code) {
    __ cmpq(context.reg(), rsi);
    __ Assert(equal, "Runtime::NewContext should end up in rsi");
  }
}


void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ WithExitStatement");
  CodeForStatementPosition(node);
  // Pop context.
  __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX));
  // Update context local.
  frame_->SaveContextRegister();
}


void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
  // TODO(X64): This code is completely generic and should be moved somewhere
  // where it can be shared between architectures.
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ SwitchStatement");
  CodeForStatementPosition(node);
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);

  // Compile the switch value.
  Load(node->tag());

  ZoneList<CaseClause*>* cases = node->cases();
  int length = cases->length();
  CaseClause* default_clause = NULL;

  JumpTarget next_test;
  // Compile the case label expressions and comparisons. Exit early
  // if a comparison is unconditionally true. The target next_test is
  // bound before the loop in order to indicate control flow to the
  // first comparison.
  next_test.Bind();
  for (int i = 0; i < length && !next_test.is_unused(); i++) {
    CaseClause* clause = cases->at(i);
    // The default is not a test, but remember it for later.
    if (clause->is_default()) {
      default_clause = clause;
      continue;
    }

    Comment cmnt(masm_, "[ Case comparison");
    // We recycle the same target next_test for each test. Bind it if
    // the previous test has not done so and then unuse it for the
    // loop.
    if (next_test.is_linked()) {
      next_test.Bind();
    }
    next_test.Unuse();

    // Duplicate the switch value.
    frame_->Dup();

    // Compile the label expression.
    Load(clause->label());

    // Compare and branch to the body if true or the next test if
    // false. Prefer the next test as a fall through.
    ControlDestination dest(clause->body_target(), &next_test, false);
    Comparison(node, equal, true, &dest);

    // If the comparison fell through to the true target, jump to the
    // body.
    if (dest.true_was_fall_through()) {
      clause->body_target()->Unuse();
      clause->body_target()->Jump();
    }
  }

  // If there was control flow to a next test from the last one
  // compiled, compile a jump to the default or break target.
  if (!next_test.is_unused()) {
    if (next_test.is_linked()) {
      next_test.Bind();
    }
    // Drop the switch value.
    frame_->Drop();
    if (default_clause != NULL) {
      default_clause->body_target()->Jump();
    } else {
      node->break_target()->Jump();
    }
  }

  // The last instruction emitted was a jump, either to the default
  // clause or the break target, or else to a case body from the loop
  // that compiles the tests.
  ASSERT(!has_valid_frame());
  // Compile case bodies as needed.
  for (int i = 0; i < length; i++) {
    CaseClause* clause = cases->at(i);

    // There are two ways to reach the body: from the corresponding
    // test or as the fall through of the previous body.
    if (clause->body_target()->is_linked() || has_valid_frame()) {
      if (clause->body_target()->is_linked()) {
        if (has_valid_frame()) {
          // If we have both a jump to the test and a fall through, put
          // a jump on the fall through path to avoid the dropping of
          // the switch value on the test path. The exception is the
          // default which has already had the switch value dropped.
          if (clause->is_default()) {
            clause->body_target()->Bind();
          } else {
            JumpTarget body;
            body.Jump();
            clause->body_target()->Bind();
            frame_->Drop();
            body.Bind();
          }
        } else {
          // No fall through to worry about.
          clause->body_target()->Bind();
          if (!clause->is_default()) {
            frame_->Drop();
          }
        }
      } else {
        // Otherwise, we have only fall through.
        ASSERT(has_valid_frame());
      }

      // We are now prepared to compile the body.
      Comment cmnt(masm_, "[ Case body");
      VisitStatements(clause->statements());
    }
    clause->body_target()->Unuse();
  }

  // We may not have a valid frame here so bind the break target only
  // if needed.
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  node->break_target()->Unuse();
}


void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ DoWhileStatement");
  CodeForStatementPosition(node);
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
  JumpTarget body(JumpTarget::BIDIRECTIONAL);
  IncrementLoopNesting();

  ConditionAnalysis info = AnalyzeCondition(node->cond());
  // Label the top of the loop for the backward jump if necessary.
  switch (info) {
    case ALWAYS_TRUE:
      // Use the continue target.
      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
      node->continue_target()->Bind();
      break;
    case ALWAYS_FALSE:
      // No need to label it.
      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
      break;
    case DONT_KNOW:
      // Continue is the test, so use the backward body target.
      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
      body.Bind();
      break;
  }

  CheckStack();  // TODO(1222600): ignore if body contains calls.
  Visit(node->body());

  // Compile the test.
  switch (info) {
    case ALWAYS_TRUE:
      // If control flow can fall off the end of the body, jump back
      // to the top and bind the break target at the exit.
      if (has_valid_frame()) {
        node->continue_target()->Jump();
      }
      if (node->break_target()->is_linked()) {
        node->break_target()->Bind();
      }
      break;
    case ALWAYS_FALSE:
      // We may have had continues or breaks in the body.
      if (node->continue_target()->is_linked()) {
        node->continue_target()->Bind();
      }
      if (node->break_target()->is_linked()) {
        node->break_target()->Bind();
      }
      break;
    case DONT_KNOW:
      // We have to compile the test expression if it can be reached by
      // control flow falling out of the body or via continue.
      if (node->continue_target()->is_linked()) {
        node->continue_target()->Bind();
      }
      if (has_valid_frame()) {
        Comment cmnt(masm_, "[ DoWhileCondition");
        CodeForDoWhileConditionPosition(node);
        ControlDestination dest(&body, node->break_target(), false);
        LoadCondition(node->cond(), &dest, true);
      }
      if (node->break_target()->is_linked()) {
        node->break_target()->Bind();
      }
      break;
  }

  DecrementLoopNesting();
  node->continue_target()->Unuse();
  node->break_target()->Unuse();
}


void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ WhileStatement");
  CodeForStatementPosition(node);

  // If the condition is always false and has no side effects, we do not
  // need to compile anything.
  ConditionAnalysis info = AnalyzeCondition(node->cond());
  if (info == ALWAYS_FALSE) return;

  // Do not duplicate conditions that may have function literal
  // subexpressions. This can cause us to compile the function literal
  // twice.
  bool test_at_bottom = !node->may_have_function_literal();
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
  IncrementLoopNesting();
  JumpTarget body;
  if (test_at_bottom) {
    body.set_direction(JumpTarget::BIDIRECTIONAL);
  }

  // Based on the condition analysis, compile the test as necessary.
  switch (info) {
    case ALWAYS_TRUE:
      // We will not compile the test expression. Label the top of the
      // loop with the continue target.
      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
      node->continue_target()->Bind();
      break;
    case DONT_KNOW: {
      if (test_at_bottom) {
        // Continue is the test at the bottom, no need to label the test
        // at the top. The body is a backward target.
        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
      } else {
        // Label the test at the top as the continue target. The body
        // is a forward-only target.
        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
        node->continue_target()->Bind();
      }
      // Compile the test with the body as the true target and preferred
      // fall-through and with the break target as the false target.
      ControlDestination dest(&body, node->break_target(), true);
      LoadCondition(node->cond(), &dest, true);

      if (dest.false_was_fall_through()) {
        // If we got the break target as fall-through, the test may have
        // been unconditionally false (if there are no jumps to the
        // body).
        if (!body.is_linked()) {
          DecrementLoopNesting();
          return;
        }

        // Otherwise, jump around the body on the fall through and then
        // bind the body target.
        node->break_target()->Unuse();
        node->break_target()->Jump();
        body.Bind();
      }
      break;
    }
    case ALWAYS_FALSE:
      UNREACHABLE();
      break;
  }

  CheckStack();  // TODO(1222600): ignore if body contains calls.
  Visit(node->body());

  // Based on the condition analysis, compile the backward jump as
  // necessary.
  switch (info) {
    case ALWAYS_TRUE:
      // The loop body has been labeled with the continue target.
      if (has_valid_frame()) {
        node->continue_target()->Jump();
      }
      break;
    case DONT_KNOW:
      if (test_at_bottom) {
        // If we have chosen to recompile the test at the bottom,
        // then it is the continue target.
        if (node->continue_target()->is_linked()) {
          node->continue_target()->Bind();
        }
        if (has_valid_frame()) {
          // The break target is the fall-through (body is a backward
          // jump from here and thus an invalid fall-through).
          ControlDestination dest(&body, node->break_target(), false);
          LoadCondition(node->cond(), &dest, true);
        }
      } else {
        // If we have chosen not to recompile the test at the
        // bottom, jump back to the one at the top.
        if (has_valid_frame()) {
          node->continue_target()->Jump();
        }
      }
      break;
    case ALWAYS_FALSE:
      UNREACHABLE();
      break;
  }

  // The break target may be already bound (by the condition), or there
  // may not be a valid frame. Bind it only if needed.
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  DecrementLoopNesting();
}


void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) {
  ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER);
  if (slot->type() == Slot::LOCAL) {
    frame_->SetTypeForLocalAt(slot->index(), info);
  } else {
    frame_->SetTypeForParamAt(slot->index(), info);
  }
  if (FLAG_debug_code && info.IsSmi()) {
    if (slot->type() == Slot::LOCAL) {
      frame_->PushLocalAt(slot->index());
    } else {
      frame_->PushParameterAt(slot->index());
    }
    Result var = frame_->Pop();
    var.ToRegister();
    __ AbortIfNotSmi(var.reg(), "Non-smi value in smi-typed stack slot.");
  }
}


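// VisitForStatement below uses SetTypeForStackSlot to pin the loop
// variable's type to smi for fast smi loops: before the body, before the
// update expression, and before the bottom test.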
void CodeGenerator::VisitForStatement(ForStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ ForStatement");
  CodeForStatementPosition(node);

  // Compile the init expression if present.
  if (node->init() != NULL) {
    Visit(node->init());
  }

  // If the condition is always false and has no side effects, we do not
  // need to compile anything else.
  ConditionAnalysis info = AnalyzeCondition(node->cond());
  if (info == ALWAYS_FALSE) return;

  // Do not duplicate conditions that may have function literal
  // subexpressions. This can cause us to compile the function literal
  // twice.
  bool test_at_bottom = !node->may_have_function_literal();
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
  IncrementLoopNesting();

  // Target for backward edge if no test at the bottom, otherwise
  // unused.
  JumpTarget loop(JumpTarget::BIDIRECTIONAL);

  // Target for backward edge if there is a test at the bottom,
  // otherwise used as target for test at the top.
  JumpTarget body;
  if (test_at_bottom) {
    body.set_direction(JumpTarget::BIDIRECTIONAL);
  }

  // Based on the condition analysis, compile the test as necessary.
  switch (info) {
    case ALWAYS_TRUE:
      // We will not compile the test expression. Label the top of the
      // loop.
      if (node->next() == NULL) {
        // Use the continue target if there is no update expression.
        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
        node->continue_target()->Bind();
      } else {
        // Otherwise use the backward loop target.
        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
        loop.Bind();
      }
      break;
    case DONT_KNOW: {
      if (test_at_bottom) {
        // Continue is either the update expression or the test at the
        // bottom, no need to label the test at the top.
        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
      } else if (node->next() == NULL) {
        // We are not recompiling the test at the bottom and there is no
        // update expression.
        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
        node->continue_target()->Bind();
      } else {
        // We are not recompiling the test at the bottom and there is an
        // update expression.
        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
        loop.Bind();
      }

      // Compile the test with the body as the true target and preferred
      // fall-through and with the break target as the false target.
      ControlDestination dest(&body, node->break_target(), true);
      LoadCondition(node->cond(), &dest, true);

      if (dest.false_was_fall_through()) {
        // If we got the break target as fall-through, the test may have
        // been unconditionally false (if there are no jumps to the
        // body).
        if (!body.is_linked()) {
          DecrementLoopNesting();
          return;
        }

        // Otherwise, jump around the body on the fall through and then
        // bind the body target.
        node->break_target()->Unuse();
        node->break_target()->Jump();
        body.Bind();
      }
      break;
    }
    case ALWAYS_FALSE:
      UNREACHABLE();
      break;
  }

  CheckStack();  // TODO(1222600): ignore if body contains calls.

  // We know that the loop index is a smi if it is not modified in the
  // loop body and it is checked against a constant limit in the loop
  // condition. In this case, we reset the static type information of the
  // loop index to smi before compiling the body, the update expression, and
  // the bottom check of the loop condition.
  if (node->is_fast_smi_loop()) {
    // Set number type of the loop variable to smi.
    SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi());
  }

  Visit(node->body());

  // If there is an update expression, compile it if necessary.
  if (node->next() != NULL) {
    if (node->continue_target()->is_linked()) {
      node->continue_target()->Bind();
    }

    // Control can reach the update by falling out of the body or by a
    // continue.
    if (has_valid_frame()) {
      // Record the source position of the statement as this code which
      // is after the code for the body actually belongs to the loop
      // statement and not the body.
      CodeForStatementPosition(node);
      Visit(node->next());
    }
  }

  // Set the type of the loop variable to smi before compiling the test
  // expression if we are in a fast smi loop condition.
  if (node->is_fast_smi_loop() && has_valid_frame()) {
    // Set number type of the loop variable to smi.
    SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi());
  }

  // Based on the condition analysis, compile the backward jump as
  // necessary.
  switch (info) {
    case ALWAYS_TRUE:
      if (has_valid_frame()) {
        if (node->next() == NULL) {
          node->continue_target()->Jump();
        } else {
          loop.Jump();
        }
      }
      break;
    case DONT_KNOW:
      if (test_at_bottom) {
        if (node->continue_target()->is_linked()) {
          // We can have dangling jumps to the continue target if there
          // was no update expression.
          node->continue_target()->Bind();
        }
        // Control can reach the test at the bottom by falling out of
        // the body, by a continue in the body, or from the update
        // expression.
        if (has_valid_frame()) {
          // The break target is the fall-through (body is a backward
          // jump from here).
          ControlDestination dest(&body, node->break_target(), false);
          LoadCondition(node->cond(), &dest, true);
        }
      } else {
        // Otherwise, jump back to the test at the top.
        if (has_valid_frame()) {
          if (node->next() == NULL) {
            node->continue_target()->Jump();
          } else {
            loop.Jump();
          }
        }
      }
      break;
    case ALWAYS_FALSE:
      UNREACHABLE();
      break;
  }

  // The break target may be already bound (by the condition), or there
  // may not be a valid frame. Bind it only if needed.
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  DecrementLoopNesting();
}


void CodeGenerator::VisitForInStatement(ForInStatement* node) {
  ASSERT(!in_spilled_code());
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ ForInStatement");
  CodeForStatementPosition(node);

  JumpTarget primitive;
  JumpTarget jsobject;
  JumpTarget fixed_array;
  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
  JumpTarget end_del_check;
  JumpTarget exit;

  // Get the object to enumerate over (converted to JSObject).
  LoadAndSpill(node->enumerable());

  // Both SpiderMonkey and kjs ignore null and undefined in contrast
  // to the specification. 12.6.4 mandates a call to ToObject.
  frame_->EmitPop(rax);

  // rax: value to be iterated over
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  exit.Branch(equal);
  __ CompareRoot(rax, Heap::kNullValueRootIndex);
  exit.Branch(equal);

  // Stack layout in body:
  // [iteration counter (smi)] <- slot 0
  // [length of array] <- slot 1
  // [FixedArray] <- slot 2
  // [Map or 0] <- slot 3
  // [Object] <- slot 4
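  //
  // The loop below keeps the iteration counter in slot 0: each pass
  // compares it against the length in slot 1, fetches the i'th key from
  // the cache in slot 2, and increments the counter at the continue
  // target.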
1743 // Check if enumerable is already a JSObject
1744 // rax: value to be iterated over
1745 Condition is_smi = masm_->CheckSmi(rax);
1746 primitive.Branch(is_smi);
1747 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
1748 jsobject.Branch(above_equal);
1751 frame_->EmitPush(rax);
1752 frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
1753 // function call returns the value in rax, which is where we want it below
1756 // Get the set of properties (as a FixedArray or Map).
1757 // rax: value to be iterated over
1758 frame_->EmitPush(rax); // Push the object being iterated over.
1761 // Check cache validity in generated code. This is a fast case for
1762 // the JSObject::IsSimpleEnum cache validity checks. If we cannot
1763 // guarantee cache validity, call the runtime system to check cache
1764 // validity or get the property names in a fixed array.
1765 JumpTarget call_runtime;
1766 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
1767 JumpTarget check_prototype;
1768 JumpTarget use_cache;
1771 // Check that there are no elements.
1772 __ movq(rdx, FieldOperand(rcx, JSObject::kElementsOffset));
1773 __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
1774 call_runtime.Branch(not_equal);
1775 // Check that instance descriptors are not empty so that we can
1776 // check for an enum cache. Leave the map in ebx for the subsequent
1778 __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
1779 __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
1780 __ CompareRoot(rdx, Heap::kEmptyDescriptorArrayRootIndex);
1781 call_runtime.Branch(equal);
1782 // Check that there in an enum cache in the non-empty instance
1783 // descriptors. This is the case if the next enumeration index
1784 // field does not contain a smi.
1785 __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
1786 is_smi = masm_->CheckSmi(rdx);
1787 call_runtime.Branch(is_smi);
1788 // For all objects but the receiver, check that the cache is empty.
1790 check_prototype.Branch(equal);
1791 __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
1792 __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
1793 call_runtime.Branch(not_equal);
1794 check_prototype.Bind();
1795 // Load the prototype from the map and loop if non-null.
1796 __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
1797 __ CompareRoot(rcx, Heap::kNullValueRootIndex);
1798 loop.Branch(not_equal);
1799 // The enum cache is valid. Load the map of the object being
1800 // iterated over and use the cache for the iteration.
1801 __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
1804 call_runtime.Bind();
1805 // Call the runtime to get the property names for the object.
1806 frame_->EmitPush(rax); // push the Object (slot 4) for the runtime call
1807 frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
1809 // If we got a Map, we can do a fast modification check.
1810 // Otherwise, we got a FixedArray, and we have to do a slow check.
1811 // rax: map or fixed array (result from call to
1812 // Runtime::kGetPropertyNamesFast)
1814 __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
1815 __ CompareRoot(rcx, Heap::kMetaMapRootIndex);
1816 fixed_array.Branch(not_equal);
1820 // rax: map (either the result from a call to
1821 // Runtime::kGetPropertyNamesFast or has been fetched directly from
1824 __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
1825 // Get the bridge array held in the enumeration index field.
1826 __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
1827 // Get the cache from the bridge array.
1828 __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
1830 frame_->EmitPush(rax); // <- slot 3
1831 frame_->EmitPush(rdx); // <- slot 2
1832 __ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
1833 __ Integer32ToSmi(rax, rax);
1834 frame_->EmitPush(rax); // <- slot 1
1835 frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
1836 entry.Jump();
1838 fixed_array.Bind();
1839 // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
1840 frame_->EmitPush(Smi::FromInt(0)); // <- slot 3
1841 frame_->EmitPush(rax); // <- slot 2
1843 // Push the length of the array and the initial index onto the stack.
1844 __ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
1845 __ Integer32ToSmi(rax, rax);
1846 frame_->EmitPush(rax); // <- slot 1
1847 frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
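// NOTE (added commentary): both the cache case above and this slow case
// leave the same four words of loop state on top of the pushed object:
// slot 3 holds the expected map (or a smi zero in the permanent slow
// case), slot 2 the enum cache or fixed array of names, slot 1 the
// length as a smi, and slot 0 the current index as a smi.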
1849 // Condition.
1850 entry.Bind();
1851 // Grab the current frame's height for the break and continue
1852 // targets only after all the state is pushed on the frame.
1853 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1854 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1856 __ movq(rax, frame_->ElementAt(0)); // load the current count
1857 __ SmiCompare(frame_->ElementAt(1), rax); // compare to the array length
1858 node->break_target()->Branch(below_equal);
1860 // Get the i'th entry of the array.
1861 __ movq(rdx, frame_->ElementAt(2));
1862 SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
1863 __ movq(rbx,
1864 FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));
1866 // Get the expected map from the stack or a zero map in the
1867 // permanent slow case. rax: current iteration count,
1868 // rbx: i'th entry of the enum cache.
1869 __ movq(rdx, frame_->ElementAt(3));
1870 // Check if the expected map still matches that of the enumerable.
1871 // If not, we have to filter the key.
1872 // rax: current iteration count
1873 // rbx: i'th entry of the enum cache
1874 // rdx: expected map value
1875 __ movq(rcx, frame_->ElementAt(4));
1876 __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
1877 __ cmpq(rcx, rdx);
1878 end_del_check.Branch(equal);
1880 // Convert the entry to a string (or null if it isn't a property anymore).
1881 frame_->EmitPush(frame_->ElementAt(4)); // push enumerable
1882 frame_->EmitPush(rbx); // push entry
1883 frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
1884 __ movq(rbx, rax);
1886 // If the property has been removed while iterating, we just skip it.
1887 __ CompareRoot(rbx, Heap::kNullValueRootIndex);
1888 node->continue_target()->Branch(equal);
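// JavaScript example (illustrative): in
// for (var p in o) { delete o.y; }
// a property deleted while iterating is filtered to null above and is
// skipped here instead of being handed to the loop body.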
1890 end_del_check.Bind();
1891 // Store the entry in the 'each' expression and take another spin in the
1892 // loop. rbx: i'th entry of the enum cache (or string thereof).
1893 frame_->EmitPush(rbx);
1894 { Reference each(this, node->each());
1895 // Loading a reference may leave the frame in an unspilled state.
1896 frame_->SpillAll();
1897 if (!each.is_illegal()) {
1898 if (each.size() > 0) {
1899 frame_->EmitPush(frame_->ElementAt(each.size()));
1900 each.SetValue(NOT_CONST_INIT);
1901 frame_->Drop(2); // Drop the original and the copy of the element.
1902 } else {
1903 // If the reference has size zero then we can use the value below
1904 // the reference as if it were above the reference, instead of pushing
1905 // a new copy of it above the reference.
1906 each.SetValue(NOT_CONST_INIT);
1907 frame_->Drop(); // Drop the original of the element.
1908 }
1909 }
1910 }
1911 // Unloading a reference may leave the frame in an unspilled state.
1912 frame_->SpillAll();
1914 // Body.
1915 CheckStack(); // TODO(1222600): ignore if body contains calls.
1916 VisitAndSpill(node->body());
1918 // Next. Reestablish a spilled frame in case we are coming here via
1919 // a continue in the body.
1920 node->continue_target()->Bind();
1921 frame_->SpillAll();
1922 frame_->EmitPop(rax);
1923 __ SmiAddConstant(rax, rax, Smi::FromInt(1));
1924 frame_->EmitPush(rax);
1925 entry.Jump();
1927 // Cleanup. No need to spill because VirtualFrame::Drop is safe for
1928 // any frame.
1929 node->break_target()->Bind();
1930 frame_->Drop(5);
1932 // Exit.
1933 exit.Bind();
1935 node->continue_target()->Unuse();
1936 node->break_target()->Unuse();
1937 }
1939 void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
1940 ASSERT(!in_spilled_code());
1941 VirtualFrame::SpilledScope spilled_scope;
1942 Comment cmnt(masm_, "[ TryCatchStatement");
1943 CodeForStatementPosition(node);
1945 JumpTarget try_block;
1946 JumpTarget exit;
1948 try_block.Jump();
1949 // --- Catch block ---
1950 frame_->EmitPush(rax);
1952 // Store the caught exception in the catch variable.
1953 Variable* catch_var = node->catch_var()->var();
1954 ASSERT(catch_var != NULL && catch_var->slot() != NULL);
1955 StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
1957 // Remove the exception from the stack.
1958 frame_->Drop();
1960 VisitStatementsAndSpill(node->catch_block()->statements());
1961 if (has_valid_frame()) {
1962 exit.Jump();
1963 }
1966 // --- Try block ---
1967 try_block.Bind();
1969 frame_->PushTryHandler(TRY_CATCH_HANDLER);
1970 int handler_height = frame_->height();
1972 // Shadow the jump targets for all escapes from the try block, including
1973 // returns. During shadowing, the original target is hidden as the
1974 // ShadowTarget and operations on the original actually affect the
1975 // shadowing target.
1977 // We should probably try to unify the escaping targets and the return
1978 // target.
1979 int nof_escapes = node->escaping_targets()->length();
1980 List<ShadowTarget*> shadows(1 + nof_escapes);
1982 // Add the shadow target for the function return.
1983 static const int kReturnShadowIndex = 0;
1984 shadows.Add(new ShadowTarget(&function_return_));
1985 bool function_return_was_shadowed = function_return_is_shadowed_;
1986 function_return_is_shadowed_ = true;
1987 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
1989 // Add the remaining shadow targets.
1990 for (int i = 0; i < nof_escapes; i++) {
1991 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
1992 }
1994 // Generate code for the statements in the try block.
1995 VisitStatementsAndSpill(node->try_block()->statements());
1997 // Stop the introduced shadowing and count the number of required unlinks.
1998 // After shadowing stops, the original targets are unshadowed and the
1999 // ShadowTargets represent the formerly shadowing targets.
2000 bool has_unlinks = false;
2001 for (int i = 0; i < shadows.length(); i++) {
2002 shadows[i]->StopShadowing();
2003 has_unlinks = has_unlinks || shadows[i]->is_linked();
2004 }
2005 function_return_is_shadowed_ = function_return_was_shadowed;
2007 // Get an external reference to the handler address.
2008 ExternalReference handler_address(Top::k_handler_address);
2010 // Make sure that there's nothing left on the stack above the
2011 // handler structure.
2012 if (FLAG_debug_code) {
2013 __ movq(kScratchRegister, handler_address);
2014 __ cmpq(rsp, Operand(kScratchRegister, 0));
2015 __ Assert(equal, "stack pointer should point to top handler");
2016 }
2018 // If we can fall off the end of the try block, unlink from try chain.
2019 if (has_valid_frame()) {
2020 // The next handler address is on top of the frame. Unlink from
2021 // the handler list and drop the rest of this handler from the
2022 // frame.
2023 ASSERT(StackHandlerConstants::kNextOffset == 0);
2024 __ movq(kScratchRegister, handler_address);
2025 frame_->EmitPop(Operand(kScratchRegister, 0));
2026 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2027 if (has_unlinks) {
2028 exit.Jump();
2029 }
2030 }
2032 // Generate unlink code for the (formerly) shadowing targets that
2033 // have been jumped to. Deallocate each shadow target.
2034 Result return_value;
2035 for (int i = 0; i < shadows.length(); i++) {
2036 if (shadows[i]->is_linked()) {
2037 // Unlink from try chain; be careful not to destroy the TOS if
2038 // there is one.
2039 if (i == kReturnShadowIndex) {
2040 shadows[i]->Bind(&return_value);
2041 return_value.ToRegister(rax);
2042 } else {
2043 shadows[i]->Bind();
2044 }
2045 // Because we can be jumping here (to spilled code) from
2046 // unspilled code, we need to reestablish a spilled frame at
2047 // this block.
2048 frame_->SpillAll();
2050 // Reload sp from the top handler, because some statements that we
2051 // break from (e.g., for...in) may have left stuff on the stack.
2052 __ movq(kScratchRegister, handler_address);
2053 __ movq(rsp, Operand(kScratchRegister, 0));
2054 frame_->Forget(frame_->height() - handler_height);
2056 ASSERT(StackHandlerConstants::kNextOffset == 0);
2057 __ movq(kScratchRegister, handler_address);
2058 frame_->EmitPop(Operand(kScratchRegister, 0));
2059 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2061 if (i == kReturnShadowIndex) {
2062 if (!function_return_is_shadowed_) frame_->PrepareForReturn();
2063 shadows[i]->other_target()->Jump(&return_value);
2064 } else {
2065 shadows[i]->other_target()->Jump();
2066 }
2067 }
2068 }
2070 exit.Bind();
2071 }
2074 void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
2075 ASSERT(!in_spilled_code());
2076 VirtualFrame::SpilledScope spilled_scope;
2077 Comment cmnt(masm_, "[ TryFinallyStatement");
2078 CodeForStatementPosition(node);
2080 // State: Used to keep track of reason for entering the finally
2081 // block. Should probably be extended to hold information for
2082 // break/continue from within the try block.
2083 enum { FALLING, THROWING, JUMPING };
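// NOTE (added commentary): rcx carries this state into the finally
// block: FALLING for normal fall-through off the try block, THROWING
// when an exception was caught, and JUMPING + i for the i'th shadowed
// jump target, so the state also encodes which target to resume after
// the finally code has run.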
2085 JumpTarget try_block;
2086 JumpTarget finally_block;
2088 try_block.Jump();
2090 frame_->EmitPush(rax);
2091 // In case of thrown exceptions, this is where we continue.
2092 __ Move(rcx, Smi::FromInt(THROWING));
2093 finally_block.Jump();
2095 // --- Try block ---
2096 try_block.Bind();
2098 frame_->PushTryHandler(TRY_FINALLY_HANDLER);
2099 int handler_height = frame_->height();
2101 // Shadow the jump targets for all escapes from the try block, including
2102 // returns. During shadowing, the original target is hidden as the
2103 // ShadowTarget and operations on the original actually affect the
2104 // shadowing target.
2106 // We should probably try to unify the escaping targets and the return
2107 // target.
2108 int nof_escapes = node->escaping_targets()->length();
2109 List<ShadowTarget*> shadows(1 + nof_escapes);
2111 // Add the shadow target for the function return.
2112 static const int kReturnShadowIndex = 0;
2113 shadows.Add(new ShadowTarget(&function_return_));
2114 bool function_return_was_shadowed = function_return_is_shadowed_;
2115 function_return_is_shadowed_ = true;
2116 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2118 // Add the remaining shadow targets.
2119 for (int i = 0; i < nof_escapes; i++) {
2120 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2121 }
2123 // Generate code for the statements in the try block.
2124 VisitStatementsAndSpill(node->try_block()->statements());
2126 // Stop the introduced shadowing and count the number of required unlinks.
2127 // After shadowing stops, the original targets are unshadowed and the
2128 // ShadowTargets represent the formerly shadowing targets.
2129 int nof_unlinks = 0;
2130 for (int i = 0; i < shadows.length(); i++) {
2131 shadows[i]->StopShadowing();
2132 if (shadows[i]->is_linked()) nof_unlinks++;
2133 }
2134 function_return_is_shadowed_ = function_return_was_shadowed;
2136 // Get an external reference to the handler address.
2137 ExternalReference handler_address(Top::k_handler_address);
2139 // If we can fall off the end of the try block, unlink from the try
2140 // chain and set the state on the frame to FALLING.
2141 if (has_valid_frame()) {
2142 // The next handler address is on top of the frame.
2143 ASSERT(StackHandlerConstants::kNextOffset == 0);
2144 __ movq(kScratchRegister, handler_address);
2145 frame_->EmitPop(Operand(kScratchRegister, 0));
2146 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2148 // Fake a top of stack value (unneeded when FALLING) and set the
2149 // state in rcx, then jump around the unlink blocks if any.
2150 frame_->EmitPush(Heap::kUndefinedValueRootIndex);
2151 __ Move(rcx, Smi::FromInt(FALLING));
2152 if (nof_unlinks > 0) {
2153 finally_block.Jump();
2154 }
2155 }
2157 // Generate code to unlink and set the state for the (formerly)
2158 // shadowing targets that have been jumped to.
2159 for (int i = 0; i < shadows.length(); i++) {
2160 if (shadows[i]->is_linked()) {
2161 // If we have come from the shadowed return, the return value is
2162 // on the virtual frame. We must preserve it until it is
2163 // pushed.
2164 if (i == kReturnShadowIndex) {
2165 Result return_value;
2166 shadows[i]->Bind(&return_value);
2167 return_value.ToRegister(rax);
2168 } else {
2169 shadows[i]->Bind();
2170 }
2171 // Because we can be jumping here (to spilled code) from
2172 // unspilled code, we need to reestablish a spilled frame at
2173 // this block.
2174 frame_->SpillAll();
2176 // Reload sp from the top handler, because some statements that
2177 // we break from (e.g., for...in) may have left stuff on the
2178 // stack.
2179 __ movq(kScratchRegister, handler_address);
2180 __ movq(rsp, Operand(kScratchRegister, 0));
2181 frame_->Forget(frame_->height() - handler_height);
2183 // Unlink this handler and drop it from the frame.
2184 ASSERT(StackHandlerConstants::kNextOffset == 0);
2185 __ movq(kScratchRegister, handler_address);
2186 frame_->EmitPop(Operand(kScratchRegister, 0));
2187 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2189 if (i == kReturnShadowIndex) {
2190 // If this target shadowed the function return, materialize
2191 // the return value on the stack.
2192 frame_->EmitPush(rax);
2193 } else {
2194 // Fake TOS for targets that shadowed breaks and continues.
2195 frame_->EmitPush(Heap::kUndefinedValueRootIndex);
2196 }
2197 __ Move(rcx, Smi::FromInt(JUMPING + i));
2198 if (--nof_unlinks > 0) {
2199 // If this is not the last unlink block, jump around the next.
2200 finally_block.Jump();
2201 }
2202 }
2203 }
2205 // --- Finally block ---
2206 finally_block.Bind();
2208 // Push the state on the stack.
2209 frame_->EmitPush(rcx);
2211 // We keep two elements on the stack - the (possibly faked) result
2212 // and the state - while evaluating the finally block.
2214 // Generate code for the statements in the finally block.
2215 VisitStatementsAndSpill(node->finally_block()->statements());
2217 if (has_valid_frame()) {
2218 // Restore state and return value or faked TOS.
2219 frame_->EmitPop(rcx);
2220 frame_->EmitPop(rax);
2221 }
2223 // Generate code to jump to the right destination for all used
2224 // formerly shadowing targets. Deallocate each shadow target.
2225 for (int i = 0; i < shadows.length(); i++) {
2226 if (has_valid_frame() && shadows[i]->is_bound()) {
2227 BreakTarget* original = shadows[i]->other_target();
2228 __ SmiCompare(rcx, Smi::FromInt(JUMPING + i));
2229 if (i == kReturnShadowIndex) {
2230 // The return value is (already) in rax.
2231 Result return_value = allocator_->Allocate(rax);
2232 ASSERT(return_value.is_valid());
2233 if (function_return_is_shadowed_) {
2234 original->Branch(equal, &return_value);
2235 } else {
2236 // Branch around the preparation for return which may emit
2237 // code.
2238 JumpTarget skip;
2239 skip.Branch(not_equal);
2240 frame_->PrepareForReturn();
2241 original->Jump(&return_value);
2242 skip.Bind();
2243 }
2244 } else {
2245 original->Branch(equal);
2246 }
2247 }
2248 }
2250 if (has_valid_frame()) {
2251 // Check if we need to rethrow the exception.
2252 JumpTarget exit;
2253 __ SmiCompare(rcx, Smi::FromInt(THROWING));
2254 exit.Branch(not_equal);
2256 // Rethrow exception.
2257 frame_->EmitPush(rax); // undo pop from above
2258 frame_->CallRuntime(Runtime::kReThrow, 1);
2260 // Done.
2261 exit.Bind();
2262 }
2263 }
2266 void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
2267 ASSERT(!in_spilled_code());
2268 Comment cmnt(masm_, "[ DebuggerStatement");
2269 CodeForStatementPosition(node);
2270 #ifdef ENABLE_DEBUGGER_SUPPORT
2271 // Spill everything, even constants, to the frame.
2272 frame_->SpillAll();
2274 frame_->DebugBreak();
2275 // Ignore the return value.
2276 #endif
2277 }
2280 void CodeGenerator::InstantiateFunction(
2281 Handle<SharedFunctionInfo> function_info) {
2282 // The inevitable call will sync frame elements to memory anyway, so
2283 // we do it eagerly to allow us to push the arguments directly into
2284 // place.
2285 frame_->SyncRange(0, frame_->element_count() - 1);
2287 // Use the fast case closure allocation code that allocates in new
2288 // space for nested functions that don't need literals cloning.
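// Illustrative example (assumption about literal counts): a nested
// function like 'function(x) { return x + 1; }' has no materialized
// literals and can be allocated by the stub, while one containing a
// literal such as 'function() { return [1]; }' takes the runtime path.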
2289 if (scope()->is_function_scope() && function_info->num_literals() == 0) {
2290 FastNewClosureStub stub;
2291 frame_->Push(function_info);
2292 Result answer = frame_->CallStub(&stub, 1);
2293 frame_->Push(&answer);
2294 } else {
2295 // Call the runtime to instantiate the function based on the
2296 // shared function info.
2297 frame_->EmitPush(rsi);
2298 frame_->EmitPush(function_info);
2299 Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
2300 frame_->Push(&result);
2301 }
2302 }
2305 void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
2306 Comment cmnt(masm_, "[ FunctionLiteral");
2308 // Build the function info and instantiate it.
2309 Handle<SharedFunctionInfo> function_info =
2310 Compiler::BuildFunctionInfo(node, script(), this);
2311 // Check for stack-overflow exception.
2312 if (HasStackOverflow()) return;
2313 InstantiateFunction(function_info);
2314 }
2317 void CodeGenerator::VisitSharedFunctionInfoLiteral(
2318 SharedFunctionInfoLiteral* node) {
2319 Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
2320 InstantiateFunction(node->shared_function_info());
2321 }
2324 void CodeGenerator::VisitConditional(Conditional* node) {
2325 Comment cmnt(masm_, "[ Conditional");
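// JavaScript example (illustrative): for 'c ? a : b' the condition is
// compiled as control flow; whichever target ended up as the fall
// through determines whether the then or the else part is emitted first.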
2326 JumpTarget then;
2327 JumpTarget else_;
2328 JumpTarget exit;
2329 ControlDestination dest(&then, &else_, true);
2330 LoadCondition(node->condition(), &dest, true);
2332 if (dest.false_was_fall_through()) {
2333 // The else target was bound, so we compile the else part first.
2334 Load(node->else_expression());
2336 if (then.is_linked()) {
2337 exit.Jump();
2338 then.Bind();
2339 Load(node->then_expression());
2340 }
2341 } else {
2342 // The then target was bound, so we compile the then part first.
2343 Load(node->then_expression());
2345 if (else_.is_linked()) {
2346 exit.Jump();
2347 else_.Bind();
2348 Load(node->else_expression());
2349 }
2350 }
2352 exit.Bind();
2353 }
2356 void CodeGenerator::VisitSlot(Slot* node) {
2357 Comment cmnt(masm_, "[ Slot");
2358 LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
2359 }
2362 void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
2363 Comment cmnt(masm_, "[ VariableProxy");
2364 Variable* var = node->var();
2365 Expression* expr = var->rewrite();
2366 if (expr != NULL) {
2367 Visit(expr);
2368 } else {
2369 ASSERT(var->is_global());
2370 Reference ref(this, node);
2371 ref.GetValue();
2372 }
2373 }
2376 void CodeGenerator::VisitLiteral(Literal* node) {
2377 Comment cmnt(masm_, "[ Literal");
2378 frame_->Push(node->handle());
2379 }
2382 // Materialize the regexp literal 'node' in the literals array
2383 // 'literals' of the function. Leave the regexp boilerplate in
2384 // 'boilerplate'.
2385 class DeferredRegExpLiteral: public DeferredCode {
2386 public:
2387 DeferredRegExpLiteral(Register boilerplate,
2388 Register literals,
2389 RegExpLiteral* node)
2390 : boilerplate_(boilerplate), literals_(literals), node_(node) {
2391 set_comment("[ DeferredRegExpLiteral");
2392 }
2394 void Generate();
2396 private:
2397 Register boilerplate_;
2398 Register literals_;
2399 RegExpLiteral* node_;
2400 };
2403 void DeferredRegExpLiteral::Generate() {
2404 // Since the entry is undefined we call the runtime system to
2405 // compute the literal.
2406 // Literal array (0).
2407 __ push(literals_);
2408 // Literal index (1).
2409 __ Push(Smi::FromInt(node_->literal_index()));
2410 // RegExp pattern (2).
2411 __ Push(node_->pattern());
2412 // RegExp flags (3).
2413 __ Push(node_->flags());
2414 __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
2415 if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
2416 }
2419 void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
2420 Comment cmnt(masm_, "[ RegExp Literal");
2422 // Retrieve the literals array and check the allocated entry. Begin
2423 // with a writable copy of the function of this activation in a
2424 // register.
2425 frame_->PushFunction();
2426 Result literals = frame_->Pop();
2427 literals.ToRegister();
2428 frame_->Spill(literals.reg());
2430 // Load the literals array of the function.
2431 __ movq(literals.reg(),
2432 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
2434 // Load the literal at the ast saved index.
2435 Result boilerplate = allocator_->Allocate();
2436 ASSERT(boilerplate.is_valid());
2437 int literal_offset =
2438 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2439 __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
2441 // Check whether we need to materialize the RegExp object. If so,
2442 // jump to the deferred code passing the literals array.
2443 DeferredRegExpLiteral* deferred =
2444 new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
2445 __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
2446 deferred->Branch(equal);
2447 deferred->BindExit();
2448 literals.Unuse();
2450 // Push the boilerplate object.
2451 frame_->Push(&boilerplate);
2452 }
2455 void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
2456 Comment cmnt(masm_, "[ ObjectLiteral");
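// JavaScript example (illustrative): '{ a: 1, b: f(), get c() {} }' is
// built by cloning a boilerplate object; only the non-constant parts
// are then set explicitly below (b through the store IC, the getter
// through the DefineAccessor runtime call).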
2458 // Load a writable copy of the function of this activation in a
2459 // register.
2460 frame_->PushFunction();
2461 Result literals = frame_->Pop();
2462 literals.ToRegister();
2463 frame_->Spill(literals.reg());
2465 // Load the literals array of the function.
2466 __ movq(literals.reg(),
2467 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
2469 frame_->Push(&literals);
2471 frame_->Push(Smi::FromInt(node->literal_index()));
2472 // Constant properties.
2473 frame_->Push(node->constant_properties());
2474 // Should the object literal have fast elements?
2475 frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
2476 Result clone;
2477 if (node->depth() > 1) {
2478 clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
2479 } else {
2480 clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
2481 }
2482 frame_->Push(&clone);
2484 for (int i = 0; i < node->properties()->length(); i++) {
2485 ObjectLiteral::Property* property = node->properties()->at(i);
2486 switch (property->kind()) {
2487 case ObjectLiteral::Property::CONSTANT:
2488 break;
2489 case ObjectLiteral::Property::MATERIALIZED_LITERAL:
2490 if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
2491 // else fall through.
2492 case ObjectLiteral::Property::COMPUTED: {
2493 Handle<Object> key(property->key()->handle());
2494 if (key->IsSymbol()) {
2495 // Duplicate the object as the IC receiver.
2496 frame_->Dup();
2497 Load(property->value());
2498 frame_->Push(key);
2499 Result ignored = frame_->CallStoreIC();
2500 break;
2501 }
2502 // Fall through
2503 }
2504 case ObjectLiteral::Property::PROTOTYPE: {
2505 // Duplicate the object as an argument to the runtime call.
2506 frame_->Dup();
2507 Load(property->key());
2508 Load(property->value());
2509 Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
2510 // Ignore the result.
2511 break;
2512 }
2513 case ObjectLiteral::Property::SETTER: {
2514 // Duplicate the object as an argument to the runtime call.
2515 frame_->Dup();
2516 Load(property->key());
2517 frame_->Push(Smi::FromInt(1));
2518 Load(property->value());
2519 Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
2520 // Ignore the result.
2521 break;
2522 }
2523 case ObjectLiteral::Property::GETTER: {
2524 // Duplicate the object as an argument to the runtime call.
2525 frame_->Dup();
2526 Load(property->key());
2527 frame_->Push(Smi::FromInt(0));
2528 Load(property->value());
2529 Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
2530 // Ignore the result.
2531 break;
2532 }
2533 default: UNREACHABLE();
2534 }
2535 }
2536 }
2539 void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
2540 Comment cmnt(masm_, "[ ArrayLiteral");
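// JavaScript example (illustrative): for '[0, x, 1]' the cloned
// boilerplate already contains the constant elements 0 and 1; only the
// non-literal element x is written into the elements array below,
// followed by a write barrier update.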
2542 // Load a writable copy of the function of this activation in a
2543 // register.
2544 frame_->PushFunction();
2545 Result literals = frame_->Pop();
2546 literals.ToRegister();
2547 frame_->Spill(literals.reg());
2549 // Load the literals array of the function.
2550 __ movq(literals.reg(),
2551 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
2553 frame_->Push(&literals);
2554 frame_->Push(Smi::FromInt(node->literal_index()));
2555 frame_->Push(node->constant_elements());
2556 int length = node->values()->length();
2557 Result clone;
2558 if (node->depth() > 1) {
2559 clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
2560 } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
2561 clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
2562 } else {
2563 FastCloneShallowArrayStub stub(length);
2564 clone = frame_->CallStub(&stub, 3);
2565 }
2566 frame_->Push(&clone);
2568 // Generate code to set the elements in the array that are not
2569 // literals.
2570 for (int i = 0; i < node->values()->length(); i++) {
2571 Expression* value = node->values()->at(i);
2573 // If value is a literal the property value is already set in the
2574 // boilerplate object.
2575 if (value->AsLiteral() != NULL) continue;
2576 // If value is a materialized literal the property value is already set
2577 // in the boilerplate object if it is simple.
2578 if (CompileTimeValue::IsCompileTimeValue(value)) continue;
2580 // The property must be set by generated code.
2581 Load(value);
2583 // Get the property value off the stack.
2584 Result prop_value = frame_->Pop();
2585 prop_value.ToRegister();
2587 // Fetch the array literal while leaving a copy on the stack and
2588 // use it to get the elements array.
2589 frame_->Dup();
2590 Result elements = frame_->Pop();
2591 elements.ToRegister();
2592 frame_->Spill(elements.reg());
2593 // Get the elements FixedArray.
2594 __ movq(elements.reg(),
2595 FieldOperand(elements.reg(), JSObject::kElementsOffset));
2597 // Write to the indexed properties array.
2598 int offset = i * kPointerSize + FixedArray::kHeaderSize;
2599 __ movq(FieldOperand(elements.reg(), offset), prop_value.reg());
2601 // Update the write barrier for the array address.
2602 frame_->Spill(prop_value.reg()); // Overwritten by the write barrier.
2603 Result scratch = allocator_->Allocate();
2604 ASSERT(scratch.is_valid());
2605 __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
2606 }
2607 }
2610 void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
2611 ASSERT(!in_spilled_code());
2612 // Call runtime routine to allocate the catch extension object and
2613 // assign the exception value to the catch variable.
2614 Comment cmnt(masm_, "[ CatchExtensionObject");
2615 Load(node->key());
2616 Load(node->value());
2617 Result result =
2618 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
2619 frame_->Push(&result);
2620 }
2623 void CodeGenerator::VisitAssignment(Assignment* node) {
2624 Comment cmnt(masm_, "[ Assignment");
2626 { Reference target(this, node->target(), node->is_compound());
2627 if (target.is_illegal()) {
2628 // Fool the virtual frame into thinking that we left the assignment's
2629 // value on the frame.
2630 frame_->Push(Smi::FromInt(0));
2631 return;
2632 }
2633 Variable* var = node->target()->AsVariableProxy()->AsVariable();
2635 if (node->starts_initialization_block()) {
2636 ASSERT(target.type() == Reference::NAMED ||
2637 target.type() == Reference::KEYED);
2638 // Change to slow case in the beginning of an initialization
2639 // block to avoid the quadratic behavior of repeatedly adding
2640 // fast properties.
2642 // The receiver is the argument to the runtime call. It is the
2643 // first value pushed when the reference was loaded to the
2644 // frame.
2645 frame_->PushElementAt(target.size() - 1);
2646 Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
2647 }
2648 if (node->ends_initialization_block()) {
2649 // Add an extra copy of the receiver to the frame, so that it can be
2650 // converted back to fast case after the assignment.
2651 ASSERT(target.type() == Reference::NAMED ||
2652 target.type() == Reference::KEYED);
2653 if (target.type() == Reference::NAMED) {
2654 frame_->Dup();
2655 // Dup target receiver on stack.
2656 } else {
2657 ASSERT(target.type() == Reference::KEYED);
2658 Result temp = frame_->Pop();
2659 frame_->Dup();
2660 frame_->Push(&temp);
2661 }
2662 }
2663 if (node->op() == Token::ASSIGN ||
2664 node->op() == Token::INIT_VAR ||
2665 node->op() == Token::INIT_CONST) {
2666 Load(node->value());
2668 } else { // Assignment is a compound assignment.
2669 Literal* literal = node->value()->AsLiteral();
2670 bool overwrite_value =
2671 (node->value()->AsBinaryOperation() != NULL &&
2672 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
2673 Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
2674 // There are two cases where the target is not read in the right hand
2675 // side, that are easy to test for: the right hand side is a literal,
2676 // or the right hand side is a different variable. TakeValue invalidates
2677 // the target, with an implicit promise that it will be written to again
2678 // before it is read.
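// JavaScript example (illustrative): in 'x += 1' the right hand side is
// a literal and in 'x += y' it is a different variable, so in both
// cases the target's value may be taken destructively; it is written
// again before it could be observed.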
2679 if (literal != NULL || (right_var != NULL && right_var != var)) {
2680 target.TakeValue();
2681 } else {
2682 target.GetValue();
2683 }
2684 Load(node->value());
2685 GenericBinaryOperation(node->binary_op(),
2686 node->type(),
2687 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
2688 }
2690 if (var != NULL &&
2691 var->mode() == Variable::CONST &&
2692 node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
2693 // Assignment ignored - leave the value on the stack.
2694 UnloadReference(&target);
2695 } else {
2696 CodeForSourcePosition(node->position());
2697 if (node->op() == Token::INIT_CONST) {
2698 // Dynamic constant initializations must use the function context
2699 // and initialize the actual constant declared. Dynamic variable
2700 // initializations are simply assignments and use SetValue.
2701 target.SetValue(CONST_INIT);
2702 } else {
2703 target.SetValue(NOT_CONST_INIT);
2704 }
2705 if (node->ends_initialization_block()) {
2706 ASSERT(target.type() == Reference::UNLOADED);
2707 // End of initialization block. Revert to fast case. The
2708 // argument to the runtime call is the extra copy of the receiver,
2709 // which is below the value of the assignment.
2710 // Swap the receiver and the value of the assignment expression.
2711 Result lhs = frame_->Pop();
2712 Result receiver = frame_->Pop();
2713 frame_->Push(&lhs);
2714 frame_->Push(&receiver);
2715 Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
2716 }
2717 }
2718 }
2719 }
2722 void CodeGenerator::VisitThrow(Throw* node) {
2723 Comment cmnt(masm_, "[ Throw");
2724 Load(node->exception());
2725 Result result = frame_->CallRuntime(Runtime::kThrow, 1);
2726 frame_->Push(&result);
2727 }
2730 void CodeGenerator::VisitProperty(Property* node) {
2731 Comment cmnt(masm_, "[ Property");
2732 Reference property(this, node);
2733 property.GetValue();
2734 }
2737 void CodeGenerator::VisitCall(Call* node) {
2738 Comment cmnt(masm_, "[ Call");
2740 ZoneList<Expression*>* args = node->arguments();
2742 // Check if the function is a variable or a property.
2743 Expression* function = node->expression();
2744 Variable* var = function->AsVariableProxy()->AsVariable();
2745 Property* property = function->AsProperty();
2747 // ------------------------------------------------------------------------
2748 // Fast-case: Use inline caching.
2750 // According to ECMA-262, section 11.2.3, page 44, the function to call
2751 // must be resolved after the arguments have been evaluated. The IC code
2752 // automatically handles this by loading the arguments before the function
2753 // is resolved in cache misses (this also holds for megamorphic calls).
2754 // ------------------------------------------------------------------------
2756 if (var != NULL && var->is_possibly_eval()) {
2757 // ----------------------------------
2758 // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
2759 // ----------------------------------
2761 // In a call to eval, we first call %ResolvePossiblyDirectEval to
2762 // resolve the function we need to call and the receiver of the
2763 // call. Then we call the resolved function using the given
2764 // arguments.
2766 // Prepare the stack for the call to the resolved function.
2767 Load(function);
2769 // Allocate a frame slot for the receiver.
2770 frame_->Push(Factory::undefined_value());
2771 int arg_count = args->length();
2772 for (int i = 0; i < arg_count; i++) {
2773 Load(args->at(i));
2774 }
2776 // Prepare the stack for the call to ResolvePossiblyDirectEval.
2777 frame_->PushElementAt(arg_count + 1);
2778 if (arg_count > 0) {
2779 frame_->PushElementAt(arg_count);
2780 } else {
2781 frame_->Push(Factory::undefined_value());
2782 }
2784 // Push the receiver.
2785 frame_->PushParameterAt(-1);
2787 // Resolve the call.
2788 Result result =
2789 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
2791 // The runtime call returns a pair of values in rax (function) and
2792 // rdx (receiver). Touch up the stack with the right values.
2793 Result receiver = allocator_->Allocate(rdx);
2794 frame_->SetElementAt(arg_count + 1, &result);
2795 frame_->SetElementAt(arg_count, &receiver);
2798 // Call the function.
2799 CodeForSourcePosition(node->position());
2800 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
2801 CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
2802 result = frame_->CallStub(&call_function, arg_count + 1);
2804 // Restore the context and overwrite the function on the stack with
2805 // the result.
2806 frame_->RestoreContextRegister();
2807 frame_->SetElementAt(0, &result);
2809 } else if (var != NULL && !var->is_this() && var->is_global()) {
2810 // ----------------------------------
2811 // JavaScript example: 'foo(1, 2, 3)' // foo is global
2812 // ----------------------------------
2814 // Pass the global object as the receiver and let the IC stub
2815 // patch the stack to use the global proxy as 'this' in the
2816 // invoked function.
2817 LoadGlobal();
2819 // Load the arguments.
2820 int arg_count = args->length();
2821 for (int i = 0; i < arg_count; i++) {
2822 Load(args->at(i));
2823 }
2825 // Push the name of the function on the frame.
2826 frame_->Push(var->name());
2828 // Call the IC initialization code.
2829 CodeForSourcePosition(node->position());
2830 Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
2831 arg_count,
2832 loop_nesting());
2833 frame_->RestoreContextRegister();
2834 // Replace the function on the stack with the result.
2835 frame_->Push(&result);
2837 } else if (var != NULL && var->slot() != NULL &&
2838 var->slot()->type() == Slot::LOOKUP) {
2839 // ----------------------------------
2840 // JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
2841 // ----------------------------------
2843 // Load the function from the context. Sync the frame so we can
2844 // push the arguments directly into place.
2845 frame_->SyncRange(0, frame_->element_count() - 1);
2846 frame_->EmitPush(rsi);
2847 frame_->EmitPush(var->name());
2848 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
2849 // The runtime call returns a pair of values in rax and rdx. The
2850 // looked-up function is in rax and the receiver is in rdx. These
2851 // register references are not ref counted here. We spill them
2852 // eagerly since they are arguments to an inevitable call (and are
2853 // not sharable by the arguments).
2854 ASSERT(!allocator()->is_used(rax));
2855 frame_->EmitPush(rax);
2857 // Load the receiver.
2858 ASSERT(!allocator()->is_used(rdx));
2859 frame_->EmitPush(rdx);
2861 // Call the function.
2862 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
2864 } else if (property != NULL) {
2865 // Check if the key is a literal string.
2866 Literal* literal = property->key()->AsLiteral();
2868 if (literal != NULL && literal->handle()->IsSymbol()) {
2869 // ------------------------------------------------------------------
2870 // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
2871 // ------------------------------------------------------------------
2873 Handle<String> name = Handle<String>::cast(literal->handle());
2875 if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
2876 name->IsEqualTo(CStrVector("apply")) &&
2877 args->length() == 2 &&
2878 args->at(1)->AsVariableProxy() != NULL &&
2879 args->at(1)->AsVariableProxy()->IsArguments()) {
2880 // Use the optimized Function.prototype.apply that avoids
2881 // allocating lazily allocated arguments objects.
2882 CallApplyLazy(property->obj(),
2883 args->at(0),
2884 args->at(1)->AsVariableProxy(),
2885 node->position());
2887 } else {
2888 // Push the receiver onto the frame.
2889 Load(property->obj());
2891 // Load the arguments.
2892 int arg_count = args->length();
2893 for (int i = 0; i < arg_count; i++) {
2894 Load(args->at(i));
2895 }
2897 // Push the name of the function onto the frame.
2898 frame_->Push(name);
2900 // Call the IC initialization code.
2901 CodeForSourcePosition(node->position());
2902 Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
2903 arg_count,
2904 loop_nesting());
2905 frame_->RestoreContextRegister();
2906 frame_->Push(&result);
2907 }
2909 } else {
2910 // -------------------------------------------
2911 // JavaScript example: 'array[index](1, 2, 3)'
2912 // -------------------------------------------
2914 // Load the function to call from the property through a reference.
2915 if (property->is_synthetic()) {
2916 Reference ref(this, property, false);
2917 ref.GetValue();
2918 // Use global object as receiver.
2919 LoadGlobalReceiver();
2920 } else {
2921 Reference ref(this, property, false);
2922 ASSERT(ref.size() == 2);
2923 Result key = frame_->Pop();
2924 frame_->Dup(); // Duplicate the receiver.
2925 frame_->Push(&key);
2926 ref.GetValue();
2927 // Top of frame contains function to call, with duplicate copy of
2928 // receiver below it. Swap them.
2929 Result function = frame_->Pop();
2930 Result receiver = frame_->Pop();
2931 frame_->Push(&function);
2932 frame_->Push(&receiver);
2933 }
2935 // Call the function.
2936 CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
2937 }
2939 } else {
2940 // ----------------------------------
2941 // JavaScript example: 'foo(1, 2, 3)' // foo is not global
2942 // ----------------------------------
2944 // Load the function.
2945 Load(function);
2947 // Pass the global proxy as the receiver.
2948 LoadGlobalReceiver();
2950 // Call the function.
2951 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
2952 }
2953 }
2956 void CodeGenerator::VisitCallNew(CallNew* node) {
2957 Comment cmnt(masm_, "[ CallNew");
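// JavaScript example (illustrative): for 'new Foo(1, 2)' the expression
// Foo is evaluated before the arguments, as required for new
// expressions (see the comment below).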
2959 // According to ECMA-262, section 11.2.2, page 44, the function
2960 // expression in new calls must be evaluated before the
2961 // arguments. This is different from ordinary calls, where the
2962 // actual function to call is resolved after the arguments have been
2963 // evaluated.
2965 // Compute function to call and use the global object as the
2966 // receiver. There is no need to use the global proxy here because
2967 // it will always be replaced with a newly allocated object.
2968 Load(node->expression());
2969 LoadGlobal();
2971 // Push the arguments ("left-to-right") on the stack.
2972 ZoneList<Expression*>* args = node->arguments();
2973 int arg_count = args->length();
2974 for (int i = 0; i < arg_count; i++) {
2975 Load(args->at(i));
2976 }
2978 // Call the construct call builtin that handles allocation and
2979 // constructor invocation.
2980 CodeForSourcePosition(node->position());
2981 Result result = frame_->CallConstructor(arg_count);
2982 // Replace the function on the stack with the result.
2983 frame_->SetElementAt(0, &result);
2984 }
2987 void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
2988 if (CheckForInlineRuntimeCall(node)) {
2989 return;
2990 }
2992 ZoneList<Expression*>* args = node->arguments();
2993 Comment cmnt(masm_, "[ CallRuntime");
2994 Runtime::Function* function = node->function();
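// NOTE (added commentary): function is NULL when the name does not
// resolve to a C++ runtime function; the call then targets the JS
// builtin of the same name, looked up on the builtins object pushed
// below.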
2996 if (function == NULL) {
2997 // Push the builtins object found in the current global object.
2998 Result temp = allocator()->Allocate();
2999 ASSERT(temp.is_valid());
3000 __ movq(temp.reg(), GlobalObject());
3001 __ movq(temp.reg(),
3002 FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
3003 frame_->Push(&temp);
3004 }
3006 // Push the arguments ("left-to-right").
3007 int arg_count = args->length();
3008 for (int i = 0; i < arg_count; i++) {
3009 Load(args->at(i));
3010 }
3012 if (function == NULL) {
3013 // Call the JS runtime function.
3014 frame_->Push(node->name());
3015 Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
3016 arg_count,
3017 loop_nesting());
3018 frame_->RestoreContextRegister();
3019 frame_->Push(&answer);
3020 } else {
3021 // Call the C runtime function.
3022 Result answer = frame_->CallRuntime(function, arg_count);
3023 frame_->Push(&answer);
3024 }
3025 }
3028 void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
3029 Comment cmnt(masm_, "[ UnaryOperation");
3031 Token::Value op = node->op();
3033 if (op == Token::NOT) {
3034 // Swap the true and false targets but keep the same actual label
3035 // as the fall through.
3036 destination()->Invert();
3037 LoadCondition(node->expression(), destination(), true);
3038 // Swap the labels back.
3039 destination()->Invert();
3041 } else if (op == Token::DELETE) {
3042 Property* property = node->expression()->AsProperty();
3043 if (property != NULL) {
3044 Load(property->obj());
3045 Load(property->key());
3046 Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
3047 frame_->Push(&answer);
3048 return;
3049 }
3051 Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
3052 if (variable != NULL) {
3053 Slot* slot = variable->slot();
3054 if (variable->is_global()) {
3055 LoadGlobal();
3056 frame_->Push(variable->name());
3057 Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
3058 CALL_FUNCTION, 2);
3059 frame_->Push(&answer);
3060 return;
3062 } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
3063 // Call the runtime to look up the context holding the named
3064 // variable. Sync the virtual frame eagerly so we can push the
3065 // arguments directly into place.
3066 frame_->SyncRange(0, frame_->element_count() - 1);
3067 frame_->EmitPush(rsi);
3068 frame_->EmitPush(variable->name());
3069 Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
3070 ASSERT(context.is_register());
3071 frame_->EmitPush(context.reg());
3073 frame_->EmitPush(variable->name());
3074 Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
3075 CALL_FUNCTION, 2);
3076 frame_->Push(&answer);
3077 return;
3078 }
3080 // Default: Result of deleting non-global, not dynamically
3081 // introduced variables is false.
3082 frame_->Push(Factory::false_value());
3084 } else {
3085 // Default: Result of deleting expressions is true.
3086 Load(node->expression()); // may have side-effects
3087 frame_->SetElementAt(0, Factory::true_value());
3088 }
3090 } else if (op == Token::TYPEOF) {
3091 // Special case for loading the typeof expression; see comment on
3092 // LoadTypeofExpression().
3093 LoadTypeofExpression(node->expression());
3094 Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
3095 frame_->Push(&answer);
3097 } else if (op == Token::VOID) {
3098 Expression* expression = node->expression();
3099 if (expression && expression->AsLiteral() && (
3100 expression->AsLiteral()->IsTrue() ||
3101 expression->AsLiteral()->IsFalse() ||
3102 expression->AsLiteral()->handle()->IsNumber() ||
3103 expression->AsLiteral()->handle()->IsString() ||
3104 expression->AsLiteral()->handle()->IsJSRegExp() ||
3105 expression->AsLiteral()->IsNull())) {
3106 // Omit evaluating the value of the primitive literal.
3107 // It will be discarded anyway, and can have no side effect.
3108 frame_->Push(Factory::undefined_value());
3109 } else {
3110 Load(node->expression());
3111 frame_->SetElementAt(0, Factory::undefined_value());
3112 }
3114 } else {
3115 bool overwrite =
3116 (node->expression()->AsBinaryOperation() != NULL &&
3117 node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
3118 Load(node->expression());
3119 switch (op) {
3120 case Token::NOT:
3121 case Token::DELETE:
3122 case Token::TYPEOF:
3123 UNREACHABLE(); // handled above
3124 break;
3126 case Token::SUB: {
3127 GenericUnaryOpStub stub(Token::SUB, overwrite);
3128 Result operand = frame_->Pop();
3129 Result answer = frame_->CallStub(&stub, &operand);
3130 answer.set_type_info(TypeInfo::Number());
3131 frame_->Push(&answer);
3132 break;
3133 }
3135 case Token::BIT_NOT: {
3136 // Smi check.
3137 JumpTarget smi_label;
3138 JumpTarget continue_label;
3139 Result operand = frame_->Pop();
3140 operand.ToRegister();
3142 Condition is_smi = masm_->CheckSmi(operand.reg());
3143 smi_label.Branch(is_smi, &operand);
3145 GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
3146 Result answer = frame_->CallStub(&stub, &operand);
3147 continue_label.Jump(&answer);
3149 smi_label.Bind(&answer);
3150 answer.ToRegister();
3151 frame_->Spill(answer.reg());
3152 __ SmiNot(answer.reg(), answer.reg());
3153 continue_label.Bind(&answer);
3154 answer.set_type_info(TypeInfo::Smi());
3155 frame_->Push(&answer);
3156 break;
3157 }
3159 case Token::ADD: {
3160 // Smi check.
3161 JumpTarget continue_label;
3162 Result operand = frame_->Pop();
3163 TypeInfo operand_info = operand.type_info();
3164 operand.ToRegister();
3165 Condition is_smi = masm_->CheckSmi(operand.reg());
3166 continue_label.Branch(is_smi, &operand);
3167 frame_->Push(&operand);
3168 Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
3169 CALL_FUNCTION, 1);
3171 continue_label.Bind(&answer);
3172 if (operand_info.IsSmi()) {
3173 answer.set_type_info(TypeInfo::Smi());
3174 } else if (operand_info.IsInteger32()) {
3175 answer.set_type_info(TypeInfo::Integer32());
3176 } else {
3177 answer.set_type_info(TypeInfo::Number());
3178 }
3179 frame_->Push(&answer);
3180 break;
3181 }
3183 default:
3184 UNREACHABLE();
3185 }
3186 }
3187 }
3189 // The value in dst was optimistically incremented or decremented.
3190 // The result overflowed or was not smi tagged. Call into the runtime
3191 // to convert the argument to a number, and call the specialized add
3192 // or subtract stub. The result is left in dst.
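// Illustrative example: '++x' reaches this deferred code when x is not
// a smi (e.g. a heap number or a string) or when the optimistic smi
// increment overflowed the smi range.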
3193 class DeferredPrefixCountOperation: public DeferredCode {
3194 public:
3195 DeferredPrefixCountOperation(Register dst,
3196 bool is_increment,
3197 TypeInfo input_type)
3198 : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
3199 set_comment("[ DeferredCountOperation");
3200 }
3202 virtual void Generate();
3204 private:
3205 Register dst_;
3206 bool is_increment_;
3207 TypeInfo input_type_;
3208 };
3211 void DeferredPrefixCountOperation::Generate() {
3212 Register left;
3213 if (input_type_.IsNumber()) {
3214 left = dst_;
3215 } else {
3216 __ push(dst_);
3217 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
3218 left = rax;
3219 }
3221 GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
3222 NO_OVERWRITE,
3223 NO_GENERIC_BINARY_FLAGS,
3224 TypeInfo::Number());
3225 stub.GenerateCall(masm_, left, Smi::FromInt(1));
3227 if (!dst_.is(rax)) __ movq(dst_, rax);
3228 }
3231 // The value in dst was optimistically incremented or decremented.
3232 // The result overflowed or was not smi tagged. Call into the runtime
3233 // to convert the argument to a number. Update the original value in
3234 // old. Call the specialized add or subtract stub. The result is
3235 // left in dst.
3236 class DeferredPostfixCountOperation: public DeferredCode {
3237 public:
3238 DeferredPostfixCountOperation(Register dst,
3239 Register old,
3240 bool is_increment,
3241 TypeInfo input_type)
3242 : dst_(dst),
3243 old_(old),
3244 is_increment_(is_increment),
3245 input_type_(input_type) {
3246 set_comment("[ DeferredCountOperation");
3247 }
3249 virtual void Generate();
3251 private:
3252 Register dst_;
3253 Register old_;
3254 bool is_increment_;
3255 TypeInfo input_type_;
3256 };
3259 void DeferredPostfixCountOperation::Generate() {
3260 Register left;
3261 if (input_type_.IsNumber()) {
3262 __ push(dst_); // Save the input to use as the old value.
3263 left = dst_;
3264 } else {
3265 __ push(dst_);
3266 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
3267 __ push(rax); // Save the result of ToNumber to use as the old value.
3268 left = rax;
3269 }
3271 GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
3272 NO_OVERWRITE,
3273 NO_GENERIC_BINARY_FLAGS,
3274 TypeInfo::Number());
3275 stub.GenerateCall(masm_, left, Smi::FromInt(1));
3277 if (!dst_.is(rax)) __ movq(dst_, rax);
3278 __ pop(old_);
3279 }
3282 void CodeGenerator::VisitCountOperation(CountOperation* node) {
3283 Comment cmnt(masm_, "[ CountOperation");
3285 bool is_postfix = node->is_postfix();
3286 bool is_increment = node->op() == Token::INC;
3288 Variable* var = node->expression()->AsVariableProxy()->AsVariable();
3289 bool is_const = (var != NULL && var->mode() == Variable::CONST);
3291 // Postfix operations need a stack slot under the reference to hold
3292 // the old value while the new value is being stored. This is so that
3293 // in the case that storing the new value requires a call, the old
3294 // value will be in the frame to be spilled.
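// JavaScript example (illustrative): in 'o[k]++' the value of the
// expression is the old o[k]; if storing the incremented value requires
// a call (e.g. a keyed store), the old value survives in its frame slot
// and can be spilled across the call.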
3295 if (is_postfix) frame_->Push(Smi::FromInt(0));
3297 // A constant reference is not saved to, so the reference is not a
3298 // compound assignment reference.
3299 { Reference target(this, node->expression(), !is_const);
3300 if (target.is_illegal()) {
3301 // Spoof the virtual frame to have the expected height (one higher
3302 // than on entry).
3303 if (!is_postfix) frame_->Push(Smi::FromInt(0));
3304 return;
3305 }
3306 target.TakeValue();
3308 Result new_value = frame_->Pop();
3309 new_value.ToRegister();
3311 Result old_value; // Only allocated in the postfix case.
3312 if (is_postfix) {
3313 // Allocate a temporary to preserve the old value.
3314 old_value = allocator_->Allocate();
3315 ASSERT(old_value.is_valid());
3316 __ movq(old_value.reg(), new_value.reg());
3318 // The return value for postfix operations is ToNumber(input).
3319 // Keep more precise type info if the input is some kind of
3320 // number already. If the input is not a number we have to wait
3321 // for the deferred code to convert it.
3322 if (new_value.type_info().IsNumber()) {
3323 old_value.set_type_info(new_value.type_info());
3324 }
3325 }
3326 // Ensure the new value is writable.
3327 frame_->Spill(new_value.reg());
3329 DeferredCode* deferred = NULL;
3330 if (is_postfix) {
3331 deferred = new DeferredPostfixCountOperation(new_value.reg(),
3332 old_value.reg(),
3333 is_increment,
3334 new_value.type_info());
3335 } else {
3336 deferred = new DeferredPrefixCountOperation(new_value.reg(),
3337 is_increment,
3338 new_value.type_info());
3339 }
3341 __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
3342 if (is_increment) {
3343 __ SmiAddConstant(kScratchRegister,
3344 new_value.reg(),
3345 Smi::FromInt(1),
3346 deferred->entry_label());
3347 } else {
3348 __ SmiSubConstant(kScratchRegister,
3349 new_value.reg(),
3350 Smi::FromInt(1),
3351 deferred->entry_label());
3352 }
3353 __ movq(new_value.reg(), kScratchRegister);
3354 deferred->BindExit();
3356 // Postfix count operations return their input converted to
3357 // number. The case when the input is already a number is covered
3358 // above in the allocation code for old_value.
3359 if (is_postfix && !new_value.type_info().IsNumber()) {
3360 old_value.set_type_info(TypeInfo::Number());
3361 }
3363 new_value.set_type_info(TypeInfo::Number());
3365 // Postfix: store the old value in the allocated slot under the
3366 // reference.
3367 if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
3369 frame_->Push(&new_value);
3370 // Non-constant: update the reference.
3371 if (!is_const) target.SetValue(NOT_CONST_INIT);
3372 }
3374 // Postfix: drop the new value and use the old.
3375 if (is_postfix) frame_->Drop();
3376 }
3379 void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
3380 // According to ECMA-262 section 11.11, page 58, the binary logical
3381 // operators must yield the result of one of the two expressions
3382 // before any ToBoolean() conversions. This means that the value
3383 // produced by a && or || operator is not necessarily a boolean.
3385 // NOTE: If the left hand side produces a materialized value (not
3386 // control flow), we force the right hand side to do the same. This
3387 // is necessary because we assume that if we get control flow on the
3388 // last path out of an expression we got it on all paths.
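// JavaScript example (illustrative): '0 || "s"' evaluates to "s" and
// 'f() && g()' evaluates to the value of f() whenever that value is
// falsy, so the result of && and || is one of the operand values, not
// a coerced boolean.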
3389 if (node->op() == Token::AND) {
3390 JumpTarget is_true;
3391 ControlDestination dest(&is_true, destination()->false_target(), true);
3392 LoadCondition(node->left(), &dest, false);
3394 if (dest.false_was_fall_through()) {
3395 // The current false target was used as the fall-through. If
3396 // there are no dangling jumps to is_true then the left
3397 // subexpression was unconditionally false. Otherwise we have
3398 // paths where we do have to evaluate the right subexpression.
3399 if (is_true.is_linked()) {
3400 // We need to compile the right subexpression. If the jump to
3401 // the current false target was a forward jump then we have a
3402 // valid frame, we have just bound the false target, and we
3403 // have to jump around the code for the right subexpression.
3404 if (has_valid_frame()) {
3405 destination()->false_target()->Unuse();
3406 destination()->false_target()->Jump();
3407 }
3408 is_true.Bind();
3409 // The left subexpression compiled to control flow, so the
3410 // right one is free to do so as well.
3411 LoadCondition(node->right(), destination(), false);
3412 } else {
3413 // We have actually just jumped to or bound the current false
3414 // target but the current control destination is not marked as
3415 // used.
3416 destination()->Use(false);
3417 }
3419 } else if (dest.is_used()) {
3420 // The left subexpression compiled to control flow (and is_true
3421 // was just bound), so the right is free to do so as well.
3422 LoadCondition(node->right(), destination(), false);
3424 } else {
3425 // We have a materialized value on the frame, so we exit with
3426 // one on all paths. There are possibly also jumps to is_true
3427 // from nested subexpressions.
3428 JumpTarget pop_and_continue;
3429 JumpTarget exit;
3431 // Avoid popping the result if it converts to 'false' using the
3432 // standard ToBoolean() conversion as described in ECMA-262,
3433 // section 9.2, page 30.
3435 // Duplicate the TOS value. The duplicate will be popped by
3436 // ToBoolean.
3437 frame_->Dup();
3438 ControlDestination dest(&pop_and_continue, &exit, true);
3439 ToBoolean(&dest);
3441 // Pop the result of evaluating the first part.
3442 frame_->Drop();
3444 // Compile right side expression.
3445 is_true.Bind();
3446 Load(node->right());
3448 // Exit (always with a materialized value).
3449 exit.Bind();
3450 }
3452 } else {
3453 ASSERT(node->op() == Token::OR);
3454 JumpTarget is_false;
3455 ControlDestination dest(destination()->true_target(), &is_false, false);
3456 LoadCondition(node->left(), &dest, false);
3458 if (dest.true_was_fall_through()) {
3459 // The current true target was used as the fall-through. If
3460 // there are no dangling jumps to is_false then the left
3461 // subexpression was unconditionally true. Otherwise we have
3462 // paths where we do have to evaluate the right subexpression.
3463 if (is_false.is_linked()) {
3464 // We need to compile the right subexpression. If the jump to
3465 // the current true target was a forward jump then we have a
3466 // valid frame, we have just bound the true target, and we
3467 // have to jump around the code for the right subexpression.
3468 if (has_valid_frame()) {
3469 destination()->true_target()->Unuse();
3470 destination()->true_target()->Jump();
3471 }
3472 is_false.Bind();
3473 // The left subexpression compiled to control flow, so the
3474 // right one is free to do so as well.
3475 LoadCondition(node->right(), destination(), false);
3476 } else {
3477 // We have just jumped to or bound the current true target but
3478 // the current control destination is not marked as used.
3479 destination()->Use(true);
3480 }
3482 } else if (dest.is_used()) {
3483 // The left subexpression compiled to control flow (and is_false
3484 // was just bound), so the right is free to do so as well.
3485 LoadCondition(node->right(), destination(), false);
3487 } else {
3488 // We have a materialized value on the frame, so we exit with
3489 // one on all paths. There are possibly also jumps to is_false
3490 // from nested subexpressions.
3491 JumpTarget pop_and_continue;
3492 JumpTarget exit;
3494 // Avoid popping the result if it converts to 'true' using the
3495 // standard ToBoolean() conversion as described in ECMA-262,
3496 // section 9.2, page 30.
3498 // Duplicate the TOS value. The duplicate will be popped by
3499 // ToBoolean.
3500 frame_->Dup();
3501 ControlDestination dest(&exit, &pop_and_continue, false);
3502 ToBoolean(&dest);
3504 // Pop the result of evaluating the first part.
3505 frame_->Drop();
3507 // Compile right side expression.
3508 is_false.Bind();
3509 Load(node->right());
3511 // Exit (always with a materialized value).
3512 exit.Bind();
3513 }
3514 }
3515 }
3517 void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
3518 Comment cmnt(masm_, "[ BinaryOperation");
3520 if (node->op() == Token::AND || node->op() == Token::OR) {
3521 GenerateLogicalBooleanOperation(node);
3522 } else {
3523 // NOTE: The code below assumes that the slow cases (calls to runtime)
3524 // never return a constant/immutable object.
3525 OverwriteMode overwrite_mode = NO_OVERWRITE;
3526 if (node->left()->AsBinaryOperation() != NULL &&
3527 node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
3528 overwrite_mode = OVERWRITE_LEFT;
3529 } else if (node->right()->AsBinaryOperation() != NULL &&
3530 node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
3531 overwrite_mode = OVERWRITE_RIGHT;
3532 }
3534 Load(node->left());
3535 Load(node->right());
3536 GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
3537 }
3538 }
3542 void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
3543 Comment cmnt(masm_, "[ CompareOperation");
3545 // Get the expressions from the node.
3546 Expression* left = node->left();
3547 Expression* right = node->right();
3548 Token::Value op = node->op();
3549 // To make typeof testing for natives implemented in JavaScript really
3550 // efficient, we generate special code for expressions of the form:
3551 // 'typeof <expression> == <string>'.
3552 UnaryOperation* operation = left->AsUnaryOperation();
3553 if ((op == Token::EQ || op == Token::EQ_STRICT) &&
3554 (operation != NULL && operation->op() == Token::TYPEOF) &&
3555 (right->AsLiteral() != NULL &&
3556 right->AsLiteral()->handle()->IsString())) {
3557 Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
3559 // Load the operand and move it to a register.
3560 LoadTypeofExpression(operation->expression());
3561 Result answer = frame_->Pop();
3562 answer.ToRegister();
3564 if (check->Equals(Heap::number_symbol())) {
3565 Condition is_smi = masm_->CheckSmi(answer.reg());
3566 destination()->true_target()->Branch(is_smi);
3567 frame_->Spill(answer.reg());
3568 __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
3569 __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
3570 answer.Unuse();
3571 destination()->Split(equal);
3573 } else if (check->Equals(Heap::string_symbol())) {
3574 Condition is_smi = masm_->CheckSmi(answer.reg());
3575 destination()->false_target()->Branch(is_smi);
3577 // It can be an undetectable string object.
3578 __ movq(kScratchRegister,
3579 FieldOperand(answer.reg(), HeapObject::kMapOffset));
3580 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3581 Immediate(1 << Map::kIsUndetectable));
3582 destination()->false_target()->Branch(not_zero);
3583 __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
3584 answer.Unuse();
3585 destination()->Split(below); // Unsigned byte comparison needed.
3587 } else if (check->Equals(Heap::boolean_symbol())) {
3588 __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
3589 destination()->true_target()->Branch(equal);
3590 __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
3591 answer.Unuse();
3592 destination()->Split(equal);
3594 } else if (check->Equals(Heap::undefined_symbol())) {
3595 __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
3596 destination()->true_target()->Branch(equal);
3598 Condition is_smi = masm_->CheckSmi(answer.reg());
3599 destination()->false_target()->Branch(is_smi);
3601 // It can be an undetectable object.
3602 __ movq(kScratchRegister,
3603 FieldOperand(answer.reg(), HeapObject::kMapOffset));
3604 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3605 Immediate(1 << Map::kIsUndetectable));
3606 answer.Unuse();
3607 destination()->Split(not_zero);
3609 } else if (check->Equals(Heap::function_symbol())) {
3610 Condition is_smi = masm_->CheckSmi(answer.reg());
3611 destination()->false_target()->Branch(is_smi);
3612 frame_->Spill(answer.reg());
3613 __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
3614 destination()->true_target()->Branch(equal);
3615 // Regular expressions are callable so typeof == 'function'.
3616 __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
3617 answer.Unuse();
3618 destination()->Split(equal);
3620 } else if (check->Equals(Heap::object_symbol())) {
3621 Condition is_smi = masm_->CheckSmi(answer.reg());
3622 destination()->false_target()->Branch(is_smi);
3623 __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
3624 destination()->true_target()->Branch(equal);
3626 // Regular expressions are typeof == 'function', not 'object'.
3627 __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
3628 destination()->false_target()->Branch(equal);
3630 // It can be an undetectable object.
3631 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3632 Immediate(1 << Map::kIsUndetectable));
3633 destination()->false_target()->Branch(not_zero);
3634 __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
3635 destination()->false_target()->Branch(below);
3636 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
3637 answer.Unuse();
3638 destination()->Split(below_equal);
3640 // Uncommon case: typeof testing against a string literal that is
3641 // never returned from the typeof operator.
3642 answer.Unuse();
3643 destination()->Goto(false);
}
return;
}
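// Editor's note (illustrative, not part of the original source): the
// special-cased dispatch above implements JavaScript's typeof semantics
// without calling the runtime. A sketch of the cases it covers:
//
//   typeof 42        == 'number'     // smi check, then heap-number map
//   typeof 'x'       == 'string'     // instance type < FIRST_NONSTRING_TYPE
//   typeof true      == 'boolean'    // compared against the two oddballs
//   typeof undefined == 'undefined'  // also any undetectable object
//   typeof /re/      == 'function'   // regexps are callable in this V8
//   typeof null      == 'object'     // null is special-cased to 'object'
//
// Any other literal (e.g. typeof x == 'banana') can never match, so the
// code jumps unconditionally to the false target above.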
3648 Condition cc = no_condition;
3649 bool strict = false;
3650 switch (op) {
3651 case Token::EQ_STRICT:
3672 Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
3673 frame_->Push(&answer); // push the result
return;
}
3676 case Token::INSTANCEOF: {
Load(left);
Load(right);
3679 InstanceofStub stub;
3680 Result answer = frame_->CallStub(&stub, 2);
3681 answer.ToRegister();
3682 __ testq(answer.reg(), answer.reg());
3683 answer.Unuse();
3684 destination()->Split(zero); // The stub returns 0 on true.
return;
}
default:
UNREACHABLE();
}
3692 Comparison(node, cc, strict, destination());
}
3696 void CodeGenerator::VisitThisFunction(ThisFunction* node) {
3697 frame_->PushFunction();
3701 void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
3702 ASSERT(args->length() == 1);
3704 // ArgumentsAccessStub expects the key in rdx and the formal
3705 // parameter count in rax.
3706 Load(args->at(0));
3707 Result key = frame_->Pop();
3708 // Explicitly create a constant result.
3709 Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
3710 // Call the shared stub to get to arguments[key].
3711 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
3712 Result result = frame_->CallStub(&stub, &key, &count);
3713 frame_->Push(&result);
3717 void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
3718 ASSERT(args->length() == 1);
3719 Load(args->at(0));
3720 Result value = frame_->Pop();
3721 value.ToRegister();
3722 ASSERT(value.is_valid());
3723 Condition is_smi = masm_->CheckSmi(value.reg());
3724 destination()->false_target()->Branch(is_smi);
3725 // It is a heap object - get map.
3726 // Check if the object is a JS array or not.
3727 __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
3728 value.Unuse();
3729 destination()->Split(equal);
3733 void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
3734 ASSERT(args->length() == 1);
3735 Load(args->at(0));
3736 Result value = frame_->Pop();
3737 value.ToRegister();
3738 ASSERT(value.is_valid());
3739 Condition is_smi = masm_->CheckSmi(value.reg());
3740 destination()->false_target()->Branch(is_smi);
3741 // It is a heap object - get map.
3742 // Check if the object is a regexp.
3743 __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister);
3744 value.Unuse();
3745 destination()->Split(equal);
3749 void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
3750 // This generates a fast version of:
3751 // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
3752 ASSERT(args->length() == 1);
3753 Load(args->at(0));
3754 Result obj = frame_->Pop();
3755 obj.ToRegister();
3756 Condition is_smi = masm_->CheckSmi(obj.reg());
3757 destination()->false_target()->Branch(is_smi);
3759 __ Move(kScratchRegister, Factory::null_value());
3760 __ cmpq(obj.reg(), kScratchRegister);
3761 destination()->true_target()->Branch(equal);
3763 __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
3764 // Undetectable objects behave like undefined when tested with typeof.
3765 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3766 Immediate(1 << Map::kIsUndetectable));
3767 destination()->false_target()->Branch(not_zero);
3768 __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
3769 destination()->false_target()->Branch(less);
3770 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
3771 obj.Unuse();
3772 destination()->Split(less_equal);
3776 void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
3777 // This generates a fast version of:
3778 // (%_ClassOf(arg) === 'Function')
3779 ASSERT(args->length() == 1);
3780 Load(args->at(0));
3781 Result obj = frame_->Pop();
3782 obj.ToRegister();
3783 Condition is_smi = masm_->CheckSmi(obj.reg());
3784 destination()->false_target()->Branch(is_smi);
3785 __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
3786 obj.Unuse();
3787 destination()->Split(equal);
3791 void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
3792 ASSERT(args->length() == 1);
3793 Load(args->at(0));
3794 Result obj = frame_->Pop();
3795 obj.ToRegister();
3796 Condition is_smi = masm_->CheckSmi(obj.reg());
3797 destination()->false_target()->Branch(is_smi);
3798 __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
3799 __ movzxbl(kScratchRegister,
3800 FieldOperand(kScratchRegister, Map::kBitFieldOffset));
3801 __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
3802 obj.Unuse();
3803 destination()->Split(not_zero);
3807 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
3808 ASSERT(args->length() == 0);
3810 // Get the frame pointer for the calling frame.
3811 Result fp = allocator()->Allocate();
3812 __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3814 // Skip the arguments adaptor frame if it exists.
3815 Label check_frame_marker;
3816 __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
3817 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3818 __ j(not_equal, &check_frame_marker);
3819 __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
3821 // Check the marker in the calling frame.
3822 __ bind(&check_frame_marker);
3823 __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
3824 Smi::FromInt(StackFrame::CONSTRUCT));
3825 fp.Unuse();
3826 destination()->Split(equal);
}
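// Editor's sketch (illustrative, not part of the original source): the
// code above walks one frame up and compares frame markers. Roughly, in
// C-like pseudocode (field names follow StandardFrameConstants):
//
//   fp = Memory[rbp + kCallerFPOffset];
//   if (Memory[fp + kContextOffset] == Smi(ARGUMENTS_ADAPTOR))
//     fp = Memory[fp + kCallerFPOffset];     // skip the adaptor frame
//   result = (Memory[fp + kMarkerOffset] == Smi(CONSTRUCT));
//
// so %_IsConstructCall() is true exactly when the caller was invoked
// via 'new'.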
3830 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
3831 ASSERT(args->length() == 0);
3833 Result fp = allocator_->Allocate();
3834 Result result = allocator_->Allocate();
3835 ASSERT(fp.is_valid() && result.is_valid());
Label exit;
3839 // Get the number of formal parameters.
3840 __ Move(result.reg(), Smi::FromInt(scope()->num_parameters()));
3842 // Check if the calling frame is an arguments adaptor frame.
3843 __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3844 __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
3845 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3846 __ j(not_equal, &exit);
3848 // Arguments adaptor case: Read the arguments length from the
3849 // adaptor frame.
3850 __ movq(result.reg(),
3851 Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
__ bind(&exit);
3854 result.set_type_info(TypeInfo::Smi());
3855 if (FLAG_debug_code) {
3856 __ AbortIfNotSmi(result.reg(), "Computed arguments.length is not a smi.");
3857 }
3858 frame_->Push(&result);
}
3862 void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
3863 Comment(masm_, "[ GenerateFastCharCodeAt");
3864 ASSERT(args->length() == 2);
Label slow_case;
Label end;
3868 Label not_a_flat_string;
3869 Label try_again_with_new_string;
3870 Label ascii_string;
3871 Label got_char_code;
Load(args->at(0));
Load(args->at(1));
3875 Result index = frame_->Pop();
3876 Result object = frame_->Pop();
3878 // Get register rcx to use as shift amount later.
3879 Result shift_amount;
3880 if (object.is_register() && object.reg().is(rcx)) {
3881 Result fresh = allocator_->Allocate();
3882 shift_amount = object;
3883 object = fresh;
3884 __ movq(object.reg(), rcx);
3885 }
3886 if (index.is_register() && index.reg().is(rcx)) {
3887 Result fresh = allocator_->Allocate();
3888 shift_amount = index;
3889 index = fresh;
3890 __ movq(index.reg(), rcx);
3891 }
3892 // There could be references to rcx in the frame. Allocating will
3893 // spill them, otherwise spill explicitly.
3894 if (shift_amount.is_valid()) {
frame_->Spill(rcx);
} else {
3897 shift_amount = allocator()->Allocate(rcx);
}
3899 ASSERT(shift_amount.is_register());
3900 ASSERT(shift_amount.reg().is(rcx));
3901 ASSERT(allocator_->count(rcx) == 1);
3903 // We will mutate the index register and possibly the object register.
3904 // The case where they are somehow the same register is handled
3905 // because we only mutate them in the case where the receiver is a
3906 // heap object and the index is not.
3907 object.ToRegister();
3908 index.ToRegister();
3909 frame_->Spill(object.reg());
3910 frame_->Spill(index.reg());
3912 // We need a single extra temporary register.
3913 Result temp = allocator()->Allocate();
3914 ASSERT(temp.is_valid());
3916 // There is no virtual frame effect from here up to the final result
// push.
3919 // If the receiver is a smi trigger the slow case.
3920 __ JumpIfSmi(object.reg(), &slow_case);
3922 // If the index is negative or non-smi trigger the slow case.
3923 __ JumpIfNotPositiveSmi(index.reg(), &slow_case);
3926 __ SmiToInteger32(index.reg(), index.reg());
3928 __ bind(&try_again_with_new_string);
3929 // Fetch the instance type of the receiver into rcx.
3930 __ movq(rcx, FieldOperand(object.reg(), HeapObject::kMapOffset));
3931 __ movzxbl(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
3932 // If the receiver is not a string trigger the slow case.
3933 __ testb(rcx, Immediate(kIsNotStringMask));
3934 __ j(not_zero, &slow_case);
3936 // Check for index out of range.
3937 __ cmpl(index.reg(), FieldOperand(object.reg(), String::kLengthOffset));
3938 __ j(greater_equal, &slow_case);
3939 // Reload the instance type (into the temp register this time).
3940 __ movq(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
3941 __ movzxbl(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
3943 // We need special handling for non-flat strings.
3944 ASSERT_EQ(0, kSeqStringTag);
3945 __ testb(temp.reg(), Immediate(kStringRepresentationMask));
3946 __ j(not_zero, &not_a_flat_string);
3947 // Check for 1-byte or 2-byte string.
3948 ASSERT_EQ(0, kTwoByteStringTag);
3949 __ testb(temp.reg(), Immediate(kStringEncodingMask));
3950 __ j(not_zero, &ascii_string);
3953 // Load the 2-byte character code into the temp register.
3954 __ movzxwl(temp.reg(), FieldOperand(object.reg(),
3955 index.reg(),
3956 times_2,
3957 SeqTwoByteString::kHeaderSize));
3958 __ jmp(&got_char_code);
3961 __ bind(&ascii_string);
3962 // Load the byte into the temp register.
3963 __ movzxbl(temp.reg(), FieldOperand(object.reg(),
3964 index.reg(),
3965 times_1,
3966 SeqAsciiString::kHeaderSize));
3967 __ bind(&got_char_code);
3968 __ Integer32ToSmi(temp.reg(), temp.reg());
__ jmp(&end);
3971 // Handle non-flat strings.
3972 __ bind(&not_a_flat_string);
3973 __ and_(temp.reg(), Immediate(kStringRepresentationMask));
3974 __ cmpb(temp.reg(), Immediate(kConsStringTag));
3975 __ j(not_equal, &slow_case);
3978 // Check that the right hand side is the empty string (i.e. if this is really a
3979 // flat string in a cons string). If that is not the case we would rather go
3980 // to the runtime system now, to flatten the string.
3981 __ movq(temp.reg(), FieldOperand(object.reg(), ConsString::kSecondOffset));
3982 __ CompareRoot(temp.reg(), Heap::kEmptyStringRootIndex);
3983 __ j(not_equal, &slow_case);
3984 // Get the first of the two strings.
3985 __ movq(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
3986 __ jmp(&try_again_with_new_string);
3988 __ bind(&slow_case);
3989 // Move the undefined value into the result register, which will
3990 // trigger the slow case.
3991 __ LoadRoot(temp.reg(), Heap::kUndefinedValueRootIndex);
__ bind(&end);
3994 frame_->Push(&temp);
}
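// Editor's sketch (illustrative, not part of the original source): the
// retry loop above unwraps cons strings whose right half is empty, e.g.
// the result of repeated str += "" patterns. In pseudocode:
//
//   while (true) {
//     if (is_seq_string(obj)) return obj[index];   // flat: direct load
//     if (!is_cons_string(obj)) break;             // other kinds -> slow
//     if (obj->second != "") break;                // needs flattening
//     obj = obj->first;                            // unwrap and retry
//   }
//   return undefined;  // tells the caller to take the runtime path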
3998 void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
3999 Comment(masm_, "[ GenerateCharFromCode");
4000 ASSERT(args->length() == 1);
Load(args->at(0));
4003 Result code = frame_->Pop();
4004 code.ToRegister();
4005 ASSERT(code.is_valid());
4007 Result temp = allocator()->Allocate();
4008 ASSERT(temp.is_valid());
4010 JumpTarget slow_case;
JumpTarget exit;
4013 // Fast case of Heap::LookupSingleCharacterStringFromCode.
4014 Condition is_smi = __ CheckSmi(code.reg());
4015 slow_case.Branch(NegateCondition(is_smi), &code, not_taken);
4017 __ SmiToInteger32(kScratchRegister, code.reg());
4018 __ cmpl(kScratchRegister, Immediate(String::kMaxAsciiCharCode));
4019 slow_case.Branch(above, &code, not_taken);
4021 __ Move(temp.reg(), Factory::single_character_string_cache());
4022 __ movq(temp.reg(), FieldOperand(temp.reg(),
4023 kScratchRegister, times_pointer_size,
4024 FixedArray::kHeaderSize));
4025 __ CompareRoot(temp.reg(), Heap::kUndefinedValueRootIndex);
4026 slow_case.Branch(equal, &code, not_taken);
code.Unuse();
4029 frame_->Push(&temp);
exit.Jump();
4032 slow_case.Bind(&code);
4033 frame_->Push(&code);
4034 Result result = frame_->CallRuntime(Runtime::kCharFromCode, 1);
4035 frame_->Push(&result);
exit.Bind();
}
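// Editor's note (illustrative, not from the original source): the fast
// path above is a table lookup. Roughly, in JavaScript terms:
//
//   // cache[i] holds the 1-char string for code i, or undefined
//   var s = single_character_string_cache[code];
//   return s !== undefined ? s : %CharFromCode(code);
//
// Only codes up to String::kMaxAsciiCharCode are cached; everything else
// (or a non-smi code) falls through to Runtime::kCharFromCode.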
4041 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
4042 ASSERT(args->length() == 1);
4043 Load(args->at(0));
4044 Result value = frame_->Pop();
4045 value.ToRegister();
4046 ASSERT(value.is_valid());
4047 Condition positive_smi = masm_->CheckPositiveSmi(value.reg());
4048 value.Unuse();
4049 destination()->Split(positive_smi);
4053 // Generates the Math.pow method - currently just calls runtime.
4054 void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
4055 ASSERT(args->length() == 2);
4056 Load(args->at(0));
4057 Load(args->at(1));
4058 Result res = frame_->CallRuntime(Runtime::kMath_pow, 2);
frame_->Push(&res);
}
4063 // Generates the Math.sqrt method - currently just calls runtime.
4064 void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
4065 ASSERT(args->length() == 1);
4066 Load(args->at(0));
4067 Result res = frame_->CallRuntime(Runtime::kMath_sqrt, 1);
frame_->Push(&res);
}
4072 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
4073 ASSERT(args->length() == 1);
4074 Load(args->at(0));
4075 Result value = frame_->Pop();
4076 value.ToRegister();
4077 ASSERT(value.is_valid());
4078 Condition is_smi = masm_->CheckSmi(value.reg());
4079 value.Unuse();
4080 destination()->Split(is_smi);
4084 void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
4085 // Conditionally generate a log call.
4086 // Args:
4087 // 0 (literal string): The type of logging (corresponds to the flags).
4088 // This is used to determine whether or not to generate the log call.
4089 // 1 (string): Format string. Access the string at argument index 2
4090 // with '%2s' (see Logger::LogRuntime for all the formats).
4091 // 2 (array): Arguments to the format string.
4092 ASSERT_EQ(args->length(), 3);
4093 #ifdef ENABLE_LOGGING_AND_PROFILING
4094 if (ShouldGenerateLog(args->at(0))) {
4095 Load(args->at(1));
4096 Load(args->at(2));
4097 frame_->CallRuntime(Runtime::kLog, 2);
4098 }
4099 #endif
4100 // Finally, we're expected to leave a value on the top of the stack.
4101 frame_->Push(Factory::undefined_value());
}
4105 void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
4106 ASSERT(args->length() == 2);
4108 // Load the two objects into registers and perform the comparison.
4109 Load(args->at(0));
4110 Load(args->at(1));
4111 Result right = frame_->Pop();
4112 Result left = frame_->Pop();
4113 right.ToRegister();
4114 left.ToRegister();
4115 __ cmpq(right.reg(), left.reg());
4116 right.Unuse();
4117 left.Unuse();
4118 destination()->Split(equal);
}
4122 void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
4123 ASSERT(args->length() == 0);
4124 // The RBP value is word-aligned, so its low bit is clear and it can be
4125 // tagged as a smi without shifting (it is not a real smi, though, and
// must not be used as one).
4126 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
4127 Result rbp_as_smi = allocator_->Allocate();
4128 ASSERT(rbp_as_smi.is_valid());
4129 __ movq(rbp_as_smi.reg(), rbp);
4130 frame_->Push(&rbp_as_smi);
}
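// Editor's note (illustrative, not part of the original source): with
// kSmiTag == 0 and a one-bit tag, any word-aligned value already carries
// a valid smi tag. For example:
//
//   rbp = 0x00007fff5fbff8a0   // 16-byte aligned, low bit is 0
//   rbp & kSmiTagMask == 0     // passes a smi check unchanged
//
// The value is not a meaningful smi, but pushing it disguised as one
// keeps the GC from treating the raw frame pointer as a heap pointer.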
4134 void CodeGenerator::GenerateRandomHeapNumber(
4135 ZoneList<Expression*>* args) {
4136 ASSERT(args->length() == 0);
4139 Label slow_allocate_heapnumber;
4140 Label heapnumber_allocated;
4141 __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber);
4142 __ jmp(&heapnumber_allocated);
4144 __ bind(&slow_allocate_heapnumber);
4145 // To allocate a heap number, and ensure that it is not a smi, we
4146 // call the runtime function Runtime::kNumberUnaryMinus on 0, returning
4147 // the double -0.0. A new, distinct heap number is returned each time.
4148 __ Push(Smi::FromInt(0));
4149 __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
__ movq(rbx, rax);
4152 __ bind(&heapnumber_allocated);
4154 // Return a random uint32 number in rax.
4155 // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
4156 __ PrepareCallCFunction(0);
4157 __ CallCFunction(ExternalReference::random_uint32_function(), 0);
4159 // Convert 32 random bits in eax to 0.(32 random bits) in a double
// by computing:
4161 // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
4162 __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
4163 __ movd(xmm1, rcx);
4164 __ movd(xmm0, rax);
4165 __ cvtss2sd(xmm1, xmm1);
4166 __ xorpd(xmm0, xmm1);
4167 __ subsd(xmm0, xmm1);
4168 __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rbx);
4171 Result result = allocator_->Allocate(rax);
4172 frame_->Push(&result);
}
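// Editor's worked example (illustrative, not part of the original
// source): the double 1.0 x 2^20 has bit pattern 0x4130000000000000, so
// XORing 32 random bits into its low mantissa and subtracting yields a
// value in [0, 1):
//
//   uint64_t bits = 0x4130000000000000ULL ^ random32;  // 1.f x 2^20
//   double d = bit_cast<double>(bits) - 1048576.0;     // 0.f, f random
//
// All 2^32 low-mantissa patterns are equally likely, so d is uniform on
// [0, 1) with 32 bits of precision.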
4176 void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
4177 ASSERT_EQ(args->length(), 4);
4179 // Load the arguments on the stack and call the runtime system.
4180 Load(args->at(0));
4181 Load(args->at(1));
4182 Load(args->at(2));
4183 Load(args->at(3));
4184 RegExpExecStub stub;
4185 Result result = frame_->CallStub(&stub, 4);
4186 frame_->Push(&result);
}
4190 void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
4191 // No stub. This code only occurs a few times in regexp.js.
4192 const int kMaxInlineLength = 100;
4193 ASSERT_EQ(3, args->length());
4194 Load(args->at(0)); // Size of array, smi.
4195 Load(args->at(1)); // "index" property value.
4196 Load(args->at(2)); // "input" property value.
4198 VirtualFrame::SpilledScope spilled_scope;
Label slowcase;
Label done;
4202 __ movq(r8, Operand(rsp, kPointerSize * 2));
4203 __ JumpIfNotSmi(r8, &slowcase);
4204 __ SmiToInteger32(rbx, r8);
4205 __ cmpl(rbx, Immediate(kMaxInlineLength));
4206 __ j(above, &slowcase);
4207 // On ia32 smi-tagging is equivalent to multiplying by 2; on x64 smis
// are shifted by kSmiShift instead, so the length was untagged into rbx
// above before being used as an element count.
4208 STATIC_ASSERT(kSmiTag == 0);
4209 STATIC_ASSERT(kSmiTagSize == 1);
4210 // Allocate RegExpResult followed by FixedArray with size in ebx.
4211 // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
4212 // Elements: [Map][Length][..elements..]
4213 __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
4214 times_pointer_size,
4215 rbx, // In: Number of elements.
4216 rax, // Out: Start of allocation (tagged).
4217 rcx, // Out: End of allocation.
4218 rdx, // Scratch register
4219 &slowcase,
4220 TAG_OBJECT);
4221 // rax: Start of allocated area, object-tagged.
4222 // rbx: Number of array elements as int32.
4223 // r8: Number of array elements as smi.
4225 // Set JSArray map to global.regexp_result_map().
4226 __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_INDEX));
4227 __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
4228 __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
4229 __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
4231 // Set empty properties FixedArray.
4232 __ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
4233 Factory::empty_fixed_array());
4235 // Set elements to point to FixedArray allocated right after the JSArray.
4236 __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
4237 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
4239 // Set input, index and length fields from arguments.
4240 __ pop(FieldOperand(rax, JSRegExpResult::kInputOffset));
4241 __ pop(FieldOperand(rax, JSRegExpResult::kIndexOffset));
4242 __ lea(rsp, Operand(rsp, kPointerSize));
4243 __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
4245 // Fill out the elements FixedArray.
4248 // rbx: Number of elements in array as int32.
4251 __ Move(FieldOperand(rcx, HeapObject::kMapOffset),
4252 Factory::fixed_array_map());
4254 __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rbx);
4255 // Fill contents of fixed-array with the-hole.
4256 __ Move(rdx, Factory::the_hole_value());
4257 __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
4258 // Fill fixed array elements with hole.
4260 // rbx: Number of elements in array that remains to be filled, as int32.
4261 // rcx: Start of elements in FixedArray.
Label loop;
__ testl(rbx, rbx);
__ bind(&loop);
4266 __ j(less_equal, &done); // Jump if rbx is negative or zero.
4267 __ subl(rbx, Immediate(1));
4268 __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
__ jmp(&loop);
__ bind(&slowcase);
4272 __ CallRuntime(Runtime::kRegExpConstructResult, 3);
__ bind(&done);
frame_->Forget(3);
frame_->Push(rax);
}
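// Editor's sketch (illustrative, not from the original source): the
// fill loop above is simply
//
//   while (rbx > 0) {
//     rbx--;
//     elements[rbx] = the_hole;   // rcx: elements base, rdx: the hole
//   }
//
// walking downward so the loop condition and the index share rbx.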
4281 class DeferredSearchCache: public DeferredCode {
4282 public:
4283 DeferredSearchCache(Register dst, Register cache, Register key)
4284 : dst_(dst), cache_(cache), key_(key) {
4285 set_comment("[ DeferredSearchCache");
4286 }
4288 virtual void Generate();
private:
4291 Register dst_, cache_, key_;
};
4295 void DeferredSearchCache::Generate() {
4296 __ push(cache_);
4297 __ push(key_);
4298 __ CallRuntime(Runtime::kGetFromCache, 2);
4299 if (!dst_.is(rax)) {
__ movq(dst_, rax);
}
}
4305 void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
4306 ASSERT_EQ(2, args->length());
4308 ASSERT_NE(NULL, args->at(0)->AsLiteral());
4309 int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
4311 Handle<FixedArray> jsfunction_result_caches(
4312 Top::global_context()->jsfunction_result_caches());
4313 if (jsfunction_result_caches->length() <= cache_id) {
4314 __ Abort("Attempt to use undefined cache.");
4315 frame_->Push(Factory::undefined_value());
4316 return;
4317 }
Load(args->at(1));
4320 Result key = frame_->Pop();
key.ToRegister();
4323 Result cache = allocator()->Allocate();
4324 ASSERT(cache.is_valid());
4325 __ movq(cache.reg(), ContextOperand(rsi, Context::GLOBAL_INDEX));
4326 __ movq(cache.reg(),
4327 FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
4328 __ movq(cache.reg(),
4329 ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
4330 __ movq(cache.reg(),
4331 FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
4333 Result tmp = allocator()->Allocate();
4334 ASSERT(tmp.is_valid());
4336 DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
cache.reg(),
key.reg());
4340 const int kFingerOffset =
4341 FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
4342 // tmp.reg() now holds finger offset as a smi.
4343 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
4344 __ movq(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
4346 masm()->SmiToIndex(kScratchRegister, tmp.reg(), kPointerSizeLog2);
4347 __ cmpq(key.reg(), FieldOperand(cache.reg(),
4348 kScratchRegister,
4349 times_1,
4350 FixedArray::kHeaderSize));
4351 deferred->Branch(not_equal);
4353 __ movq(tmp.reg(), FieldOperand(cache.reg(),
4354 kScratchRegister,
4355 times_1,
4356 kPointerSize + FixedArray::kHeaderSize));
4358 deferred->BindExit();
frame_->Push(&tmp);
}
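// Editor's note (illustrative, not part of the original source): a
// JSFunctionResultCache is a FixedArray of (key, value) pairs plus a
// "finger" remembering the last hit:
//
//   [ ..., finger, ..., key0, value0, key1, value1, ... ]
//
// The inline fast path only probes the entry under the finger:
//
//   if (cache[finger] == key) return cache[finger + 1];
//   return runtime GetFromCache(cache, key);   // deferred code above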
4363 void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
4364 ASSERT_EQ(args->length(), 1);
4366 // Load the argument on the stack and jump to the runtime.
Load(args->at(0));
4369 NumberToStringStub stub;
4370 Result result = frame_->CallStub(&stub, 1);
4371 frame_->Push(&result);
4375 void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
4376 Comment cmnt(masm_, "[ GenerateCallFunction");
4378 ASSERT(args->length() >= 2);
4380 int n_args = args->length() - 2; // for receiver and function.
4381 Load(args->at(0)); // receiver
4382 for (int i = 0; i < n_args; i++) {
4383 Load(args->at(i + 1));
4384 }
4385 Load(args->at(n_args + 1)); // function
4386 Result result = frame_->CallJSFunction(n_args);
4387 frame_->Push(&result);
}
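// Editor's sketch (illustrative, not from the original source): the
// loads above arrange the stack for CallJSFunction as
//
//   [ receiver, arg1, ..., argN, function ]   <- top of stack
//
// which is the layout %_CallFunction(receiver, args..., fn) produces in
// JavaScript, e.g. %_CallFunction(obj, x, y, obj.method).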
4391 void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
4392 ASSERT_EQ(args->length(), 1);
4393 // Load the argument on the stack and jump to the runtime.
4394 Load(args->at(0));
4395 Result answer = frame_->CallRuntime(Runtime::kMath_sin, 1);
4396 frame_->Push(&answer);
4400 void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
4401 ASSERT_EQ(args->length(), 1);
4402 // Load the argument on the stack and jump to the runtime.
4403 Load(args->at(0));
4404 Result answer = frame_->CallRuntime(Runtime::kMath_cos, 1);
4405 frame_->Push(&answer);
4409 void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
4410 ASSERT_EQ(2, args->length());
Load(args->at(0));
Load(args->at(1));
4415 StringAddStub stub(NO_STRING_ADD_FLAGS);
4416 Result answer = frame_->CallStub(&stub, 2);
4417 frame_->Push(&answer);
4421 void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
4422 ASSERT_EQ(3, args->length());
Load(args->at(0));
Load(args->at(1));
Load(args->at(2));
SubStringStub stub;
4429 Result answer = frame_->CallStub(&stub, 3);
4430 frame_->Push(&answer);
4434 void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
4435 ASSERT_EQ(2, args->length());
Load(args->at(0));
Load(args->at(1));
4440 StringCompareStub stub;
4441 Result answer = frame_->CallStub(&stub, 2);
4442 frame_->Push(&answer);
4446 void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
4447 ASSERT(args->length() == 1);
4448 JumpTarget leave, null, function, non_function_constructor;
4449 Load(args->at(0)); // Load the object.
4450 Result obj = frame_->Pop();
4451 obj.ToRegister();
4452 frame_->Spill(obj.reg());
4454 // If the object is a smi, we return null.
4455 Condition is_smi = masm_->CheckSmi(obj.reg());
4456 null.Branch(is_smi);
4458 // Check that the object is a JS object but take special care of JS
4459 // functions to make sure they have 'Function' as their class.
4461 __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
null.Branch(below);
4464 // As long as JS_FUNCTION_TYPE is the last instance type and it is
4465 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
4466 // LAST_JS_OBJECT_TYPE.
4467 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
4468 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
4469 __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
4470 function.Branch(equal);
4472 // Check if the constructor in the map is a function.
4473 __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
4474 __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
4475 non_function_constructor.Branch(not_equal);
4477 // The obj register now contains the constructor function. Grab the
4478 // instance class name from there.
4479 __ movq(obj.reg(),
4480 FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
4481 __ movq(obj.reg(),
4482 FieldOperand(obj.reg(),
4483 SharedFunctionInfo::kInstanceClassNameOffset));
frame_->Push(&obj);
leave.Jump();
4487 // Functions have class 'Function'.
4488 function.Bind();
4489 frame_->Push(Factory::function_class_symbol());
leave.Jump();
4492 // Objects with a non-function constructor have class 'Object'.
4493 non_function_constructor.Bind();
4494 frame_->Push(Factory::Object_symbol());
leave.Jump();
4497 // Non-JS objects have class null.
4498 null.Bind();
4499 frame_->Push(Factory::null_value());
leave.Bind();
}
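// Editor's note (illustrative, not part of the original source): the
// exits above correspond to
//
//   %_ClassOf(5)            -> null       (smis and non-JS objects)
//   %_ClassOf(function(){}) -> 'Function' (special-cased above)
//   %_ClassOf([])           -> 'Array'    (constructor's instance class
//                                          name from shared info)
//
// and objects whose map has a non-function constructor fall back to
// 'Object'.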
4506 void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
4507 ASSERT(args->length() == 2);
4508 JumpTarget leave;
4509 Load(args->at(0)); // Load the object.
4510 Load(args->at(1)); // Load the value.
4511 Result value = frame_->Pop();
4512 Result object = frame_->Pop();
4514 object.ToRegister();
4516 // if (object->IsSmi()) return value.
4517 Condition is_smi = masm_->CheckSmi(object.reg());
4518 leave.Branch(is_smi, &value);
4520 // It is a heap object - get its map.
4521 Result scratch = allocator_->Allocate();
4522 ASSERT(scratch.is_valid());
4523 // if (!object->IsJSValue()) return value.
4524 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
4525 leave.Branch(not_equal, &value);
4528 __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
4529 // Update the write barrier. Save the value as it will be
4530 // overwritten by the write barrier code and is needed afterward.
4531 Result duplicate_value = allocator_->Allocate();
4532 ASSERT(duplicate_value.is_valid());
4533 __ movq(duplicate_value.reg(), value.reg());
4534 // The object register is also overwritten by the write barrier and
4535 // possibly aliased in the frame.
4536 frame_->Spill(object.reg());
4537 __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
scratch.reg());
4541 duplicate_value.Unuse();
leave.Bind(&value);
4545 frame_->Push(&value);
}
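// Editor's note (illustrative, not part of the original source):
// RecordWrite above is the generational write barrier. Conceptually:
//
//   object->value = new_value;         // the movq just performed
//   if (object is in old space)        // new_value may be in new space
//     remember this slot for the next scavenge;
//
// Both the object and value registers are clobbered by the barrier,
// which is why the value was duplicated beforehand.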
4549 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
4550 ASSERT(args->length() == 1);
4551 JumpTarget leave;
4552 Load(args->at(0)); // Load the object.
4553 frame_->Dup();
4554 Result object = frame_->Pop();
4555 object.ToRegister();
4556 ASSERT(object.is_valid());
4557 // if (object->IsSmi()) return object.
4558 Condition is_smi = masm_->CheckSmi(object.reg());
4559 leave.Branch(is_smi);
4560 // It is a heap object - get map.
4561 Result temp = allocator()->Allocate();
4562 ASSERT(temp.is_valid());
4563 // if (!object->IsJSValue()) return object.
4564 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
4565 leave.Branch(not_equal);
4566 __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
4567 object.Unuse();
4568 frame_->SetElementAt(0, &temp);
leave.Bind();
}
4573 // -----------------------------------------------------------------------------
4574 // CodeGenerator implementation of Expressions
4576 void CodeGenerator::LoadAndSpill(Expression* expression) {
4577 // TODO(x64): No architecture specific code. Move to shared location.
4578 ASSERT(in_spilled_code());
4579 set_in_spilled_code(false);
4580 Load(expression);
4581 frame_->SpillAll();
4582 set_in_spilled_code(true);
}
4586 void CodeGenerator::Load(Expression* expr) {
4587 #ifdef DEBUG
4588 int original_height = frame_->height();
4589 #endif
4590 ASSERT(!in_spilled_code());
4591 JumpTarget true_target;
4592 JumpTarget false_target;
4593 ControlDestination dest(&true_target, &false_target, true);
4594 LoadCondition(expr, &dest, false);
4596 if (dest.false_was_fall_through()) {
4597 // The false target was just bound.
4598 JumpTarget loaded;
4599 frame_->Push(Factory::false_value());
4600 // There may be dangling jumps to the true target.
4601 if (true_target.is_linked()) {
4602 loaded.Jump();
4603 true_target.Bind();
4604 frame_->Push(Factory::true_value());
4605 loaded.Bind();
4606 }
4608 } else if (dest.is_used()) {
4609 // There is true, and possibly false, control flow (with true as
4610 // the fall through).
4611 JumpTarget loaded;
4612 frame_->Push(Factory::true_value());
4613 if (false_target.is_linked()) {
4614 loaded.Jump();
4615 false_target.Bind();
4616 frame_->Push(Factory::false_value());
4617 loaded.Bind();
4618 }
4620 } else {
4621 // We have a valid value on top of the frame, but we still may
4622 // have dangling jumps to the true and false targets from nested
4623 // subexpressions (eg, the left subexpressions of the
4624 // short-circuited boolean operators).
4625 ASSERT(has_valid_frame());
4626 if (true_target.is_linked() || false_target.is_linked()) {
4627 JumpTarget loaded;
4628 loaded.Jump(); // Don't lose the current TOS.
4629 if (true_target.is_linked()) {
4630 true_target.Bind();
4631 frame_->Push(Factory::true_value());
4632 if (false_target.is_linked()) {
4633 loaded.Jump();
4634 }
4635 }
4636 if (false_target.is_linked()) {
4637 false_target.Bind();
4638 frame_->Push(Factory::false_value());
4639 }
4640 loaded.Bind();
4641 }
4642 }
4644 ASSERT(has_valid_frame());
4645 ASSERT(frame_->height() == original_height + 1);
4646 }
4649 // Emit code to load the value of an expression to the top of the
4650 // frame. If the expression is boolean-valued it may be compiled (or
4651 // partially compiled) into control flow to the control destination.
4652 // If force_control is true, control flow is forced.
4653 void CodeGenerator::LoadCondition(Expression* x,
4654 ControlDestination* dest,
4655 bool force_control) {
4656 ASSERT(!in_spilled_code());
4657 int original_height = frame_->height();
4659 { CodeGenState new_state(this, dest);
4660 Visit(x);
4662 // If we hit a stack overflow, we may not have actually visited
4663 // the expression. In that case, we ensure that we have a
4664 // valid-looking frame state because we will continue to generate
4665 // code as we unwind the C++ stack.
4667 // It's possible to have both a stack overflow and a valid frame
4668 // state (eg, a subexpression overflowed, visiting it returned
4669 // with a dummied frame state, and visiting this expression
4670 // returned with a normal-looking state).
4671 if (HasStackOverflow() &&
4672 !dest->is_used() &&
4673 frame_->height() == original_height) {
dest->Goto(true);
}
}
4678 if (force_control && !dest->is_used()) {
4679 // Convert the TOS value into flow to the control destination.
4680 // TODO(X64): Make control flow to control destinations work.
ToBoolean(dest);
}
4684 ASSERT(!(force_control && !dest->is_used()));
4685 ASSERT(dest->is_used() || frame_->height() == original_height + 1);
}
4689 // ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
4690 // convert it to a boolean in the condition code register or jump to
4691 // 'false_target'/'true_target' as appropriate.
4692 void CodeGenerator::ToBoolean(ControlDestination* dest) {
4693 Comment cmnt(masm_, "[ ToBoolean");
4695 // The value to convert should be popped from the frame.
4696 Result value = frame_->Pop();
4699 if (value.is_number()) {
4700 Comment cmnt(masm_, "ONLY_NUMBER");
4701 // Fast case if TypeInfo indicates only numbers.
4702 if (FLAG_debug_code) {
4703 __ AbortIfNotNumber(value.reg(), "ToBoolean operand is not a number.");
4705 // Smi => false iff zero.
4706 __ SmiCompare(value.reg(), Smi::FromInt(0));
4707 dest->false_target()->Branch(equal);
4708 Condition is_smi = masm_->CheckSmi(value.reg());
4709 dest->true_target()->Branch(is_smi);
4711 __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
4714 dest->Split(not_zero);
4716 // Fast case checks.
4717 // 'false' => false.
4718 __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
4719 dest->false_target()->Branch(equal);
// 'true' => true.
4722 __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
4723 dest->true_target()->Branch(equal);
4725 // 'undefined' => false.
4726 __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
4727 dest->false_target()->Branch(equal);
4729 // Smi => false iff zero.
4730 __ SmiCompare(value.reg(), Smi::FromInt(0));
4731 dest->false_target()->Branch(equal);
4732 Condition is_smi = masm_->CheckSmi(value.reg());
4733 dest->true_target()->Branch(is_smi);
4735 // Call the stub for all other cases.
4736 frame_->Push(&value); // Undo the Pop() from above.
4737 ToBooleanStub stub;
4738 Result temp = frame_->CallStub(&stub, 1);
4739 // Convert the result to a condition code.
4740 __ testq(temp.reg(), temp.reg());
4741 temp.Unuse();
4742 dest->Split(not_equal);
}
}
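// Editor's note (illustrative, not part of the original source): the
// checks above implement ECMA-262 ToBoolean. In JavaScript terms:
//
//   false, 0, '', null, undefined, NaN  -> false
//   everything else                     -> true
//
// Only the oddballs and smi zero are decided fully inline in the
// general case; strings, heap numbers, and other objects reach the
// ToBooleanStub.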
4747 void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
4748 UNIMPLEMENTED();
4749 // TODO(X64): Implement security policy for loads of smis.
}
4753 bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
return false;
}
4757 //------------------------------------------------------------------------------
4758 // CodeGenerator implementation of variables, lookups, and stores.
4760 Reference::Reference(CodeGenerator* cgen,
4761 Expression* expression,
4762 bool persist_after_get)
4763 : cgen_(cgen),
4764 expression_(expression),
4765 type_(ILLEGAL),
4766 persist_after_get_(persist_after_get) {
4767 cgen->LoadReference(this);
}
4771 Reference::~Reference() {
4772 ASSERT(is_unloaded() || is_illegal());
}
4776 void CodeGenerator::LoadReference(Reference* ref) {
4777 // References are loaded from both spilled and unspilled code. Set the
4778 // state to unspilled to allow that (and explicitly spill after
4779 // construction at the construction sites).
4780 bool was_in_spilled_code = in_spilled_code_;
4781 in_spilled_code_ = false;
4783 Comment cmnt(masm_, "[ LoadReference");
4784 Expression* e = ref->expression();
4785 Property* property = e->AsProperty();
4786 Variable* var = e->AsVariableProxy()->AsVariable();
4788 if (property != NULL) {
4789 // The expression is either a property or a variable proxy that rewrites
4790 // to a property.
4791 Load(property->obj());
4792 if (property->key()->IsPropertyName()) {
4793 ref->set_type(Reference::NAMED);
4794 } else {
4795 Load(property->key());
4796 ref->set_type(Reference::KEYED);
4797 }
4798 } else if (var != NULL) {
4799 // The expression is a variable proxy that does not rewrite to a
4800 // property. Global variables are treated as named property references.
4801 if (var->is_global()) {
4802 LoadGlobal();
4803 ref->set_type(Reference::NAMED);
4804 } else {
4805 ASSERT(var->slot() != NULL);
4806 ref->set_type(Reference::SLOT);
4807 }
4808 } else {
4809 // Anything else is a runtime error.
4810 Load(e);
4811 frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
}
4814 in_spilled_code_ = was_in_spilled_code;
}
4818 void CodeGenerator::UnloadReference(Reference* ref) {
4819 // Pop a reference from the stack while preserving TOS.
4820 Comment cmnt(masm_, "[ UnloadReference");
4821 frame_->Nip(ref->size());
4822 ref->set_unloaded();
}
4826 Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
4827 // Currently, this assertion will fail if we try to assign to
4828 // a constant variable that is constant because it is read-only
4829 // (such as the variable referring to a named function expression).
4830 // We need to implement assignments to read-only variables.
4831 // Ideally, we should do this during AST generation (by converting
4832 // such assignments into expression statements); however, in general
4833 // we may not be able to make the decision until past AST generation,
4834 // that is when the entire program is known.
4835 ASSERT(slot != NULL);
4836 int index = slot->index();
4837 switch (slot->type()) {
4838 case Slot::PARAMETER:
4839 return frame_->ParameterAt(index);
4841 case Slot::LOCAL:
4842 return frame_->LocalAt(index);
4844 case Slot::CONTEXT: {
4845 // Follow the context chain if necessary.
4846 ASSERT(!tmp.is(rsi)); // do not overwrite context register
4847 Register context = rsi;
4848 int chain_length = scope()->ContextChainLength(slot->var()->scope());
4849 for (int i = 0; i < chain_length; i++) {
4850 // Load the closure.
4851 // (All contexts, even 'with' contexts, have a closure,
4852 // and it is the same for all contexts inside a function.
4853 // There is no need to go to the function context first.)
4854 __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
4855 // Load the function context (which is the incoming, outer context).
4856 __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
4859 // We may have a 'with' context now. Get the function context.
4860 // (In fact this mov may never be needed, since the scope analysis
4861 // may not permit a direct context access in this case and thus we are
4862 // always at a function context. However it is safe to dereference be-
4863 // cause the function context of a function context is itself. Before
4864 // deleting this mov we should try to create a counter-example first,
4865 // though...)
4866 __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
4867 return ContextOperand(tmp, index);
}
default:
UNREACHABLE();
4872 return Operand(rsp, 0);
}
}
4877 Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
4878 Result tmp,
4879 JumpTarget* slow) {
4880 ASSERT(slot->type() == Slot::CONTEXT);
4881 ASSERT(tmp.is_register());
4882 Register context = rsi;
4884 for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
4885 if (s->num_heap_slots() > 0) {
4886 if (s->calls_eval()) {
4887 // Check that extension is NULL.
4888 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
4889 Immediate(0));
4890 slow->Branch(not_equal, not_taken);
4891 }
4892 __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
4893 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
4894 context = tmp.reg();
4895 }
4896 }
4897 // Check that last extension is NULL.
4898 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
4899 slow->Branch(not_equal, not_taken);
4900 __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
4901 return ContextOperand(tmp.reg(), slot->index());
}
4905 void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
4906 if (slot->type() == Slot::LOOKUP) {
4907 ASSERT(slot->var()->is_dynamic());
JumpTarget slow;
JumpTarget done;
Result value;
4913 // Generate fast-case code for variables that might be shadowed by
4914 // eval-introduced variables. Eval is used a lot without
4915 // introducing variables. In those cases, we do not want to
4916 // perform a runtime call for all variables in the scope
4917 // containing the eval.
4918 if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
4919 value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
4920 // If there was no control flow to slow, we can exit early.
4921 if (!slow.is_linked()) {
4922 frame_->Push(&value);
4923 return;
4924 }
done.Jump(&value);
4928 } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
4929 Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
4930 // Only generate the fast case for locals that rewrite to slots.
4931 // This rules out argument loads.
4932 if (potential_slot != NULL) {
4933 // Allocate a fresh register to use as a temp in
4934 // ContextSlotOperandCheckExtensions and to hold the result
4935 // value.
4936 value = allocator_->Allocate();
4937 ASSERT(value.is_valid());
4938 __ movq(value.reg(),
4939 ContextSlotOperandCheckExtensions(potential_slot,
4940 value,
4941 &slow));
4942 if (potential_slot->var()->mode() == Variable::CONST) {
4943 __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
4944 done.Branch(not_equal, &value);
4945 __ LoadRoot(value.reg(), Heap::kUndefinedValueRootIndex);
4946 }
4947 // There is always control flow to slow from
4948 // ContextSlotOperandCheckExtensions so we have to jump around
// it.
done.Jump(&value);
}
}
4955 // A runtime call is inevitable. We eagerly sync frame elements
4956 // to memory so that we can push the arguments directly into place
4957 // on top of the frame.
4958 frame_->SyncRange(0, frame_->element_count() - 1);
4959 frame_->EmitPush(rsi);
4960 __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
4961 frame_->EmitPush(kScratchRegister);
4962 if (typeof_state == INSIDE_TYPEOF) {
4963 value =
4964 frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
4965 } else {
4966 value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
}
done.Bind(&value);
4970 frame_->Push(&value);
4972 } else if (slot->var()->mode() == Variable::CONST) {
4973 // Const slots may contain 'the hole' value (the constant hasn't been
4974 // initialized yet) which needs to be converted into the 'undefined'
4975 // value.
4977 // We currently spill the virtual frame because constants use the
4978 // potentially unsafe direct-frame access of SlotOperand.
4979 VirtualFrame::SpilledScope spilled_scope;
4980 Comment cmnt(masm_, "[ Load const");
4981 JumpTarget exit;
4982 __ movq(rcx, SlotOperand(slot, rcx));
4983 __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
4984 exit.Branch(not_equal);
4985 __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
4986 exit.Bind();
4987 frame_->EmitPush(rcx);
4989 } else if (slot->type() == Slot::PARAMETER) {
4990 frame_->PushParameterAt(slot->index());
4992 } else if (slot->type() == Slot::LOCAL) {
4993 frame_->PushLocalAt(slot->index());
} else {
4996 // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
4997 // here.
4999 // The use of SlotOperand below is safe for an unspilled frame
5000 // because it will always be a context slot.
5001 ASSERT(slot->type() == Slot::CONTEXT);
5002 Result temp = allocator_->Allocate();
5003 ASSERT(temp.is_valid());
5004 __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
5005 frame_->Push(&temp);
}
}
5010 void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
5011 TypeofState state) {
5012 LoadFromSlot(slot, state);
5014 // Bail out quickly if we're not using lazy arguments allocation.
5015 if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
5017 // ... or if the slot isn't a non-parameter arguments slot.
5018 if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
5020 // Pop the loaded value from the stack.
5021 Result value = frame_->Pop();
5023 // If the loaded value is a constant, we know if the arguments
5024 // object has been lazily loaded yet.
5025 if (value.is_constant()) {
5026 if (value.handle()->IsTheHole()) {
5027 Result arguments = StoreArgumentsObject(false);
5028 frame_->Push(&arguments);
5029 } else {
5030 frame_->Push(&value);
5031 }
5032 return;
5033 }
5035 // The loaded value is in a register. If it is the sentinel that
5036 // indicates that we haven't loaded the arguments object yet, we
5037 // need to do it now.
5038 JumpTarget exit;
5039 __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
5040 frame_->Push(&value);
5041 exit.Branch(not_equal);
5042 Result arguments = StoreArgumentsObject(false);
5043 frame_->SetElementAt(0, &arguments);
exit.Bind();
}
5048 void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
5049 if (slot->type() == Slot::LOOKUP) {
5050 ASSERT(slot->var()->is_dynamic());
5052 // For now, just do a runtime call. Since the call is inevitable,
5053 // we eagerly sync the virtual frame so we can directly push the
5054 // arguments into place.
5055 frame_->SyncRange(0, frame_->element_count() - 1);
5057 frame_->EmitPush(rsi);
5058 frame_->EmitPush(slot->var()->name());
Result value;
5061 if (init_state == CONST_INIT) {
5062 // Same as the case for a normal store, but ignores attribute
5063 // (e.g. READ_ONLY) of context slot so that we can initialize const
5064 // properties (introduced via eval("const foo = (some expr);")). Also,
5065 // uses the current function context instead of the top context.
5067 // Note that we must declare the foo upon entry of eval(), via a
5068 // context slot declaration, but we cannot initialize it at the same
5069 // time, because the const declaration may be at the end of the eval
5070 // code (sigh...) and the const variable may have been used before
5071 // (where its value is 'undefined'). Thus, we can only do the
5072 // initialization when we actually encounter the expression and when
5073 // the expression operands are defined and valid, and thus we need the
5074 // split into 2 operations: declaration of the context slot followed
5075 // by initialization.
5076 value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
5077 } else {
5078 value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
5079 }
5080 // Storing a variable must keep the (new) value on the expression
5081 // stack. This is necessary for compiling chained assignment
5082 // expressions.
5083 frame_->Push(&value);
5085 ASSERT(!slot->var()->is_dynamic());
JumpTarget exit;
5088 if (init_state == CONST_INIT) {
5089 ASSERT(slot->var()->mode() == Variable::CONST);
5090 // Only the first const initialization must be executed (the slot
5091 // still contains 'the hole' value). When the assignment is executed,
5092 // the code is identical to a normal store (see below).
5094 // We spill the frame in the code below because the direct-frame
5095 // access of SlotOperand is potentially unsafe with an unspilled
5096 // frame.
5097 VirtualFrame::SpilledScope spilled_scope;
5098 Comment cmnt(masm_, "[ Init const");
5099 __ movq(rcx, SlotOperand(slot, rcx));
5100 __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
5101 exit.Branch(not_equal);
5102 }
5104 // We must execute the store. Storing a variable must keep the (new)
5105 // value on the stack. This is necessary for compiling assignment
// expressions.
5108 // Note: We will reach here even with slot->var()->mode() ==
5109 // Variable::CONST because of const declarations which will initialize
5110 // consts to 'the hole' value and by doing so, end up calling this code.
5111 if (slot->type() == Slot::PARAMETER) {
5112 frame_->StoreToParameterAt(slot->index());
5113 } else if (slot->type() == Slot::LOCAL) {
5114 frame_->StoreToLocalAt(slot->index());
5115 } else {
5116 // The other slot types (LOOKUP and GLOBAL) cannot reach here.
5118 // The use of SlotOperand below is safe for an unspilled frame
5119 // because the slot is a context slot.
5120 ASSERT(slot->type() == Slot::CONTEXT);
5121 frame_->Dup();
5122 Result value = frame_->Pop();
5123 value.ToRegister();
5124 Result start = allocator_->Allocate();
5125 ASSERT(start.is_valid());
5126 __ movq(SlotOperand(slot, start.reg()), value.reg());
5127 // RecordWrite may destroy the value registers.
5129 // TODO(204): Avoid actually spilling when the value is not
5130 // needed (probably the common case).
5131 frame_->Spill(value.reg());
5132 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
5133 Result temp = allocator_->Allocate();
5134 ASSERT(temp.is_valid());
5135 __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
5136 // The results start, value, and temp are unused by going out of
// scope.
}
exit.Bind();
}
5145 Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
5146 Slot* slot,
5147 TypeofState typeof_state,
5148 JumpTarget* slow) {
5149 // Check that no extension objects have been created by calls to
5150 // eval from the current scope to the global scope.
5151 Register context = rsi;
5152 Result tmp = allocator_->Allocate();
5153 ASSERT(tmp.is_valid()); // All non-reserved registers were available.
Scope* s = scope();
while (s != NULL) {
5157 if (s->num_heap_slots() > 0) {
5158 if (s->calls_eval()) {
5159 // Check that extension is NULL.
5160 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
5161 Immediate(0));
5162 slow->Branch(not_equal, not_taken);
5163 }
5164 // Load next context in chain.
5165 __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
5166 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
5167 context = tmp.reg();
5168 }
5169 // If no outer scope calls eval, we do not need to check more
5170 // context extensions. If we have reached an eval scope, we check
5171 // all extensions from this point.
5172 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
5173 s = s->outer_scope();
5174 }
5176 if (s->is_eval_scope()) {
5177 // Loop up the context chain. There is no frame effect so it is
5178 // safe to use raw labels here.
5179 Label next, fast;
5180 if (!context.is(tmp.reg())) {
5181 __ movq(tmp.reg(), context);
5182 }
5183 // Load map for comparison into register, outside loop.
5184 __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
5185 __ bind(&next);
5186 // Terminate at global context.
5187 __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
5188 __ j(equal, &fast);
5189 // Check that extension is NULL.
5190 __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
5191 slow->Branch(not_equal);
5192 // Load next context in chain.
5193 __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
5194 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
__ jmp(&next);
__ bind(&fast);
}
5200 // All extension objects were empty and it is safe to use a global
5201 // load IC call.
5202 LoadGlobal();
5203 frame_->Push(slot->var()->name());
5204 RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
5205 ? RelocInfo::CODE_TARGET
5206 : RelocInfo::CODE_TARGET_CONTEXT;
5207 Result answer = frame_->CallLoadIC(mode);
5208 // A test rax instruction following the call signals that the inobject
5209 // property case was inlined. Ensure that there is not a test rax
5210 // instruction here.
5211 __ nop();
5212 // Discard the global object. The result is in answer.
frame_->Drop();
return answer;
}
5218 void CodeGenerator::LoadGlobal() {
5219 if (in_spilled_code()) {
5220 frame_->EmitPush(GlobalObject());
5221 } else {
5222 Result temp = allocator_->Allocate();
5223 __ movq(temp.reg(), GlobalObject());
5224 frame_->Push(&temp);
}
}
5229 void CodeGenerator::LoadGlobalReceiver() {
5230 Result temp = allocator_->Allocate();
5231 Register reg = temp.reg();
5232 __ movq(reg, GlobalObject());
5233 __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
5234 frame_->Push(&temp);
}
5238 ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
5239 if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
5240 ASSERT(scope()->arguments_shadow() != NULL);
5241 // We don't want to do lazy arguments allocation for functions that
5242 // have heap-allocated contexts, because it interferes with the
5243 // uninitialized const tracking in the context objects.
5244 return (scope()->num_heap_slots() > 0)
5245 ? EAGER_ARGUMENTS_ALLOCATION
5246 : LAZY_ARGUMENTS_ALLOCATION;
}
5250 Result CodeGenerator::StoreArgumentsObject(bool initial) {
5251 ArgumentsAllocationMode mode = ArgumentsMode();
5252 ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
5254 Comment cmnt(masm_, "[ store arguments object");
5255 if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
5256 // When using lazy arguments allocation, we store the hole value
5257 // as a sentinel indicating that the arguments object hasn't been
5258 // allocated yet.
5259 frame_->Push(Factory::the_hole_value());
5260 } else {
5261 ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
5262 frame_->PushFunction();
5263 frame_->PushReceiverSlotAddress();
5264 frame_->Push(Smi::FromInt(scope()->num_parameters()));
5265 Result result = frame_->CallStub(&stub, 3);
5266 frame_->Push(&result);
}
5270 Variable* arguments = scope()->arguments()->var();
5271 Variable* shadow = scope()->arguments_shadow()->var();
5272 ASSERT(arguments != NULL && arguments->slot() != NULL);
5273 ASSERT(shadow != NULL && shadow->slot() != NULL);
5274 JumpTarget done;
5275 bool skip_arguments = false;
5276 if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
5277 // We have to skip storing into the arguments slot if it has
5278 // already been written to. This can happen if the function
5279 // has a local variable named 'arguments'.
5280 LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
5281 Result probe = frame_->Pop();
5282 if (probe.is_constant()) {
5283 // We have to skip updating the arguments object if it has been
5284 // assigned a proper value.
5285 skip_arguments = !probe.handle()->IsTheHole();
5286 } else {
5287 __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
5288 probe.Unuse();
5289 done.Branch(not_equal);
5290 }
5291 }
5292 if (!skip_arguments) {
5293 StoreToSlot(arguments->slot(), NOT_CONST_INIT);
5294 if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
5295 }
5296 StoreToSlot(shadow->slot(), NOT_CONST_INIT);
5297 return frame_->Pop();
}
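// Editor's example (illustrative, not part of the original source):
// lazy allocation matters for code like
//
//   function f(x) {
//     if (x) return arguments;   // sentinel replaced by a real object
//     return 0;                  // arguments object never materialized
//   }
//
// The hole stored above is the sentinel; LoadFromSlotCheckForArguments
// swaps in a real arguments object on first use.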
5301 void CodeGenerator::LoadTypeofExpression(Expression* expr) {
5302 // Special handling of identifiers as subexpressions of typeof.
5303 Variable* variable = expr->AsVariableProxy()->AsVariable();
5304 if (variable != NULL && !variable->is_this() && variable->is_global()) {
5305 // For a global variable we build the property reference
5306 // <global>.<variable> and perform a (regular non-contextual) property
5307 // load to make sure we do not get reference errors.
5308 Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
5309 Literal key(variable->name());
5310 Property property(&global, &key, RelocInfo::kNoPosition);
5311 Reference ref(this, &property);
5312 ref.GetValue();
5313 } else if (variable != NULL && variable->slot() != NULL) {
5314 // For a variable that rewrites to a slot, we signal it is the immediate
5315 // subexpression of a typeof.
5316 LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
5317 } else {
5318 // Anything else can be handled normally.
Load(expr);
}
}
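// Editor's example (illustrative, not part of the original source):
// the property-load path above is what makes
//
//   typeof not_declared_anywhere   // 'undefined', no ReferenceError
//
// safe: a normal variable load of an undeclared global would throw,
// while the <global>.<name> property load just yields undefined.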
5324 static bool CouldBeNaN(const Result& result) {
5325 if (result.type_info().IsSmi()) return false;
5326 if (result.type_info().IsInteger32()) return false;
5327 if (!result.is_constant()) return true;
5328 if (!result.handle()->IsHeapNumber()) return false;
5329 return isnan(HeapNumber::cast(*result.handle())->value());
}
5333 // Convert from signed to unsigned comparison to match the way EFLAGS are set
5334 // by FPU and XMM compare instructions.
5335 static Condition DoubleCondition(Condition cc) {
5336 switch (cc) {
5337 case less: return below;
5338 case equal: return equal;
5339 case less_equal: return below_equal;
5340 case greater: return above;
5341 case greater_equal: return above_equal;
5342 default: UNREACHABLE();
}
UNREACHABLE();
return equal;
}
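// Editor's note (illustrative, not part of the original source):
// ucomisd sets EFLAGS the way an unsigned integer compare would, so the
// signed condition codes must be remapped:
//
//   a <  b          -> CF=1          -> below (not 'less': SF/OF unset)
//   a == b          -> ZF=1          -> equal
//   unordered (NaN) -> ZF=PF=CF=1, caught separately via parity_even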
5349 void CodeGenerator::Comparison(AstNode* node,
5350 Condition cc,
5351 bool strict,
5352 ControlDestination* dest) {
5353 // Strict only makes sense for equality comparisons.
5354 ASSERT(!strict || cc == equal);
Result left_side;
Result right_side;
5358 // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
5359 if (cc == greater || cc == less_equal) {
5360 cc = ReverseCondition(cc);
5361 left_side = frame_->Pop();
5362 right_side = frame_->Pop();
5363 } else {
5364 right_side = frame_->Pop();
5365 left_side = frame_->Pop();
5366 }
5367 ASSERT(cc == less || cc == equal || cc == greater_equal);
5369 // If either side is a constant smi, optimize the comparison.
5370 bool left_side_constant_smi = false;
5371 bool left_side_constant_null = false;
5372 bool left_side_constant_1_char_string = false;
5373 if (left_side.is_constant()) {
5374 left_side_constant_smi = left_side.handle()->IsSmi();
5375 left_side_constant_null = left_side.handle()->IsNull();
5376 left_side_constant_1_char_string =
5377 (left_side.handle()->IsString() &&
5378 String::cast(*left_side.handle())->length() == 1 &&
5379 String::cast(*left_side.handle())->IsAsciiRepresentation());
5380 }
5381 bool right_side_constant_smi = false;
5382 bool right_side_constant_null = false;
5383 bool right_side_constant_1_char_string = false;
5384 if (right_side.is_constant()) {
5385 right_side_constant_smi = right_side.handle()->IsSmi();
5386 right_side_constant_null = right_side.handle()->IsNull();
5387 right_side_constant_1_char_string =
5388 (right_side.handle()->IsString() &&
5389 String::cast(*right_side.handle())->length() == 1 &&
5390 String::cast(*right_side.handle())->IsAsciiRepresentation());
5391 }
5393 if (left_side_constant_smi || right_side_constant_smi) {
5394 if (left_side_constant_smi && right_side_constant_smi) {
5395 // Trivial case, comparing two constants.
5396 int left_value = Smi::cast(*left_side.handle())->value();
5397 int right_value = Smi::cast(*right_side.handle())->value();
5398 switch (cc) {
5399 case less:
5400 dest->Goto(left_value < right_value);
5401 break;
5402 case equal:
5403 dest->Goto(left_value == right_value);
5404 break;
5405 case greater_equal:
5406 dest->Goto(left_value >= right_value);
break;
default:
UNREACHABLE();
}
5412 // Only one side is a constant Smi.
5413 // If left side is a constant Smi, reverse the operands.
5414 // Since one side is a constant Smi, conversion order does not matter.
5415 if (left_side_constant_smi) {
5416 Result temp = left_side;
5417 left_side = right_side;
5418 right_side = temp;
5419 cc = ReverseCondition(cc);
5420 // This may re-introduce greater or less_equal as the value of cc.
5421 // CompareStub and the inline code both support all values of cc.
5422 }
5423 // Implement comparison against a constant Smi, inlining the case
5424 // where both sides are Smis.
5425 left_side.ToRegister();
5426 Register left_reg = left_side.reg();
5427 Handle<Object> right_val = right_side.handle();
5429 // Here we split control flow to the stub call and inlined cases
5430 // before finally splitting it to the control destination. We use
5431 // a jump target and branching to duplicate the virtual frame at
5432 // the first split. We manually handle the off-frame references
5433 // by reconstituting them on the non-fall-through path.
JumpTarget is_smi;
5436 Condition left_is_smi = masm_->CheckSmi(left_side.reg());
5437 is_smi.Branch(left_is_smi);
5439 bool is_loop_condition = (node->AsExpression() != NULL) &&
5440 node->AsExpression()->is_loop_condition();
5441 if (!is_loop_condition && right_val->IsSmi()) {
5442 // Right side is a constant smi and left side has been checked
5443 // not to be a smi.
5444 JumpTarget not_number;
5445 __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
5446 Factory::heap_number_map());
5447 not_number.Branch(not_equal, &left_side);
5448 __ movsd(xmm1,
5449 FieldOperand(left_reg, HeapNumber::kValueOffset));
5450 int value = Smi::cast(*right_val)->value();
5451 if (value == 0) {
5452 __ xorpd(xmm0, xmm0);
5453 } else {
5454 Result temp = allocator()->Allocate();
5455 __ movl(temp.reg(), Immediate(value));
5456 __ cvtlsi2sd(xmm0, temp.reg());
5457 temp.Unuse();
5458 }
5459 __ ucomisd(xmm1, xmm0);
5460 // Jump to builtin for NaN.
5461 not_number.Branch(parity_even, &left_side);
5462 left_side.Unuse();
5463 dest->true_target()->Branch(DoubleCondition(cc));
5464 dest->false_target()->Jump();
5465 not_number.Bind(&left_side);
5466 }
5468 // Setup and call the compare stub.
5469 CompareStub stub(cc, strict, kCantBothBeNaN);
5470 Result result = frame_->CallStub(&stub, &left_side, &right_side);
5471 result.ToRegister();
5472 __ testq(result.reg(), result.reg());
5473 result.Unuse();
5474 dest->true_target()->Branch(cc);
5475 dest->false_target()->Jump();
5477 is_smi.Bind();
5478 left_side = Result(left_reg);
5479 right_side = Result(right_val);
5480 // Test smi equality and comparison by signed int comparison.
5481 // Both sides are smis, so we can use an Immediate.
5482 __ SmiCompare(left_side.reg(), Smi::cast(*right_side.handle()));
5483 left_side.Unuse();
5484 right_side.Unuse();
5485 dest->Split(cc);
5486 }
5487 } else if (cc == equal &&
5488 (left_side_constant_null || right_side_constant_null)) {
5489 // To make null checks efficient, we check if either the left side or
5490 // the right side is the constant 'null'.
5491 // If so, we optimize the code by inlining a null check instead of
5492 // calling the (very) general runtime routine for checking equality.
5493 Result operand = left_side_constant_null ? right_side : left_side;
5494 right_side.Unuse();
5495 left_side.Unuse();
5496 operand.ToRegister();
5497 __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
5498 if (strict) {
5499 operand.Unuse();
5500 dest->Split(equal);
5501 } else {
5502 // The 'null' value is only equal to 'undefined' if using non-strict
5503 // comparisons.
5504 dest->true_target()->Branch(equal);
5505 __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
5506 dest->true_target()->Branch(equal);
5507 Condition is_smi = masm_->CheckSmi(operand.reg());
5508 dest->false_target()->Branch(is_smi);
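// What follows handles "undetectable" objects: host objects marked with
// Map::kIsUndetectable (e.g. document.all in browsers) that must compare
// equal to both 'null' and 'undefined' under non-strict equality even
// though they are neither.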
5510 // It can be an undetectable object.
5511 // Use a scratch register in preference to spilling operand.reg().
5512 Result temp = allocator()->Allocate();
5513 ASSERT(temp.is_valid());
5514 __ movq(temp.reg(),
5515 FieldOperand(operand.reg(), HeapObject::kMapOffset));
5516 __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
5517 Immediate(1 << Map::kIsUndetectable));
5518 temp.Unuse();
5519 operand.Unuse();
5520 dest->Split(not_zero);
5521 }
5522 } else if (left_side_constant_1_char_string ||
5523 right_side_constant_1_char_string) {
5524 if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
5525 // Trivial case, comparing two constants.
5526 int left_value = String::cast(*left_side.handle())->Get(0);
5527 int right_value = String::cast(*right_side.handle())->Get(0);
5528 switch (cc) {
5529 case less:
5530 dest->Goto(left_value < right_value);
5531 break;
5532 case equal:
5533 dest->Goto(left_value == right_value);
5534 break;
5535 case greater_equal:
5536 dest->Goto(left_value >= right_value);
5537 break;
5538 default:
5539 UNREACHABLE();
5540 }
5541 } else {
5542 // Only one side is a constant 1 character string.
5543 // If left side is a constant 1-character string, reverse the operands.
5544 // Since one side is a constant string, conversion order does not matter.
5545 if (left_side_constant_1_char_string) {
5546 Result temp = left_side;
5547 left_side = right_side;
5548 right_side = temp;
5549 cc = ReverseCondition(cc);
5550 // This may reintroduce greater or less_equal as the value of cc.
5551 // CompareStub and the inline code both support all values of cc.
5552 }
5553 // Implement comparison against a constant string, inlining the case
5554 // where both sides are strings.
5555 left_side.ToRegister();
5557 // Here we split control flow to the stub call and inlined cases
5558 // before finally splitting it to the control destination. We use
5559 // a jump target and branching to duplicate the virtual frame at
5560 // the first split. We manually handle the off-frame references
5561 // by reconstituting them on the non-fall-through path.
5562 JumpTarget is_not_string, is_string;
5563 Register left_reg = left_side.reg();
5564 Handle<Object> right_val = right_side.handle();
5565 ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
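// Symbols are interned, so two symbols with the same characters are the
// same object; the fast path below can therefore test equality with a
// plain pointer comparison instead of a character-by-character compare.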
5566 Condition is_smi = masm()->CheckSmi(left_reg);
5567 is_not_string.Branch(is_smi, &left_side);
5568 Result temp = allocator_->Allocate();
5569 ASSERT(temp.is_valid());
5570 __ movq(temp.reg(),
5571 FieldOperand(left_reg, HeapObject::kMapOffset));
5572 __ movzxbl(temp.reg(),
5573 FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
5574 // If we are testing for equality then make use of the symbol shortcut.
5575 // Check if the left hand side has the same type as the right hand
5576 // side (which is always a symbol).
5577 if (cc == equal) {
5578 Label not_a_symbol;
5579 ASSERT(kSymbolTag != 0);
5580 // Ensure that no non-strings have the symbol bit set.
5581 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
5582 __ testb(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
5583 __ j(zero, &not_a_symbol);
5584 // They are symbols, so do identity compare.
5585 __ Cmp(left_reg, right_side.handle());
5586 dest->true_target()->Branch(equal);
5587 dest->false_target()->Branch(not_equal);
5588 __ bind(&not_a_symbol);
5589 }
5590 // Call the compare stub if the left side is not a flat ascii string.
5591 __ andb(temp.reg(),
5592 Immediate(kIsNotStringMask |
5593 kStringRepresentationMask |
5594 kStringEncodingMask));
5595 __ cmpb(temp.reg(),
5596 Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
5597 temp.Unuse();
5598 is_string.Branch(equal, &left_side);
5600 // Setup and call the compare stub.
5601 is_not_string.Bind(&left_side);
5602 CompareStub stub(cc, strict, kCantBothBeNaN);
5603 Result result = frame_->CallStub(&stub, &left_side, &right_side);
5604 result.ToRegister();
5605 __ SmiCompare(result.reg(), Smi::FromInt(0));
5607 dest->true_target()->Branch(cc);
5608 dest->false_target()->Jump();
5610 is_string.Bind(&left_side);
5611 // left_side is a sequential ASCII string.
5612 ASSERT(left_side.reg().is(left_reg));
5613 right_side = Result(right_val);
5614 Result temp2 = allocator_->Allocate();
5615 ASSERT(temp2.is_valid());
5616 // Test string equality and comparison.
5617 if (cc == equal) {
5618 Label comparison_done;
5619 __ cmpl(FieldOperand(left_side.reg(), String::kLengthOffset),
5620 Immediate(1));
5621 __ j(not_equal, &comparison_done);
5622 uint8_t char_value =
5623 static_cast<uint8_t>(String::cast(*right_val)->Get(0));
5624 __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
5625 Immediate(char_value));
5626 __ bind(&comparison_done);
5627 } else {
5628 __ movl(temp2.reg(),
5629 FieldOperand(left_side.reg(), String::kLengthOffset));
5630 __ subl(temp2.reg(), Immediate(1));
5631 Label comparison;
5632 // If the length is 0 then the subtraction gave -1 which compares less
5633 // than any character.
5634 __ j(negative, &comparison);
5635 // Otherwise load the first character.
5636 __ movzxbl(temp2.reg(),
5637 FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
5638 __ bind(&comparison);
5639 // Compare the first character of the string with the
5640 // constant 1-character string.
5641 uint8_t char_value =
5642 static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
5643 __ cmpb(temp2.reg(), Immediate(char_value));
5644 Label characters_were_different;
5645 __ j(not_equal, &characters_were_different);
5646 // If the first character is the same then the long string sorts after
5647 // the short one.
5648 __ cmpl(FieldOperand(left_side.reg(), String::kLengthOffset),
5649 Immediate(1));
5650 __ bind(&characters_were_different);
5651 }
5652 temp2.Unuse();
5653 left_side.Unuse();
5654 right_side.Unuse();
5655 dest->Split(cc);
5656 }
5657 } else {
5658 // Neither side is a constant Smi, constant 1-char string, or constant null.
5659 // If either side is a non-smi constant, skip the smi check.
5660 bool known_non_smi =
5661 (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
5662 (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
5663 left_side.type_info().IsDouble() ||
5664 right_side.type_info().IsDouble();
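// If either side is a constant that is not a smi, or is statically known
// to hold a double, the dynamic smi check would always fail, so the code
// below can go straight to the non-smi (heap number / stub) path.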
5666 NaNInformation nan_info =
5667 (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
5668 kBothCouldBeNaN :
5669 kCantBothBeNaN;
5671 // Inline number comparison handling any combination of smi's and heap
5672 // numbers if:
5673 // code is in a loop
5674 // the compare operation is different from equal
5675 // compare is not a for-loop comparison
5676 // The reason for excluding equal is that it will most likely be done
5677 // with smi's (not heap numbers), and the code for comparing smi's is
5678 // inlined separately. The same reason applies to for-loop comparisons,
5679 // which will also most likely be smi comparisons.
5680 bool is_loop_condition = (node->AsExpression() != NULL)
5681 && node->AsExpression()->is_loop_condition();
5682 bool inline_number_compare =
5683 loop_nesting() > 0 && cc != equal && !is_loop_condition;
5685 left_side.ToRegister();
5686 right_side.ToRegister();
5688 if (known_non_smi) {
5689 // Inlined equality check:
5690 // If at least one of the objects is not NaN, then if the objects
5691 // are identical, they are equal.
5692 if (nan_info == kCantBothBeNaN && cc == equal) {
5693 __ cmpq(left_side.reg(), right_side.reg());
5694 dest->true_target()->Branch(equal);
5695 }
5697 // Inlined number comparison:
5698 if (inline_number_compare) {
5699 GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
5700 }
5702 // Call the compare stub.
5703 // TODO(whesse@chromium.org): Enable the inlining flag once
5704 // GenerateInlineNumberComparison is implemented.
5705 CompareStub stub(cc, strict, nan_info, true || !inline_number_compare);
5706 Result answer = frame_->CallStub(&stub, &left_side, &right_side);
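// The stub's answer is a smi whose sign encodes the comparison outcome:
// negative for left < right, zero for equal, positive for left > right.
// Testing it (see below) therefore sets the flags so that the original
// condition cc can be branched on directly.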
5707 // The result is a Smi, which is negative, zero, or positive.
5708 __ SmiTest(answer.reg()); // Sets both zero and sign flag.
5709 answer.Unuse();
5710 dest->Split(cc);
5711 } else {
5712 // Here we split control flow to the stub call and inlined cases
5713 // before finally splitting it to the control destination. We use
5714 // a jump target and branching to duplicate the virtual frame at
5715 // the first split. We manually handle the off-frame references
5716 // by reconstituting them on the non-fall-through path.
5717 JumpTarget is_smi;
5718 Register left_reg = left_side.reg();
5719 Register right_reg = right_side.reg();
5721 Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
5722 is_smi.Branch(both_smi);
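// On x64 a smi is the 32-bit value shifted into the upper half of the
// word, so two tagged smis compare (as signed 64-bit integers) in the same
// order as the integers they encode; the is_smi path below compares the
// tagged registers directly without untagging.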
5724 // Inline the equality check if both operands can't be a NaN. If both
5725 // objects are the same they are equal.
5726 if (nan_info == kCantBothBeNaN && cc == equal) {
5727 __ cmpq(left_side.reg(), right_side.reg());
5728 dest->true_target()->Branch(equal);
5729 }
5731 // Inlined number comparison:
5732 if (inline_number_compare) {
5733 GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
5734 }
5736 // Call the compare stub.
5737 // TODO(whesse@chromium.org): Enable the inlining flag once
5738 // GenerateInlineNumberComparison is implemented.
5739 CompareStub stub(cc, strict, nan_info, true || !inline_number_compare);
5740 Result answer = frame_->CallStub(&stub, &left_side, &right_side);
5741 __ SmiTest(answer.reg()); // Sets both zero and sign flags.
5742 answer.Unuse();
5743 dest->true_target()->Branch(cc);
5744 dest->false_target()->Jump();
5746 is_smi.Bind();
5747 left_side = Result(left_reg);
5748 right_side = Result(right_reg);
5749 __ SmiCompare(left_side.reg(), right_side.reg());
5750 right_side.Unuse();
5751 left_side.Unuse();
5752 dest->Split(cc);
5753 }
5754 }
5755 }
5758 void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
5759 Result* right_side,
5760 Condition cc,
5761 ControlDestination* dest) {
5762 ASSERT(left_side->is_register());
5763 ASSERT(right_side->is_register());
5764 // TODO(whesse@chromium.org): Implement this function, and enable the
5765 // corresponding flags in the CompareStub.
5766 }
5769 class DeferredInlineBinaryOperation: public DeferredCode {
5770 public:
5771 DeferredInlineBinaryOperation(Token::Value op,
5772 Register dst,
5773 Register left,
5774 Register right,
5775 OverwriteMode mode)
5776 : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
5777 set_comment("[ DeferredInlineBinaryOperation");
5778 }
5780 virtual void Generate();
5782 private:
5783 Token::Value op_;
5784 Register dst_;
5785 Register left_;
5786 Register right_;
5787 OverwriteMode mode_;
5788 };
5791 void DeferredInlineBinaryOperation::Generate() {
5792 GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
5793 stub.GenerateCall(masm_, left_, right_);
5794 if (!dst_.is(rax)) __ movq(dst_, rax);
5795 }
5798 static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
5799 Token::Value op,
5800 const Result& right,
5801 const Result& left) {
5802 // Set TypeInfo of result according to the operation performed.
5803 // We rely on the fact that smis have a 32 bit payload on x64.
5804 STATIC_ASSERT(kSmiValueSize == 32);
5805 switch (op) {
5806 case Token::COMMA:
5807 return right.type_info();
5808 case Token::OR:
5809 case Token::AND:
5810 // Result type can be either of the two input types.
5811 return operands_type;
5812 case Token::BIT_OR:
5813 case Token::BIT_XOR:
5814 case Token::BIT_AND:
5815 // Result is always a smi.
5816 return TypeInfo::Smi();
5817 case Token::SAR:
5818 case Token::SHL:
5819 // Result is always a smi.
5820 return TypeInfo::Smi();
5821 case Token::SHR:
5822 // Result of x >>> y is always a smi if masked y >= 1, otherwise a number.
5823 return (right.is_constant() && right.handle()->IsSmi()
5824 && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
5825 ? TypeInfo::Smi()
5826 : TypeInfo::Number();
5827 case Token::ADD:
5828 if (operands_type.IsNumber()) {
5829 return TypeInfo::Number();
5830 } else if (left.type_info().IsString() || right.type_info().IsString()) {
5831 return TypeInfo::String();
5832 } else {
5833 return TypeInfo::Unknown();
5834 }
5835 case Token::SUB:
5836 case Token::MUL:
5837 case Token::DIV:
5838 case Token::MOD:
5839 // Result is always a number.
5840 return TypeInfo::Number();
5841 default:
5842 UNREACHABLE();
5843 }
5844 UNREACHABLE();
5845 return TypeInfo::Unknown();
5846 }
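// Worked example for the SHR rule above: with a 32-bit smi payload,
// 'x >>> 1' has at most 31 significant result bits and always fits in a
// smi, while 'x >>> 0' can produce values up to 2^32 - 1 and therefore has
// to be classified as a general number.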
5849 void CodeGenerator::GenericBinaryOperation(Token::Value op,
5850 StaticType* type,
5851 OverwriteMode overwrite_mode) {
5852 Comment cmnt(masm_, "[ BinaryOperation");
5853 Comment cmnt_token(masm_, Token::String(op));
5855 if (op == Token::COMMA) {
5856 // Simply discard left value.
5857 frame_->Nip(1);
5858 return;
5859 }
5861 Result right = frame_->Pop();
5862 Result left = frame_->Pop();
5864 if (op == Token::ADD) {
5865 const bool left_is_string = left.type_info().IsString();
5866 const bool right_is_string = right.type_info().IsString();
5867 // Make sure constant strings have string type info.
5868 ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
5869 left.type_info().IsString());
5870 ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
5871 right.type_info().IsString());
5872 if (left_is_string || right_is_string) {
5873 frame_->Push(&left);
5874 frame_->Push(&right);
5875 Result answer;
5876 if (left_is_string) {
5877 if (right_is_string) {
5878 // TODO(lrn): if both are constant strings
5879 // -- do a compile time cons, if allocation during codegen is allowed.
5880 StringAddStub stub(NO_STRING_CHECK_IN_STUB);
5881 answer = frame_->CallStub(&stub, 2);
5882 } else {
5883 answer =
5884 frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
5885 }
5886 } else if (right_is_string) {
5887 answer =
5888 frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
5889 }
5890 answer.set_type_info(TypeInfo::String());
5891 frame_->Push(&answer);
5892 return;
5893 }
5894 // Neither operand is known to be a string.
5895 }
5897 bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
5898 bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
5899 bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
5900 bool right_is_non_smi_constant =
5901 right.is_constant() && !right.handle()->IsSmi();
5903 if (left_is_smi_constant && right_is_smi_constant) {
5904 // Compute the constant result at compile time, and leave it on the frame.
5905 int left_int = Smi::cast(*left.handle())->value();
5906 int right_int = Smi::cast(*right.handle())->value();
5907 if (FoldConstantSmis(op, left_int, right_int)) return;
5908 }
5910 // Get number type of left and right sub-expressions.
5911 TypeInfo operands_type =
5912 TypeInfo::Combine(left.type_info(), right.type_info());
5914 TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
5916 Result answer;
5917 if (left_is_non_smi_constant || right_is_non_smi_constant) {
5918 GenericBinaryOpStub stub(op,
5919 overwrite_mode,
5920 NO_SMI_CODE_IN_STUB,
5921 operands_type);
5922 answer = stub.GenerateCall(masm_, frame_, &left, &right);
5923 } else if (right_is_smi_constant) {
5924 answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
5925 type, false, overwrite_mode);
5926 } else if (left_is_smi_constant) {
5927 answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
5928 type, true, overwrite_mode);
5929 } else {
5930 // Set the flags based on the operation, type and loop nesting level.
5931 // Bit operations always assume they likely operate on Smis. Still only
5932 // generate the inline Smi check code if this operation is part of a loop.
5933 // For all other operations only inline the Smi check code for likely smis
5934 // if the operation is part of a loop.
5935 if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
5936 answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
5937 } else {
5938 GenericBinaryOpStub stub(op,
5939 overwrite_mode,
5940 NO_GENERIC_BINARY_FLAGS,
5941 operands_type);
5942 answer = stub.GenerateCall(masm_, frame_, &left, &right);
5943 }
5944 }
5946 answer.set_type_info(result_type);
5947 frame_->Push(&answer);
5948 }
5951 // Emit a LoadIC call to get the value from receiver and leave it in
5952 // dst. The receiver register is restored after the call.
5953 class DeferredReferenceGetNamedValue: public DeferredCode {
5954 public:
5955 DeferredReferenceGetNamedValue(Register dst,
5956 Register receiver,
5957 Handle<String> name)
5958 : dst_(dst), receiver_(receiver), name_(name) {
5959 set_comment("[ DeferredReferenceGetNamedValue");
5960 }
5962 virtual void Generate();
5964 Label* patch_site() { return &patch_site_; }
5966 private:
5967 Label patch_site_;
5968 Register dst_;
5969 Register receiver_;
5970 Handle<String> name_;
5971 };
5974 void DeferredReferenceGetNamedValue::Generate() {
5975 __ push(receiver_);
5976 __ Move(rcx, name_);
5977 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
5978 __ Call(ic, RelocInfo::CODE_TARGET);
5979 // The call must be followed by a test rax instruction to indicate
5980 // that the inobject property case was inlined.
5982 // Store the delta to the map check instruction here in the test
5983 // instruction. Use masm_-> instead of the __ macro since the
5984 // latter can't return a value.
5985 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
5986 // Here we use masm_-> instead of the __ macro because this is the
5987 // instruction that gets patched and coverage code gets in the way.
5988 masm_->testl(rax, Immediate(-delta_to_patch_site));
5989 __ IncrementCounter(&Counters::named_load_inline_miss, 1);
5991 if (!dst_.is(rax)) __ movq(dst_, rax);
5992 __ pop(receiver_);
5993 }
5996 void DeferredInlineSmiAdd::Generate() {
5997 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
5998 igostub.GenerateCall(masm_, dst_, value_);
5999 if (!dst_.is(rax)) __ movq(dst_, rax);
6000 }
6003 void DeferredInlineSmiAddReversed::Generate() {
6004 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
6005 igostub.GenerateCall(masm_, value_, dst_);
6006 if (!dst_.is(rax)) __ movq(dst_, rax);
6007 }
6010 void DeferredInlineSmiSub::Generate() {
6011 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
6012 igostub.GenerateCall(masm_, dst_, value_);
6013 if (!dst_.is(rax)) __ movq(dst_, rax);
6014 }
6017 void DeferredInlineSmiOperation::Generate() {
6018 // For mod we don't generate all the Smi code inline.
6019 GenericBinaryOpStub stub(
6020 op_,
6021 overwrite_mode_,
6022 (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
6023 stub.GenerateCall(masm_, src_, value_);
6024 if (!dst_.is(rax)) __ movq(dst_, rax);
6025 }
6028 Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
6029 Result* operand,
6030 Handle<Object> value,
6031 StaticType* type,
6032 bool reversed,
6033 OverwriteMode overwrite_mode) {
6034 // NOTE: This is an attempt to inline (a bit) more of the code for
6035 // some possible smi operations (like + and -) when (at least) one
6036 // of the operands is a constant smi.
6037 // Consumes the argument "operand".
6039 // TODO(199): Optimize some special cases of operations involving a
6040 // smi literal (multiply by 2, shift by 0, etc.).
6041 if (IsUnsafeSmi(value)) {
6042 Result unsafe_operand(value);
6043 if (reversed) {
6044 return LikelySmiBinaryOperation(op, &unsafe_operand, operand,
6045 overwrite_mode);
6046 } else {
6047 return LikelySmiBinaryOperation(op, operand, &unsafe_operand,
6048 overwrite_mode);
6049 }
6050 }
6052 // Get the literal value.
6053 Smi* smi_value = Smi::cast(*value);
6054 int int_value = smi_value->value();
6056 Result answer;
6057 switch (op) {
6058 case Token::ADD: {
6059 operand->ToRegister();
6060 frame_->Spill(operand->reg());
6061 DeferredCode* deferred = NULL;
6062 if (reversed) {
6063 deferred = new DeferredInlineSmiAddReversed(operand->reg(),
6064 smi_value,
6065 overwrite_mode);
6066 } else {
6067 deferred = new DeferredInlineSmiAdd(operand->reg(),
6068 smi_value,
6069 overwrite_mode);
6070 }
6071 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
6072 __ SmiAddConstant(operand->reg(),
6073 operand->reg(),
6074 smi_value,
6075 deferred->entry_label());
6076 deferred->BindExit();
6077 answer = *operand;
6078 break;
6079 }
6081 case Token::SUB: {
6082 if (reversed) {
6083 Result constant_operand(value);
6084 answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
6085 overwrite_mode);
6086 } else {
6087 operand->ToRegister();
6088 frame_->Spill(operand->reg());
6089 DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
6090 smi_value,
6091 overwrite_mode);
6092 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
6093 // A smi currently fits in a 32-bit Immediate.
6094 __ SmiSubConstant(operand->reg(),
6095 operand->reg(),
6096 smi_value,
6097 deferred->entry_label());
6098 deferred->BindExit();
6099 answer = *operand;
6100 }
6101 break;
6102 }
6104 case Token::SAR:
6105 if (reversed) {
6106 Result constant_operand(value);
6107 answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
6108 overwrite_mode);
6109 } else {
6110 // Only the least significant 5 bits of the shift value are used.
6111 // In the slow case, this masking is done inside the runtime call.
6112 int shift_value = int_value & 0x1f;
6113 operand->ToRegister();
6114 frame_->Spill(operand->reg());
6115 DeferredInlineSmiOperation* deferred =
6116 new DeferredInlineSmiOperation(op,
6117 operand->reg(),
6118 operand->reg(),
6119 smi_value,
6120 overwrite_mode);
6121 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
6122 __ SmiShiftArithmeticRightConstant(operand->reg(),
6123 operand->reg(),
6124 shift_value);
6125 deferred->BindExit();
6126 answer = *operand;
6127 }
6128 break;
6130 case Token::SHR:
6131 if (reversed) {
6132 Result constant_operand(value);
6133 answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
6134 overwrite_mode);
6135 } else {
6136 // Only the least significant 5 bits of the shift value are used.
6137 // In the slow case, this masking is done inside the runtime call.
6138 int shift_value = int_value & 0x1f;
6139 operand->ToRegister();
6140 answer = allocator()->Allocate();
6141 ASSERT(answer.is_valid());
6142 DeferredInlineSmiOperation* deferred =
6143 new DeferredInlineSmiOperation(op,
6144 answer.reg(),
6145 operand->reg(),
6146 smi_value,
6147 overwrite_mode);
6148 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
6149 __ SmiShiftLogicalRightConstant(answer.reg(),
6150 operand->reg(),
6151 shift_value,
6152 deferred->entry_label());
6153 deferred->BindExit();
6154 operand->Unuse();
6155 }
6156 break;
6158 case Token::SHL:
6159 if (reversed) {
6160 Result constant_operand(value);
6161 answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
6162 overwrite_mode);
6163 } else {
6164 // Only the least significant 5 bits of the shift value are used.
6165 // In the slow case, this masking is done inside the runtime call.
6166 int shift_value = int_value & 0x1f;
6167 operand->ToRegister();
6168 if (shift_value == 0) {
6169 // Spill operand so it can be overwritten in the slow case.
6170 frame_->Spill(operand->reg());
6171 DeferredInlineSmiOperation* deferred =
6172 new DeferredInlineSmiOperation(op,
6173 operand->reg(),
6174 operand->reg(),
6175 smi_value,
6176 overwrite_mode);
6177 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
6178 deferred->BindExit();
6179 answer = *operand;
6180 } else {
6181 // Use a fresh temporary for nonzero shift values.
6182 answer = allocator()->Allocate();
6183 ASSERT(answer.is_valid());
6184 DeferredInlineSmiOperation* deferred =
6185 new DeferredInlineSmiOperation(op,
6186 answer.reg(),
6187 operand->reg(),
6188 smi_value,
6189 overwrite_mode);
6190 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
6191 __ SmiShiftLeftConstant(answer.reg(),
6192 operand->reg(),
6193 shift_value,
6194 deferred->entry_label());
6195 deferred->BindExit();
6196 operand->Unuse();
6197 }
6198 }
6199 break;
6201 case Token::BIT_OR:
6202 case Token::BIT_XOR:
6203 case Token::BIT_AND: {
6204 operand->ToRegister();
6205 frame_->Spill(operand->reg());
6206 if (reversed) {
6207 // Bit operations with a constant smi are commutative.
6208 // We can swap left and right operands with no problem.
6209 // Swap left and right overwrite modes. 0->0, 1->2, 2->1.
6210 overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
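// The (2 * mode) % 3 mapping swaps OVERWRITE_LEFT (1) and OVERWRITE_RIGHT
// (2) while keeping NO_OVERWRITE (0) fixed: (2*0)%3 == 0, (2*1)%3 == 2,
// (2*2)%3 == 1. This is what makes the operand swap described above legal.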
6211 }
6212 DeferredCode* deferred = new DeferredInlineSmiOperation(op,
6213 operand->reg(),
6214 operand->reg(),
6215 smi_value,
6216 overwrite_mode);
6217 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
6218 if (op == Token::BIT_AND) {
6219 __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
6220 } else if (op == Token::BIT_XOR) {
6221 if (int_value != 0) {
6222 __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
6223 }
6224 } else {
6225 ASSERT(op == Token::BIT_OR);
6226 if (int_value != 0) {
6227 __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
6228 }
6229 }
6230 deferred->BindExit();
6231 answer = *operand;
6232 break;
6233 }
6235 // Generate inline code for mod of powers of 2 and negative powers of 2.
6236 case Token::MOD:
6237 if (!reversed &&
6238 int_value != 0 &&
6239 (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
6240 operand->ToRegister();
6241 frame_->Spill(operand->reg());
6242 DeferredCode* deferred =
6243 new DeferredInlineSmiOperation(op,
6244 operand->reg(),
6245 operand->reg(),
6246 smi_value,
6247 overwrite_mode);
6248 // Check for negative or non-Smi left hand side.
6249 __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
6250 if (int_value < 0) int_value = -int_value;
6251 if (int_value == 1) {
6252 __ Move(operand->reg(), Smi::FromInt(0));
6253 } else {
6254 __ SmiAndConstant(operand->reg(),
6255 operand->reg(),
6256 Smi::FromInt(int_value - 1));
6257 }
6258 deferred->BindExit();
6259 answer = *operand;
6260 break; // This break only applies if we generated code for MOD.
6262 // Fall through if we did not find a power of 2 on the right hand side!
6263 // The next case must be the default.
6265 default: {
6266 Result constant_operand(value);
6267 if (reversed) {
6268 answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
6269 overwrite_mode);
6270 } else {
6271 answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
6272 overwrite_mode);
6273 }
6274 break;
6275 }
6276 }
6277 ASSERT(answer.is_valid());
6278 return answer;
6279 }
6281 Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
6282 Result* left,
6283 Result* right,
6284 OverwriteMode overwrite_mode) {
6285 Result answer;
6286 // Special handling of div and mod because they use fixed registers.
6287 if (op == Token::DIV || op == Token::MOD) {
6288 // We need rax as the quotient register, rdx as the remainder
6289 // register, neither left nor right in rax or rdx, and left copied
6290 // to rax.
6291 Result quotient;
6292 Result remainder;
6293 bool left_is_in_rax = false;
6294 // Step 1: get rax for quotient.
6295 if ((left->is_register() && left->reg().is(rax)) ||
6296 (right->is_register() && right->reg().is(rax))) {
6297 // One or both is in rax. Use a fresh non-rdx register for
6298 // them.
6299 Result fresh = allocator_->Allocate();
6300 ASSERT(fresh.is_valid());
6301 if (fresh.reg().is(rdx)) {
6302 remainder = fresh;
6303 fresh = allocator_->Allocate();
6304 ASSERT(fresh.is_valid());
6305 }
6306 if (left->is_register() && left->reg().is(rax)) {
6307 quotient = *left;
6308 *left = fresh;
6309 left_is_in_rax = true;
6310 }
6311 if (right->is_register() && right->reg().is(rax)) {
6312 quotient = *right;
6313 *right = fresh;
6314 }
6315 __ movq(fresh.reg(), rax);
6316 } else {
6317 // Neither left nor right is in rax.
6318 quotient = allocator_->Allocate(rax);
6319 }
6320 ASSERT(quotient.is_register() && quotient.reg().is(rax));
6321 ASSERT(!(left->is_register() && left->reg().is(rax)));
6322 ASSERT(!(right->is_register() && right->reg().is(rax)));
6324 // Step 2: get rdx for remainder if necessary.
6325 if (!remainder.is_valid()) {
6326 if ((left->is_register() && left->reg().is(rdx)) ||
6327 (right->is_register() && right->reg().is(rdx))) {
6328 Result fresh = allocator_->Allocate();
6329 ASSERT(fresh.is_valid());
6330 if (left->is_register() && left->reg().is(rdx)) {
6331 remainder = *left;
6332 *left = fresh;
6333 }
6334 if (right->is_register() && right->reg().is(rdx)) {
6335 remainder = *right;
6336 *right = fresh;
6337 }
6338 __ movq(fresh.reg(), rdx);
6339 } else {
6340 // Neither left nor right is in rdx.
6341 remainder = allocator_->Allocate(rdx);
6342 }
6343 }
6344 ASSERT(remainder.is_register() && remainder.reg().is(rdx));
6345 ASSERT(!(left->is_register() && left->reg().is(rdx)));
6346 ASSERT(!(right->is_register() && right->reg().is(rdx)));
6348 left->ToRegister();
6349 right->ToRegister();
6350 frame_->Spill(rax);
6351 frame_->Spill(rdx);
6353 // Check that left and right are smi tagged.
6354 DeferredInlineBinaryOperation* deferred =
6355 new DeferredInlineBinaryOperation(op,
6356 (op == Token::DIV) ? rax : rdx,
6357 left->reg(),
6358 right->reg(),
6359 overwrite_mode);
6360 __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
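// DIV and MOD need fixed registers because the idiv instruction always
// takes its dividend in rdx:rax and writes the quotient to rax and the
// remainder to rdx; the register shuffling above just establishes that
// layout before the smi fast path runs.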
6362 if (op == Token::DIV) {
6363 __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
6364 deferred->BindExit();
6365 left->Unuse();
6366 right->Unuse();
6367 answer = quotient;
6368 } else {
6369 ASSERT(op == Token::MOD);
6370 __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
6371 deferred->BindExit();
6372 left->Unuse();
6373 right->Unuse();
6374 answer = remainder;
6375 }
6376 ASSERT(answer.is_valid());
6377 return answer;
6378 }
6380 // Special handling of shift operations because they use fixed
6381 // registers.
6382 if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
6383 // Move left out of rcx if necessary.
6384 if (left->is_register() && left->reg().is(rcx)) {
6385 *left = allocator_->Allocate();
6386 ASSERT(left->is_valid());
6387 __ movq(left->reg(), rcx);
6388 }
6389 right->ToRegister(rcx);
6391 ASSERT(left->is_register() && !left->reg().is(rcx));
6392 ASSERT(right->is_register() && right->reg().is(rcx));
6394 // We will modify right, it must be spilled.
6395 frame_->Spill(rcx);
6397 // Use a fresh answer register to avoid spilling the left operand.
6398 answer = allocator_->Allocate();
6399 ASSERT(answer.is_valid());
6400 // Check that both operands are smis using the answer register as a
6401 // temporary.
6402 DeferredInlineBinaryOperation* deferred =
6403 new DeferredInlineBinaryOperation(op,
6404 answer.reg(),
6405 left->reg(),
6406 rcx,
6407 overwrite_mode);
6408 __ movq(answer.reg(), left->reg());
6409 __ or_(answer.reg(), rcx);
6410 __ JumpIfNotSmi(answer.reg(), deferred->entry_label());
6412 // Perform the operation.
6413 switch (op) {
6414 case Token::SAR:
6415 __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
6416 break;
6417 case Token::SHR:
6418 __ SmiShiftLogicalRight(answer.reg(),
6419 left->reg(),
6420 rcx,
6421 deferred->entry_label());
6422 break;
6423 case Token::SHL:
6424 {
6425 __ SmiShiftLeft(answer.reg(),
6426 left->reg(),
6427 rcx,
6428 deferred->entry_label());
6429 }
6430 break;
6431 default:
6432 UNREACHABLE();
6433 }
6434 deferred->BindExit();
6435 left->Unuse();
6436 right->Unuse();
6437 ASSERT(answer.is_valid());
6438 return answer;
6439 }
6441 // Handle the other binary operations.
6442 left->ToRegister();
6443 right->ToRegister();
6444 // A newly allocated register answer is used to hold the answer. The
6445 // registers containing left and right are not modified so they don't
6446 // need to be spilled in the fast case.
6447 answer = allocator_->Allocate();
6448 ASSERT(answer.is_valid());
6450 // Perform the smi tag check.
6451 DeferredInlineBinaryOperation* deferred =
6452 new DeferredInlineBinaryOperation(op,
6453 answer.reg(),
6454 left->reg(),
6455 right->reg(),
6456 overwrite_mode);
6457 __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
6459 switch (op) {
6460 case Token::ADD:
6461 __ SmiAdd(answer.reg(),
6462 left->reg(),
6463 right->reg(),
6464 deferred->entry_label());
6465 break;
6467 case Token::SUB:
6468 __ SmiSub(answer.reg(),
6469 left->reg(),
6470 right->reg(),
6471 deferred->entry_label());
6472 break;
6474 case Token::MUL: {
6475 __ SmiMul(answer.reg(),
6476 left->reg(),
6477 right->reg(),
6478 deferred->entry_label());
6479 break;
6480 }
6482 case Token::BIT_OR:
6483 __ SmiOr(answer.reg(), left->reg(), right->reg());
6484 break;
6486 case Token::BIT_AND:
6487 __ SmiAnd(answer.reg(), left->reg(), right->reg());
6488 break;
6490 case Token::BIT_XOR:
6491 __ SmiXor(answer.reg(), left->reg(), right->reg());
6492 break;
6494 default:
6495 UNREACHABLE();
6496 break;
6497 }
6498 deferred->BindExit();
6499 left->Unuse();
6500 right->Unuse();
6501 ASSERT(answer.is_valid());
6502 return answer;
6503 }
6506 Result CodeGenerator::EmitKeyedLoad(bool is_global) {
6507 Comment cmnt(masm_, "[ Load from keyed Property");
6508 // Inline array load code if inside of a loop. We do not know
6509 // the receiver map yet, so we initially generate the code with
6510 // a check against an invalid map. In the inline cache code, we
6511 // patch the map check if appropriate.
6512 if (loop_nesting() > 0) {
6513 Comment cmnt(masm_, "[ Inlined load from keyed Property");
6515 Result key = frame_->Pop();
6516 Result receiver = frame_->Pop();
6517 key.ToRegister();
6518 receiver.ToRegister();
6520 // Use a fresh temporary to load the elements without destroying
6521 // the receiver which is needed for the deferred slow case.
6522 Result elements = allocator()->Allocate();
6523 ASSERT(elements.is_valid());
6525 // Use a fresh temporary for the index and later the loaded
6526 // value.
6527 Result index = allocator()->Allocate();
6528 ASSERT(index.is_valid());
6530 DeferredReferenceGetKeyedValue* deferred =
6531 new DeferredReferenceGetKeyedValue(index.reg(),
6532 receiver.reg(),
6533 key.reg(),
6534 is_global);
6536 // Check that the receiver is not a smi (only needed if this
6537 // is not a load from the global context) and that it has the
6538 // expected map.
6539 if (!is_global) {
6540 __ JumpIfSmi(receiver.reg(), deferred->entry_label());
6541 }
6543 // Initially, use an invalid map. The map is patched in the IC
6544 // initialization code.
6545 __ bind(deferred->patch_site());
6546 // Use masm-> here instead of the double underscore macro since extra
6547 // coverage code can interfere with the patching. Do not use
6548 // root array to load null_value, since it must be patched with
6549 // the expected receiver map.
6550 masm_->movq(kScratchRegister, Factory::null_value(),
6551 RelocInfo::EMBEDDED_OBJECT);
6552 masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
6553 kScratchRegister);
6554 deferred->Branch(not_equal);
6556 // Check that the key is a non-negative smi.
6557 __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
6559 // Get the elements array from the receiver and check that it
6560 // is not a dictionary.
6561 __ movq(elements.reg(),
6562 FieldOperand(receiver.reg(), JSObject::kElementsOffset));
6563 __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
6564 Factory::fixed_array_map());
6565 deferred->Branch(not_equal);
6567 // Shift the key to get the actual index value and check that
6568 // it is within bounds.
6569 __ SmiToInteger32(index.reg(), key.reg());
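// SmiToInteger32 untags the key: on x64 the smi payload occupies the upper
// 32 bits of the word, so the conversion is effectively a shift right by
// 32, and the comment below notes that the result is zero-extended and
// directly usable as an array index.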
6570 __ cmpl(index.reg(),
6571 FieldOperand(elements.reg(), FixedArray::kLengthOffset));
6572 deferred->Branch(above_equal);
6574 // The index register holds the un-smi-tagged key. It has been
6575 // zero-extended to 64-bits, so it can be used directly as index in the
6576 // operand below.
6577 // Load and check that the result is not the hole. We could
6578 // reuse the index or elements register for the value.
6580 // TODO(206): Consider whether it makes sense to try some
6581 // heuristic about which register to reuse. For example, if
6582 // one is rax, then we can reuse that one because the value
6583 // coming from the deferred code will be in rax.
6584 Result value = index;
6585 __ movq(value.reg(),
6586 Operand(elements.reg(),
6587 index.reg(),
6588 times_pointer_size,
6589 FixedArray::kHeaderSize - kHeapObjectTag));
6592 __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
6593 deferred->Branch(equal);
6594 __ IncrementCounter(&Counters::keyed_load_inline, 1);
6596 deferred->BindExit();
6597 // Restore the receiver and key to the frame and push the
6598 // result on top of it.
6599 frame_->Push(&receiver);
6600 frame_->Push(&key);
6601 return value;
6603 } else {
6604 Comment cmnt(masm_, "[ Load from keyed Property");
6605 RelocInfo::Mode mode = is_global
6606 ? RelocInfo::CODE_TARGET_CONTEXT
6607 : RelocInfo::CODE_TARGET;
6608 Result answer = frame_->CallKeyedLoadIC(mode);
6609 // Make sure that we do not have a test instruction after the
6610 // call. A test instruction after the call is used to
6611 // indicate that we have generated an inline version of the
6612 // keyed load. The explicit nop instruction is here because
6613 // the push that follows might be peep-hole optimized away.
6614 __ nop();
6615 return answer;
6616 }
6617 }
6620 #undef __
6621 #define __ ACCESS_MASM(masm)
6624 Handle<String> Reference::GetName() {
6625 ASSERT(type_ == NAMED);
6626 Property* property = expression_->AsProperty();
6627 if (property == NULL) {
6628 // Global variable reference treated as a named property reference.
6629 VariableProxy* proxy = expression_->AsVariableProxy();
6630 ASSERT(proxy->AsVariable() != NULL);
6631 ASSERT(proxy->AsVariable()->is_global());
6632 return proxy->name();
6633 } else {
6634 Literal* raw_name = property->key()->AsLiteral();
6635 ASSERT(raw_name != NULL);
6636 return Handle<String>(String::cast(*raw_name->handle()));
6637 }
6638 }
6641 void Reference::GetValue() {
6642 ASSERT(!cgen_->in_spilled_code());
6643 ASSERT(cgen_->HasValidEntryRegisters());
6644 ASSERT(!is_illegal());
6645 MacroAssembler* masm = cgen_->masm();
6647 // Record the source position for the property load.
6648 Property* property = expression_->AsProperty();
6649 if (property != NULL) {
6650 cgen_->CodeForSourcePosition(property->position());
6651 }
6653 switch (type_) {
6654 case SLOT: {
6655 Comment cmnt(masm, "[ Load from Slot");
6656 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6657 ASSERT(slot != NULL);
6658 cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
6659 break;
6660 }
6662 case NAMED: {
6663 Variable* var = expression_->AsVariableProxy()->AsVariable();
6664 bool is_global = var != NULL;
6665 ASSERT(!is_global || var->is_global());
6667 // Do not inline the inobject property case for loads from the global
6668 // object. Also do not inline for unoptimized code. This saves time
6669 // in the code generator. Unoptimized code is toplevel code or code
6670 // that is not in a loop.
6671 if (is_global ||
6672 cgen_->scope()->is_global_scope() ||
6673 cgen_->loop_nesting() == 0) {
6674 Comment cmnt(masm, "[ Load from named Property");
6675 cgen_->frame()->Push(GetName());
6677 RelocInfo::Mode mode = is_global
6678 ? RelocInfo::CODE_TARGET_CONTEXT
6679 : RelocInfo::CODE_TARGET;
6680 Result answer = cgen_->frame()->CallLoadIC(mode);
6681 // A test rax instruction following the call signals that the
6682 // inobject property case was inlined. Ensure that there is not
6683 // a test rax instruction here.
6684 __ nop();
6685 cgen_->frame()->Push(&answer);
6686 } else {
6687 // Inline the inobject property case.
6688 Comment cmnt(masm, "[ Inlined named property load");
6689 Result receiver = cgen_->frame()->Pop();
6690 receiver.ToRegister();
6691 Result value = cgen_->allocator()->Allocate();
6692 ASSERT(value.is_valid());
6693 // Cannot use r12 for receiver, because that changes
6694 // the distance between a call and a fixup location,
6695 // due to a special encoding of r12 as r/m in a ModR/M byte.
6696 if (receiver.reg().is(r12)) {
6697 // Swap receiver and value.
6698 __ movq(value.reg(), receiver.reg());
6699 Result temp = receiver;
6700 receiver = value;
6701 value = temp;
6702 cgen_->frame()->Spill(value.reg()); // r12 may have been shared.
6703 }
6705 DeferredReferenceGetNamedValue* deferred =
6706 new DeferredReferenceGetNamedValue(value.reg(),
6707 receiver.reg(),
6708 GetName());
6710 // Check that the receiver is a heap object.
6711 __ JumpIfSmi(receiver.reg(), deferred->entry_label());
6713 __ bind(deferred->patch_site());
6714 // This is the map check instruction that will be patched (so we can't
6715 // use the double underscore macro that may insert instructions).
6716 // Initially use an invalid map to force a failure.
6717 masm->Move(kScratchRegister, Factory::null_value());
6718 masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
6719 kScratchRegister);
6720 // This branch is always a forwards branch so it's always a fixed
6721 // size which allows the assert below to succeed and patching to work.
6722 // Don't use deferred->Branch(...), since that might add coverage code.
6723 masm->j(not_equal, deferred->entry_label());
6725 // The delta from the patch label to the load offset must be
6726 // statically known.
6727 ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
6728 LoadIC::kOffsetToLoadInstruction);
6729 // The initial (invalid) offset has to be large enough to force
6730 // a 32-bit instruction encoding to allow patching with an
6731 // arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
6732 int offset = kMaxInt;
6733 masm->movq(value.reg(), FieldOperand(receiver.reg(), offset));
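// kMaxInt is deliberately an offset that cannot be encoded in 8 bits, so
// the assembler is forced to emit the 32-bit displacement form of the
// load; the IC patching code can later overwrite that displacement with
// the real inobject property offset.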
6735 __ IncrementCounter(&Counters::named_load_inline, 1);
6736 deferred->BindExit();
6737 cgen_->frame()->Push(&receiver);
6738 cgen_->frame()->Push(&value);
6739 }
6740 break;
6741 }
6743 case KEYED: {
6744 Comment cmnt(masm, "[ Load from keyed Property");
6745 Variable* var = expression_->AsVariableProxy()->AsVariable();
6746 bool is_global = var != NULL;
6747 ASSERT(!is_global || var->is_global());
6749 Result value = cgen_->EmitKeyedLoad(is_global);
6750 cgen_->frame()->Push(&value);
6751 break;
6752 }
6754 default:
6755 UNREACHABLE();
6756 }
6758 if (!persist_after_get_) {
6759 cgen_->UnloadReference(this);
6760 }
6761 }
6764 void Reference::TakeValue() {
6765 // TODO(X64): This function is completely architecture independent. Move
6766 // it somewhere shared.
6768 // For non-constant frame-allocated slots, we invalidate the value in the
6769 // slot. For all others, we fall back on GetValue.
6770 ASSERT(!cgen_->in_spilled_code());
6771 ASSERT(!is_illegal());
6772 if (type_ != SLOT) {
6773 GetValue();
6774 return;
6775 }
6777 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6778 ASSERT(slot != NULL);
6779 if (slot->type() == Slot::LOOKUP ||
6780 slot->type() == Slot::CONTEXT ||
6781 slot->var()->mode() == Variable::CONST ||
6782 slot->is_arguments()) {
6783 GetValue();
6784 return;
6785 }
6787 // Only non-constant, frame-allocated parameters and locals can reach
6788 // here. Be careful not to use the optimizations for arguments
6789 // object access since it may not have been initialized yet.
6790 ASSERT(!slot->is_arguments());
6791 if (slot->type() == Slot::PARAMETER) {
6792 cgen_->frame()->TakeParameterAt(slot->index());
6794 ASSERT(slot->type() == Slot::LOCAL);
6795 cgen_->frame()->TakeLocalAt(slot->index());
6796 }
6798 ASSERT(persist_after_get_);
6799 // Do not unload the reference, because it is used in SetValue.
6800 }
6803 void Reference::SetValue(InitState init_state) {
6804 ASSERT(cgen_->HasValidEntryRegisters());
6805 ASSERT(!is_illegal());
6806 MacroAssembler* masm = cgen_->masm();
6807 switch (type_) {
6808 case SLOT: {
6809 Comment cmnt(masm, "[ Store to Slot");
6810 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6811 ASSERT(slot != NULL);
6812 cgen_->StoreToSlot(slot, init_state);
6813 cgen_->UnloadReference(this);
6814 break;
6815 }
6817 case NAMED: {
6818 Comment cmnt(masm, "[ Store to named Property");
6819 cgen_->frame()->Push(GetName());
6820 Result answer = cgen_->frame()->CallStoreIC();
6821 cgen_->frame()->Push(&answer);
6822 cgen_->UnloadReference(this);
6823 break;
6824 }
6826 case KEYED: {
6827 Comment cmnt(masm, "[ Store to keyed Property");
6829 // Generate inlined version of the keyed store if the code is in
6830 // a loop and the key is likely to be a smi.
6831 Property* property = expression()->AsProperty();
6832 ASSERT(property != NULL);
6833 StaticType* key_smi_analysis = property->key()->type();
6835 if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
6836 Comment cmnt(masm, "[ Inlined store to keyed Property");
6838 // Get the receiver, key and value into registers.
6839 Result value = cgen_->frame()->Pop();
6840 Result key = cgen_->frame()->Pop();
6841 Result receiver = cgen_->frame()->Pop();
6843 Result tmp = cgen_->allocator_->Allocate();
6844 ASSERT(tmp.is_valid());
6846 // Determine whether the value is a constant before putting it
6847 // in a register.
6848 bool value_is_constant = value.is_constant();
6850 // Make sure that value, key and receiver are in registers.
6851 value.ToRegister();
6852 key.ToRegister();
6853 receiver.ToRegister();
6855 DeferredReferenceSetKeyedValue* deferred =
6856 new DeferredReferenceSetKeyedValue(value.reg(),
6857 key.reg(),
6858 receiver.reg());
6860 // Check that the value is a smi if it is not a constant.
6861 // We can skip the write barrier for smis and constants.
6862 if (!value_is_constant) {
6863 __ JumpIfNotSmi(value.reg(), deferred->entry_label());
6864 }
6866 // Check that the key is a non-negative smi.
6867 __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
6869 // Check that the receiver is not a smi.
6870 __ JumpIfSmi(receiver.reg(), deferred->entry_label());
6872 // Check that the receiver is a JSArray.
6873 __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
6874 deferred->Branch(not_equal);
6876 // Check that the key is within bounds. Both the key and the
6877 // length of the JSArray are smis.
6878 __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
6879 key.reg());
6880 deferred->Branch(less_equal);
6882 // Get the elements array from the receiver and check that it
6883 // is a flat array (not a dictionary).
6884 __ movq(tmp.reg(),
6885 FieldOperand(receiver.reg(), JSObject::kElementsOffset));
6886 // Bind the deferred code patch site to be able to locate the
6887 // fixed array map comparison. When debugging, we patch this
6888 // comparison to always fail so that we will hit the IC call
6889 // in the deferred code which will allow the debugger to
6890 // break for fast case stores.
6891 __ bind(deferred->patch_site());
6892 // Avoid using __ to ensure the distance from patch_site
6893 // to the map address is always the same.
6894 masm->movq(kScratchRegister, Factory::fixed_array_map(),
6895 RelocInfo::EMBEDDED_OBJECT);
6896 __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
6897 kScratchRegister);
6898 deferred->Branch(not_equal);
6900 // Store the value.
6901 SmiIndex index =
6902 masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
6903 __ movq(Operand(tmp.reg(),
6904 index.reg,
6905 index.scale,
6906 FixedArray::kHeaderSize - kHeapObjectTag),
6907 value.reg());
6908 __ IncrementCounter(&Counters::keyed_store_inline, 1);
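// Note: the write barrier records stores of new-space pointers into
// old-space objects. A smi store can never create such a pointer, which
// is why the smi check above lets this fast path omit the barrier.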
6910 deferred->BindExit();
6912 cgen_->frame()->Push(&receiver);
6913 cgen_->frame()->Push(&key);
6914 cgen_->frame()->Push(&value);
6915 } else {
6916 Result answer = cgen_->frame()->CallKeyedStoreIC();
6917 // Make sure that we do not have a test instruction after the
6918 // call. A test instruction after the call is used to
6919 // indicate that we have generated an inline version of the
6920 // keyed store.
6921 __ nop();
6922 cgen_->frame()->Push(&answer);
6924 cgen_->UnloadReference(this);
6925 break;
6926 }
6928 default:
6929 UNREACHABLE();
6930 }
6931 }
6934 void FastNewClosureStub::Generate(MacroAssembler* masm) {
6935 // Create a new closure from the given function info in new
6936 // space. Set the context to the current context in rsi.
6937 Label gc;
6938 __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
6940 // Get the function info from the stack.
6941 __ movq(rdx, Operand(rsp, 1 * kPointerSize));
6943 // Compute the function map in the current global context and set that
6944 // as the map of the allocated object.
6945 __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
6946 __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
6947 __ movq(rcx, Operand(rcx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
6948 __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
6950 // Initialize the rest of the function. We don't have to update the
6951 // write barrier because the allocated object is in new space.
6952 __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
6953 __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
6954 __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
6955 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
6956 __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
6957 __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
6958 __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
6959 __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
6961 // Return and remove the on-stack parameter.
6962 __ ret(1 * kPointerSize);
6964 // Create a new closure through the slower runtime call.
6965 __ bind(&gc);
6966 __ pop(rcx); // Temporarily remove return address.
6967 __ pop(rdx);
6968 __ push(rsi);
6969 __ push(rdx);
6970 __ push(rcx); // Restore return address.
6971 __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
6972 }
6975 void FastNewContextStub::Generate(MacroAssembler* masm) {
6976 // Try to allocate the context in new space.
6977 Label gc;
6978 int length = slots_ + Context::MIN_CONTEXT_SLOTS;
6979 __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
6980 rax, rbx, rcx, &gc, TAG_OBJECT);
6982 // Get the function from the stack.
6983 __ movq(rcx, Operand(rsp, 1 * kPointerSize));
6985 // Setup the object header.
6986 __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
6987 __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
6988 __ movl(FieldOperand(rax, Array::kLengthOffset), Immediate(length));
6990 // Setup the fixed slots.
6991 __ xor_(rbx, rbx); // Set to NULL.
6992 __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
6993 __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
6994 __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
6995 __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
6997 // Copy the global object from the surrounding context.
6998 __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
6999 __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
7001 // Initialize the rest of the slots to undefined.
7002 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
7003 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
7004 __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
7007 // Return and remove the on-stack parameter.
7008 __ movq(rsi, rax);
7009 __ ret(1 * kPointerSize);
7011 // Need to collect. Call into runtime system.
7012 __ bind(&gc);
7013 __ TailCallRuntime(Runtime::kNewContext, 1, 1);
7014 }
7017 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
7018 // Stack layout on entry:
7020 // [rsp + kPointerSize]: constant elements.
7021 // [rsp + (2 * kPointerSize)]: literal index.
7022 // [rsp + (3 * kPointerSize)]: literals array.
7024 // All sizes here are multiples of kPointerSize.
7025 int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
7026 int size = JSArray::kSize + elements_size;
7028 // Load boilerplate object into rcx and check if we need to create a
7029 // boilerplate.
7030 Label slow_case;
7031 __ movq(rcx, Operand(rsp, 3 * kPointerSize));
7032 __ movq(rax, Operand(rsp, 2 * kPointerSize));
7033 SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
7034 __ movq(rcx,
7035 FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
7036 __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
7037 __ j(equal, &slow_case);
7039 // Allocate both the JS array and the elements array in one big
7040 // allocation. This avoids multiple limit checks.
7041 __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
7043 // Copy the JS array part.
7044 for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
7045 if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
7046 __ movq(rbx, FieldOperand(rcx, i));
7047 __ movq(FieldOperand(rax, i), rbx);
7048 }
7049 }
7051 if (length_ > 0) {
7052 // Get hold of the elements array of the boilerplate and setup the
7053 // elements pointer in the resulting object.
7054 __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
7055 __ lea(rdx, Operand(rax, JSArray::kSize));
7056 __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
7058 // Copy the elements array.
7059 for (int i = 0; i < elements_size; i += kPointerSize) {
7060 __ movq(rbx, FieldOperand(rcx, i));
7061 __ movq(FieldOperand(rdx, i), rbx);
7062 }
7063 }
7065 // Return and remove the on-stack parameters.
7066 __ ret(3 * kPointerSize);
7068 __ bind(&slow_case);
7069 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
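// The stub clones by plain copying: the boilerplate created for this
// literal site already contains the constant elements, so the fast path
// is a single allocation plus a fixed sequence of loads and stores. Only
// a missing boilerplate (undefined in the literals array) or a failed
// allocation reaches the runtime call above.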
7073 void ToBooleanStub::Generate(MacroAssembler* masm) {
7074 Label false_result, true_result, not_string;
7075 __ movq(rax, Operand(rsp, 1 * kPointerSize));
7077 // 'null' => false.
7078 __ CompareRoot(rax, Heap::kNullValueRootIndex);
7079 __ j(equal, &false_result);
7081 // Get the map and type of the heap object.
7082 // We don't use CmpObjectType because we manipulate the type field.
7083 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
7084 __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
7086 // Undetectable => false.
7087 __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
7088 __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
7089 __ j(not_zero, &false_result);
7091 // JavaScript object => true.
7092 __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
7093 __ j(above_equal, &true_result);
7095 // String value => false iff empty.
7096 __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
7097 __ j(above_equal, &not_string);
7098 __ movl(rdx, FieldOperand(rax, String::kLengthOffset));
7099 __ testl(rdx, rdx);
7100 __ j(zero, &false_result);
7101 __ jmp(&true_result);
7103 __ bind(&not_string);
7104 // HeapNumber => false iff +0, -0, or NaN.
7105 // These three cases set C3 when compared to zero in the FPU.
7106 __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
7107 __ j(not_equal, &true_result);
7108 __ fldz(); // Load zero onto fp stack
7109 // Load heap-number double value onto fp stack
7110 __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
7111 __ FCmp();
7112 __ j(zero, &false_result);
7113 // Fall through to |true_result|.
7115 // Return 1/0 for true/false in rax.
7116 __ bind(&true_result);
7117 __ movq(rax, Immediate(1));
7118 __ ret(1 * kPointerSize);
7119 __ bind(&false_result);
7120 __ movq(rax, Immediate(0));
7121 __ ret(1 * kPointerSize);
7122 }
7125 bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
7126 Object* answer_object = Heap::undefined_value();
7127 switch (op) {
7128 case Token::ADD:
7129 // Use intptr_t to detect overflow of 32-bit int.
7130 if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
7131 answer_object = Smi::FromInt(left + right);
7132 }
7133 break;
7134 case Token::SUB:
7135 // Use intptr_t to detect overflow of 32-bit int.
7136 if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
7137 answer_object = Smi::FromInt(left - right);
7138 }
7139 break;
7140 case Token::MUL: {
7141 double answer = static_cast<double>(left) * right;
7142 if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
7143 // If the product is zero and the non-zero factor is negative,
7144 // the spec requires us to return floating point negative zero.
7145 if (answer != 0 || (left + right) >= 0) {
7146 answer_object = Smi::FromInt(static_cast<int>(answer));
7147 }
7148 }
7149 }
7150 break;
7151 case Token::DIV:
7152 case Token::MOD:
7153 break;
7154 case Token::BIT_OR:
7155 answer_object = Smi::FromInt(left | right);
7156 break;
7157 case Token::BIT_AND:
7158 answer_object = Smi::FromInt(left & right);
7159 break;
7160 case Token::BIT_XOR:
7161 answer_object = Smi::FromInt(left ^ right);
7162 break;
7164 case Token::SHL: {
7165 int shift_amount = right & 0x1F;
7166 if (Smi::IsValid(left << shift_amount)) {
7167 answer_object = Smi::FromInt(left << shift_amount);
7168 }
7169 break;
7170 }
7171 case Token::SHR: {
7172 int shift_amount = right & 0x1F;
7173 unsigned int unsigned_left = left;
7174 unsigned_left >>= shift_amount;
7175 if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
7176 answer_object = Smi::FromInt(unsigned_left);
7177 }
7178 break;
7179 }
7180 case Token::SAR: {
7181 int shift_amount = right & 0x1F;
7182 unsigned int unsigned_left = left;
7183 if (left < 0) {
7184 // Perform arithmetic shift of a negative number by
7185 // complementing number, logical shifting, complementing again.
7186 unsigned_left = ~unsigned_left;
7187 unsigned_left >>= shift_amount;
7188 unsigned_left = ~unsigned_left;
7189 } else {
7190 unsigned_left >>= shift_amount;
7191 }
7192 ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
7193 answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
7194 break;
7195 }
7196 default:
7197 UNREACHABLE();
7198 break;
7199 }
7200 if (answer_object == Heap::undefined_value()) {
7201 return false;
7202 }
7203 frame_->Push(Handle<Object>(answer_object));
7204 return true;
7205 }
7208 // End of CodeGenerator implementation.
7210 // Get the integer part of a heap number. Surprisingly, all this bit twiddling
7211 // is faster than using the built-in instructions on floating point registers.
7212 // Trashes rdi and rbx. Dest is rcx. Source cannot be rcx or one of the
7213 // trashed registers.
7214 void IntegerConvert(MacroAssembler* masm,
7215 Register source,
7216 bool use_sse3,
7217 Label* conversion_failure) {
7218 ASSERT(!source.is(rcx) && !source.is(rdi) && !source.is(rbx));
7219 Label done, right_exponent, normal_exponent;
7220 Register scratch = rbx;
7221 Register scratch2 = rdi;
7222 // Get exponent word.
7223 __ movl(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
7224 // Get exponent alone in scratch2.
7225 __ movl(scratch2, scratch);
7226 __ and_(scratch2, Immediate(HeapNumber::kExponentMask));
7227 if (use_sse3) {
7228 CpuFeatures::Scope scope(SSE3);
7229 // Check whether the exponent is too big for a 64 bit signed integer.
7230 static const uint32_t kTooBigExponent =
7231 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
7232 __ cmpl(scratch2, Immediate(kTooBigExponent));
7233 __ j(greater_equal, conversion_failure);
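// The bound checked above corresponds to 2^63: fisttp performs a signed
// 64-bit truncating conversion, so any value whose (biased) exponent is 63
// or more could overflow the destination and must take the failure path
// instead.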
7234 // Load x87 register with heap number.
7235 __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
7236 // Reserve space for 64 bit answer.
7237 __ subq(rsp, Immediate(sizeof(uint64_t))); // Nolint.
7238 // Do conversion, which cannot fail because we checked the exponent.
7239 __ fisttp_d(Operand(rsp, 0));
7240 __ movl(rcx, Operand(rsp, 0)); // Load low word of answer into rcx.
7241 __ addq(rsp, Immediate(sizeof(uint64_t))); // Nolint.
7242 } else {
7243 // Load rcx with zero. We use this either for the final shift or
7244 // for the answer.
7245 __ xor_(rcx, rcx);
7246 // Check whether the exponent matches a 32 bit signed int that cannot be
7247 // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
7248 // exponent is 30 (biased). This is the exponent that we are fastest at and
7249 // also the highest exponent we can handle here.
7250 const uint32_t non_smi_exponent =
7251 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
7252 __ cmpl(scratch2, Immediate(non_smi_exponent));
7253 // If we have a match of the int32-but-not-Smi exponent then skip some
7254 // logic.
7255 __ j(equal, &right_exponent);
7256 // If the exponent is higher than that then go to slow case. This catches
7257 // numbers that don't fit in a signed int32, infinities and NaNs.
7258 __ j(less, &normal_exponent);
7260 {
7261 // Handle a big exponent. The only reason we have this code is that the
7262 // >>> operator has a tendency to generate numbers with an exponent of 31.
7263 const uint32_t big_non_smi_exponent =
7264 (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
7265 __ cmpl(scratch2, Immediate(big_non_smi_exponent));
7266 __ j(not_equal, conversion_failure);
7267 // We have the big exponent, typically from >>>. This means the number is
7268 // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
7269 __ movl(scratch2, scratch);
7270 __ and_(scratch2, Immediate(HeapNumber::kMantissaMask));
7271 // Put back the implicit 1.
7272 __ or_(scratch2, Immediate(1 << HeapNumber::kExponentShift));
7273 // Shift up the mantissa bits to take up the space the exponent used to
7274 // take. We just orred in the implicit bit so that took care of one and
7275 // we want to use the full unsigned range so we subtract 1 bit from the
7276 // shift distance.
7277 const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
7278 __ shl(scratch2, Immediate(big_shift_distance));
7279 // Get the second half of the double.
7280 __ movl(rcx, FieldOperand(source, HeapNumber::kMantissaOffset));
7281 // Shift down 21 bits to get the most significant 11 bits or the low
7282 // mantissa word.
7283 __ shr(rcx, Immediate(32 - big_shift_distance));
7284 __ or_(rcx, scratch2);
7285 // We have the answer in rcx, but we may need to negate it.
7286 __ testl(scratch, scratch);
7287 __ j(positive, &done);
7288 __ negl(rcx);
7289 __ jmp(&done);
7290 }
7292 __ bind(&normal_exponent);
7293 // Exponent word in scratch, exponent part of exponent word in scratch2.
7294 // Zero in rcx.
7295 // We know the exponent is smaller than 30 (biased). If it is less than
7296 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
7297 // it rounds to zero.
7298 const uint32_t zero_exponent =
7299 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
7300 __ subl(scratch2, Immediate(zero_exponent));
7301 // rcx already has a Smi zero.
7302 __ j(less, &done);
7304 // We have a shifted exponent between 0 and 30 in scratch2.
7305 __ shr(scratch2, Immediate(HeapNumber::kExponentShift));
7306 __ movl(rcx, Immediate(30));
7307 __ subl(rcx, scratch2);
7309 __ bind(&right_exponent);
7310 // Here rcx is the shift, scratch is the exponent word.
7311 // Get the top bits of the mantissa.
7312 __ and_(scratch, Immediate(HeapNumber::kMantissaMask));
7313 // Put back the implicit 1.
7314 __ or_(scratch, Immediate(1 << HeapNumber::kExponentShift));
7315 // Shift up the mantissa bits to take up the space the exponent used to
7316 // take. We have kExponentShift + 1 significant bits in the low end of the
7317 // word. Shift them to the top bits.
7318 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
7319 __ shl(scratch, Immediate(shift_distance));
7320 // Get the second half of the double. For some exponents we don't
7321 // actually need this because the bits get shifted out again, but
7322 // it's probably slower to test than just to do it.
7323 __ movl(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
7324 // Shift down 22 bits to get the most significant 10 bits or the low
7325 // mantissa word.
7326 __ shr(scratch2, Immediate(32 - shift_distance));
7327 __ or_(scratch2, scratch);
7328 // Move down according to the exponent.
7329 __ shr_cl(scratch2);
7330 // Now the unsigned answer is in scratch2. We need to move it to rcx and
7331 // we may need to fix the sign.
Label negative;
__ xor_(rcx, rcx);
7334 __ cmpl(rcx, FieldOperand(source, HeapNumber::kExponentOffset));
7335 __ j(greater, &negative);
7336 __ movl(rcx, scratch2);
__ jmp(&done);
__ bind(&negative);
7339 __ subl(rcx, scratch2);
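// A portable C++ sketch (hypothetical helper, not part of the stub) of what
// the non-SSE3 path above computes for a heap number, assuming an IEEE-754
// double supplied as its raw 64-bit bit pattern:
//
//   static int32_t DoubleBitsToInt32Sketch(uint64_t bits) {
//     int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
//     if (exponent < 0) return 0;   // Magnitude below 1.0 rounds to zero.
//     if (exponent > 31) return 0;  // The stub bails out to the slow case.
//     uint64_t mantissa = (bits & ((static_cast<uint64_t>(1) << 52) - 1))
//                         | (static_cast<uint64_t>(1) << 52);  // Implicit 1.
//     uint32_t magnitude =
//         static_cast<uint32_t>(mantissa >> (52 - exponent));
//     return (bits >> 63) != 0 ? -static_cast<int32_t>(magnitude)
//                              : static_cast<int32_t>(magnitude);
//   }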
7345 void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
Label slow, done;

7348 if (op_ == Token::SUB) {
7349 // Check whether the value is a smi.
Label try_float;
7351 __ JumpIfNotSmi(rax, &try_float);
7353 // Enter runtime system if the value of the smi is zero
7354 // to make sure that we switch between 0 and -0.
7355 // Also enter it if the value of the smi is Smi::kMinValue.
7356 __ SmiNeg(rax, rax, &done);
7358 // Either zero or Smi::kMinValue, neither of which become a smi when
// negated.
7360 __ SmiCompare(rax, Smi::FromInt(0));
7361 __ j(not_equal, &slow);
7362 __ Move(rax, Factory::minus_zero_value());
__ jmp(&done);
7365 // Try floating point case.
7366 __ bind(&try_float);
7367 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
7368 __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
7369 __ j(not_equal, &slow);
7370 // Operand is a float, negate its value by flipping sign bit.
7371 __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
7372 __ movq(kScratchRegister, Immediate(0x01));
7373 __ shl(kScratchRegister, Immediate(63));
7374 __ xor_(rdx, kScratchRegister); // Flip sign.
7375 // rdx is the value to store.
if (overwrite_) {
7377 __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
} else {
7379 __ AllocateHeapNumber(rcx, rbx, &slow);
7380 // rcx: allocated 'empty' number
7381 __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
__ movq(rax, rcx);
}
7384 } else if (op_ == Token::BIT_NOT) {
7385 // Check if the operand is a heap number.
7386 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
7387 __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
7388 __ j(not_equal, &slow);
7390 // Convert the heap number in rax to an untagged integer in rcx.
7391 IntegerConvert(masm, rax, CpuFeatures::IsSupported(SSE3), &slow);
7393 // Do the bitwise operation and check if the result fits in a smi.
__ notl(rcx);
7396 // Tag the result as a smi and we're done.
7397 ASSERT(kSmiTagSize == 1);
7398 __ Integer32ToSmi(rax, rcx);
}

7401 // Return from the stub.
__ bind(&done);
__ StubReturn(1);
7405 // Handle the slow case by jumping to the JavaScript builtin.
__ bind(&slow);
7407 __ pop(rcx); // pop return address
__ push(rax);
7409 __ push(rcx); // push return address
switch (op_) {
case Token::SUB:
7412 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
break;
7414 case Token::BIT_NOT:
7415 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
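// Sketch (hypothetical, for illustration only) of the sign-bit trick used by
// the Token::SUB path above: an IEEE-754 double is negated by XOR-ing bit 63,
// so no floating-point arithmetic is needed.
//
//   static uint64_t NegateDoubleBitsSketch(uint64_t bits) {
//     return bits ^ (static_cast<uint64_t>(1) << 63);  // Flip the sign bit.
//   }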
7423 void RegExpExecStub::Generate(MacroAssembler* masm) {
7424 // Just jump directly to the runtime system if native RegExp is not selected
7425 // at compile time, or if the regexp entry in generated code is turned off by
// a runtime switch.
7427 #ifdef V8_INTERPRETED_REGEXP
7428 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
7429 #else // V8_INTERPRETED_REGEXP
7430 if (!FLAG_regexp_entry_native) {
7431 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
return;
}
7435 // Stack frame on entry.
7436 // rsp[0]: return address
7437 // rsp[8]: last_match_info (expected JSArray)
7438 // rsp[16]: previous index
7439 // rsp[24]: subject string
7440 // rsp[32]: JSRegExp object
7442 static const int kLastMatchInfoOffset = 1 * kPointerSize;
7443 static const int kPreviousIndexOffset = 2 * kPointerSize;
7444 static const int kSubjectOffset = 3 * kPointerSize;
7445 static const int kJSRegExpOffset = 4 * kPointerSize;
Label runtime;

7449 // Ensure that a RegExp stack is allocated.
7450 ExternalReference address_of_regexp_stack_memory_address =
7451 ExternalReference::address_of_regexp_stack_memory_address();
7452 ExternalReference address_of_regexp_stack_memory_size =
7453 ExternalReference::address_of_regexp_stack_memory_size();
7454 __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
7455 __ movq(kScratchRegister, Operand(kScratchRegister, 0));
7456 __ testq(kScratchRegister, kScratchRegister);
7457 __ j(zero, &runtime);
7460 // Check that the first argument is a JSRegExp object.
7461 __ movq(rax, Operand(rsp, kJSRegExpOffset));
7462 __ JumpIfSmi(rax, &runtime);
7463 __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
7464 __ j(not_equal, &runtime);
7465 // Check that the RegExp has been compiled (data contains a fixed array).
7466 __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
7467 if (FLAG_debug_code) {
7468 Condition is_smi = masm->CheckSmi(rcx);
7469 __ Check(NegateCondition(is_smi),
7470 "Unexpected type for RegExp data, FixedArray expected");
7471 __ CmpObjectType(rcx, FIXED_ARRAY_TYPE, kScratchRegister);
7472 __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
}
7475 // rcx: RegExp data (FixedArray)
7476 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
7477 __ movq(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
7478 __ SmiCompare(rbx, Smi::FromInt(JSRegExp::IRREGEXP));
7479 __ j(not_equal, &runtime);
7481 // rcx: RegExp data (FixedArray)
7482 // Check that the number of captures fits in the static offsets vector buffer.
7483 __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
7484 // Calculate number of capture registers (number_of_captures + 1) * 2.
7485 __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
7486 __ addq(rdx, Immediate(2)); // rdx was number_of_captures * 2.
7487 // Check that the static offsets vector buffer is large enough.
7488 __ cmpq(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
7489 __ j(above, &runtime);
7491 // rcx: RegExp data (FixedArray)
7492 // rdx: Number of capture registers
7493 // Check that the second argument is a string.
7494 __ movq(rax, Operand(rsp, kSubjectOffset));
7495 __ JumpIfSmi(rax, &runtime);
7496 Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
7497 __ j(NegateCondition(is_string), &runtime);
7498 // Get the length of the string to rbx.
7499 __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
7501 // rbx: Length of subject string
7502 // rcx: RegExp data (FixedArray)
7503 // rdx: Number of capture registers
7504 // Check that the third argument is a positive smi less than the string
7505 // length. A negative value will be greater (unsigned comparison).
7506 __ movq(rax, Operand(rsp, kPreviousIndexOffset));
7507 __ SmiToInteger32(rax, rax);
__ cmpl(rax, rbx);
7509 __ j(above, &runtime);
7511 // rcx: RegExp data (FixedArray)
7512 // rdx: Number of capture registers
7513 // Check that the fourth object is a JSArray object.
7514 __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
7515 __ JumpIfSmi(rax, &runtime);
7516 __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister);
7517 __ j(not_equal, &runtime);
7518 // Check that the JSArray is in fast case.
7519 __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
7520 __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
7521 __ Cmp(rax, Factory::fixed_array_map());
7522 __ j(not_equal, &runtime);
7523 // Check that the last match info has space for the capture registers and the
7524 // additional information. Ensure no overflow in add.
7525 ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
7526 __ movl(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
7527 __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
__ cmpl(rdx, rax);
7529 __ j(greater, &runtime);
7531 // rcx: RegExp data (FixedArray)
7532 // Check the representation and encoding of the subject string.
7533 Label seq_string, seq_two_byte_string, check_code;
7534 const int kStringRepresentationEncodingMask =
7535 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
7536 __ movq(rax, Operand(rsp, kSubjectOffset));
7537 __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
7538 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
7539 __ andb(rbx, Immediate(kStringRepresentationEncodingMask));
7540 // First check for sequential string.
7541 ASSERT_EQ(0, kStringTag);
7542 ASSERT_EQ(0, kSeqStringTag);
7543 __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
7544 __ j(zero, &seq_string);
7546 // Check for flat cons string.
7547 // A flat cons string is a cons string where the second part is the empty
7548 // string. In that case the subject string is just the first part of the cons
7549 // string. Also in this case the first part of the cons string is known to be
7550 // a sequential string or an external string.
7551 __ andb(rbx, Immediate(kStringRepresentationMask));
7552 __ cmpb(rbx, Immediate(kConsStringTag));
7553 __ j(not_equal, &runtime);
7554 __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
7555 __ Cmp(rdx, Factory::empty_string());
7556 __ j(not_equal, &runtime);
7557 __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
7558 __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
7559 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
7560 ASSERT_EQ(0, kSeqStringTag);
7561 __ testb(rbx, Immediate(kStringRepresentationMask));
7562 __ j(not_zero, &runtime);
7563 __ andb(rbx, Immediate(kStringRepresentationEncodingMask));
7565 __ bind(&seq_string);
7566 // rax: subject string (sequential, either ascii or two byte)
7567 // rbx: subject string type & kStringRepresentationEncodingMask
7568 // rcx: RegExp data (FixedArray)
7569 // Check that the irregexp code has been generated for an ascii string. If
7570 // it has, the field contains a code object; otherwise it contains the hole.
7571 const int kSeqTwoByteString = kStringTag | kSeqStringTag | kTwoByteStringTag;
7572 __ cmpb(rbx, Immediate(kSeqTwoByteString));
7573 __ j(equal, &seq_two_byte_string);
7574 if (FLAG_debug_code) {
7575 __ cmpb(rbx, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
7576 __ Check(equal, "Expected sequential ascii string");
}
7578 __ movq(r12, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
7579 __ Set(rdi, 1); // Type is ascii.
7580 __ jmp(&check_code);
7582 __ bind(&seq_two_byte_string);
7583 // rax: subject string
7584 // rcx: RegExp data (FixedArray)
7585 __ movq(r12, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
7586 __ Set(rdi, 0); // Type is two byte.
7588 __ bind(&check_code);
7589 // Check that the irregexp code has been generated for the actual string
7590 // encoding. If it has, the field contains a code object; otherwise it
// contains the hole.
7592 __ CmpObjectType(r12, CODE_TYPE, kScratchRegister);
7593 __ j(not_equal, &runtime);
7595 // rax: subject string
7596 // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
7598 // Load used arguments before starting to push arguments for call to native
7599 // RegExp code to avoid handling changing stack height.
7600 __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
7601 __ SmiToInteger64(rbx, rbx); // Previous index from smi.
7603 // rax: subject string
7604 // rbx: previous index
7605 // rdi: encoding of subject string (1 if ascii 0 if two_byte);
7607 // All checks done. Now push arguments for native regexp code.
7608 __ IncrementCounter(&Counters::regexp_entry_native, 1);
7610 // rsi is caller save on Windows and is used to pass a parameter on Linux.
7613 static const int kRegExpExecuteArguments = 7;
7614 __ PrepareCallCFunction(kRegExpExecuteArguments);
7615 int argument_slots_on_stack =
7616 masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
7618 // Argument 7: Indicate that this is a direct call from JavaScript.
7619 __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
Immediate(1));
7622 // Argument 6: Start (high end) of backtracking stack memory area.
7623 __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
7624 __ movq(r9, Operand(kScratchRegister, 0));
7625 __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
7626 __ addq(r9, Operand(kScratchRegister, 0));
7627 // Argument 6 passed in r9 on Linux and on the stack on Windows.
#ifdef _WIN64
7629 __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), r9);
#endif
7632 // Argument 5: static offsets vector buffer.
7633 __ movq(r8, ExternalReference::address_of_static_offsets_vector());
7634 // Argument 5 passed in r8 on Linux and on the stack on Windows.
#ifdef _WIN64
7636 __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r8);
#endif
7639 // First four arguments are passed in registers on both Linux and Windows.
#ifdef _WIN64
Register arg4 = r9;
Register arg3 = r8;
7643 Register arg2 = rdx;
7644 Register arg1 = rcx;
#else
7646 Register arg4 = rcx;
7647 Register arg3 = rdx;
7648 Register arg2 = rsi;
7649 Register arg1 = rdi;
#endif
7652 // Keep track of aliasing between the argX registers defined above and the
// registers used.
7653 // rax: subject string
7654 // rbx: previous index
7655 // rdi: encoding of subject string (1 if ascii 0 if two_byte);
7658 // Argument 4: End of string data
7659 // Argument 3: Start of string data
7660 Label setup_two_byte, setup_rest;
__ testb(rdi, rdi);  // Test the encoding flag; movl below preserves flags.
7662 __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
7663 __ j(zero, &setup_two_byte);
7664 __ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
7665 __ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
7666 __ jmp(&setup_rest);
7667 __ bind(&setup_two_byte);
7668 __ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
7669 __ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
7671 __ bind(&setup_rest);
7672 // Argument 2: Previous index.
__ movq(arg2, rbx);

7675 // Argument 1: Subject string.
__ movq(arg1, rax);
7678 // Locate the code entry and call it.
7679 __ addq(r12, Immediate(Code::kHeaderSize - kHeapObjectTag));
7680 __ CallCFunction(r12, kRegExpExecuteArguments);
7682 // rsi is caller save, as it is used to pass a parameter.
7685 // Check the result.
Label success;
7687 __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
7688 __ j(equal, &success);
Label failure;
7690 __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
7691 __ j(equal, &failure);
7692 __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
7693 // If it is not an exception, it can only be retry. Handle that in the runtime
// system.
7694 __ j(not_equal, &runtime);
7695 // Result must now be exception. If there is no pending exception already, a
7696 // stack overflow (on the backtrack stack) was detected in RegExp code, but
7697 // the exception has not been created yet. Handle that in the runtime system.
7698 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
7699 ExternalReference pending_exception_address(Top::k_pending_exception_address);
7700 __ movq(kScratchRegister, pending_exception_address);
7701 __ Cmp(kScratchRegister, Factory::the_hole_value());
7702 __ j(equal, &runtime);
__ bind(&failure);
7704 // For failure and exception return null.
7705 __ Move(rax, Factory::null_value());
7706 __ ret(4 * kPointerSize);
7708 // Load RegExp data.
__ bind(&success);
7710 __ movq(rax, Operand(rsp, kJSRegExpOffset));
7711 __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
7712 __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
7713 // Calculate number of capture registers (number_of_captures + 1) * 2.
7714 __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
7715 __ addq(rdx, Immediate(2)); // rdx was number_of_captures * 2.
7717 // rdx: Number of capture registers
7718 // Load last_match_info which is still known to be a fast case JSArray.
7719 __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
7720 __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
7722 // rbx: last_match_info backing store (FixedArray)
7723 // rdx: number of capture registers
7724 // Store the capture count.
7725 __ Integer32ToSmi(kScratchRegister, rdx);
7726 __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
kScratchRegister);
7728 // Store last subject and last input.
7729 __ movq(rax, Operand(rsp, kSubjectOffset));
7730 __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
__ movq(rcx, rbx);
7732 __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
7733 __ movq(rax, Operand(rsp, kSubjectOffset));
7734 __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
__ movq(rcx, rbx);
7736 __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
7738 // Get the static offsets vector filled by the native regexp code.
7739 __ movq(rcx, ExternalReference::address_of_static_offsets_vector());
7741 // rbx: last_match_info backing store (FixedArray)
7742 // rcx: offsets vector
7743 // rdx: number of capture registers
7744 Label next_capture, done;
7745 // Capture register counter starts from number of capture registers and
7746 // counts down until wrapping after zero.
7747 __ bind(&next_capture);
7748 __ subq(rdx, Immediate(1));
7749 __ j(negative, &done);
7750 // Read the value from the static offsets vector buffer and make it a smi.
7751 __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
7752 __ Integer32ToSmi(rdi, rdi, &runtime);
7753 // Store the smi value in the last match info.
7754 __ movq(FieldOperand(rbx,
rdx,
times_pointer_size,
7757 RegExpImpl::kFirstCaptureOffset),
rdi);
7759 __ jmp(&next_capture);
__ bind(&done);
7762 // Return last match info.
7763 __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
7764 __ ret(4 * kPointerSize);
7766 // Do the runtime call to execute the regexp.
__ bind(&runtime);
7768 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
7769 #endif // V8_INTERPRETED_REGEXP
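// Sketch of the capture bookkeeping used twice above (hypothetical helper):
// every capture needs a start and an end register, plus one extra pair for
// the match itself, which is exactly what (number_of_captures + 1) * 2
// computes.
//
//   static int RegExpRegisterCountSketch(int number_of_captures) {
//     return (number_of_captures + 1) * 2;
//   }
//
// If this count does not fit in the static offsets vector, the stub falls
// back to the runtime system instead.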
7773 void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
Register hash,
Register mask) {
7776 __ and_(hash, mask);
7777 // Each entry in string cache consists of two pointer sized fields,
7778 // but times_twice_pointer_size (multiplication by 16) scale factor
7779 // is not supported by addrmode on x64 platform.
7780 // So we have to premultiply entry index before lookup.
7781 __ shl(hash, Immediate(kPointerSizeLog2 + 1));
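// Sketch of the premultiplied index computed above (hypothetical helper):
// each cache entry is two pointers, and x64 addressing modes cannot scale an
// index by 16, so the entry-size multiplication is folded into the index.
//
//   static uint32_t CacheEntryByteOffsetSketch(uint32_t hash, uint32_t mask) {
//     return (hash & mask) << (kPointerSizeLog2 + 1);
//   }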
7785 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register object,
Register result,
Register scratch1,
Register scratch2,
bool object_is_smi,
Label* not_found) {
7792 // Use of registers. Register result is used as a temporary.
7793 Register number_string_cache = result;
7794 Register mask = scratch1;
7795 Register scratch = scratch2;
7797 // Load the number string cache.
7798 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
7800 // Make the hash mask from the length of the number string cache. It
7801 // contains two elements (number and string) for each cache entry.
7802 __ movl(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
7803 __ shrl(mask, Immediate(1)); // Divide length by two (length is not a smi).
7804 __ subl(mask, Immediate(1)); // Make mask.
7806 // Calculate the entry in the number string cache. The hash value in the
7807 // number string cache for smis is just the smi value, and the hash for
7808 // doubles is the xor of the upper and lower words. See
7809 // Heap::GetNumberStringCache.
Label is_smi;
7811 Label load_result_from_cache;
7812 if (!object_is_smi) {
7813 __ JumpIfSmi(object, &is_smi);
7814 __ CheckMap(object, Factory::heap_number_map(), not_found, true);
7816 ASSERT_EQ(8, kDoubleSize);
7817 __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
7818 __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
7819 GenerateConvertHashCodeToIndex(masm, scratch, mask);
7821 Register index = scratch;
7822 Register probe = mask;
__ movq(probe,
7824 FieldOperand(number_string_cache,
index,
times_1,
7827 FixedArray::kHeaderSize));
7828 __ JumpIfSmi(probe, not_found);
7829 ASSERT(CpuFeatures::IsSupported(SSE2));
7830 CpuFeatures::Scope fscope(SSE2);
7831 __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
7832 __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
7833 __ comisd(xmm0, xmm1);
7834 __ j(parity_even, not_found); // Bail out if NaN is involved.
7835 __ j(not_equal, not_found); // The cache did not contain this value.
7836 __ jmp(&load_result_from_cache);
}

__ bind(&is_smi);
7840 __ movq(scratch, object);
7841 __ SmiToInteger32(scratch, scratch);
7842 GenerateConvertHashCodeToIndex(masm, scratch, mask);
7844 Register index = scratch;
7845 // Check if the entry is the smi we are looking for.
__ cmpq(object,
7847 FieldOperand(number_string_cache,
index,
times_1,
7850 FixedArray::kHeaderSize));
7851 __ j(not_equal, not_found);
7853 // Get the result from the cache.
7854 __ bind(&load_result_from_cache);
__ movq(result,
7856 FieldOperand(number_string_cache,
index,
times_1,
7859 FixedArray::kHeaderSize + kPointerSize));
7860 __ IncrementCounter(&Counters::number_to_string_native, 1);
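// Sketch of the hash probed above, mirroring the description of
// Heap::GetNumberStringCache (hypothetical helper): smis hash to their value,
// doubles to the XOR of the two 32-bit halves of their bit pattern.
//
//   static uint32_t NumberHashSketch(bool is_smi, int32_t smi_value,
//                                    uint64_t double_bits) {
//     if (is_smi) return static_cast<uint32_t>(smi_value);
//     return static_cast<uint32_t>(double_bits) ^
//            static_cast<uint32_t>(double_bits >> 32);
//   }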
7864 void NumberToStringStub::Generate(MacroAssembler* masm) {
Label runtime;

7867 __ movq(rbx, Operand(rsp, kPointerSize));
7869 // Generate code to lookup number in the number string cache.
7870 GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
7871 __ ret(1 * kPointerSize);
__ bind(&runtime);
7874 // Handle number to string in the runtime system if not found in the cache.
7875 __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
7879 static int NegativeComparisonResult(Condition cc) {
7880 ASSERT(cc != equal);
7881 ASSERT((cc == less) || (cc == less_equal)
7882 || (cc == greater) || (cc == greater_equal));
7883 return (cc == greater || cc == greater_equal) ? LESS : GREATER;
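// For example (a hedged reading of the helper above): for "x > y" the
// condition is greater, so NegativeComparisonResult returns LESS, and for
// "x < y" it returns GREATER. Either way the JavaScript comparison evaluates
// to false, which is the required outcome when NaN or undefined is involved.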
7886 void CompareStub::Generate(MacroAssembler* masm) {
7887 Label call_builtin, done;
7889 // NOTICE! This code is only reached after a smi-fast-case check, so
7890 // it is certain that at least one operand isn't a smi.
7892 // Identical objects can be compared fast, but there are some tricky cases
7893 // for NaN and undefined.
7895 Label not_identical;
__ cmpq(rax, rdx);
7897 __ j(not_equal, &not_identical);
7900 // Check for undefined. undefined OP undefined is false even though
7901 // undefined == undefined.
7902 Label check_for_nan;
7903 __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
7904 __ j(not_equal, &check_for_nan);
7905 __ Set(rax, NegativeComparisonResult(cc_));
__ ret(0);
7907 __ bind(&check_for_nan);
7910 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
7911 // so we do the second best thing - test it ourselves.
7912 // Note: if cc_ != equal, never_nan_nan_ is not used.
7913 if (never_nan_nan_ && (cc_ == equal)) {
__ Set(rax, EQUAL);
__ ret(0);
} else {
Label heap_number, return_equal;
7919 // If it's not a heap number, then return equal.
7920 __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
7921 Factory::heap_number_map());
7922 __ j(equal, &heap_number);
7923 __ bind(&return_equal);
__ Set(rax, EQUAL);
__ ret(0);

7927 __ bind(&heap_number);
7928 // It is a heap number, so return non-equal if it's NaN and equal if
// it's not NaN.
7930 // The representation of NaN values has all exponent bits (52..62) set,
7931 // and not all mantissa bits (0..51) clear.
7932 // We only allow QNaNs, which have bit 51 set (which also rules out
7933 // the value being Infinity).
7935 // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
7936 // all bits in the mask are set. We only need to check the word
7937 // that contains the exponent and high bit of the mantissa.
7938 ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
7939 __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
7941 __ addl(rdx, rdx); // Shift value and mask so mask applies to top bits.
7942 __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
if (cc_ == equal) {
7944 __ setcc(above_equal, rax);
__ ret(0);
} else {
Label nan;
7948 __ j(above_equal, &nan);
__ Set(rax, EQUAL);
__ ret(0);
__ bind(&nan);
7952 __ Set(rax, NegativeComparisonResult(cc_));
__ ret(0);
}
}

7957 __ bind(&not_identical);
7960 if (cc_ == equal) { // Both strict and non-strict.
7961 Label slow; // Fallthrough label.
7963 // If we're doing a strict equality comparison, we don't have to do
7964 // type conversion, so we generate code to do fast comparison for objects
7965 // and oddballs. Non-smi numbers and strings still go through the usual
// slow-case code.
7968 // If either is a Smi (we know that not both are), then they can only
7969 // be equal if the other is a HeapNumber. If so, use the slow case.
Label not_smis;
7972 __ SelectNonSmi(rbx, rax, rdx, &not_smis);
7974 // Check if the non-smi operand is a heap number.
7975 __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
7976 Factory::heap_number_map());
7977 // If heap number, handle it in the slow case.
__ j(equal, &slow);
7979 // Return non-equal. ebx (the lower half of rbx) is not zero.
__ movq(rax, rbx);
__ ret(0);

__ bind(&not_smis);
7986 // If either operand is a JSObject or an oddball value, then they are not
7987 // equal since their pointers are different
7988 // There is no test for undetectability in strict equality.
7990 // If the first object is a JS object, we have done pointer comparison.
7991 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
7992 Label first_non_object;
7993 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
7994 __ j(below, &first_non_object);
7995 // Return non-zero (eax (not rax) is not zero)
7996 Label return_not_equal;
7997 ASSERT(kHeapObjectTag != 0);
7998 __ bind(&return_not_equal);
8001 __ bind(&first_non_object);
8002 // Check for oddballs: true, false, null, undefined.
8003 __ CmpInstanceType(rcx, ODDBALL_TYPE);
8004 __ j(equal, &return_not_equal);
8006 __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
8007 __ j(above_equal, &return_not_equal);
8009 // Check for oddballs: true, false, null, undefined.
8010 __ CmpInstanceType(rcx, ODDBALL_TYPE);
8011 __ j(equal, &return_not_equal);
8013 // Fall through to the general case.
8018 // Push arguments below the return address to prepare jump to builtin.
__ pop(rcx);
__ push(rdx);
__ push(rax);
__ push(rcx);
8024 // Generate the number comparison code.
8025 if (include_number_compare_) {
8026 Label non_number_comparison;
8028 FloatingPointHelper::LoadFloatOperand(masm, rdx, xmm0,
8029 &non_number_comparison);
8030 FloatingPointHelper::LoadFloatOperand(masm, rax, xmm1,
8031 &non_number_comparison);
8033 __ comisd(xmm0, xmm1);
8035 // Don't base result on EFLAGS when a NaN is involved.
8036 __ j(parity_even, &unordered);
8037 // Return a result of -1, 0, or 1, based on EFLAGS.
8038 __ movq(rax, Immediate(0)); // equal
8039 __ movq(rcx, Immediate(1));
8040 __ cmovq(above, rax, rcx);
8041 __ movq(rcx, Immediate(-1));
8042 __ cmovq(below, rax, rcx);
8043 __ ret(2 * kPointerSize); // rax, rdx were pushed
8045 // If one of the numbers was NaN, then the result is always false.
8046 // The cc is never not-equal.
8047 __ bind(&unordered);
8048 ASSERT(cc_ != not_equal);
8049 if (cc_ == less || cc_ == less_equal) {
8054 __ ret(2 * kPointerSize); // rax, rdx were pushed
8056 // The number comparison code did not provide a valid result.
8057 __ bind(&non_number_comparison);
8060 // Fast negative check for symbol-to-symbol equality.
8061 Label check_for_strings;
8063 BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
8064 BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
8066 // We've already checked for object identity, so if both operands
8067 // are symbols they aren't equal. Register eax (not rax) already holds a
8068 // non-zero value, which indicates not equal, so just return.
8069 __ ret(2 * kPointerSize);
8072 __ bind(&check_for_strings);
8074 __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &call_builtin);
8076 // Inline comparison of ascii strings.
8077 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
rdx,
rax,
rcx,
rbx,
rdi,
r8);
8086 __ Abort("Unexpected fall-through from string comparison");
8089 __ bind(&call_builtin);
8090 // Must swap argument order before calling the builtin.
__ pop(rcx);
__ pop(rdx);
__ pop(rax);
__ push(rdx);
__ push(rax);
__ push(rcx);
8097 // Figure out which native to call and setup the arguments.
8098 Builtins::JavaScript builtin;
if (cc_ == equal) {
8100 builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
8102 builtin = Builtins::COMPARE;
8103 __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
}
8106 // Restore return address on the stack.
__ push(rcx);
8109 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
8110 // tagged as a small integer.
8111 __ InvokeBuiltin(builtin, JUMP_FUNCTION);
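// Sketch of the QNaN test generated above (hypothetical helper): doubling the
// high word shifts out the sign bit, after which an unsigned comparison
// against kQuietNaNHighBitsMask << 1 checks that all exponent bits and the
// top mantissa bit are set.
//
//   static bool IsQuietNaNHighWordSketch(uint32_t high_word) {
//     return (high_word << 1) >=
//            (static_cast<uint32_t>(kQuietNaNHighBitsMask) << 1);
//   }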
8115 void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
Label* label,
Register object,
Register scratch) {
8119 __ JumpIfSmi(object, label);
8120 __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
__ movzxbq(scratch,
8122 FieldOperand(scratch, Map::kInstanceTypeOffset));
8123 // Ensure that no non-strings have the symbol bit set.
8124 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
8125 ASSERT(kSymbolTag != 0);
8126 __ testb(scratch, Immediate(kIsSymbolMask));
__ j(zero, label);
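// Sketch of the test above (hypothetical helper): because no non-string
// instance type has the symbol bit set (see the ASSERTs), a single bit test
// on the instance type is enough to rule out symbols.
//
//   static bool IsSymbolInstanceTypeSketch(int instance_type) {
//     return (instance_type & kIsSymbolMask) != 0;
//   }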
8131 // Call the function just below TOS on the stack with the given
8132 // arguments. The receiver is the TOS.
8133 void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
8134 CallFunctionFlags flags,
8136 // Push the arguments ("left-to-right") on the stack.
8137 int arg_count = args->length();
8138 for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
}

8142 // Record the position for debugging purposes.
8143 CodeForSourcePosition(position);
8145 // Use the shared code stub to call the function.
8146 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
8147 CallFunctionStub call_function(arg_count, in_loop, flags);
8148 Result answer = frame_->CallStub(&call_function, arg_count + 1);
8149 // Restore context and replace function on the stack with the
8150 // result of the stub invocation.
8151 frame_->RestoreContextRegister();
8152 frame_->SetElementAt(0, &answer);
8156 void InstanceofStub::Generate(MacroAssembler* masm) {
8157 // Implements "value instanceof function" operator.
8158 // Expected input state:
8159 // rsp[0] : return address
8160 // rsp[8] : function pointer
// rsp[16] : value
8163 // Get the object; go to the slow case if it is a smi.
Label slow;
8165 __ movq(rax, Operand(rsp, 2 * kPointerSize));
8166 __ JumpIfSmi(rax, &slow);
8168 // Check that the left hand is a JS object. Leave its map in rax.
8169 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
__ j(below, &slow);
8171 __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
__ j(above, &slow);
8174 // Get the prototype of the function.
8175 __ movq(rdx, Operand(rsp, 1 * kPointerSize));
8176 __ TryGetFunctionPrototype(rdx, rbx, &slow);
8178 // Check that the function prototype is a JS object.
8179 __ JumpIfSmi(rbx, &slow);
8180 __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
__ j(below, &slow);
8182 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
__ j(above, &slow);

8185 // Register mapping: rax is object map and rbx is function prototype.
8186 __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
8188 // Loop through the prototype chain looking for the function prototype.
8189 Label loop, is_instance, is_not_instance;
8190 __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
__ bind(&loop);
__ cmpq(rcx, rbx);
8193 __ j(equal, &is_instance);
8194 __ cmpq(rcx, kScratchRegister);
8195 __ j(equal, &is_not_instance);
8196 __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
8197 __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
__ jmp(&loop);
8200 __ bind(&is_instance);
__ xorl(rax, rax);  // Zero indicates "is an instance".
8202 __ ret(2 * kPointerSize);
8204 __ bind(&is_not_instance);
8205 __ movl(rax, Immediate(1));
8206 __ ret(2 * kPointerSize);
8208 // Slow-case: Go through the JavaScript implementation.
__ bind(&slow);
8210 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
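// Sketch of the loop above in plain C++ (hypothetical types; the generated
// code follows map->prototype links and materializes 0 for "is an instance"
// and 1 for "is not"):
//
//   struct Obj { Obj* prototype; };  // Stand-in for object->map()->prototype.
//   static int InstanceOfSketch(Obj* proto, Obj* expected, Obj* null_value) {
//     while (proto != null_value) {
//       if (proto == expected) return 0;  // Is an instance.
//       proto = proto->prototype;         // Follow the prototype chain.
//     }
//     return 1;                           // Not an instance.
//   }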
8214 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
8215 // rsp[0] : return address
8216 // rsp[8] : number of parameters
8217 // rsp[16] : receiver displacement
8218 // rsp[24] : function
8220 // The displacement is used for skipping the return address and the
8221 // frame pointer on the stack. It is the offset of the last
8222 // parameter (if any) relative to the frame pointer.
8223 static const int kDisplacement = 2 * kPointerSize;
8225 // Check if the calling frame is an arguments adaptor frame.
8226 Label adaptor_frame, try_allocate, runtime;
8227 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
8228 __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
8229 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
8230 __ j(equal, &adaptor_frame);
8232 // Get the length from the frame.
8233 __ movq(rcx, Operand(rsp, 1 * kPointerSize));
8234 __ jmp(&try_allocate);
8236 // Patch the arguments.length and the parameters pointer.
8237 __ bind(&adaptor_frame);
8238 __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
8239 __ movq(Operand(rsp, 1 * kPointerSize), rcx);
8240 // Do not clobber the length index for the indexing operation since
8241 // it is used to compute the size for allocation later.
8242 SmiIndex index = masm->SmiToIndex(rbx, rcx, kPointerSizeLog2);
8243 __ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
8244 __ movq(Operand(rsp, 2 * kPointerSize), rdx);
8246 // Try the new space allocation. Start out with computing the size of
8247 // the arguments object and the elements array.
8248 Label add_arguments_object;
8249 __ bind(&try_allocate);
8251 __ j(zero, &add_arguments_object);
8252 index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
8253 __ lea(rcx, Operand(index.reg, index.scale, FixedArray::kHeaderSize));
8254 __ bind(&add_arguments_object);
8255 __ addq(rcx, Immediate(Heap::kArgumentsObjectSize));
8257 // Do the allocation of both objects in one go.
8258 __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
8260 // Get the arguments boilerplate from the current (global) context.
8261 int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
8262 __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
8263 __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
8264 __ movq(rdi, Operand(rdi, offset));
8266 // Copy the JS object part.
8267 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
8268 __ movq(kScratchRegister, FieldOperand(rdi, i));
8269 __ movq(FieldOperand(rax, i), kScratchRegister);
8272 // Set up the callee in-object property.
8273 ASSERT(Heap::arguments_callee_index == 0);
8274 __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
8275 __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister);
8277 // Get the length (smi tagged) and set that as an in-object property too.
8278 ASSERT(Heap::arguments_length_index == 1);
8279 __ movq(rcx, Operand(rsp, 1 * kPointerSize));
8280 __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);
8282 // If there are no actual arguments, we're done.
Label done;
__ testq(rcx, rcx);
__ j(zero, &done);
8287 // Get the parameters pointer from the stack and untag the length.
8288 __ movq(rdx, Operand(rsp, 2 * kPointerSize));
8289 __ SmiToInteger32(rcx, rcx);
8291 // Set up the elements pointer in the allocated arguments object and
8292 // initialize the header in the elements fixed array.
8293 __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
8294 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
8295 __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
8296 __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
8297 __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
8299 // Copy the fixed array slots.
Label loop;
__ bind(&loop);
8302 __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
8303 __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
8304 __ addq(rdi, Immediate(kPointerSize));
8305 __ subq(rdx, Immediate(kPointerSize));
__ decl(rcx);
8307 __ j(not_zero, &loop);
8309 // Return and remove the on-stack parameters.
__ bind(&done);
8311 __ ret(3 * kPointerSize);
8313 // Do the runtime call to allocate the arguments object.
__ bind(&runtime);
8315 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
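// Sketch of the size computed at try_allocate above (hypothetical helper):
// the arguments object and its elements array are allocated in one go, and
// the elements array is omitted entirely when there are no arguments.
//
//   static int NewArgumentsObjectSizeSketch(int argument_count) {
//     int elements = (argument_count == 0)
//         ? 0
//         : FixedArray::kHeaderSize + argument_count * kPointerSize;
//     return Heap::kArgumentsObjectSize + elements;
//   }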
8319 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
8320 // The key is in rdx and the parameter count is in rax.
8322 // The displacement is used for skipping the frame pointer on the
8323 // stack. It is the offset of the last parameter (if any) relative
8324 // to the frame pointer.
8325 static const int kDisplacement = 1 * kPointerSize;
8327 // Check that the key is a smi.
Label slow;
8329 __ JumpIfNotSmi(rdx, &slow);
8331 // Check if the calling frame is an arguments adaptor frame.
Label adaptor;
8333 __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
8334 __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
8335 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
8336 __ j(equal, &adaptor);
8338 // Check index against formal parameters count limit passed in
8339 // through register rax. Use unsigned comparison to get negative
// check for free.
__ cmpq(rdx, rax);
8342 __ j(above_equal, &slow);
8344 // Read the argument from the stack and return it.
8345 SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
8346 __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
8347 index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
8348 __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
__ ret(0);

8351 // Arguments adaptor case: Check index against actual arguments
8352 // limit found in the arguments adaptor frame. Use unsigned
8353 // comparison to get negative check for free.
__ bind(&adaptor);
8355 __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ cmpq(rdx, rcx);
8357 __ j(above_equal, &slow);
8359 // Read the argument from the stack and return it.
8360 index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
8361 __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
8362 index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
8363 __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
__ ret(0);

8366 // Slow-case: Handle non-smi or out-of-bounds access to arguments
8367 // by calling the runtime system.
__ bind(&slow);
8369 __ pop(rbx); // Return address.
__ push(rdx);  // Pass the key as the argument.
__ push(rbx);  // Restore the return address.
8372 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
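// Sketch of the bounds check used in both cases above (hypothetical helper):
// treating the key as unsigned rejects negative indices and out-of-range
// indices with a single comparison.
//
//   static bool ArgumentIndexInRangeSketch(int32_t key, int32_t count) {
//     return static_cast<uint32_t>(key) < static_cast<uint32_t>(count);
//   }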
8376 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
8377 // Check that the stack contains the next handler, the frame pointer, the
8378 // state, and the return address, in that order.
8379 ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
8380 StackHandlerConstants::kStateOffset);
8381 ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
8382 StackHandlerConstants::kPCOffset);
8384 ExternalReference handler_address(Top::k_handler_address);
8385 __ movq(kScratchRegister, handler_address);
8386 __ movq(rsp, Operand(kScratchRegister, 0));
8387 // Get the next handler in the chain.
__ pop(rcx);
8389 __ movq(Operand(kScratchRegister, 0), rcx);
8390 __ pop(rbp); // pop frame pointer
8391 __ pop(rdx); // remove state
8393 // Before returning we restore the context from the frame pointer if not NULL.
8394 // The frame pointer is NULL in the exception handler of a JS entry frame.
8395 __ xor_(rsi, rsi); // tentatively set context pointer to NULL
Label skip;
8397 __ cmpq(rbp, Immediate(0));
__ j(equal, &skip);
8399 __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
__ ret(0);
8405 void CEntryStub::GenerateCore(MacroAssembler* masm,
8406 Label* throw_normal_exception,
8407 Label* throw_termination_exception,
8408 Label* throw_out_of_memory_exception,
8410 bool always_allocate_scope,
8411 int /* alignment_skew */) {
8412 // rax: result parameter for PerformGC, if any.
8413 // rbx: pointer to C function (C callee-saved).
8414 // rbp: frame pointer (restored after C call).
8415 // rsp: stack pointer (restored after C call).
8416 // r14: number of arguments including receiver (C callee-saved).
8417 // r15: pointer to the first argument (C callee-saved).
8418 // This pointer is reused in LeaveExitFrame(), so it is stored in a
8419 // callee-saved register.
8421 // Simple results returned in rax (both AMD64 and Win64 calling conventions).
8422 // Complex results must be written to address passed as first argument.
8423 // AMD64 calling convention: a struct of two pointers in rax+rdx
8425 // Check stack alignment.
8426 if (FLAG_debug_code) {
8427 __ CheckStackAlignment();
8431 // Pass failure code returned from last attempt as first argument to
8432 // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
8433 // stack is known to be aligned. This function takes one argument which is
8434 // passed in a register.
8440 __ movq(kScratchRegister,
8441 FUNCTION_ADDR(Runtime::PerformGC),
8442 RelocInfo::RUNTIME_ENTRY);
8443 __ call(kScratchRegister);
8446 ExternalReference scope_depth =
8447 ExternalReference::heap_always_allocate_scope_depth();
8448 if (always_allocate_scope) {
8449 __ movq(kScratchRegister, scope_depth);
8450 __ incl(Operand(kScratchRegister, 0));
}
#ifdef _WIN64
8455 // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9.
8456 // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
8457 __ movq(Operand(rsp, 4 * kPointerSize), r14); // argc.
8458 __ movq(Operand(rsp, 5 * kPointerSize), r15); // argv.
8459 if (result_size_ < 2) {
8460 // Pass a pointer to the Arguments object as the first argument.
8461 // Return result in single register (rax).
8462 __ lea(rcx, Operand(rsp, 4 * kPointerSize));
} else {
8464 ASSERT_EQ(2, result_size_);
8465 // Pass a pointer to the result location as the first argument.
8466 __ lea(rcx, Operand(rsp, 6 * kPointerSize));
8467 // Pass a pointer to the Arguments object as the second argument.
8468 __ lea(rdx, Operand(rsp, 4 * kPointerSize));
}
#else
8472 // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
8473 __ movq(rdi, r14); // argc.
8474 __ movq(rsi, r15); // argv.
#endif
__ call(rbx);
8477 // Result is in rax - do not destroy this register!
8479 if (always_allocate_scope) {
8480 __ movq(kScratchRegister, scope_depth);
8481 __ decl(Operand(kScratchRegister, 0));
}
8484 // Check for failure result.
8485 Label failure_returned;
8486 ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
8488 // If return value is on the stack, pop it to registers.
8489 if (result_size_ > 1) {
8490 ASSERT_EQ(2, result_size_);
8491 // Read result values stored on stack. Result is stored
8492 // above the four argument mirror slots and the two
8493 // Arguments object slots.
8494 __ movq(rax, Operand(rsp, 6 * kPointerSize));
8495 __ movq(rdx, Operand(rsp, 7 * kPointerSize));
8498 __ lea(rcx, Operand(rax, 1));
8499 // Lower 2 bits of rcx are 0 iff rax has failure tag.
8500 __ testl(rcx, Immediate(kFailureTagMask));
8501 __ j(zero, &failure_returned);
8503 // Exit the JavaScript to C++ exit frame.
8504 __ LeaveExitFrame(mode_, result_size_);
8507 // Handling of failure.
8508 __ bind(&failure_returned);
8511 // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
8512 ASSERT(Failure::RETRY_AFTER_GC == 0);
8513 __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
8516 // Special handling of out of memory exceptions.
8517 __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
8518 __ cmpq(rax, kScratchRegister);
8519 __ j(equal, throw_out_of_memory_exception);
8521 // Retrieve the pending exception and clear the variable.
8522 ExternalReference pending_exception_address(Top::k_pending_exception_address);
8523 __ movq(kScratchRegister, pending_exception_address);
8524 __ movq(rax, Operand(kScratchRegister, 0));
8525 __ movq(rdx, ExternalReference::the_hole_value_location());
8526 __ movq(rdx, Operand(rdx, 0));
8527 __ movq(Operand(kScratchRegister, 0), rdx);
8529 // Special handling of termination exceptions which are uncatchable
8530 // by JavaScript code.
8531 __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
8532 __ j(equal, throw_termination_exception);
8534 // Handle normal exception.
8535 __ jmp(throw_normal_exception);
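// Sketch of the failure check above (hypothetical helper): failure objects
// have all bits of kFailureTagMask set in their low bits, so adding 1 clears
// exactly those bits if and only if the value is a failure.
//
//   static bool IsFailureSketch(uintptr_t value) {
//     return ((value + 1) & kFailureTagMask) == 0;
//   }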
8542 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
8543 UncatchableExceptionType type) {
8544 // Fetch top stack handler.
8545 ExternalReference handler_address(Top::k_handler_address);
8546 __ movq(kScratchRegister, handler_address);
8547 __ movq(rsp, Operand(kScratchRegister, 0));
8549 // Unwind the handlers until the ENTRY handler is found.
Label loop, done;
__ bind(&loop);
8552 // Load the type of the current stack handler.
8553 const int kStateOffset = StackHandlerConstants::kStateOffset;
8554 __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
__ j(equal, &done);
8556 // Fetch the next handler in the list.
8557 const int kNextOffset = StackHandlerConstants::kNextOffset;
8558 __ movq(rsp, Operand(rsp, kNextOffset));
__ jmp(&loop);
__ bind(&done);
8562 // Set the top handler address to next handler past the current ENTRY handler.
8563 __ movq(kScratchRegister, handler_address);
8564 __ pop(Operand(kScratchRegister, 0));
8566 if (type == OUT_OF_MEMORY) {
8567 // Set external caught exception to false.
8568 ExternalReference external_caught(Top::k_external_caught_exception_address);
8569 __ movq(rax, Immediate(false));
8570 __ store_rax(external_caught);
8572 // Set pending exception and rax to out of memory exception.
8573 ExternalReference pending_exception(Top::k_pending_exception_address);
8574 __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
8575 __ store_rax(pending_exception);
8578 // Clear the context pointer.
__ xor_(rsi, rsi);
8581 // Restore registers from handler.
8582 ASSERT_EQ(StackHandlerConstants::kNextOffset + kPointerSize,
8583 StackHandlerConstants::kFPOffset);
__ pop(rbp);  // Frame pointer.
8585 ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
8586 StackHandlerConstants::kStateOffset);
8587 __ pop(rdx); // State.
8589 ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
8590 StackHandlerConstants::kPCOffset);
__ ret(0);
8595 void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow;
8598 // If the receiver might be a value (string, number or boolean) check for this
8599 // and box it if it is.
8600 if (ReceiverMightBeValue()) {
8601 // Get the receiver from the stack.
8602 // +1 ~ return address
8603 Label receiver_is_value, receiver_is_js_object;
8604 __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
8606 // Check if receiver is a smi (which is a number value).
8607 __ JumpIfSmi(rax, &receiver_is_value);
8609 // Check if the receiver is a valid JS object.
8610 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
8611 __ j(above_equal, &receiver_is_js_object);
8613 // Call the runtime to box the value.
8614 __ bind(&receiver_is_value);
8615 __ EnterInternalFrame();
__ push(rax);
8617 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
8618 __ LeaveInternalFrame();
8619 __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
8621 __ bind(&receiver_is_js_object);
8624 // Get the function to call from the stack.
8625 // +2 ~ receiver, return address
8626 __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
8628 // Check that the function really is a JavaScript function.
8629 __ JumpIfSmi(rdi, &slow);
8630 // Goto slow case if we do not have a function.
8631 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
8632 __ j(not_equal, &slow);
8634 // Fast-case: Just invoke the function.
8635 ParameterCount actual(argc_);
8636 __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
8638 // Slow-case: Non-function called.
__ bind(&slow);
8640 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
8641 // of the original receiver from the call site).
8642 __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
8645 __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
8646 Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
8647 __ Jump(adaptor, RelocInfo::CODE_TARGET);
8651 void CEntryStub::Generate(MacroAssembler* masm) {
8652 // rax: number of arguments including receiver
8653 // rbx: pointer to C function (C callee-saved)
8654 // rbp: frame pointer of calling JS frame (restored after C call)
8655 // rsp: stack pointer (restored after C call)
8656 // rsi: current context (restored)
8658 // NOTE: Invocations of builtins may return failure objects
8659 // instead of a proper result. The builtin entry handles
8660 // this by performing a garbage collection and retrying the builtin once.
8663 // Enter the exit frame that transitions from JavaScript to C++.
8664 __ EnterExitFrame(mode_, result_size_);
8666 // rax: Holds the context at this point, but should not be used.
8667 // On entry to code generated by GenerateCore, it must hold
8668 // a failure result if the collect_garbage argument to GenerateCore
8669 // is true. This failure result can be the result of code
8670 // generated by a previous call to GenerateCore. The value
8671 // of rax is then passed to Runtime::PerformGC.
8672 // rbx: pointer to builtin function (C callee-saved).
8673 // rbp: frame pointer of exit frame (restored after C call).
8674 // rsp: stack pointer (restored after C call).
8675 // r14: number of arguments including receiver (C callee-saved).
8676 // r15: argv pointer (C callee-saved).
8678 Label throw_normal_exception;
8679 Label throw_termination_exception;
8680 Label throw_out_of_memory_exception;
8682 // Call into the runtime system.
GenerateCore(masm,
8684 &throw_normal_exception,
8685 &throw_termination_exception,
8686 &throw_out_of_memory_exception,
false,
false);

8690 // Do space-specific GC and retry runtime call.
GenerateCore(masm,
8692 &throw_normal_exception,
8693 &throw_termination_exception,
8694 &throw_out_of_memory_exception,
true,
false);

8698 // Do full GC and retry runtime call one final time.
8699 Failure* failure = Failure::InternalError();
8700 __ movq(rax, failure, RelocInfo::NONE);
GenerateCore(masm,
8702 &throw_normal_exception,
8703 &throw_termination_exception,
8704 &throw_out_of_memory_exception,
true,
true);
8708 __ bind(&throw_out_of_memory_exception);
8709 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
8711 __ bind(&throw_termination_exception);
8712 GenerateThrowUncatchable(masm, TERMINATION);
8714 __ bind(&throw_normal_exception);
8715 GenerateThrowTOS(masm);
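// The three GenerateCore calls above implement an escalating retry policy.
// In hedged pseudocode:
//
//   result = call_builtin();                      // No GC requested.
//   if (retry_after_gc(result))
//     result = call_builtin_with_space_gc();      // Space-specific GC.
//   if (retry_after_gc(result))
//     result = call_builtin_with_full_gc();       // Full GC, final attempt.
//   // Any remaining failure is dispatched to one of the throw labels.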
8719 void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
8724 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, exit;
8726 #ifdef ENABLE_LOGGING_AND_PROFILING
8727 Label not_outermost_js, not_outermost_js_2;
8734 // Push the stack frame type marker twice.
8735 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
8736 __ Push(Smi::FromInt(marker)); // context slot
8737 __ Push(Smi::FromInt(marker)); // function slot
8738 // Save callee-saved registers (X64 calling conventions).
8746 // TODO(X64): Push XMM6-XMM15 (low 64 bits) as well, or make them
8747 // callee-save in JS code as well.
8749 // Save copies of the top frame descriptor on the stack.
8750 ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
8751 __ load_rax(c_entry_fp);
__ push(rax);
8754 #ifdef ENABLE_LOGGING_AND_PROFILING
8755 // If this is the outermost JS call, set js_entry_sp value.
8756 ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
8757 __ load_rax(js_entry_sp);
__ testq(rax, rax);
8759 __ j(not_zero, &not_outermost_js);
__ movq(rax, rbp);
8761 __ store_rax(js_entry_sp);
8762 __ bind(&not_outermost_js);
8765 // Call a faked try-block that does the invoke.
__ call(&invoke);
8768 // Caught exception: Store result (exception) in the pending
8769 // exception field in the JSEnv and return a failure sentinel.
8770 ExternalReference pending_exception(Top::k_pending_exception_address);
8771 __ store_rax(pending_exception);
8772 __ movq(rax, Failure::Exception(), RelocInfo::NONE);
__ jmp(&exit);
8775 // Invoke: Link this frame into the handler chain.
__ bind(&invoke);
8777 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
8779 // Clear any pending exceptions.
8780 __ load_rax(ExternalReference::the_hole_value_location());
8781 __ store_rax(pending_exception);
8783 // Fake a receiver (NULL).
8784 __ push(Immediate(0)); // receiver
8786 // Invoke the function by calling through JS entry trampoline
8787 // builtin and pop the faked function when we return. We load the address
8788 // from an external reference instead of inlining the call target address
8789 // directly in the code, because the builtin stubs may not have been
8790 // generated yet at the time this code is generated.
if (is_construct) {
8792 ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
8793 __ load_rax(construct_entry);
} else {
8795 ExternalReference entry(Builtins::JSEntryTrampoline);
__ load_rax(entry);
}
8798 __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
8799 __ call(kScratchRegister);
8801 // Unlink this frame from the handler chain.
8802 __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
8803 __ pop(Operand(kScratchRegister, 0));
8805 __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
8807 #ifdef ENABLE_LOGGING_AND_PROFILING
8808 // If the current RBP value is the same as the js_entry_sp value, it means
8809 // that the current function is the outermost.
8810 __ movq(kScratchRegister, js_entry_sp);
8811 __ cmpq(rbp, Operand(kScratchRegister, 0));
8812 __ j(not_equal, &not_outermost_js_2);
8813 __ movq(Operand(kScratchRegister, 0), Immediate(0));
8814 __ bind(&not_outermost_js_2);
8817 // Restore the top frame descriptor from the stack.
__ bind(&exit);
8819 __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
8820 __ pop(Operand(kScratchRegister, 0));
8822 // Restore callee-saved registers (X64 conventions).
8830 __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
8832 // Restore the frame pointer and return.
__ pop(rbp);
__ ret(0);
8838 // -----------------------------------------------------------------------------
8839 // Implementation of stubs.
8841 // Stub classes have a public member named masm, not masm_.
#define __ ACCESS_MASM(masm)
8843 void StackCheckStub::Generate(MacroAssembler* masm) {
8844 // Because builtins always remove the receiver from the stack, we
8845 // have to fake one to avoid underflowing the stack. The receiver
8846 // must be inserted below the return address on the stack so we
8847 // temporarily store that in a register.
__ pop(rax);  // Return address.
8849 __ Push(Smi::FromInt(0));
__ push(rax);  // Return address.
8852 // Do tail-call to runtime routine.
8853 __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
8857 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register number) {
8859 Label load_smi, done;
8861 __ JumpIfSmi(number, &load_smi);
8862 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
__ jmp(&done);

__ bind(&load_smi);
8866 __ SmiToInteger32(number, number);
__ push(number);
8868 __ fild_s(Operand(rsp, 0));
__ pop(number);

__ bind(&done);
8875 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register src,
XMMRegister dst) {
8878 Label load_smi, done;
8880 __ JumpIfSmi(src, &load_smi);
8881 __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
__ jmp(&done);

__ bind(&load_smi);
8885 __ SmiToInteger32(src, src);
8886 __ cvtlsi2sd(dst, src);

__ bind(&done);
8892 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register src,
XMMRegister dst,
8895 Label* not_number) {
8896 Label load_smi, done;
8897 ASSERT(!src.is(kScratchRegister));
8898 __ JumpIfSmi(src, &load_smi);
8899 __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
8900 __ cmpq(FieldOperand(src, HeapObject::kMapOffset), kScratchRegister);
8901 __ j(not_equal, not_number);
8902 __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
__ jmp(&done);

__ bind(&load_smi);
8906 __ SmiToInteger32(kScratchRegister, src);
8907 __ cvtlsi2sd(dst, kScratchRegister);

__ bind(&done);
8913 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
XMMRegister dst1,
XMMRegister dst2) {
8916 __ movq(kScratchRegister, rdx);
8917 LoadFloatOperand(masm, kScratchRegister, dst1);
8918 __ movq(kScratchRegister, rax);
8919 LoadFloatOperand(masm, kScratchRegister, dst2);
8923 void FloatingPointHelper::LoadFloatOperandsFromSmis(MacroAssembler* masm,
XMMRegister dst1,
XMMRegister dst2) {
8926 __ SmiToInteger32(kScratchRegister, rdx);
8927 __ cvtlsi2sd(dst1, kScratchRegister);
8928 __ SmiToInteger32(kScratchRegister, rax);
8929 __ cvtlsi2sd(dst2, kScratchRegister);
8933 // Input: rdx, rax are the left and right objects of a bit op.
8934 // Output: rax, rcx are left and right integers for a bit op.
8935 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
bool use_sse3,
8937 Label* conversion_failure) {
8938 // Check float operands.
8939 Label arg1_is_object, check_undefined_arg1;
8940 Label arg2_is_object, check_undefined_arg2;
8941 Label load_arg2, done;
8943 __ JumpIfNotSmi(rdx, &arg1_is_object);
8944 __ SmiToInteger32(rdx, rdx);
__ jmp(&load_arg2);
8947 // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
8948 __ bind(&check_undefined_arg1);
8949 __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
8950 __ j(not_equal, conversion_failure);
8951 __ movl(rdx, Immediate(0));
__ jmp(&load_arg2);

8954 __ bind(&arg1_is_object);
8955 __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
8956 __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
8957 __ j(not_equal, &check_undefined_arg1);
8958 // Get the untagged integer version of the rdx heap number in rcx.
8959 IntegerConvert(masm, rdx, use_sse3, conversion_failure);
__ movl(rdx, rcx);

8962 // Here rdx has the untagged integer, rax has a Smi or a heap number.
8963 __ bind(&load_arg2);
8964 // Test if arg2 is a Smi.
8965 __ JumpIfNotSmi(rax, &arg2_is_object);
8966 __ SmiToInteger32(rax, rax);
__ movl(rcx, rax);
__ jmp(&done);
8970 // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
8971 __ bind(&check_undefined_arg2);
8972 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
8973 __ j(not_equal, conversion_failure);
8974 __ movl(rcx, Immediate(0));
__ jmp(&done);

8977 __ bind(&arg2_is_object);
8978 __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
8979 __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
8980 __ j(not_equal, &check_undefined_arg2);
8981 // Get the untagged integer version of the rax heap number in rcx.
8982 IntegerConvert(masm, rax, use_sse3, conversion_failure);

__ bind(&done);
__ movl(rax, rdx);
8988 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
Register lhs,
Register rhs) {
8991 Label load_smi_lhs, load_smi_rhs, done_load_lhs, done;
8992 __ JumpIfSmi(lhs, &load_smi_lhs);
8993 __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset));
8994 __ bind(&done_load_lhs);
8996 __ JumpIfSmi(rhs, &load_smi_rhs);
8997 __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset));
__ jmp(&done);

9000 __ bind(&load_smi_lhs);
9001 __ SmiToInteger64(kScratchRegister, lhs);
9002 __ push(kScratchRegister);
9003 __ fild_d(Operand(rsp, 0));
9004 __ pop(kScratchRegister);
9005 __ jmp(&done_load_lhs);
9007 __ bind(&load_smi_rhs);
9008 __ SmiToInteger64(kScratchRegister, rhs);
9009 __ push(kScratchRegister);
9010 __ fild_d(Operand(rsp, 0));
9011 __ pop(kScratchRegister);

__ bind(&done);
9017 void FloatingPointHelper::CheckNumberOperands(MacroAssembler* masm,
Label* non_float) {
9019 Label test_other, done;
9020 // Test if both operands are numbers (heap_numbers or smis).
9021 // If not, jump to label non_float.
9022 __ JumpIfSmi(rdx, &test_other); // argument in rdx is OK
9023 __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
9024 __ j(not_equal, non_float); // The argument in rdx is not a number.
9026 __ bind(&test_other);
9027 __ JumpIfSmi(rax, &done); // argument in rax is OK
9028 __ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map());
9029 __ j(not_equal, non_float); // The argument in rax is not a number.
9031 // Fall-through: Both operands are numbers.
__ bind(&done);
9036 const char* GenericBinaryOpStub::GetName() {
9037 if (name_ != NULL) return name_;
9038 const int len = 100;
9039 name_ = Bootstrapper::AllocateAutoDeletedArray(len);
9040 if (name_ == NULL) return "OOM";
9041 const char* op_name = Token::Name(op_);
9042 const char* overwrite_name;
9044 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
9045 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
9046 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
9047 default: overwrite_name = "UnknownOverwrite"; break;
9050 OS::SNPrintF(Vector<char>(name_, len),
9051 "GenericBinaryOpStub_%s_%s%s_%s%s_%s%s_%s",
9054 (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
9055 args_in_registers_ ? "RegArgs" : "StackArgs",
9056 args_reversed_ ? "_R" : "",
9057 use_sse3_ ? "SSE3" : "SSE2",
9058 static_operands_type_.ToString(),
9059 BinaryOpIC::GetName(runtime_operands_type_));
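// Illustrative sketch (not part of the stub): the format string above
// produces names like the one below. The field values here are placeholder
// assumptions, not necessarily real TypeInfo or BinaryOpIC names.
#include <stdio.h>

static inline void StubNameDemo(char* buf, int len) {
  snprintf(buf, len, "GenericBinaryOpStub_%s_%s%s_%s%s_%s%s_%s",
           "ADD", "Alloc", "", "StackArgs", "", "SSE2",
           "Unknown", "Default");
  // buf: "GenericBinaryOpStub_ADD_Alloc_StackArgs_SSE2Unknown_Default"
}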
9064 void GenericBinaryOpStub::GenerateCall(
9065 MacroAssembler* masm,
9068 if (!ArgsInRegistersSupported()) {
9069 // Pass arguments on the stack.
9073 // The calling convention with registers is left in rdx and right in rax.
9074 Register left_arg = rdx;
9075 Register right_arg = rax;
9076 if (!(left.is(left_arg) && right.is(right_arg))) {
9077 if (left.is(right_arg) && right.is(left_arg)) {
9078 if (IsOperationCommutative()) {
9081 __ xchg(left, right);
9083 } else if (left.is(left_arg)) {
9084 __ movq(right_arg, right);
9085 } else if (right.is(right_arg)) {
9086 __ movq(left_arg, left);
9087 } else if (left.is(right_arg)) {
9088 if (IsOperationCommutative()) {
9089 __ movq(left_arg, right);
9092 // Order of moves is important to avoid destroying the left argument.
9093 __ movq(left_arg, left);
9094 __ movq(right_arg, right);
9096 } else if (right.is(left_arg)) {
9097 if (IsOperationCommutative()) {
9098 __ movq(right_arg, left);
9101 // Order of moves is important to avoid destroying the right argument.
9102 __ movq(right_arg, right);
9103 __ movq(left_arg, left);
9106 // Order of moves is not important.
9107 __ movq(left_arg, left);
9108 __ movq(right_arg, right);
9112 // Update flags to indicate that arguments are in registers.
9113 SetArgsInRegisters();
9114 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
9122 void GenericBinaryOpStub::GenerateCall(
9123 MacroAssembler* masm,
9126 if (!ArgsInRegistersSupported()) {
9127 // Pass arguments on the stack.
9131 // The calling convention with registers is left in rdx and right in rax.
9132 Register left_arg = rdx;
9133 Register right_arg = rax;
9134 if (left.is(left_arg)) {
9135 __ Move(right_arg, right);
9136 } else if (left.is(right_arg) && IsOperationCommutative()) {
9137 __ Move(left_arg, right);
9140 // For non-commutative operations, left and right_arg might be
9141 // the same register. Therefore, the order of the moves is
9142 // important here in order to not overwrite left before moving it into left_arg.
9144 __ movq(left_arg, left);
9145 __ Move(right_arg, right);
9148 // Update flags to indicate that arguments are in registers.
9149 SetArgsInRegisters();
9150 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
9158 void GenericBinaryOpStub::GenerateCall(
9159 MacroAssembler* masm,
9162 if (!ArgsInRegistersSupported()) {
9163 // Pass arguments on the stack.
9167 // The calling convention with registers is left in rdx and right in rax.
9168 Register left_arg = rdx;
9169 Register right_arg = rax;
9170 if (right.is(right_arg)) {
9171 __ Move(left_arg, left);
9172 } else if (right.is(left_arg) && IsOperationCommutative()) {
9173 __ Move(right_arg, left);
9176 // For non-commutative operations, right and left_arg might be
9177 // the same register. Therefore, the order of the moves is
9178 // important here in order to not overwrite right before moving it into right_arg.
9180 __ movq(right_arg, right);
9181 __ Move(left_arg, left);
9183 // Update flags to indicate that arguments are in registers.
9184 SetArgsInRegisters();
9185 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
9193 Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
9194 VirtualFrame* frame,
9197 if (ArgsInRegistersSupported()) {
9198 SetArgsInRegisters();
9199 return frame->CallStub(this, left, right);
9203 return frame->CallStub(this, 2);
9208 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
9209 // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
9210 // dividend in rax and rdx free for the division. Use rax, rbx for those.
9211 Comment load_comment(masm, "-- Load arguments");
9212 Register left = rdx;
9213 Register right = rax;
9214 if (op_ == Token::DIV || op_ == Token::MOD) {
9217 if (HasArgsInRegisters()) {
9222 if (!HasArgsInRegisters()) {
9223 __ movq(right, Operand(rsp, 1 * kPointerSize));
9224 __ movq(left, Operand(rsp, 2 * kPointerSize));
9227 // 2. Smi check both operands. Skip the check for OR as it is better combined
9228 // with the actual operation.
9230 if (op_ != Token::BIT_OR) {
9231 Comment smi_check_comment(masm, "-- Smi check arguments");
9232 __ JumpIfNotBothSmi(left, right, &not_smis);
9235 // 3. Operands are both smis (except for OR), perform the operation leaving
9236 // the result in rax and check the result if necessary.
9237 Comment perform_smi(masm, "-- Perform smi operation");
9238 Label use_fp_on_smis;
9241 ASSERT(right.is(rax));
9242 __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
9247 __ SmiSub(left, left, right, &use_fp_on_smis);
9253 ASSERT(right.is(rax));
9254 __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
9258 ASSERT(left.is(rax));
9259 __ SmiDiv(left, left, right, &use_fp_on_smis);
9263 ASSERT(left.is(rax));
9264 __ SmiMod(left, left, right, slow);
9268 ASSERT(right.is(rax));
9269 __ movq(rcx, right); // Save the right operand.
9270 __ SmiOr(right, right, left); // BIT_OR is commutative.
9271 __ testb(right, Immediate(kSmiTagMask));
9272 __ j(not_zero, &not_smis);
9275 case Token::BIT_AND:
9276 ASSERT(right.is(rax));
9277 __ SmiAnd(right, right, left); // BIT_AND is commutative.
9280 case Token::BIT_XOR:
9281 ASSERT(right.is(rax));
9282 __ SmiXor(right, right, left); // BIT_XOR is commutative.
9290 __ SmiShiftArithmeticRight(left, left, right);
9293 __ SmiShiftLogicalRight(left, left, right, slow);
9296 __ SmiShiftLeft(left, left, right, slow);
9309 // 4. Emit return of result in rax.
9310 GenerateReturn(masm);
9312 // 5. For some operations emit inline code to perform floating point
9313 // operations on known smis (e.g., if the result of the operation
9314 // overflowed the smi range).
9320 __ bind(&use_fp_on_smis);
9321 if (op_ == Token::DIV) {
9325 // left is rdx, right is rax.
9326 __ AllocateHeapNumber(rbx, rcx, slow);
9327 FloatingPointHelper::LoadFloatOperandsFromSmis(masm, xmm4, xmm5);
9329 case Token::ADD: __ addsd(xmm4, xmm5); break;
9330 case Token::SUB: __ subsd(xmm4, xmm5); break;
9331 case Token::MUL: __ mulsd(xmm4, xmm5); break;
9332 case Token::DIV: __ divsd(xmm4, xmm5); break;
9333 default: UNREACHABLE();
9335 __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm4);
9337 GenerateReturn(masm);
9343 // 6. Non-smi operands, fall out to the non-smi code with the operands in rdx and rax.
9345 Comment done_comment(masm, "-- Enter non-smi code");
9351 // Operands are in rax, rbx at this point.
9357 // Right operand is saved in rcx and rax was destroyed by the smi operation.
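// Illustrative sketch (not part of the stub): the x64 smi layout assumed by
// the smi code above. A smi keeps its 32-bit payload in the upper half of
// the word; the low 32 bits, including the 0 tag bit, are zero, so smi
// arithmetic can use plain 64-bit instructions. Helper names hypothetical.
#include <stdint.h>

static inline int64_t SmiTagSketch(int32_t value) {
  return static_cast<int64_t>(value) << 32;  // kSmiShift == 32, kSmiTag == 0.
}
static inline bool IsSmiSketch(int64_t word) {
  return (word & 1) == 0;  // Heap object pointers have the low tag bit set.
}
static inline int32_t SmiUntagSketch(int64_t word) {
  return static_cast<int32_t>(word >> 32);  // SmiToInteger32.
}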
9368 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
9371 if (ShouldGenerateSmiCode()) {
9372 GenerateSmiCode(masm, &call_runtime);
9373 } else if (op_ != Token::MOD) {
9374 if (!HasArgsInRegisters()) {
9375 GenerateLoadArguments(masm);
9378 // Floating point case.
9379 if (ShouldGenerateFPCode()) {
9385 if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
9386 HasSmiCodeInStub()) {
9387 // Execution reaches this point when the first non-smi argument occurs
9388 // (and only if smi code is generated). This is the right moment to
9389 // patch to HEAP_NUMBERS state. The transition is attempted only for
9390 // the four basic operations. The stub stays in the DEFAULT state
9391 // forever for all other operations (also if smi code is skipped).
9392 GenerateTypeTransition(masm);
9398 if (static_operands_type_.IsNumber()) {
9399 if (FLAG_debug_code) {
9400 // Assert at runtime that inputs are only numbers.
9401 __ AbortIfNotNumber(rdx, "GenericBinaryOpStub operand not a number.");
9402 __ AbortIfNotNumber(rax, "GenericBinaryOpStub operand not a number.");
9405 FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
9407 // Fast-case: Both operands are numbers.
9408 // xmm4 and xmm5 are volatile XMM registers.
9409 FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
9412 case Token::ADD: __ addsd(xmm4, xmm5); break;
9413 case Token::SUB: __ subsd(xmm4, xmm5); break;
9414 case Token::MUL: __ mulsd(xmm4, xmm5); break;
9415 case Token::DIV: __ divsd(xmm4, xmm5); break;
9416 default: UNREACHABLE();
9418 // Allocate a heap number, if needed.
9419 Label skip_allocation;
9420 OverwriteMode mode = mode_;
9421 if (HasArgsReversed()) {
9422 if (mode == OVERWRITE_RIGHT) {
9423 mode = OVERWRITE_LEFT;
9424 } else if (mode == OVERWRITE_LEFT) {
9425 mode = OVERWRITE_RIGHT;
9429 case OVERWRITE_LEFT:
9430 __ JumpIfNotSmi(rdx, &skip_allocation);
9431 __ AllocateHeapNumber(rbx, rcx, &call_runtime);
9433 __ bind(&skip_allocation);
9436 case OVERWRITE_RIGHT:
9437 // If the argument in rax is already an object, we skip the
9438 // allocation of a heap number.
9439 __ JumpIfNotSmi(rax, &skip_allocation);
9442 // Allocate a heap number for the result. Keep rax and rdx intact
9443 // for the possible runtime call.
9444 __ AllocateHeapNumber(rbx, rcx, &call_runtime);
9446 __ bind(&skip_allocation);
9448 default: UNREACHABLE();
9450 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
9451 GenerateReturn(masm);
9452 __ bind(&not_floats);
9453 if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
9454 !HasSmiCodeInStub()) {
9455 // Execution reaches this point when the first non-number argument
9456 // occurs (and only if smi code is skipped from the stub, otherwise
9457 // the patching has already been done earlier in this case branch).
9458 // A perfect moment to try patching to STRINGS for the ADD operation.
9459 if (op_ == Token::ADD) {
9460 GenerateTypeTransition(masm);
9466 // For MOD we go directly to runtime in the non-smi case.
9470 case Token::BIT_AND:
9471 case Token::BIT_XOR:
9475 Label skip_allocation, non_smi_result;
9476 FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
9478 case Token::BIT_OR: __ orl(rax, rcx); break;
9479 case Token::BIT_AND: __ andl(rax, rcx); break;
9480 case Token::BIT_XOR: __ xorl(rax, rcx); break;
9481 case Token::SAR: __ sarl_cl(rax); break;
9482 case Token::SHL: __ shll_cl(rax); break;
9483 case Token::SHR: __ shrl_cl(rax); break;
9484 default: UNREACHABLE();
9486 if (op_ == Token::SHR) {
9487 // Check if result is non-negative. This can only happen for a shift
9488 // by zero, which also doesn't update the sign flag.
9490 __ j(negative, &non_smi_result);
9492 __ JumpIfNotValidSmiValue(rax, &non_smi_result);
9493 // Tag smi result, if possible, and return.
9494 __ Integer32ToSmi(rax, rax);
9495 GenerateReturn(masm);
9497 // All ops except SHR return a signed int32 that we load in a HeapNumber.
9499 if (op_ != Token::SHR && non_smi_result.is_linked()) {
9500 __ bind(&non_smi_result);
9501 // Allocate a heap number if needed.
9502 __ movsxlq(rbx, rax); // rbx: sign extended 32-bit result
9504 case OVERWRITE_LEFT:
9505 case OVERWRITE_RIGHT:
9506 // If the operand was an object, we skip the
9507 // allocation of a heap number.
9508 __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
9509 1 * kPointerSize : 2 * kPointerSize));
9510 __ JumpIfNotSmi(rax, &skip_allocation);
9513 __ AllocateHeapNumber(rax, rcx, &call_runtime);
9514 __ bind(&skip_allocation);
9516 default: UNREACHABLE();
9518 // Store the result in the HeapNumber and return.
9519 __ movq(Operand(rsp, 1 * kPointerSize), rbx);
9520 __ fild_s(Operand(rsp, 1 * kPointerSize));
9521 __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
9522 GenerateReturn(masm);
9525 // SHR should return uint32 - go to runtime for non-smi/negative result.
9526 if (op_ == Token::SHR) {
9527 __ bind(&non_smi_result);
9531 default: UNREACHABLE(); break;
9535 // If all else fails, use the runtime system to get the correct
9536 // result. If arguments were passed in registers, place them on the
9537 // stack in the correct order below the return address.
9538 __ bind(&call_runtime);
9540 if (HasArgsInRegisters()) {
9541 GenerateRegisterArgsPush(masm);
9546 // Registers containing left and right operands respectively.
9549 if (HasArgsReversed()) {
9557 // Test for string arguments before calling runtime.
9558 Label not_strings, both_strings, not_string1, string1, string1_smi2;
9560 // If this stub has already generated FP-specific code then the arguments
9561 // are already in rdx, rax.
9562 if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
9563 GenerateLoadArguments(masm);
9567 is_smi = masm->CheckSmi(lhs);
9568 __ j(is_smi, &not_string1);
9569 __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
9570 __ j(above_equal, &not_string1);
9572 // First argument is a string, test second.
9573 is_smi = masm->CheckSmi(rhs);
9574 __ j(is_smi, &string1_smi2);
9575 __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
9576 __ j(above_equal, &string1);
9578 // First and second argument are strings.
9579 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
9580 __ TailCallStub(&string_add_stub);
9582 __ bind(&string1_smi2);
9583 // First argument is a string, second is a smi. Try to lookup the number
9584 // string for the smi in the number string cache.
9585 NumberToStringStub::GenerateLookupNumberStringCache(
9586 masm, rhs, rbx, rcx, r8, true, &string1);
9588 // Replace second argument on stack and tailcall string add stub to make
9590 __ movq(Operand(rsp, 1 * kPointerSize), rbx);
9591 __ TailCallStub(&string_add_stub);
9593 // Only first argument is a string.
9595 __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
9597 // First argument was not a string, test second.
9598 __ bind(&not_string1);
9599 is_smi = masm->CheckSmi(rhs);
9600 __ j(is_smi, &not_strings);
9601 __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
9602 __ j(above_equal, &not_strings);
9604 // Only second argument is a string.
9605 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
9607 __ bind(&not_strings);
9608 // Neither argument is a string.
9609 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
9613 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
9616 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
9619 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
9622 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
9625 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
9627 case Token::BIT_AND:
9628 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
9630 case Token::BIT_XOR:
9631 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
9634 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
9637 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
9640 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
9648 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
9649 ASSERT(!HasArgsInRegisters());
9650 __ movq(rax, Operand(rsp, 1 * kPointerSize));
9651 __ movq(rdx, Operand(rsp, 2 * kPointerSize));
9655 void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
9656 // If arguments are not passed in registers remove them from the stack before
9658 if (!HasArgsInRegisters()) {
9659 __ ret(2 * kPointerSize); // Remove both operands
9666 void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
9667 ASSERT(HasArgsInRegisters());
9669 if (HasArgsReversed()) {
9680 void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
9683 // Keep a copy of operands on the stack and make sure they are also in rdx and rax.
9685 if (HasArgsInRegisters()) {
9686 GenerateRegisterArgsPush(masm);
9688 GenerateLoadArguments(masm);
9691 // Internal frame is necessary to handle exceptions properly.
9692 __ EnterInternalFrame();
9694 // Push arguments on stack if the stub expects them there.
9695 if (!HasArgsInRegisters()) {
9699 // Call the stub proper to get the result in rax.
9700 __ call(&get_result);
9701 __ LeaveInternalFrame();
9703 // Left and right arguments are already on stack.
9705 // Push the operation result. The tail call to BinaryOp_Patch will
9706 // return it to the original caller.
9709 // Push this stub's key.
9710 __ movq(rax, Immediate(MinorKey()));
9711 __ Integer32ToSmi(rax, rax);
9714 // Although the operation and the type info are encoded into the key,
9715 // the encoding is opaque, so push them too.
9716 __ movq(rax, Immediate(op_));
9717 __ Integer32ToSmi(rax, rax);
9720 __ movq(rax, Immediate(runtime_operands_type_));
9721 __ Integer32ToSmi(rax, rax);
9726 // Perform patching to an appropriate fast case and return the result.
9727 __ TailCallExternalReference(
9728 ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
9732 // The entry point for the result calculation is assumed to be immediately
9733 // after this sequence.
9734 __ bind(&get_result);
9738 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
9739 GenericBinaryOpStub stub(key, type_info);
9740 return stub.GetCode();
9744 int CompareStub::MinorKey() {
9745 // Encode the four parameters in a unique 16 bit value. To avoid duplicate
9746 // stubs, the never-NaN-NaN condition is only taken into account if the
9747 // condition is 'equal'.
9748 ASSERT(static_cast<unsigned>(cc_) < (1 << 13));
9749 return ConditionField::encode(static_cast<unsigned>(cc_))
9750 | StrictField::encode(strict_)
9751 | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
9752 | IncludeNumberCompareField::encode(include_number_compare_);
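// Illustrative sketch (not part of the stub): the key above written with
// explicit shifts and masks. The field order is an assumption for
// illustration; the actual layout is defined by the BitField declarations.
static inline int CompareStubKeySketch(unsigned condition,  // < (1 << 13)
                                       bool strict,
                                       bool never_nan_nan,
                                       bool include_number_compare) {
  return static_cast<int>(condition)                  // 13 bits of condition.
         | (strict ? (1 << 13) : 0)
         | (never_nan_nan ? (1 << 14) : 0)
         | (include_number_compare ? (1 << 15) : 0);  // 16 bits in total.
}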
9756 // Unfortunately you have to run without snapshots to see most of these
9757 // names in the profile since most compare stubs end up in the snapshot.
9758 const char* CompareStub::GetName() {
9759 if (name_ != NULL) return name_;
9760 const int kMaxNameLength = 100;
9761 name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
9762 if (name_ == NULL) return "OOM";
9764 const char* cc_name;
9766 case less: cc_name = "LT"; break;
9767 case greater: cc_name = "GT"; break;
9768 case less_equal: cc_name = "LE"; break;
9769 case greater_equal: cc_name = "GE"; break;
9770 case equal: cc_name = "EQ"; break;
9771 case not_equal: cc_name = "NE"; break;
9772 default: cc_name = "UnknownCondition"; break;
9775 const char* strict_name = "";
9776 if (strict_ && (cc_ == equal || cc_ == not_equal)) {
9777 strict_name = "_STRICT";
9780 const char* never_nan_nan_name = "";
9781 if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
9782 never_nan_nan_name = "_NO_NAN";
9785 const char* include_number_compare_name = "";
9786 if (!include_number_compare_) {
9787 include_number_compare_name = "_NO_NUMBER";
9790 OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
9791 "CompareStub_%s%s%s%s",
9795 include_number_compare_name);
9800 void StringAddStub::Generate(MacroAssembler* masm) {
9801 Label string_add_runtime;
9803 // Load the two arguments.
9804 __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument.
9805 __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument.
9807 // Make sure that both arguments are strings if not known in advance.
9808 if (string_check_) {
9810 is_smi = masm->CheckSmi(rax);
9811 __ j(is_smi, &string_add_runtime);
9812 __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
9813 __ j(above_equal, &string_add_runtime);
9815 // First argument is a string, test second.
9816 is_smi = masm->CheckSmi(rdx);
9817 __ j(is_smi, &string_add_runtime);
9818 __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
9819 __ j(above_equal, &string_add_runtime);
9822 // Both arguments are strings.
9823 // rax: first string
9824 // rdx: second string
9825 // Check if either of the strings is empty. In that case return the other.
9826 Label second_not_zero_length, both_not_zero_length;
9827 __ movl(rcx, FieldOperand(rdx, String::kLengthOffset));
9829 __ j(not_zero, &second_not_zero_length);
9830 // Second string is empty, result is first string which is already in rax.
9831 __ IncrementCounter(&Counters::string_add_native, 1);
9832 __ ret(2 * kPointerSize);
9833 __ bind(&second_not_zero_length);
9834 __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
9836 __ j(not_zero, &both_not_zero_length);
9837 // First string is empty, result is second string which is in rdx.
9839 __ IncrementCounter(&Counters::string_add_native, 1);
9840 __ ret(2 * kPointerSize);
9842 // Both strings are non-empty.
9843 // rax: first string
9844 // rbx: length of first string
9845 // rcx: length of second string
9846 // rdx: second string
9847 // r8: map of first string if string check was performed above
9848 // r9: map of second string if string check was performed above
9849 Label string_add_flat_result, longer_than_two;
9850 __ bind(&both_not_zero_length);
9852 // If the arguments were known to be strings, maps were not loaded into r8 and r9
9853 // by the code above.
9854 if (!string_check_) {
9855 __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
9856 __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
9858 // Get the instance types of the two strings as they will be needed soon.
9859 __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
9860 __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
9862 // Look at the length of the result of adding the two strings.
9864 // Use the runtime system when adding two one-character strings, as it
9865 // contains optimizations for this specific case using the symbol table.
9866 __ cmpl(rbx, Immediate(2));
9867 __ j(not_equal, &longer_than_two);
9869 // Check that both strings are non-external ascii strings.
9870 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
9871 &string_add_runtime);
9873 // Get the two characters forming the sub string.
9874 __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
9875 __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
9877 // Try to look up the two character string in the symbol table. If it is not found,
9878 // just allocate a new one.
9879 Label make_two_character_string, make_flat_ascii_string;
9880 GenerateTwoCharacterSymbolTableProbe(masm, rbx, rcx, r14, r12, rdi, r15,
9881 &make_two_character_string);
9882 __ IncrementCounter(&Counters::string_add_native, 1);
9883 __ ret(2 * kPointerSize);
9885 __ bind(&make_two_character_string);
9887 __ jmp(&make_flat_ascii_string);
9889 __ bind(&longer_than_two);
9890 // Check if resulting string will be flat.
9891 __ cmpl(rbx, Immediate(String::kMinNonFlatLength));
9892 __ j(below, &string_add_flat_result);
9893 // Handle exceptionally long strings in the runtime system.
9894 ASSERT((String::kMaxLength & 0x80000000) == 0);
9895 __ cmpl(rbx, Immediate(String::kMaxLength));
9896 __ j(above, &string_add_runtime);
9898 // If result is not supposed to be flat, allocate a cons string object. If
9899 // both strings are ascii the result is an ascii cons string.
9900 // rax: first string
9901 // rbx: length of resulting flat string
9902 // rdx: second string
9903 // r8: instance type of first string
9904 // r9: instance type of second string
9905 Label non_ascii, allocated;
9908 ASSERT(kStringEncodingMask == kAsciiStringTag);
9909 __ testl(rcx, Immediate(kAsciiStringTag));
9910 __ j(zero, &non_ascii);
9911 // Allocate an ascii cons string.
9912 __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
9913 __ bind(&allocated);
9914 // Fill the fields of the cons string.
9915 __ movl(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
9916 __ movl(FieldOperand(rcx, ConsString::kHashFieldOffset),
9917 Immediate(String::kEmptyHashField));
9918 __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
9919 __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
9921 __ IncrementCounter(&Counters::string_add_native, 1);
9922 __ ret(2 * kPointerSize);
9923 __ bind(&non_ascii);
9924 // Allocate a two byte cons string.
9925 __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
9928 // Handle creating a flat result. First check that both strings are not
9929 // external strings.
9930 // rax: first string
9931 // rbx: length of resulting flat string
9932 // rdx: second string
9933 // r8: instance type of first string
9934 // r9: instance type of second string
9935 __ bind(&string_add_flat_result);
9937 __ and_(rcx, Immediate(kStringRepresentationMask));
9938 __ cmpl(rcx, Immediate(kExternalStringTag));
9939 __ j(equal, &string_add_runtime);
9941 __ and_(rcx, Immediate(kStringRepresentationMask));
9942 __ cmpl(rcx, Immediate(kExternalStringTag));
9943 __ j(equal, &string_add_runtime);
9944 // Now check if both strings are ascii strings.
9945 // rax: first string
9946 // rbx: length of resulting flat string
9947 // rdx: second string
9948 // r8: instance type of first string
9949 // r9: instance type of second string
9950 Label non_ascii_string_add_flat_result;
9951 ASSERT(kStringEncodingMask == kAsciiStringTag);
9952 __ testl(r8, Immediate(kAsciiStringTag));
9953 __ j(zero, &non_ascii_string_add_flat_result);
9954 __ testl(r9, Immediate(kAsciiStringTag));
9955 __ j(zero, &string_add_runtime);
9957 __ bind(&make_flat_ascii_string);
9958 // Both strings are ascii strings. As they are short they are both flat.
9959 __ AllocateAsciiString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
9960 // rcx: result string
9962 // Locate first character of result.
9963 __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
9964 // Locate first character of first argument.
9965 __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
9966 __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
9967 // rax: first char of first argument
9968 // rbx: result string
9969 // rcx: first character of result
9970 // rdx: second string
9971 // rdi: length of first argument
9972 GenerateCopyCharacters(masm, rcx, rax, rdi, true);
9973 // Locate first character of second argument.
9974 __ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
9975 __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
9976 // rbx: result string
9977 // rcx: next character of result
9978 // rdx: first char of second argument
9979 // rdi: length of second argument
9980 GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
9982 __ IncrementCounter(&Counters::string_add_native, 1);
9983 __ ret(2 * kPointerSize);
9985 // Handle creating a flat two byte result.
9986 // rax: first string - known to be two byte
9987 // rbx: length of resulting flat string
9988 // rdx: second string
9989 // r8: instance type of first string
9990 // r9: instance type of second string
9991 __ bind(&non_ascii_string_add_flat_result);
9992 __ and_(r9, Immediate(kAsciiStringTag));
9993 __ j(not_zero, &string_add_runtime);
9994 // Both strings are two byte strings. As they are short they are both flat.
9996 __ AllocateTwoByteString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
9997 // rcx: result string
9999 // Locate first character of result.
10000 __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
10001 // Locate first character of first argument.
10002 __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
10003 __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
10004 // rax: first char of first argument
10005 // rbx: result string
10006 // rcx: first character of result
10007 // rdx: second argument
10008 // rdi: length of first argument
10009 GenerateCopyCharacters(masm, rcx, rax, rdi, false);
10010 // Locate first character of second argument.
10011 __ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
10012 __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
10013 // rbx: result string
10014 // rcx: next character of result
10015 // rdx: first char of second argument
10016 // rdi: length of second argument
10017 GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
10019 __ IncrementCounter(&Counters::string_add_native, 1);
10020 __ ret(2 * kPointerSize);
10022 // Just jump to runtime to add the two strings.
10023 __ bind(&string_add_runtime);
10024 __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
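// Illustrative sketch (not part of the stub): the fast-path selection
// implemented above, as a C++ decision table. The last two parameters are
// stand-ins for String::kMinNonFlatLength and String::kMaxLength.
static inline const char* StringAddPlanSketch(int first_len, int second_len,
                                              int min_non_flat, int max_len) {
  if (second_len == 0) return "return first string unchanged";
  if (first_len == 0) return "return second string unchanged";
  int total = first_len + second_len;
  if (total == 2) return "probe the two-character symbol table";
  if (total < min_non_flat) return "allocate a flat string and copy";
  if (total > max_len) return "fall back to the runtime";
  return "allocate a cons string pointing at both halves";
}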
10028 void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
10035 // This loop copies one character at a time; it is only used for very short strings.
10038 __ movb(kScratchRegister, Operand(src, 0));
10039 __ movb(Operand(dest, 0), kScratchRegister);
10040 __ addq(src, Immediate(1));
10041 __ addq(dest, Immediate(1));
10043 __ movzxwl(kScratchRegister, Operand(src, 0));
10044 __ movw(Operand(dest, 0), kScratchRegister);
10045 __ addq(src, Immediate(2));
10046 __ addq(dest, Immediate(2));
10048 __ subl(count, Immediate(1));
10049 __ j(not_zero, &loop);
10053 void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
10058 // Copy characters using rep movs of quadwords. Copy any remaining
10059 // characters byte-wise after running rep movs.
10061 ASSERT(dest.is(rdi)); // rep movs destination
10062 ASSERT(src.is(rsi)); // rep movs source
10063 ASSERT(count.is(rcx)); // rep movs count
10065 // Nothing to do for zero characters.
10067 __ testq(count, count);
10070 // Make count the number of bytes to copy.
10072 ASSERT_EQ(2, sizeof(uc16)); // NOLINT
10073 __ addq(count, count);
10076 // Don't enter the rep movs if there are fewer than 8 bytes to copy.
10078 __ testq(count, Immediate(~7));
10079 __ j(zero, &last_bytes);
10081 // Copy from rsi to rdi using the rep movs instruction.
10082 __ movq(kScratchRegister, count);
10083 __ sar(count, Immediate(3)); // Number of quadwords to copy.
10086 // Find number of bytes left.
10087 __ movq(count, kScratchRegister);
10088 __ and_(count, Immediate(7));
10090 // Check if there are more bytes to copy.
10091 __ bind(&last_bytes);
10092 __ testq(count, count);
10095 // Copy remaining characters.
10098 __ movb(kScratchRegister, Operand(src, 0));
10099 __ movb(Operand(dest, 0), kScratchRegister);
10100 __ addq(src, Immediate(1));
10101 __ addq(dest, Immediate(1));
10102 __ subq(count, Immediate(1));
10103 __ j(not_zero, &loop);
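// Illustrative sketch (not part of the stubs): what the two copy helpers
// above compute in C++ terms. The simple variant moves one character per
// iteration; the REP variant moves 8-byte chunks and then a byte-wise tail,
// which is equivalent to this memcpy. Helper name hypothetical.
#include <string.h>

static inline void CopyCharsSketch(void* dest, const void* src,
                                   int count, bool ascii) {
  // Two-byte strings occupy sizeof(uc16) == 2 bytes per character.
  size_t bytes = static_cast<size_t>(count) * (ascii ? 1 : 2);
  memcpy(dest, src, bytes);
}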
10108 void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
10115 Label* not_found) {
10116 // Register scratch3 is the general scratch register in this function.
10117 Register scratch = scratch3;
10119 // Make sure that both characters are not digits, as such strings have a
10120 // different hash algorithm. Don't try to look for these in the symbol table.
10121 Label not_array_index;
10122 __ movq(scratch, c1);
10123 __ subq(scratch, Immediate(static_cast<int>('0')));
10124 __ cmpq(scratch, Immediate(static_cast<int>('9' - '0')));
10125 __ j(above, &not_array_index);
10126 __ movq(scratch, c2);
10127 __ subq(scratch, Immediate(static_cast<int>('0')));
10128 __ cmpq(scratch, Immediate(static_cast<int>('9' - '0')));
10129 __ j(below_equal, not_found);
10131 __ bind(&not_array_index);
10132 // Calculate the two character string hash.
10133 Register hash = scratch1;
10134 GenerateHashInit(masm, hash, c1, scratch);
10135 GenerateHashAddCharacter(masm, hash, c2, scratch);
10136 GenerateHashGetHash(masm, hash, scratch);
10138 // Collect the two characters in a register.
10139 Register chars = c1;
10140 __ shl(c2, Immediate(kBitsPerByte));
10143 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
10144 // hash: hash of two character string.
10146 // Load the symbol table.
10147 Register symbol_table = c2;
10148 __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
10150 // Calculate capacity mask from the symbol table capacity.
10151 Register mask = scratch2;
10152 __ movq(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
10153 __ SmiToInteger32(mask, mask);
10156 Register undefined = scratch4;
10157 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
10160 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
10161 // hash: hash of two character string (32-bit int)
10162 // symbol_table: symbol table
10163 // mask: capacity mask (32-bit int)
10164 // undefined: undefined value
10167 // Perform a number of probes in the symbol table.
10168 static const int kProbes = 4;
10169 Label found_in_symbol_table;
10170 Label next_probe[kProbes];
10171 for (int i = 0; i < kProbes; i++) {
10172 // Calculate entry in symbol table.
10173 __ movl(scratch, hash);
10175 __ addl(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
10177 __ andl(scratch, mask);
10179 // Load the entry from the symbol table.
10180 Register candidate = scratch; // Scratch register contains candidate.
10181 ASSERT_EQ(1, SymbolTable::kEntrySize);
10183 FieldOperand(symbol_table,
10185 times_pointer_size,
10186 SymbolTable::kElementsStartOffset));
10188 // If entry is undefined no string with this hash can be found.
10189 __ cmpq(candidate, undefined);
10190 __ j(equal, not_found);
10192 // If length is not 2 the string is not a candidate.
10193 __ cmpl(FieldOperand(candidate, String::kLengthOffset), Immediate(2));
10194 __ j(not_equal, &next_probe[i]);
10196 // We use kScratchRegister as a temporary register under the assumption
10197 // that JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly.
10198 Register temp = kScratchRegister;
10200 // Check that the candidate is a non-external ascii string.
10201 __ movq(temp, FieldOperand(candidate, HeapObject::kMapOffset));
10202 __ movzxbl(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
10203 __ JumpIfInstanceTypeIsNotSequentialAscii(
10204 temp, temp, &next_probe[i]);
10206 // Check if the two characters match.
10207 __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
10208 __ andl(temp, Immediate(0x0000ffff));
10209 __ cmpl(chars, temp);
10210 __ j(equal, &found_in_symbol_table);
10211 __ bind(&next_probe[i]);
10214 // No matching 2 character string found by probing.
10217 // Scratch register contains result when we fall through to here.
10218 Register result = scratch;
10219 __ bind(&found_in_symbol_table);
10220 if (!result.is(rax)) {
10221 __ movq(rax, result);
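// Illustrative sketch (not part of the stub): the closed-hash probe sequence
// used above. The table capacity is a power of two, so "mod capacity" is a
// mask; SymbolTable::GetProbeOffset is treated as opaque here, and the
// helper name is hypothetical.
#include <stdint.h>

static inline int ProbeEntrySketch(uint32_t hash, uint32_t probe_offset,
                                   uint32_t capacity_mask) {
  // capacity_mask == capacity - 1; probe i inspects slot
  // (hash + GetProbeOffset(i)) & capacity_mask, stopping at an undefined
  // entry or after kProbes attempts.
  return static_cast<int>((hash + probe_offset) & capacity_mask);
}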
10226 void StringStubBase::GenerateHashInit(MacroAssembler* masm,
10228 Register character,
10229 Register scratch) {
10230 // hash = character + (character << 10);
10231 __ movl(hash, character);
10232 __ shll(hash, Immediate(10));
10233 __ addl(hash, character);
10234 // hash ^= hash >> 6;
10235 __ movl(scratch, hash);
10236 __ sarl(scratch, Immediate(6));
10237 __ xorl(hash, scratch);
10241 void StringStubBase::GenerateHashAddCharacter(MacroAssembler* masm,
10243 Register character,
10244 Register scratch) {
10245 // hash += character;
10246 __ addl(hash, character);
10247 // hash += hash << 10;
10248 __ movl(scratch, hash);
10249 __ shll(scratch, Immediate(10));
10250 __ addl(hash, scratch);
10251 // hash ^= hash >> 6;
10252 __ movl(scratch, hash);
10253 __ sarl(scratch, Immediate(6));
10254 __ xorl(hash, scratch);
10258 void StringStubBase::GenerateHashGetHash(MacroAssembler* masm,
10260 Register scratch) {
10261 // hash += hash << 3;
10262 __ movl(scratch, hash);
10263 __ shll(scratch, Immediate(3));
10264 __ addl(hash, scratch);
10265 // hash ^= hash >> 11;
10266 __ movl(scratch, hash);
10267 __ sarl(scratch, Immediate(11));
10268 __ xorl(hash, scratch);
10269 // hash += hash << 15;
10270 __ movl(scratch, hash);
10271 __ shll(scratch, Immediate(15));
10272 __ addl(hash, scratch);
10274 // if (hash == 0) hash = 27;
10275 Label hash_not_zero;
10276 __ testl(hash, hash);
10277 __ j(not_zero, &hash_not_zero);
10278 __ movl(hash, Immediate(27));
10279 __ bind(&hash_not_zero);
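// Illustrative sketch (not part of the stubs): the three helpers above
// compute a Jenkins one-at-a-time style hash. The full two-character
// computation, modeled with the unsigned shifts the comments describe:
#include <stdint.h>

static inline uint32_t TwoCharacterHashSketch(uint8_t c1, uint8_t c2) {
  uint32_t hash = c1 + (static_cast<uint32_t>(c1) << 10);  // GenerateHashInit.
  hash ^= hash >> 6;
  hash += c2;                                   // GenerateHashAddCharacter.
  hash += hash << 10;
  hash ^= hash >> 6;
  hash += hash << 3;                            // GenerateHashGetHash.
  hash ^= hash >> 11;
  hash += hash << 15;
  if (hash == 0) hash = 27;  // Zero is reserved for "hash not computed".
  return hash;
}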
10282 void SubStringStub::Generate(MacroAssembler* masm) {
10285 // Stack frame on entry.
10286 // rsp[0]: return address
10291 const int kToOffset = 1 * kPointerSize;
10292 const int kFromOffset = kToOffset + kPointerSize;
10293 const int kStringOffset = kFromOffset + kPointerSize;
10294 const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
10296 // Make sure first argument is a string.
10297 __ movq(rax, Operand(rsp, kStringOffset));
10298 ASSERT_EQ(0, kSmiTag);
10299 __ testl(rax, Immediate(kSmiTagMask));
10300 __ j(zero, &runtime);
10301 Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
10302 __ j(NegateCondition(is_string), &runtime);
10305 // rbx: instance type
10306 // Calculate length of sub string using the smi values.
10307 Label result_longer_than_two;
10308 __ movq(rcx, Operand(rsp, kToOffset));
10309 __ movq(rdx, Operand(rsp, kFromOffset));
10310 __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
10312 __ SmiSub(rcx, rcx, rdx, NULL); // Overflow doesn't happen.
10313 __ j(negative, &runtime);
10314 // Special handling of sub-strings of length 1 and 2. One character strings
10315 // are handled in the runtime system (looked up in the single character
10316 // cache). Two character strings are looked up in the symbol table.
10317 __ SmiToInteger32(rcx, rcx);
10318 __ cmpl(rcx, Immediate(2));
10319 __ j(greater, &result_longer_than_two);
10320 __ j(less, &runtime);
10322 // Sub string of length 2 requested.
10324 // rbx: instance type
10325 // rcx: sub string length (value is 2)
10326 // rdx: from index (smi)
10327 __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &runtime);
10329 // Get the two characters forming the sub string.
10330 __ SmiToInteger32(rdx, rdx); // From index is no longer smi.
10331 __ movzxbq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
10333 FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize + 1));
10335 // Try to lookup two character string in symbol table.
10336 Label make_two_character_string;
10337 GenerateTwoCharacterSymbolTableProbe(masm, rbx, rcx, rax, rdx, rdi, r14,
10338 &make_two_character_string);
10339 __ ret(3 * kPointerSize);
10341 __ bind(&make_two_character_string);
10342 // Setup registers for allocating the two character string.
10343 __ movq(rax, Operand(rsp, kStringOffset));
10344 __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
10345 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
10348 __ bind(&result_longer_than_two);
10351 // rbx: instance type
10352 // rcx: result string length
10353 // Check for flat ascii string.
10354 Label non_ascii_flat;
10355 __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &non_ascii_flat);
10357 // Allocate the result.
10358 __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
10360 // rax: result string
10361 // rcx: result string length
10362 __ movq(rdx, rsi); // rsi is used by the following code.
10363 // Locate first character of result.
10364 __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
10365 // Load string argument and locate character of sub string start.
10366 __ movq(rsi, Operand(rsp, kStringOffset));
10367 __ movq(rbx, Operand(rsp, kFromOffset));
10369 SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1);
10370 __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
10371 SeqAsciiString::kHeaderSize - kHeapObjectTag));
10374 // rax: result string
10375 // rcx: result length
10376 // rdx: original value of rsi
10377 // rdi: first character of result
10378 // rsi: character of sub string start
10379 GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
10380 __ movq(rsi, rdx); // Restore rsi.
10381 __ IncrementCounter(&Counters::sub_string_native, 1);
10382 __ ret(kArgumentsSize);
10384 __ bind(&non_ascii_flat);
10386 // rbx: instance type & kStringRepresentationMask | kStringEncodingMask
10387 // rcx: result string length
10388 // Check for sequential two byte string.
10389 __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag));
10390 __ j(not_equal, &runtime);
10392 // Allocate the result.
10393 __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime);
10395 // rax: result string
10396 // rcx: result string length
10397 __ movq(rdx, rsi); // rsi is used by the following code.
10398 // Locate first character of result.
10399 __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
10400 // Load string argument and locate character of sub string start.
10401 __ movq(rsi, Operand(rsp, kStringOffset));
10402 __ movq(rbx, Operand(rsp, kFromOffset));
10404 SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2);
10405 __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
10406 SeqAsciiString::kHeaderSize - kHeapObjectTag));
10409 // rax: result string
10410 // rcx: result length
10411 // rdx: original value of rsi
10412 // rdi: first character of result
10413 // rsi: character of sub string start
10414 GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
10415 __ movq(rsi, rdx); // Restore rsi.
10416 __ IncrementCounter(&Counters::sub_string_native, 1);
10417 __ ret(kArgumentsSize);
10419 // Just jump to runtime to create the sub string.
10421 __ TailCallRuntime(Runtime::kSubString, 3, 1);
10425 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
10431 Register scratch4) {
10432 // Ensure that you can always subtract a string length from a non-negative
10433 // number (e.g. another length).
10434 ASSERT(String::kMaxLength < 0x7fffffff);
10436 // Find minimum length and length difference.
10437 __ movl(scratch1, FieldOperand(left, String::kLengthOffset));
10438 __ movl(scratch4, scratch1);
10439 __ subl(scratch4, FieldOperand(right, String::kLengthOffset));
10440 // Register scratch4 now holds left.length - right.length.
10441 const Register length_difference = scratch4;
10442 Label left_shorter;
10443 __ j(less, &left_shorter);
10444 // The right string isn't longer than the left one.
10445 // Get the right string's length by subtracting the (non-negative) difference
10446 // from the left string's length.
10447 __ subl(scratch1, length_difference);
10448 __ bind(&left_shorter);
10449 // Register scratch1 now holds Min(left.length, right.length).
10450 const Register min_length = scratch1;
10452 Label compare_lengths;
10453 // If min-length is zero, go directly to comparing lengths.
10454 __ testl(min_length, min_length);
10455 __ j(zero, &compare_lengths);
10457 // Registers scratch2 and scratch3 are free.
10458 Label result_not_equal;
10461 // Check characters 0 .. min_length - 1 in a loop.
10462 // Use scratch3 as loop index, min_length as limit and scratch2
10463 // for computation.
10464 const Register index = scratch3;
10465 __ movl(index, Immediate(0)); // Index into strings.
10467 // Compare characters.
10468 // TODO(lrn): Could we load more than one character at a time?
10469 __ movb(scratch2, FieldOperand(left,
10472 SeqAsciiString::kHeaderSize));
10473 // Increment index and use -1 modifier on next load to give
10474 // the previous load extra time to complete.
10475 __ addl(index, Immediate(1));
10476 __ cmpb(scratch2, FieldOperand(right,
10479 SeqAsciiString::kHeaderSize - 1));
10480 __ j(not_equal, &result_not_equal);
10481 __ cmpl(index, min_length);
10482 __ j(not_equal, &loop);
10484 // Completed loop without finding different characters.
10485 // Compare lengths (precomputed).
10486 __ bind(&compare_lengths);
10487 __ testl(length_difference, length_difference);
10488 __ j(not_zero, &result_not_equal);
10490 // Result is EQUAL.
10491 __ Move(rax, Smi::FromInt(EQUAL));
10492 __ ret(2 * kPointerSize);
10494 Label result_greater;
10495 __ bind(&result_not_equal);
10496 // Unequal comparison of left to right, either character or length.
10497 __ j(greater, &result_greater);
10500 __ Move(rax, Smi::FromInt(LESS));
10501 __ ret(2 * kPointerSize);
10503 // Result is GREATER.
10504 __ bind(&result_greater);
10505 __ Move(rax, Smi::FromInt(GREATER));
10506 __ ret(2 * kPointerSize);
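// Illustrative sketch (not part of the stub): the comparison above in C++
// terms; compare the common prefix, then fall back to the length
// difference. For ascii data, memcmp's unsigned byte compare matches cmpb.
#include <string.h>

static inline int CompareFlatAsciiSketch(const char* left, int left_len,
                                         const char* right, int right_len) {
  int min_length = left_len < right_len ? left_len : right_len;
  int diff = memcmp(left, right, static_cast<size_t>(min_length));
  if (diff != 0) return diff < 0 ? -1 : 1;  // LESS / GREATER.
  if (left_len == right_len) return 0;      // EQUAL.
  return left_len < right_len ? -1 : 1;
}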
10510 void StringCompareStub::Generate(MacroAssembler* masm) {
10513 // Stack frame on entry.
10514 // rsp[0]: return address
10515 // rsp[8]: right string
10516 // rsp[16]: left string
10518 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
10519 __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
10521 // Check for identity.
10524 __ j(not_equal, &not_same);
10525 __ Move(rax, Smi::FromInt(EQUAL));
10526 __ IncrementCounter(&Counters::string_compare_native, 1);
10527 __ ret(2 * kPointerSize);
10529 __ bind(&not_same);
10531 // Check that both are sequential ASCII strings.
10532 __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
10534 // Inline comparison of ascii strings.
10535 __ IncrementCounter(&Counters::string_compare_native, 1);
10536 GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
10538 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
10539 // tagged as a small integer.
10541 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
10549 typedef double (*ModuloFunction)(double, double);
10550 // Define custom fmod implementation.
10551 ModuloFunction CreateModuloFunction() {
10552 size_t actual_size;
10553 byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
10557 Assembler masm(buffer, static_cast<int>(actual_size));
10558 // Generated code is put into a fixed, unmovable, buffer, and not into
10559 // the V8 heap. We can't, and don't, refer to any relocatable addresses
10560 // (e.g. the JavaScript nan-object).
10562 // Windows 64 ABI passes double arguments in xmm0, xmm1 and
10563 // returns result in xmm0.
10564 // Argument backing space is allocated on the stack above
10565 // the return address.
10567 // Compute x mod y.
10568 // Load y and x (use argument backing store as temporary storage).
10569 __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
10570 __ movsd(Operand(rsp, kPointerSize), xmm0);
10571 __ fld_d(Operand(rsp, kPointerSize * 2));
10572 __ fld_d(Operand(rsp, kPointerSize));
10574 // Clear exception flags before operation.
10576 Label no_exceptions;
10579 // Clear if Invalid Operand or Zero Division exceptions are set.
10580 __ testb(rax, Immediate(5));
10581 __ j(zero, &no_exceptions);
10583 __ bind(&no_exceptions);
10586 // Compute st(0) % st(1)
10588 Label partial_remainder_loop;
10589 __ bind(&partial_remainder_loop);
10593 __ testl(rax, Immediate(0x400 /* C2 */));
10594 // If C2 is set, computation only has partial result. Loop to
10595 // continue computation.
10596 __ j(not_zero, &partial_remainder_loop);
10599 Label valid_result;
10600 Label return_result;
10601 // If Invalid Operand or Zero Division exceptions are set, return NaN.
10603 __ testb(rax, Immediate(5));
10604 __ j(zero, &valid_result);
10605 __ fstp(0); // Drop result in st(0).
10606 int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
10607 __ movq(rcx, kNaNValue, RelocInfo::NONE);
10608 __ movq(Operand(rsp, kPointerSize), rcx);
10609 __ movsd(xmm0, Operand(rsp, kPointerSize));
10610 __ jmp(&return_result);
10612 // If result is valid, return that.
10613 __ bind(&valid_result);
10614 __ fstp_d(Operand(rsp, kPointerSize));
10615 __ movsd(xmm0, Operand(rsp, kPointerSize));
10617 // Clean up FPU stack and exceptions and return xmm0.
10618 __ bind(&return_result);
10619 __ fstp(0); // Unload y.
10621 Label clear_exceptions;
10622 __ testb(rax, Immediate(0x3f /* Any Exception*/));
10623 __ j(not_zero, &clear_exceptions);
10625 __ bind(&clear_exceptions);
10630 masm.GetCode(&desc);
10631 // Call the function from C++.
10632 return FUNCTION_CAST<ModuloFunction>(buffer);
10640 } } // namespace v8::internal
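// Illustrative usage sketch (not part of this file): the pointer returned
// by CreateModuloFunction is an ordinary function with fmod semantics, e.g.
//
//   v8::internal::ModuloFunction modulo =
//       v8::internal::CreateModuloFunction();
//   double r = modulo(5.5, 2.0);  // 1.5; modulo(1.0, 0.0) is NaN.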