// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_X64

#include "x64/lithium-codegen-x64.h"
#include "code-stubs.h"
#include "stub-cache.h"
#include "hydrogen-osr.h"

namespace v8 {
namespace internal {


// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}

  virtual void AfterCall() const V8_OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


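// Sketch of a typical use, modeled on call sites later in this file: the
// generator is passed as the CallWrapper of an invoke sequence, so that
// AfterCall() records the safepoint immediately after the generated call.
//
//   SafepointGenerator safepoint_generator(
//       this, instr->pointer_map(), Safepoint::kLazyDeopt);
//   ParameterCount count(arity);
//   __ InvokeFunction(rdi, count, CALL_FUNCTION, safepoint_generator);

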
#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateJumpTable() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);
}


void LChunkBuilder::Abort(BailoutReason reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}


#ifdef _MSC_VER
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ movp(Operand(rsp, offset), rax);
  }
}
#endif


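// Example: called with offset == 5 * kPageSize right after rsp has been
// lowered, the loop stores rax at rsp + 4 * kPageSize, then 3, 2, and
// 1 * kPageSize, touching one word per page from the old stack top downward.
// On Windows (hence the _MSC_VER guard) this is presumably what grows the
// guard page one page at a time before the large reservation is used.

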
void LCodeGen::SaveCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ movsd(MemOperand(rsp, count * kDoubleSize),
             XMMRegister::FromAllocationIndex(save_iterator.Current()));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
             MemOperand(rsp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ int3();
    }
#endif

    // Sloppy mode functions need to replace the receiver with the global proxy
    // when called as functions (without an explicit receiver object).
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      Label ok;
      StackArgumentsAccessor args(rsp, scope()->num_parameters());
      __ movp(rcx, args.GetReceiverOperand());

      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
      __ j(not_equal, &ok, Label::kNear);

      __ movp(rcx, GlobalObjectOperand());
      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));

      __ movp(args.GetReceiverOperand(), rcx);

      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    ASSERT(!frame_is_built_);
    frame_is_built_ = true;
    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
    info()->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      MakeSureStackPagesMapped(slots * kPointerSize);
#endif
      __ Push(rax);
      __ Set(rax, slots);
      __ movq(kScratchRegister, kSlotsZapValue);
      Label loop;
      __ bind(&loop);
      __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
              kScratchRegister);
      __ decl(rax);
      __ j(not_zero, &loop);
      __ Pop(rax);
    } else {
      __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      MakeSureStackPagesMapped(slots * kPointerSize);
#endif
    }

    if (info()->saves_caller_doubles()) {
      SaveCallerDoubles();
    }
  }

  // Possibly allocate a local context.
  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is still in rdi.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ Push(rdi);
      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in rax.  It replaces the context passed to us.
    // It's saved in the stack and kept live in rsi.
    __ movp(rsi, rax);
    __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);

    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ movp(rax, Operand(rbp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ movp(Operand(rsi, context_offset), rax);
        // Update the write barrier. This clobbers rax and rbx.
        __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 0);
  __ subp(rsp, Immediate(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
  if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() &&
      instr->hydrogen_value()->representation().IsInteger32() &&
      instr->result()->IsRegister()) {
    __ AssertZeroExtended(ToRegister(instr->result()));
  }

  if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
    if (instr->result()->IsRegister()) {
      Register result_reg = ToRegister(instr->result());
      __ movsxlq(result_reg, result_reg);
    } else {
      // Sign extend the 32bit result in the stack slots.
      ASSERT(instr->result()->IsStackSlot());
      Operand src = ToOperand(instr->result());
      __ movsxlq(kScratchRegister, src);
      __ movq(src, kScratchRegister);
    }
  }
}


bool LCodeGen::GenerateJumpTable() {
  Label needs_frame;
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  for (int i = 0; i < jump_table_.length(); i++) {
    __ bind(&jump_table_[i].label);
    Address entry = jump_table_[i].address;
    Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (jump_table_[i].needs_frame) {
      ASSERT(!info()->saves_caller_doubles());
      __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ movp(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
        __ pushq(rbp);
        __ movp(rbp, rsp);
        __ Push(rsi);
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ Move(rsi, Smi::FromInt(StackFrame::STUB));
        __ Push(rsi);
        __ movp(rsi, MemOperand(rsp, kPointerSize));
        __ call(kScratchRegister);
      }
    } else {
      if (info()->saves_caller_doubles()) {
        ASSERT(info()->IsStub());
        RestoreCallerDoubles();
      }
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that esi isn't trashed.
        __ pushq(rbp);  // Caller's frame pointer.
        __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
        __ Push(Smi::FromInt(StackFrame::STUB));
        __ leap(rbp, Operand(rsp, 2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        frame_is_built_ = false;
        __ movp(rsp, rbp);
        __ popq(rbp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToSIMD128Register(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


XMMRegister LCodeGen::ToFloat32x4Register(LOperand* op) const {
  ASSERT(op->IsFloat32x4Register());
  return ToSIMD128Register(op->index());
}


XMMRegister LCodeGen::ToInt32x4Register(LOperand* op) const {
  ASSERT(op->IsInt32x4Register());
  return ToSIMD128Register(op->index());
}


XMMRegister LCodeGen::ToSIMD128Register(LOperand* op) const {
  ASSERT(op->IsFloat32x4Register() || op->IsInt32x4Register());
  return ToSIMD128Register(op->index());
}


bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
      chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
}


bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


static int ArgumentsOffsetWithoutFrame(int index) {
  ASSERT(index < 0);
  return -(index + 1) * kPointerSize + kPCOnStackSize;
}


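// Example: with kPointerSize == 8 and kPCOnStackSize == 8, index -1 maps to
// Operand(rsp, 8), the slot just above the return address, and each more
// negative index moves one pointer further up the stack (index -2 -> rsp+16).

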
Operand LCodeGen::ToOperand(LOperand* op) const {
  // Does not handle registers. In X64 assembler, plain registers are not
  // representable as an Operand.
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot() ||
         op->IsFloat32x4StackSlot() || op->IsInt32x4StackSlot());
  if (NeedsEagerFrame()) {
    return Operand(rbp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsFloat32x4StackSlot()) {
    translation->StoreSIMD128StackSlot(op->index(),
                                       Translation::FLOAT32x4_STACK_SLOT);
  } else if (op->IsInt32x4StackSlot()) {
    translation->StoreSIMD128StackSlot(op->index(),
                                       Translation::INT32x4_STACK_SLOT);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsFloat32x4Register()) {
    XMMRegister reg = ToFloat32x4Register(op);
    translation->StoreSIMD128Register(reg, Translation::FLOAT32x4_REGISTER);
  } else if (op->IsInt32x4Register()) {
    XMMRegister reg = ToInt32x4Register(op);
    translation->StoreSIMD128Register(reg, Translation::INT32x4_REGISTER);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               int argc) {
  ASSERT(instr != NULL);
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);
  ASSERT(instr->HasPointerMap());

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(rsi)) {
      __ movp(rsi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ movp(rsi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, environment->zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ pushfq();
    __ pushq(rax);
    Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
    __ movl(rax, count_operand);
    __ subl(rax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ movl(rax, Immediate(FLAG_deopt_every_n_times));
    __ movl(count_operand, rax);
    __ popq(rax);
    __ popfq();
    ASSERT(frame_is_built_);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ movl(count_operand, rax);
    __ popq(rax);
    __ popfq();
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) {
      __ j(NegateCondition(cc), &done, Label::kNear);
    }
    __ int3();
    __ bind(&done);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (cc == no_condition && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        jump_table_.last().address != entry ||
        jump_table_.last().needs_frame != !frame_is_built_ ||
        jump_table_.last().bailout_type != bailout_type) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}


void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(cc, environment, bailout_type);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode, int argc) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), argc, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(kind == expected_safepoint_kind_);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();

  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->result()).is(rax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ testl(dividend, dividend);
    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
    // Note that this is correct even for kMinInt operands.
    __ negl(dividend);
    __ andl(dividend, Immediate(mask));
    __ negl(dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(zero, instr->environment());
    }
    __ jmp(&done, Label::kNear);
  }

  __ bind(&dividend_is_not_negative);
  __ andl(dividend, Immediate(mask));
  __ bind(&done);
}


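// Worked example for the negative path above: divisor == -8 gives mask == 7;
// for dividend == -13 the negl/andl/negl sequence computes -(13 & 7) == -5,
// matching the required truncating semantics of -13 % -8 (and of -13 % 8,
// since only the dividend's sign reaches the remainder).

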
void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(ToRegister(instr->result()).is(rax));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr->environment());
    return;
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  __ imull(rdx, rdx, Immediate(Abs(divisor)));
  __ movl(rax, dividend);
  __ subl(rax, rdx);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ j(not_zero, &remainder_not_zero, Label::kNear);
    __ cmpl(dividend, Immediate(0));
    DeoptimizeIf(less, instr->environment());
    __ bind(&remainder_not_zero);
  }
}


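// Note on the sequence above: TruncatingDiv leaves the truncating quotient
// dividend / Abs(divisor) in rdx (computed via a magic-number multiply, as
// its other uses in this file assume), so imull/movl/subl yields
// rax = dividend - Abs(divisor) * (dividend / Abs(divisor)) -- a remainder
// whose sign comes only from the dividend. Example: dividend == -7,
// divisor == 3 gives rdx == -2 and rax == -7 - 3 * -2 == -1.

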
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();

  Register left_reg = ToRegister(instr->left());
  ASSERT(left_reg.is(rax));
  Register right_reg = ToRegister(instr->right());
  ASSERT(!right_reg.is(rax));
  ASSERT(!right_reg.is(rdx));
  Register result_reg = ToRegister(instr->result());
  ASSERT(result_reg.is(rdx));

  Label done;
  // Check for x % 0, idiv would signal a divide error. We have to
  // deopt in this case because we can't return a NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(right_reg, right_reg);
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for kMinInt % -1, idiv would signal a divide error. We
  // have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ cmpl(left_reg, Immediate(kMinInt));
    __ j(not_zero, &no_overflow_possible, Label::kNear);
    __ cmpl(right_reg, Immediate(-1));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ Set(result_reg, 0);
      __ jmp(&done, Label::kNear);
    }
    __ bind(&no_overflow_possible);
  }

  // Sign extend dividend in eax into edx:eax, since we are using only the low
  // 32 bits of the values.
  __ cdq();

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive_left;
    __ testl(left_reg, left_reg);
    __ j(not_sign, &positive_left, Label::kNear);
    __ idivl(right_reg);
    __ testl(result_reg, result_reg);
    DeoptimizeIf(zero, instr->environment());
    __ jmp(&done, Label::kNear);
    __ bind(&positive_left);
  }
  __ idivl(right_reg);
  __ bind(&done);
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(dividend.is(ToRegister(instr->result())));

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  if (divisor == 1) return;
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ sarl(dividend, Immediate(shift));
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ negl(dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(zero, instr->environment());
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ sarl(dividend, Immediate(shift));
    return;
  }

  // Note that we could emit branch-free code, but that would need one more
  // register.
  if (divisor == -1) {
    DeoptimizeIf(overflow, instr->environment());
    return;
  }

  Label not_kmin_int, done;
  __ j(no_overflow, &not_kmin_int, Label::kNear);
  __ movl(dividend, Immediate(kMinInt / divisor));
  __ jmp(&done, Label::kNear);
  __ bind(&not_kmin_int);
  __ sarl(dividend, Immediate(shift));
  __ bind(&done);
}


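// Worked example for a negative divisor: dividend == 9, divisor == -4
// (shift == 2). negl turns 9 into -9 and the arithmetic shift rounds toward
// negative infinity, so -9 >> 2 == -3, which is exactly floor(9 / -4).
// The kMinInt handling exists because negl(kMinInt) overflows and leaves
// kMinInt unchanged, so the shifted value would be wrong.

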
void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(ToRegister(instr->result()).is(rdx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr->environment());
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr->environment());
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(dividend, Abs(divisor));
    if (divisor < 0) __ negl(rdx);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp3());
  ASSERT(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
  Label needs_adjustment, done;
  __ cmpl(dividend, Immediate(0));
  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ negl(rdx);
  __ jmp(&done, Label::kNear);
  __ bind(&needs_adjustment);
  __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
  __ TruncatingDiv(temp, Abs(divisor));
  if (divisor < 0) __ negl(rdx);
  __ decl(rdx);
  __ bind(&done);
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
  ASSERT(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr->environment());
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmpl(dividend, Immediate(kMinInt));
    DeoptimizeIf(zero, instr->environment());
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ testl(dividend, Immediate(mask));
    DeoptimizeIf(not_zero, instr->environment());
  }
  __ Move(result, dividend);
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift > 0) {
    // The arithmetic shift is always OK, the 'if' is an optimization only.
    if (shift > 1) __ sarl(result, Immediate(31));
    __ shrl(result, Immediate(32 - shift));
    __ addl(result, dividend);
    __ sarl(result, Immediate(shift));
  }
  if (divisor < 0) __ negl(result);
}


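// Worked example of the rounding fixup above: dividend == -7, divisor == 4
// (shift == 2). sarl(31) makes result 0xFFFFFFFF, shrl(32 - 2) reduces that
// to 3 (i.e. |divisor| - 1), addl gives -4, and the final sarl(2) produces
// -1, the correct truncating quotient; a plain arithmetic shift of -7 would
// have yielded floor(-7 / 4) == -2 instead.

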
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(ToRegister(instr->result()).is(rdx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr->environment());
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr->environment());
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ negl(rdx);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ movl(rax, rdx);
    __ imull(rax, rax, Immediate(divisor));
    __ subl(rax, dividend);
    DeoptimizeIf(not_equal, instr->environment());
  }
}


void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->left());
  Register divisor = ToRegister(instr->right());
  Register remainder = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());
  ASSERT(dividend.is(rax));
  ASSERT(remainder.is(rdx));
  ASSERT(result.is(rax));
  ASSERT(!divisor.is(rax));
  ASSERT(!divisor.is(rdx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(divisor, divisor);
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ testl(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ testl(divisor, divisor);
    DeoptimizeIf(sign, instr->environment());
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmpl(dividend, Immediate(kMinInt));
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmpl(divisor, Immediate(-1));
    DeoptimizeIf(zero, instr->environment());
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to rdx (= remainder).
  __ cdq();
  __ idivl(divisor);

  if (hdiv->IsMathFloorOfDiv()) {
    Label done;
    __ testl(remainder, remainder);
    __ j(zero, &done, Label::kNear);
    __ xorl(remainder, divisor);
    __ sarl(remainder, Immediate(31));
    __ addl(result, remainder);
    __ bind(&done);
  } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ testl(remainder, remainder);
    DeoptimizeIf(not_zero, instr->environment());
  }
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ movp(kScratchRegister, left);
    } else {
      __ movl(kScratchRegister, left);
    }
  }

  bool can_overflow =
      instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  if (right->IsConstantOperand()) {
    int32_t right_value = ToInteger32(LConstantOperand::cast(right));
    if (right_value == -1) {
      __ negl(left);
    } else if (right_value == 0) {
      __ xorl(left, left);
    } else if (right_value == 2) {
      __ addl(left, left);
    } else if (!can_overflow) {
      // If the multiplication is known to not overflow, we
      // can use operations that don't set the overflow flag
      // correctly.
      switch (right_value) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ leal(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shll(left, Immediate(2));
          break;
        case 5:
          __ leal(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shll(left, Immediate(3));
          break;
        case 9:
          __ leal(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shll(left, Immediate(4));
          break;
        default:
          __ imull(left, left, Immediate(right_value));
          break;
      }
    } else {
      __ imull(left, left, Immediate(right_value));
    }
  } else if (right->IsStackSlot()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger64(left, left);
      __ imulp(left, ToOperand(right));
    } else {
      __ imull(left, ToOperand(right));
    }
  } else {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger64(left, left);
      __ imulp(left, ToRegister(right));
    } else {
      __ imull(left, ToRegister(right));
    }
  }

  if (can_overflow) {
    DeoptimizeIf(overflow, instr->environment());
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ testp(left, left);
    } else {
      __ testl(left, left);
    }
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      // Constant can't be represented as Smi due to immediate size limit.
      ASSERT(!instr->hydrogen_value()->representation().IsSmi());
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        DeoptimizeIf(no_condition, instr->environment());
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        __ cmpl(kScratchRegister, Immediate(0));
        DeoptimizeIf(less, instr->environment());
      }
    } else if (right->IsStackSlot()) {
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ orp(kScratchRegister, ToOperand(right));
      } else {
        __ orl(kScratchRegister, ToOperand(right));
      }
      DeoptimizeIf(sign, instr->environment());
    } else {
      // Test the non-zero operand for negative sign.
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ orp(kScratchRegister, ToRegister(right));
      } else {
        __ orl(kScratchRegister, ToRegister(right));
      }
      DeoptimizeIf(sign, instr->environment());
    }
    __ bind(&done);
  }
}


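// The kScratchRegister copy taken at the top of DoMulI is what makes the -0
// check possible: after the multiply, left holds the product and the original
// operand is gone. A zero product must deoptimize when either operand was
// negative, because the JavaScript result would then be -0, which an int32
// (or Smi) cannot represent; or-ing the saved operand with the other operand
// leaves the sign flag set exactly in that case.

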
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    int32_t right_operand = ToInteger32(LConstantOperand::cast(right));
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_OR:
        __ orl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_XOR:
        if (right_operand == int32_t(~0)) {
          __ notl(ToRegister(left));
        } else {
          __ xorl(ToRegister(left), Immediate(right_operand));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else if (right->IsStackSlot()) {
    switch (instr->op()) {
      case Token::BIT_AND:
        if (instr->IsInteger32()) {
          __ andl(ToRegister(left), ToOperand(right));
        } else {
          __ andp(ToRegister(left), ToOperand(right));
        }
        break;
      case Token::BIT_OR:
        if (instr->IsInteger32()) {
          __ orl(ToRegister(left), ToOperand(right));
        } else {
          __ orp(ToRegister(left), ToOperand(right));
        }
        break;
      case Token::BIT_XOR:
        if (instr->IsInteger32()) {
          __ xorl(ToRegister(left), ToOperand(right));
        } else {
          __ xorp(ToRegister(left), ToOperand(right));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    ASSERT(right->IsRegister());
    switch (instr->op()) {
      case Token::BIT_AND:
        if (instr->IsInteger32()) {
          __ andl(ToRegister(left), ToRegister(right));
        } else {
          __ andp(ToRegister(left), ToRegister(right));
        }
        break;
      case Token::BIT_OR:
        if (instr->IsInteger32()) {
          __ orl(ToRegister(left), ToRegister(right));
        } else {
          __ orp(ToRegister(left), ToRegister(right));
        }
        break;
      case Token::BIT_XOR:
        if (instr->IsInteger32()) {
          __ xorl(ToRegister(left), ToRegister(right));
        } else {
          __ xorp(ToRegister(left), ToRegister(right));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    ASSERT(ToRegister(right).is(rcx));

    switch (instr->op()) {
      case Token::ROR:
        __ rorl_cl(ToRegister(left));
        break;
      case Token::SAR:
        __ sarl_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shrl_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr->environment());
        }
        break;
      case Token::SHL:
        __ shll_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int32_t value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ rorl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sarl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr->environment());
        } else {
          __ shrl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi()) {
            __ shl(ToRegister(left), Immediate(shift_count));
          } else {
            __ shll(ToRegister(left), Immediate(shift_count));
          }
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ subl(ToRegister(left),
            Immediate(ToInteger32(LConstantOperand::cast(right))));
  } else if (right->IsRegister()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ subp(ToRegister(left), ToRegister(right));
    } else {
      __ subl(ToRegister(left), ToRegister(right));
    }
  } else {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ subp(ToRegister(left), ToOperand(right));
    } else {
      __ subl(ToRegister(left), ToOperand(right));
    }
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  Register dst = ToRegister(instr->result());
  if (instr->value() == 0) {
    __ xorl(dst, dst);
  } else {
    __ movl(dst, Immediate(instr->value()));
  }
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Move(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  XMMRegister res = ToDoubleRegister(instr->result());
  double v = instr->value();
  uint64_t int_val = BitCast<uint64_t, double>(v);
  // Use xor to produce +0.0 in a fast and compact way, but avoid to
  // do so if the constant is -0.0.
  if (int_val == 0) {
    __ xorps(res, res);
  } else {
    Register tmp = ToRegister(instr->temp());
    __ Set(tmp, int_val);
    __ movq(res, tmp);
  }
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ LoadAddress(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value(isolate());
  __ Move(ToRegister(instr->result()), value);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(result));
  ASSERT(object.is(rax));

  Condition cc = masm()->CheckSmi(object);
  DeoptimizeIf(cc, instr->environment());
  __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
  DeoptimizeIf(not_equal, instr->environment());

  if (index->value() == 0) {
    __ movp(result, FieldOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      Operand stamp_operand = __ ExternalOperand(stamp);
      __ movp(kScratchRegister, stamp_operand);
      __ cmpp(kScratchRegister, FieldOperand(object,
                                             JSDate::kCacheStampOffset));
      __ j(not_equal, &runtime, Label::kNear);
      __ movp(result, FieldOperand(object, JSDate::kValueOffset +
                                           kPointerSize * index->value()));
      __ jmp(&done, Label::kNear);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2);
    __ movp(arg_reg_1, object);
    __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}


Operand LCodeGen::BuildSeqStringOperand(Register string,
                                        LOperand* index,
                                        String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldOperand(string, SeqString::kHeaderSize + offset);
  }
  return FieldOperand(
      string, ToRegister(index),
      encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
      SeqString::kHeaderSize);
}


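// Example: for a two-byte string and constant index 3 this returns
// FieldOperand(string, SeqString::kHeaderSize + 6) -- the header is skipped
// and the index is scaled by kUC16Size == 2. With a register index the same
// scaling happens through the times_2 addressing mode instead.

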
void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register result = ToRegister(instr->result());
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    __ Push(string);
    __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
    __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));

    __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
                              ? one_byte_seq_type : two_byte_seq_type));
    __ Check(equal, kUnexpectedStringType);
    __ Pop(string);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ movzxbl(result, operand);
  } else {
    __ movzxwl(result, operand);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    Register value = ToRegister(instr->value());
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (instr->value()->IsConstantOperand()) {
    int value = ToInteger32(LConstantOperand::cast(instr->value()));
    ASSERT_LE(0, value);
    if (encoding == String::ONE_BYTE_ENCODING) {
      ASSERT_LE(value, String::kMaxOneByteCharCode);
      __ movb(operand, Immediate(value));
    } else {
      ASSERT_LE(value, String::kMaxUtf16CodeUnit);
      __ movw(operand, Immediate(value));
    }
  } else {
    Register value = ToRegister(instr->value());
    if (encoding == String::ONE_BYTE_ENCODING) {
      __ movb(operand, value);
    } else {
      __ movw(operand, value);
    }
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();

  Representation target_rep = instr->hydrogen()->representation();
  bool is_p = target_rep.IsSmi() || target_rep.IsExternal();

  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
    if (right->IsConstantOperand()) {
      int32_t offset = ToInteger32(LConstantOperand::cast(right));
      if (is_p) {
        __ leap(ToRegister(instr->result()),
                MemOperand(ToRegister(left), offset));
      } else {
        __ leal(ToRegister(instr->result()),
                MemOperand(ToRegister(left), offset));
      }
    } else {
      Operand address(ToRegister(left), ToRegister(right), times_1, 0);
      if (is_p) {
        __ leap(ToRegister(instr->result()), address);
      } else {
        __ leal(ToRegister(instr->result()), address);
      }
    }
  } else {
    if (right->IsConstantOperand()) {
      if (is_p) {
        __ addp(ToRegister(left),
                Immediate(ToInteger32(LConstantOperand::cast(right))));
      } else {
        __ addl(ToRegister(left),
                Immediate(ToInteger32(LConstantOperand::cast(right))));
      }
    } else if (right->IsRegister()) {
      if (is_p) {
        __ addp(ToRegister(left), ToRegister(right));
      } else {
        __ addl(ToRegister(left), ToRegister(right));
      }
    } else {
      if (is_p) {
        __ addp(ToRegister(left), ToOperand(right));
      } else {
        __ addl(ToRegister(left), ToOperand(right));
      }
    }
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      DeoptimizeIf(overflow, instr->environment());
    }
  }
}


void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Label return_left;
    Condition condition = (operation == HMathMinMax::kMathMin)
        ? less_equal
        : greater_equal;
    Register left_reg = ToRegister(left);
    if (right->IsConstantOperand()) {
      Immediate right_imm =
          Immediate(ToInteger32(LConstantOperand::cast(right)));
      ASSERT(!instr->hydrogen_value()->representation().IsSmi());
      __ cmpl(left_reg, right_imm);
      __ j(condition, &return_left, Label::kNear);
      __ movp(left_reg, right_imm);
    } else if (right->IsRegister()) {
      Register right_reg = ToRegister(right);
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ cmpp(left_reg, right_reg);
      } else {
        __ cmpl(left_reg, right_reg);
      }
      __ j(condition, &return_left, Label::kNear);
      __ movp(left_reg, right_reg);
    } else {
      Operand right_op = ToOperand(right);
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ cmpp(left_reg, right_op);
      } else {
        __ cmpl(left_reg, right_op);
      }
      __ j(condition, &return_left, Label::kNear);
      __ movp(left_reg, right_op);
    }
    __ bind(&return_left);
  } else {
    ASSERT(instr->hydrogen()->representation().IsDouble());
    Label check_nan_left, check_zero, return_left, return_right;
    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
    XMMRegister left_reg = ToDoubleRegister(left);
    XMMRegister right_reg = ToDoubleRegister(right);
    __ ucomisd(left_reg, right_reg);
    __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
    __ j(equal, &check_zero, Label::kNear);  // left == right.
    __ j(condition, &return_left, Label::kNear);
    __ jmp(&return_right, Label::kNear);

    __ bind(&check_zero);
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(left_reg, xmm_scratch);
    __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
    // At this point, both left and right are either 0 or -0.
    if (operation == HMathMinMax::kMathMin) {
      __ orps(left_reg, right_reg);
    } else {
      // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
      __ addsd(left_reg, right_reg);
    }
    __ jmp(&return_left, Label::kNear);

    __ bind(&check_nan_left);
    __ ucomisd(left_reg, left_reg);  // NaN check.
    __ j(parity_even, &return_left, Label::kNear);
    __ bind(&return_right);
    __ movaps(left_reg, right_reg);

    __ bind(&return_left);
  }
}


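// The zero case above uses bit tricks rather than comparisons because
// ucomisd treats -0 and +0 as equal: for min, orps of the two values yields
// -0 whenever either operand is -0 (the sign bits are or-ed); for max,
// addsd(-0, +0) == +0, so the sum is +0 unless both operands are -0.

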
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  XMMRegister left = ToDoubleRegister(instr->left());
  XMMRegister right = ToDoubleRegister(instr->right());
  XMMRegister result = ToDoubleRegister(instr->result());
  // All operations except MOD are computed in-place.
  ASSERT(instr->op() == Token::MOD || left.is(result));
  switch (instr->op()) {
    case Token::ADD:
      __ addsd(left, right);
      break;
    case Token::SUB:
      __ subsd(left, right);
      break;
    case Token::MUL:
      __ mulsd(left, right);
      break;
    case Token::DIV:
      __ divsd(left, right);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a mulsd depending on the result.
      __ movaps(left, left);
      break;
    case Token::MOD: {
      XMMRegister xmm_scratch = double_scratch0();
      __ PrepareCallCFunction(2);
      __ movaps(xmm_scratch, left);
      ASSERT(right.is(xmm1));
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()), 2);
      __ movaps(result, xmm_scratch);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


2014 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2015 ASSERT(ToRegister(instr->context()).is(rsi));
2016 ASSERT(ToRegister(instr->left()).is(rdx));
2017 ASSERT(ToRegister(instr->right()).is(rax));
2018 ASSERT(ToRegister(instr->result()).is(rax));
2020 BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
2021 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block || cc == no_condition) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    if (cc != always) {
      __ jmp(chunk_->GetAssemblyLabel(right_block));
    }
  }
}


template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
  int false_block = instr->FalseDestination(chunk_);
  __ j(cc, chunk_->GetAssemblyLabel(false_block));
}
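// EmitBranch never emits more than one jump when a target is the next
// block in emission order: it falls through to the true block by jumping
// to the false block on the negated condition, or falls through to the
// false block by jumping to the true block on the condition itself.
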
void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ int3();
}


void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32()) {
    ASSERT(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    __ testl(reg, reg);
    EmitBranch(instr, not_zero);
  } else if (r.IsSmi()) {
    ASSERT(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    __ testp(reg, reg);
    EmitBranch(instr, not_zero);
  } else if (r.IsDouble()) {
    ASSERT(!info()->IsStub());
    XMMRegister reg = ToDoubleRegister(instr->value());
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(reg, xmm_scratch);
    EmitBranch(instr, not_equal);
  } else if (r.IsSIMD128()) {
    ASSERT(!info()->IsStub());
    EmitBranch(instr, no_condition);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      ASSERT(!info()->IsStub());
      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
      EmitBranch(instr, equal);
    } else if (type.IsSmi()) {
      ASSERT(!info()->IsStub());
      __ SmiCompare(reg, Smi::FromInt(0));
      EmitBranch(instr, not_equal);
    } else if (type.IsJSArray()) {
      ASSERT(!info()->IsStub());
      EmitBranch(instr, no_condition);
    } else if (type.IsSIMD128()) {
      ASSERT(!info()->IsStub());
      EmitBranch(instr, no_condition);
    } else if (type.IsHeapNumber()) {
      ASSERT(!info()->IsStub());
      XMMRegister xmm_scratch = double_scratch0();
      __ xorps(xmm_scratch, xmm_scratch);
      __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
      EmitBranch(instr, not_equal);
    } else if (type.IsString()) {
      ASSERT(!info()->IsStub());
      __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
      EmitBranch(instr, not_equal);
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
        __ j(equal, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // true -> true.
        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
        __ j(equal, instr->TrueLabel(chunk_));
        // false -> false.
        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
        __ j(equal, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ CompareRoot(reg, Heap::kNullValueRootIndex);
        __ j(equal, instr->FalseLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ Cmp(reg, Smi::FromInt(0));
        __ j(equal, instr->FalseLabel(chunk_));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ testb(reg, Immediate(kSmiTagMask));
        DeoptimizeIf(zero, instr->environment());
      }

      const Register map = kScratchRegister;
      if (expected.NeedsMap()) {
        __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ testb(FieldOperand(map, Map::kBitFieldOffset),
                   Immediate(1 << Map::kIsUndetectable));
          __ j(not_zero, instr->FalseLabel(chunk_));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
        __ j(above_equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
        __ j(above_equal, &not_string, Label::kNear);
        __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
        __ j(not_zero, instr->TrueLabel(chunk_));
        __ jmp(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        __ CmpInstanceType(map, SYMBOL_TYPE);
        __ j(equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::FLOAT32x4)) {
        // Float32x4 value -> true.
        __ CmpInstanceType(map, FLOAT32x4_TYPE);
        __ j(equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::INT32x4)) {
        // Int32x4 value -> true.
        __ CmpInstanceType(map, INT32x4_TYPE);
        __ j(equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        Label not_heap_number;
        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
        __ j(not_equal, &not_heap_number, Label::kNear);
        XMMRegister xmm_scratch = double_scratch0();
        __ xorps(xmm_scratch, xmm_scratch);
        __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
        __ j(zero, instr->FalseLabel(chunk_));
        __ jmp(instr->TrueLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(no_condition, instr->environment());
      }
    }
  }
}
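// The tagged path above is an inlined, feedback-driven ToBoolean: only the
// types recorded in expected_input_types() get a fast check, and any value
// outside that set deoptimizes so the feedback can be widened. For example,
// with feedback {SMI, STRING} only the smi and string tests are emitted,
// and a heap number arriving here falls through to the final DeoptimizeIf.
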
void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = no_condition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = equal;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = not_equal;
      break;
    case Token::LT:
      cond = is_unsigned ? below : less;
      break;
    case Token::GT:
      cond = is_unsigned ? above : greater;
      break;
    case Token::LTE:
      cond = is_unsigned ? below_equal : less_equal;
      break;
    case Token::GTE:
      cond = is_unsigned ? above_equal : greater_equal;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}
void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  Condition cc = TokenToCondition(instr->op(), instr->is_double());

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Don't base result on EFLAGS when a NaN is involved. Instead
      // jump to the false block.
      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
      __ j(parity_even, instr->FalseLabel(chunk_));
    } else {
      int32_t value;
      if (right->IsConstantOperand()) {
        value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          __ Cmp(ToRegister(left), Smi::FromInt(value));
        } else {
          __ cmpl(ToRegister(left), Immediate(value));
        }
      } else if (left->IsConstantOperand()) {
        value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          if (right->IsRegister()) {
            __ Cmp(ToRegister(right), Smi::FromInt(value));
          } else {
            __ Cmp(ToOperand(right), Smi::FromInt(value));
          }
        } else if (right->IsRegister()) {
          __ cmpl(ToRegister(right), Immediate(value));
        } else {
          __ cmpl(ToOperand(right), Immediate(value));
        }
        // We transposed the operands. Reverse the condition.
        cc = ReverseCondition(cc);
      } else if (instr->hydrogen_value()->representation().IsSmi()) {
        if (right->IsRegister()) {
          __ cmpp(ToRegister(left), ToRegister(right));
        } else {
          __ cmpp(ToRegister(left), ToOperand(right));
        }
      } else {
        if (right->IsRegister()) {
          __ cmpl(ToRegister(left), ToRegister(right));
        } else {
          __ cmpl(ToRegister(left), ToOperand(right));
        }
      }
    }
    EmitBranch(instr, cc);
  }
}
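// Note on the constant-on-the-left case above: the compare is emitted with
// the operands swapped (cmp reg, imm instead of cmp imm, reg), so the
// condition is reversed with ReverseCondition, which flips the direction
// of the comparison (e.g. less <-> greater) without changing signedness.
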
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());

  if (instr->right()->IsConstantOperand()) {
    Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
    __ Cmp(left, right);
  } else {
    Register right = ToRegister(instr->right());
    __ cmpp(left, right);
  }
  EmitBranch(instr, equal);
}
void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ Cmp(input_reg, factory()->the_hole_value());
    EmitBranch(instr, equal);
    return;
  }

  XMMRegister input_reg = ToDoubleRegister(instr->object());
  __ ucomisd(input_reg, input_reg);
  EmitFalseBranch(instr, parity_odd);

  __ subp(rsp, Immediate(kDoubleSize));
  __ movsd(MemOperand(rsp, 0), input_reg);
  __ addp(rsp, Immediate(kDoubleSize));

  int offset = sizeof(kHoleNanUpper32);
  __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
  EmitBranch(instr, equal);
}
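// The untagged path above relies on the hole being encoded as a specific
// NaN bit pattern: ucomisd of a value against itself is unordered (parity
// flag set) only for NaNs, so ordinary doubles take the false branch
// early. The remaining NaNs are told apart by spilling the double just
// below rsp and comparing its upper 32 bits against kHoleNanUpper32.
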
void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
  Representation rep = instr->hydrogen()->value()->representation();
  ASSERT(!rep.IsInteger32());

  if (rep.IsDouble()) {
    XMMRegister value = ToDoubleRegister(instr->value());
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(xmm_scratch, value);
    EmitFalseBranch(instr, not_equal);
    __ movmskpd(kScratchRegister, value);
    __ testl(kScratchRegister, Immediate(1));
    EmitBranch(instr, not_zero);
  } else {
    Register value = ToRegister(instr->value());
    Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
    __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
    __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
            Immediate(0x1));
    EmitFalseBranch(instr, no_overflow);
    __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
            Immediate(0x00000000));
    EmitBranch(instr, equal);
  }
}
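// The heap-number case above detects -0.0 without loading the double:
// -0.0 is the only value whose upper word is 0x80000000 and whose lower
// word is zero. cmpl(upper, 1) computes upper - 1, which overflows exactly
// when upper == 0x80000000 (INT32_MIN), hence the no_overflow false
// branch, followed by the mantissa-is-zero check.
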
Condition LCodeGen::EmitIsObject(Register input,
                                 Label* is_not_object,
                                 Label* is_object) {
  ASSERT(!input.is(kScratchRegister));

  __ JumpIfSmi(input, is_not_object);

  __ CompareRoot(input, Heap::kNullValueRootIndex);
  __ j(equal, is_object);

  __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
           Immediate(1 << Map::kIsUndetectable));
  __ j(not_zero, is_not_object);

  __ movzxbl(kScratchRegister,
             FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
  __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  __ j(below, is_not_object);
  __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
  return below_equal;
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->value());

  Condition true_cond = EmitIsObject(
      reg, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));

  EmitBranch(instr, true_cond);
}
Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }

  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);

  return cond;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;

  Condition true_cond = EmitIsString(
      reg, temp, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Condition is_smi;
  if (instr->value()->IsRegister()) {
    Register input = ToRegister(instr->value());
    is_smi = masm()->CheckSmi(input);
  } else {
    Operand input = ToOperand(instr->value());
    is_smi = masm()->CheckSmi(input);
  }
  EmitBranch(instr, is_smi);
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
  __ testb(FieldOperand(temp, Map::kBitFieldOffset),
           Immediate(1 << Map::kIsUndetectable));
  EmitBranch(instr, not_zero);
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = TokenToCondition(op, false);
  __ testp(rax, rax);

  EmitBranch(instr, condition);
}
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return equal;
  if (to == LAST_TYPE) return above_equal;
  if (from == FIRST_TYPE) return below_equal;
  UNREACHABLE();
  return equal;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->value());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ movl(result, FieldOperand(input, String::kHashFieldOffset));
  ASSERT(String::kHashShift >= kSmiTagSize);
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());

  __ testl(FieldOperand(input, String::kHashFieldOffset),
           Immediate(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, equal);
}
// Branches to a label or falls through with the answer in the z flag.
// Trashes the temp register.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
    __ j(below, is_false);
    __ j(equal, is_true);
    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
    __ j(equal, is_true);
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
    __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
    __ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                             FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ j(above, is_false);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ movp(temp, FieldOperand(temp, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
    __ j(not_equal, is_true);
  } else {
    __ j(not_equal, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ movp(temp, FieldOperand(temp,
                             SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted. This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax. Since both sides are internalized it is sufficient to use an
  // identity comparison.
  ASSERT(class_name->IsInternalizedString());
  __ Cmp(temp, class_name);
  // End with the answer in the z flag.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  EmitBranch(instr, equal);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());

  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
  EmitBranch(instr, equal);
}
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  InstanceofStub stub(InstanceofStub::kNoFlags);
  __ Push(ToRegister(instr->left()));
  __ Push(ToRegister(instr->right()));
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  Label true_value, done;
  __ testp(rax, rax);
  __ j(zero, &true_value, Label::kNear);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  __ bind(&done);
}
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
    Label* map_check() { return &map_check_; }
   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  ASSERT(ToRegister(instr->context()).is(rsi));
  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());

  // A Smi is not an instance of anything.
  __ JumpIfSmi(object, &false_result, Label::kNear);

  // This is the inlined call site instanceof cache. The two occurrences of the
  // hole value will be patched to the last map/result pair generated by the
  // instanceof stub.
  Label cache_miss;
  // Use a temp register to avoid memory operands with variable lengths.
  Register map = ToRegister(instr->temp());
  __ movp(map, FieldOperand(object, HeapObject::kMapOffset));
  __ bind(deferred->map_check());  // Label for calculating code patching.
  Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
  __ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
  __ cmpp(map, Operand(kScratchRegister, 0));
  __ j(not_equal, &cache_miss, Label::kNear);
  // Patched to load either true or false.
  __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
#ifdef DEBUG
  // Check that the code size between patch label and patch sites is invariant.
  Label end_of_patched_code;
  __ bind(&end_of_patched_code);
  ASSERT(true);
#endif
  __ jmp(&done, Label::kNear);

  // The inlined call site cache did not match. Check for null and string
  // before calling the deferred code.
  __ bind(&cache_miss);  // Null is not an instance of anything.
  __ CompareRoot(object, Heap::kNullValueRootIndex);
  __ j(equal, &false_result, Label::kNear);

  // String values are not instances of anything.
  __ JumpIfNotString(object, kScratchRegister, deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);

  __ bind(deferred->exit());
  __ bind(&done);
}
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  {
    PushSafepointRegistersScope scope(this);
    InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
        InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
    InstanceofStub stub(flags);

    __ Push(ToRegister(instr->value()));
    __ Push(instr->function());

    static const int kAdditionalDelta = 10;
    int delta =
        masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
    ASSERT(delta >= 0);
    __ PushImm32(delta);

    // We are pushing three values on the stack but recording a
    // safepoint with two arguments because stub is going to
    // remove the third argument from the stack before jumping
    // to instanceof builtin on the slow path.
    CallCodeGeneric(stub.GetCode(isolate()),
                    RelocInfo::CODE_TARGET,
                    instr,
                    RECORD_SAFEPOINT_WITH_REGISTERS,
                    2);
    ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
    LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
    // Move result to a register that survives the end of the
    // PushSafepointRegisterScope.
    __ movp(kScratchRegister, rax);
  }
  __ testp(kScratchRegister, kScratchRegister);
  Label load_false;
  Label done;
  __ j(not_zero, &load_false, Label::kNear);
  __ LoadRoot(rax, Heap::kTrueValueRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&load_false);
  __ LoadRoot(rax, Heap::kFalseValueRootIndex);
  __ bind(&done);
}
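// The pushed delta is the code distance from the map_check label to the
// return address of the stub call; kAdditionalDelta covers the PushImm32
// and the call sequence itself, and the trailing ASSERT checks the two
// stayed in sync. The stub walks back by delta from its return address to
// locate and patch the inlined cache cell and the LoadRoot of the cached
// result emitted at the call site above.
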
void LCodeGen::DoCmpT(LCmpT* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = TokenToCondition(op, false);
  Label true_value, done;
  __ testp(rax, rax);
  __ j(condition, &true_value, Label::kNear);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Preserve the return value on the stack and rely on the runtime call
    // to return the value in the same register. We're leaving the code
    // managed by the register allocator and tearing down the frame, it's
    // safe to write to the context register.
    __ Push(rax);
    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }
  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    __ movp(rsp, rbp);
    __ popq(rbp);
    no_frame_start = masm_->pc_offset();
  }
  if (instr->has_constant_parameter_count()) {
    __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
           rcx);
  } else {
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi.
    __ SmiToInteger32(reg, reg);
    Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
    __ PopReturnAddressTo(return_addr_reg);
    __ shl(reg, Immediate(kPointerSizeLog2));
    __ addp(rsp, reg);
    __ jmp(return_addr_reg);
  }
  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(equal, instr->environment());
  }
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->global_object()).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  __ Move(rcx, instr->name());
  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
  Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Handle<Cell> cell_handle = instr->hydrogen()->cell().handle();

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted. We deoptimize in that case.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // We have a temp because CompareRoot might clobber kScratchRegister.
    Register cell = ToRegister(instr->temp());
    ASSERT(!value.is(cell));
    __ Move(cell, cell_handle, RelocInfo::CELL);
    __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(equal, instr->environment());
    // Store the value.
    __ movp(Operand(cell, 0), value);
  } else {
    // Store the value.
    __ Move(kScratchRegister, cell_handle, RelocInfo::CELL);
    __ movp(Operand(kScratchRegister, 0), value);
  }
  // Cells are always rescanned, so no write barrier here.
}
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ movp(result, ContextOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      Label is_not_hole;
      __ j(not_equal, &is_not_hole, Label::kNear);
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());

  Operand target = ContextOperand(context, instr->slot_index());

  Label skip_assignment;
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      __ j(not_equal, &skip_assignment);
    }
  }
  __ movp(target, value);

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    int offset = Context::SlotOffset(instr->slot_index());
    Register scratch = ToRegister(instr->temp());
    __ RecordWriteContextSlot(context,
                              offset,
                              value,
                              scratch,
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    if (instr->object()->IsConstantOperand()) {
      ASSERT(result.is(rax));
      __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
    } else {
      Register object = ToRegister(instr->object());
      __ Load(result, MemOperand(object, offset), access.representation());
    }
    return;
  }

  Register object = ToRegister(instr->object());
  if (instr->hydrogen()->representation().IsDouble()) {
    XMMRegister result = ToDoubleRegister(instr->result());
    __ movsd(result, FieldOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }

  Representation representation = access.representation();
  if (representation.IsSmi() &&
      instr->hydrogen()->representation().IsInteger32()) {
#ifdef DEBUG
    Register scratch = kScratchRegister;
    __ Load(scratch, FieldOperand(object, offset), representation);
    __ AssertSmi(scratch);
#endif

    // Read int value directly from upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
    offset += kPointerSize / 2;
    representation = Representation::Integer32();
  }
  __ Load(result, FieldOperand(object, offset), representation);
}
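// The offset adjustment above exploits the 64-bit smi layout: with
// kSmiTagSize + kSmiShiftSize == 32, the 32-bit payload lives in the upper
// half of the word, so on little-endian x64 an Integer32 value can be read
// directly at offset + kPointerSize / 2 with no untagging shift.
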
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->object()).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  __ Move(rcx, instr->name());
  Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function.
  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
  DeoptimizeIf(not_equal, instr->environment());

  // Check whether the function has an instance prototype.
  Label non_instance;
  __ testb(FieldOperand(result, Map::kBitFieldOffset),
           Immediate(1 << Map::kHasNonInstancePrototype));
  __ j(not_zero, &non_instance, Label::kNear);

  // Get the prototype or initial map from the function.
  __ movp(result,
          FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(equal, instr->environment());

  // If the function does not have an initial map, we're done.
  Label done;
  __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
  __ j(not_equal, &done, Label::kNear);

  // Get the prototype from the initial map.
  __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
  __ jmp(&done, Label::kNear);

  // Non-instance prototype: Fetch prototype from constructor field
  // in the function's map.
  __ bind(&non_instance);
  __ movp(result, FieldOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());

  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    if (const_index >= 0 && const_index < const_length) {
      StackArgumentsAccessor args(arguments, const_length,
                                  ARGUMENTS_DONT_CONTAIN_RECEIVER);
      __ movp(result, args.GetArgumentOperand(const_index));
    } else if (FLAG_debug_code) {
      __ int3();
    }
  } else {
    Register length = ToRegister(instr->length());
    // There are two words between the frame pointer and the last argument.
    // Subtracting from length accounts for one of them; add one more.
    if (instr->index()->IsRegister()) {
      __ subl(length, ToRegister(instr->index()));
    } else {
      __ subl(length, ToOperand(instr->index()));
    }
    StackArgumentsAccessor args(arguments, length,
                                ARGUMENTS_DONT_CONTAIN_RECEIVER);
    __ movp(result, args.GetArgumentOperand(0));
  }
}


void LCodeGen::HandleExternalArrayOpRequiresPreScale(
    LOperand* key,
    ElementsKind elements_kind) {
  if (ExternalArrayOpRequiresPreScale(elements_kind)) {
    int pre_shift_size = ElementsKindToShiftSize(elements_kind) -
        static_cast<int>(maximal_scale_factor);
    ASSERT(pre_shift_size > 0);
    __ shl(ToRegister(key), Immediate(pre_shift_size));
  }
}
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand()) {
    HandleExternalArrayOpRequiresPreScale(key, elements_kind);
  }
  int base_offset = instr->is_fixed_typed_array()
    ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
    : 0;
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      elements_kind,
      base_offset,
      instr->additional_index()));

  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == FLOAT32_ELEMENTS) {
    XMMRegister result(ToDoubleRegister(instr->result()));
    __ movss(result, operand);
    __ cvtss2sd(result, result);
  } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
             elements_kind == FLOAT64_ELEMENTS) {
    __ movsd(ToDoubleRegister(instr->result()), operand);
  } else if (IsSIMD128ElementsKind(elements_kind)) {
    __ movups(ToSIMD128Register(instr->result()), operand);
  } else {
    Register result(ToRegister(instr->result()));
    switch (elements_kind) {
      case EXTERNAL_INT8_ELEMENTS:
      case INT8_ELEMENTS:
        __ movsxbl(result, operand);
        break;
      case EXTERNAL_UINT8_ELEMENTS:
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ movzxbl(result, operand);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case INT16_ELEMENTS:
        __ movsxwl(result, operand);
        break;
      case EXTERNAL_UINT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ movzxwl(result, operand);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case INT32_ELEMENTS:
        __ movl(result, operand);
        break;
      case EXTERNAL_UINT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ movl(result, operand);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          __ testl(result, result);
          DeoptimizeIf(negative, instr->environment());
        }
        break;
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32x4_ELEMENTS:
      case EXTERNAL_INT32x4_ELEMENTS:
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FLOAT32x4_ELEMENTS:
      case INT32x4_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  XMMRegister result(ToDoubleRegister(instr->result()));
  LOperand* key = instr->key();
  if (instr->hydrogen()->RequiresHoleCheck()) {
    int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
        sizeof(kHoleNanLower32);
    Operand hole_check_operand = BuildFastArrayOperand(
        instr->elements(),
        key,
        FAST_DOUBLE_ELEMENTS,
        offset,
        instr->additional_index());
    __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
    DeoptimizeIf(equal, instr->environment());
  }

  Operand double_load_operand = BuildFastArrayOperand(
      instr->elements(),
      key,
      FAST_DOUBLE_ELEMENTS,
      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
      instr->additional_index());
  __ movsd(result, double_load_operand);
}


void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  HLoadKeyed* hinstr = instr->hydrogen();
  Register result = ToRegister(instr->result());
  LOperand* key = instr->key();
  bool requires_hole_check = hinstr->RequiresHoleCheck();
  int offset = FixedArray::kHeaderSize - kHeapObjectTag;
  Representation representation = hinstr->representation();

  if (representation.IsInteger32() &&
      hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
    ASSERT(!requires_hole_check);
#ifdef DEBUG
    Register scratch = kScratchRegister;
    __ Load(scratch,
            BuildFastArrayOperand(instr->elements(),
                                  key,
                                  FAST_ELEMENTS,
                                  offset,
                                  instr->additional_index()),
            Representation::Smi());
    __ AssertSmi(scratch);
#endif

    // Read int value directly from upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
    offset += kPointerSize / 2;
  }

  __ Load(result,
          BuildFastArrayOperand(instr->elements(),
                                key,
                                FAST_ELEMENTS,
                                offset,
                                instr->additional_index()),
          representation);

  // Check for the hole value.
  if (requires_hole_check) {
    if (IsFastSmiElementsKind(hinstr->elements_kind())) {
      Condition smi = __ CheckSmi(result);
      DeoptimizeIf(NegateCondition(smi), instr->environment());
    } else {
      __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
      DeoptimizeIf(equal, instr->environment());
    }
  }
}


void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_typed_elements()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}
Operand LCodeGen::BuildFastArrayOperand(
    LOperand* elements_pointer,
    LOperand* key,
    ElementsKind elements_kind,
    uint32_t offset,
    uint32_t additional_index) {
  Register elements_pointer_reg = ToRegister(elements_pointer);
  int shift_size = ElementsKindToShiftSize(elements_kind);
  if (key->IsConstantOperand()) {
    int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
    if (constant_value & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    return Operand(elements_pointer_reg,
                   ((constant_value + additional_index) << shift_size)
                       + offset);
  } else {
    if (ExternalArrayOpRequiresPreScale(elements_kind)) {
      // Make sure the key is pre-scaled against maximal_scale_factor.
      shift_size = static_cast<int>(maximal_scale_factor);
    }
    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
    return Operand(elements_pointer_reg,
                   ToRegister(key),
                   scale_factor,
                   offset + (additional_index << shift_size));
  }
}
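// Sketch of the resulting addressing modes, assuming an untagged key:
//   constant key:  [elements + ((key + additional_index) << shift) + offset]
//   variable key:  [elements + key * scale + offset + (additional << shift)]
// x64 SIB scaling tops out at times_8, so element kinds wider than eight
// bytes (the SIMD128 kinds) cannot be scaled by the addressing mode alone;
// the key register is pre-shifted instead, see
// HandleExternalArrayOpRequiresPreScale above.
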
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->object()).is(rdx));
  ASSERT(ToRegister(instr->key()).is(rax));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
  } else {
    // Check for arguments adapter frame.
    Label done, adapted;
    __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
    __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
           Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
    __ j(equal, &adapted, Label::kNear);

    // No arguments adaptor frame.
    __ movp(result, rbp);
    __ jmp(&done, Label::kNear);

    // Arguments adaptor frame present.
    __ bind(&adapted);
    __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

    // Result is the frame pointer for the frame if not adapted and for the real
    // frame below the adaptor frame if adapted.
    __ bind(&done);
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  if (instr->elements()->IsRegister()) {
    __ cmpp(rbp, ToRegister(instr->elements()));
  } else {
    __ cmpp(rbp, ToOperand(instr->elements()));
  }
  __ movl(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done, Label::kNear);

  // Arguments adaptor frame present. Get argument length from there.
  __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ SmiToInteger32(result,
                    Operand(result,
                            ArgumentsAdaptorFrameConstants::kLengthOffset));

  // Argument length is in result register.
  __ bind(&done);
}
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, receiver_ok;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;

  if (!instr->hydrogen()->known_function()) {
    // Do not transform the receiver to object for strict mode
    // functions.
    __ movp(kScratchRegister,
            FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
    __ testb(FieldOperand(kScratchRegister,
                          SharedFunctionInfo::kStrictModeByteOffset),
             Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
    __ j(not_equal, &receiver_ok, dist);

    // Do not transform the receiver to object for builtins.
    __ testb(FieldOperand(kScratchRegister,
                          SharedFunctionInfo::kNativeByteOffset),
             Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
    __ j(not_equal, &receiver_ok, dist);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ CompareRoot(receiver, Heap::kNullValueRootIndex);
  __ j(equal, &global_object, Label::kNear);
  __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
  __ j(equal, &global_object, Label::kNear);

  // The receiver should be a JS object.
  Condition is_smi = __ CheckSmi(receiver);
  DeoptimizeIf(is_smi, instr->environment());
  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
  DeoptimizeIf(below, instr->environment());

  __ jmp(&receiver_ok, Label::kNear);
  __ bind(&global_object);
  __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
  __ movp(receiver,
          Operand(receiver,
                  Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  __ movp(receiver,
          FieldOperand(receiver, GlobalObject::kGlobalReceiverOffset));

  __ bind(&receiver_ok);
}
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  ASSERT(receiver.is(rax));  // Used for parameter count.
  ASSERT(function.is(rdi));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(rax));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmpp(length, Immediate(kArgumentsLimit));
  DeoptimizeIf(above, instr->environment());

  __ Push(receiver);
  __ movp(receiver, length);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ testl(length, length);
  __ j(zero, &invoke, Label::kNear);
  __ bind(&loop);
  StackArgumentsAccessor args(elements, length,
                              ARGUMENTS_DONT_CONTAIN_RECEIVER);
  __ Push(args.GetArgumentOperand(0));
  __ decl(length);
  __ j(not_zero, &loop);

  // Invoke the function.
  __ bind(&invoke);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  ParameterCount actual(rax);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  EmitPushTaggedOperand(argument);
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in rsi.
    ASSERT(result.is(rsi));
  }
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  __ Push(rsi);  // The context is the first argument.
  __ Push(instr->hydrogen()->pairs());
  __ Push(Smi::FromInt(instr->hydrogen()->flags()));
  CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count,
                                 int arity,
                                 LInstruction* instr,
                                 RDIState rdi_state) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  LPointerMap* pointers = instr->pointer_map();

  if (can_invoke_directly) {
    if (rdi_state == RDI_UNINITIALIZED) {
      __ Move(rdi, function);
    }

    // Change context.
    __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));

    // Set rax to arguments count if adaptation is not needed. Assumes that rax
    // is available to write to at this point.
    if (dont_adapt_arguments) {
      __ Set(rax, arity);
    }

    // Invoke function.
    if (function.is_identical_to(info()->closure())) {
      __ CallSelf();
    } else {
      __ Call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
    }

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
  } else {
    // We need to adapt arguments.
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
  }
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));

  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

  if (instr->target()->IsConstantOperand()) {
    LConstantOperand* target = LConstantOperand::cast(instr->target());
    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
    generator.BeforeCall(__ CallSize(code));
    __ call(code, RelocInfo::CODE_TARGET);
  } else {
    ASSERT(instr->target()->IsRegister());
    Register target = ToRegister(instr->target());
    generator.BeforeCall(__ CallSize(target));
    __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
    __ call(target);
  }
  generator.AfterCall();
}


void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(rdi));
  ASSERT(ToRegister(instr->result()).is(rax));

  if (instr->hydrogen()->pass_argument_count()) {
    __ Set(rax, instr->arity());
  }

  // Change context.
  __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));

  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

  bool is_self_call = false;
  if (instr->hydrogen()->function()->IsConstant()) {
    Handle<JSFunction> jsfun = Handle<JSFunction>::null();
    HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
    jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
    is_self_call = jsfun.is_identical_to(info()->closure());
  }

  if (is_self_call) {
    __ CallSelf();
  } else {
    Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
    generator.BeforeCall(__ CallSize(target));
    __ Call(target);
  }
  generator.AfterCall();
}
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(not_equal, instr->environment());

  Label slow, allocated, done;
  Register tmp = input_reg.is(rax) ? rcx : rax;
  Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it. We do not need to patch the stack since |input| and
  // |result| are the same register and |input| will be restored
  // unchanged by popping safepoint registers.
  __ testl(tmp, Immediate(HeapNumber::kSignMask));
  __ j(zero, &done);

  __ AllocateHeapNumber(tmp, tmp2, &slow);
  __ jmp(&allocated, Label::kNear);

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  CallRuntimeFromDeferred(
      Runtime::kHiddenAllocateHeapNumber, 0, instr, instr->context());
  // Set the pointer to the new heap number in tmp.
  if (!tmp.is(rax)) __ movp(tmp, rax);
  // Restore input_reg after call to runtime.
  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);

  __ bind(&allocated);
  __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ shl(tmp2, Immediate(1));
  __ shr(tmp2, Immediate(1));
  __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
  __ StoreToSafepointRegisterSlot(input_reg, tmp);

  __ bind(&done);
}
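// The shl/shr pair above clears the sign bit of the raw IEEE bits, which
// is |x| for every double, including NaNs and infinities; the result goes
// into a freshly allocated heap number because the input number may be
// shared and must not be mutated in place.
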
void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ testl(input_reg, input_reg);
  Label is_positive;
  __ j(not_sign, &is_positive, Label::kNear);
  __ negl(input_reg);  // Sets flags.
  DeoptimizeIf(negative, instr->environment());
  __ bind(&is_positive);
}


void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ testp(input_reg, input_reg);
  Label is_positive;
  __ j(not_sign, &is_positive, Label::kNear);
  __ negp(input_reg);  // Sets flags.
  DeoptimizeIf(negative, instr->environment());
  __ bind(&is_positive);
}


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LMathAbs* instr_;
  };

  ASSERT(instr->value()->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();

  if (r.IsDouble()) {
    XMMRegister scratch = double_scratch0();
    XMMRegister input_reg = ToDoubleRegister(instr->value());
    __ xorps(scratch, scratch);
    __ subsd(scratch, input_reg);
    __ andps(input_reg, scratch);
  } else if (r.IsInteger32()) {
    EmitIntegerMathAbs(instr);
  } else if (r.IsSmi()) {
    EmitSmiMathAbs(instr);
  } else {  // Tagged case.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input_reg = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input_reg, deferred->entry());
    EmitSmiMathAbs(instr);
    __ bind(deferred->exit());
  }
}
void LCodeGen::DoMathFloor(LMathFloor* instr) {
  XMMRegister xmm_scratch = double_scratch0();
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());

  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope scope(masm(), SSE4_1);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Deoptimize if minus zero.
      __ movq(output_reg, input_reg);
      __ subq(output_reg, Immediate(1));
      DeoptimizeIf(overflow, instr->environment());
    }
    __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
    __ cvttsd2si(output_reg, xmm_scratch);
    __ cmpl(output_reg, Immediate(0x1));
    DeoptimizeIf(overflow, instr->environment());
  } else {
    Label negative_sign, done;
    // Deoptimize on unordered.
    __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
    __ ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(parity_even, instr->environment());
    __ j(below, &negative_sign, Label::kNear);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Check for negative zero.
      Label positive_sign;
      __ j(above, &positive_sign, Label::kNear);
      __ movmskpd(output_reg, input_reg);
      __ testq(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
      __ Set(output_reg, 0);
      __ jmp(&done, Label::kNear);
      __ bind(&positive_sign);
    }

    // Use truncating instruction (OK because input is positive).
    __ cvttsd2si(output_reg, input_reg);
    // Overflow is signalled with minint.
    __ cmpl(output_reg, Immediate(0x1));
    DeoptimizeIf(overflow, instr->environment());
    __ jmp(&done, Label::kNear);

    // Non-zero negative reaches here.
    __ bind(&negative_sign);
    // Truncate, then compare and compensate.
    __ cvttsd2si(output_reg, input_reg);
    __ Cvtlsi2sd(xmm_scratch, output_reg);
    __ ucomisd(input_reg, xmm_scratch);
    __ j(equal, &done, Label::kNear);
    __ subl(output_reg, Immediate(1));
    DeoptimizeIf(overflow, instr->environment());

    __ bind(&done);
  }
}
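// Both paths above detect a failed conversion through the same trick:
// cvttsd2si produces 0x80000000 (INT32_MIN) for inputs outside the int32
// range, and cmpl(result, 1) overflows exactly for that value, so
// DeoptimizeIf(overflow, ...) fires precisely on "conversion failed".
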
3723 void LCodeGen::DoMathRound(LMathRound* instr) {
3724 const XMMRegister xmm_scratch = double_scratch0();
3725 Register output_reg = ToRegister(instr->result());
3726 XMMRegister input_reg = ToDoubleRegister(instr->value());
3727 XMMRegister input_temp = ToDoubleRegister(instr->temp());
3728 static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
3729 static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
3731 Label done, round_to_zero, below_one_half;
3732 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3733 __ movq(kScratchRegister, one_half);
3734 __ movq(xmm_scratch, kScratchRegister);
3735 __ ucomisd(xmm_scratch, input_reg);
3736 __ j(above, &below_one_half, Label::kNear);
3738 // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
3739 __ addsd(xmm_scratch, input_reg);
3740 __ cvttsd2si(output_reg, xmm_scratch);
3741 // Overflow is signalled with minint.
3742 __ cmpl(output_reg, Immediate(0x1));
3743 __ RecordComment("D2I conversion overflow");
3744 DeoptimizeIf(overflow, instr->environment());
3745 __ jmp(&done, dist);
3747 __ bind(&below_one_half);
3748 __ movq(kScratchRegister, minus_one_half);
3749 __ movq(xmm_scratch, kScratchRegister);
3750 __ ucomisd(xmm_scratch, input_reg);
3751 __ j(below_equal, &round_to_zero, Label::kNear);
3753 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
3754 // compare and compensate.
3755 __ movq(input_temp, input_reg); // Do not alter input_reg.
3756 __ subsd(input_temp, xmm_scratch);
3757 __ cvttsd2si(output_reg, input_temp);
3758 // Catch minint due to overflow, and to prevent overflow when compensating.
3759 __ cmpl(output_reg, Immediate(0x1));
3760 __ RecordComment("D2I conversion overflow");
3761 DeoptimizeIf(overflow, instr->environment());
3763 __ Cvtlsi2sd(xmm_scratch, output_reg);
3764 __ ucomisd(xmm_scratch, input_temp);
3765 __ j(equal, &done, dist);
3766 __ subl(output_reg, Immediate(1));
3767 // No overflow because we already ruled out minint.
3768 __ jmp(&done, dist);
3770 __ bind(&round_to_zero);
3771 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
3772 // we can ignore the difference between a result of -0 and +0.
3773 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3774 __ movq(output_reg, input_reg);
3775 __ testq(output_reg, output_reg);
3776 __ RecordComment("Minus zero");
3777 DeoptimizeIf(negative, instr->environment());
3778 }
3779 __ Set(output_reg, 0);
3781 __ bind(&done);
3782 }
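// Taken together, the three paths above compute floor(x + 0.5) without
// losing precision for large inputs. A rough C sketch (illustrative only;
// trunc_to_int32 stands in for CVTTSD2SI):
//
//   int32_t round_to_int32(double x) {
//     if (x >= 0.5) return trunc_to_int32(x + 0.5);  // trunc == floor here
//     if (x >= -0.5) return 0;     // [-0.5, 0.5); may deopt on -0 above
//     int32_t t = trunc_to_int32(x + 0.5);           // trunc == ceil here
//     if ((double)t != x + 0.5) t -= 1;              // ceil -> floor
//     return t;
//   }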
3784 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3785 XMMRegister input_reg = ToDoubleRegister(instr->value());
3786 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3787 __ sqrtsd(input_reg, input_reg);
3788 }
3791 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3792 XMMRegister xmm_scratch = double_scratch0();
3793 XMMRegister input_reg = ToDoubleRegister(instr->value());
3794 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3796 // Note that according to ECMA-262 15.8.2.13:
3797 // Math.pow(-Infinity, 0.5) == Infinity
3798 // Math.sqrt(-Infinity) == NaN
3799 Label done, sqrt;
3800 // Check base for -Infinity. According to IEEE-754, double-precision
3801 // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
3802 __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
3803 __ movq(xmm_scratch, kScratchRegister);
3804 __ ucomisd(xmm_scratch, input_reg);
3805 // Comparing -Infinity with NaN results in "unordered", which sets the
3806 // zero flag as if both were equal. However, it also sets the carry flag.
3807 __ j(not_equal, &sqrt, Label::kNear);
3808 __ j(carry, &sqrt, Label::kNear);
3809 // If input is -Infinity, return Infinity.
3810 __ xorps(input_reg, input_reg);
3811 __ subsd(input_reg, xmm_scratch);
3812 __ jmp(&done, Label::kNear);
3814 __ bind(&sqrt);
3816 __ xorps(xmm_scratch, xmm_scratch);
3817 __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
3818 __ sqrtsd(input_reg, input_reg);
3819 __ bind(&done);
3820 }
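// Note on the addsd with +0 above: it is not a no-op. IEEE-754 defines
// -0 + (+0) as +0, so it canonicalizes a -0 base before sqrtsd
// (Math.pow(-0, 0.5) must be +0, while sqrtsd(-0) would return -0). The
// -Infinity case is handled separately because sqrtsd(-Inf) yields NaN,
// whereas ES5 15.8.2.13 requires pow(-Inf, 0.5) == +Infinity.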
3823 void LCodeGen::DoNullarySIMDOperation(LNullarySIMDOperation* instr) {
3824 switch (instr->op()) {
3825 case kFloat32x4Zero: {
3826 XMMRegister result_reg = ToFloat32x4Register(instr->result());
3827 __ xorps(result_reg, result_reg);
3828 break;
3829 }
3830 case kInt32x4Zero: {
3831 XMMRegister result_reg = ToInt32x4Register(instr->result());
3832 __ xorps(result_reg, result_reg);
3833 break;
3834 }
3835 default:
3836 UNREACHABLE();
3837 }
3838 }
3842 void LCodeGen::DoUnarySIMDOperation(LUnarySIMDOperation* instr) {
3843 uint8_t select = 0;
3844 switch (instr->op()) {
3845 case kSIMD128Change: {
3846 Comment(";;; deoptimize: can not perform representation change"
3847 "for float32x4 or int32x4");
3848 DeoptimizeIf(no_condition, instr->environment());
3853 case kFloat32x4Reciprocal:
3854 case kFloat32x4ReciprocalSqrt:
3855 case kFloat32x4Sqrt: {
3856 ASSERT(instr->value()->Equals(instr->result()));
3857 ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
3858 XMMRegister input_reg = ToFloat32x4Register(instr->value());
3859 switch (instr->op()) {
3860 case kFloat32x4Abs:
3861 __ absps(input_reg);
3862 break;
3863 case kFloat32x4Negate:
3864 __ negateps(input_reg);
3865 break;
3866 case kFloat32x4Reciprocal:
3867 __ rcpps(input_reg, input_reg);
3868 break;
3869 case kFloat32x4ReciprocalSqrt:
3870 __ rsqrtps(input_reg, input_reg);
3871 break;
3872 case kFloat32x4Sqrt:
3873 __ sqrtps(input_reg, input_reg);
3874 break;
3875 default:
3876 UNREACHABLE();
3877 }
3878 break;
3879 }
3880 case kInt32x4Not:
3881 case kInt32x4Neg: {
3882 ASSERT(instr->value()->Equals(instr->result()));
3883 ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
3884 XMMRegister input_reg = ToInt32x4Register(instr->value());
3885 switch (instr->op()) {
3886 case kInt32x4Not:
3887 __ notps(input_reg);
3888 break;
3889 case kInt32x4Neg:
3890 __ pnegd(input_reg);
3891 break;
3892 default:
3893 UNREACHABLE();
3894 }
3895 break;
3896 }
3898 case kFloat32x4BitsToInt32x4:
3899 case kFloat32x4ToInt32x4: {
3900 ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
3901 XMMRegister input_reg = ToFloat32x4Register(instr->value());
3902 XMMRegister result_reg = ToInt32x4Register(instr->result());
3903 if (instr->op() == kFloat32x4BitsToInt32x4) {
3904 if (!result_reg.is(input_reg)) {
3905 __ movaps(result_reg, input_reg);
3906 }
3907 } else {
3908 ASSERT(instr->op() == kFloat32x4ToInt32x4);
3909 __ cvtps2dq(result_reg, input_reg);
3910 }
3911 break;
3912 }
3913 case kInt32x4BitsToFloat32x4:
3914 case kInt32x4ToFloat32x4: {
3915 ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
3916 XMMRegister input_reg = ToInt32x4Register(instr->value());
3917 XMMRegister result_reg = ToFloat32x4Register(instr->result());
3918 if (instr->op() == kInt32x4BitsToFloat32x4) {
3919 if (!result_reg.is(input_reg)) {
3920 __ movaps(result_reg, input_reg);
3921 }
3922 } else {
3923 ASSERT(instr->op() == kInt32x4ToFloat32x4);
3924 __ cvtdq2ps(result_reg, input_reg);
3925 }
3926 break;
3927 }
3928 case kFloat32x4Splat: {
3929 ASSERT(instr->hydrogen()->value()->representation().IsDouble());
3930 XMMRegister input_reg = ToDoubleRegister(instr->value());
3931 XMMRegister result_reg = ToFloat32x4Register(instr->result());
3932 XMMRegister xmm_scratch = xmm0;
3933 __ xorps(xmm_scratch, xmm_scratch);
3934 __ cvtsd2ss(xmm_scratch, input_reg);
3935 __ shufps(xmm_scratch, xmm_scratch, 0x0);
3936 __ movaps(result_reg, xmm_scratch);
3937 break;
3938 }
3939 case kInt32x4Splat: {
3940 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
3941 Register input_reg = ToRegister(instr->value());
3942 XMMRegister result_reg = ToInt32x4Register(instr->result());
3943 __ movd(result_reg, input_reg);
3944 __ shufps(result_reg, result_reg, 0x0);
3945 break;
3946 }
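// shufps with selector 0x0 (binary 00'00'00'00) picks lane 0 for all
// four output lanes, i.e. it broadcasts the low element. Both splat
// cases above use the same idiom: move the scalar into lane 0, then
// shuffle it across the whole register.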
3947 case kInt32x4GetSignMask: {
3948 ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
3949 XMMRegister input_reg = ToInt32x4Register(instr->value());
3950 Register result = ToRegister(instr->result());
3951 __ movmskps(result, input_reg);
3952 break;
3953 }
3954 case kFloat32x4GetSignMask: {
3955 ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
3956 XMMRegister input_reg = ToFloat32x4Register(instr->value());
3957 Register result = ToRegister(instr->result());
3958 __ movmskps(result, input_reg);
3959 break;
3960 }
3961 case kFloat32x4GetW:
3962 select++;
3963 case kFloat32x4GetZ:
3964 select++;
3965 case kFloat32x4GetY:
3966 select++;
3967 case kFloat32x4GetX: {
3968 ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
3969 XMMRegister input_reg = ToFloat32x4Register(instr->value());
3970 XMMRegister result = ToDoubleRegister(instr->result());
3971 XMMRegister xmm_scratch = result.is(input_reg) ? xmm0 : result;
3973 if (select == 0x0) {
3974 __ xorps(xmm_scratch, xmm_scratch);
3975 __ cvtss2sd(xmm_scratch, input_reg);
3976 if (!xmm_scratch.is(result)) {
3977 __ movaps(result, xmm_scratch);
3978 }
3979 } else {
3980 __ pshufd(xmm_scratch, input_reg, select);
3981 if (!xmm_scratch.is(result)) {
3982 __ xorps(result, result);
3983 }
3984 __ cvtss2sd(result, xmm_scratch);
3985 }
3986 break;
3987 }
3992 case kInt32x4GetFlagX:
3993 case kInt32x4GetFlagY:
3994 case kInt32x4GetFlagZ:
3995 case kInt32x4GetFlagW: {
3996 ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
3998 switch (instr->op()) {
3999 case kInt32x4GetFlagX:
4000 select = 0x0;
4001 break;
4003 case kInt32x4GetFlagY:
4004 select = 0x1;
4005 break;
4008 case kInt32x4GetFlagZ:
4009 select = 0x2;
4010 break;
4013 case kInt32x4GetFlagW:
4014 select = 0x3;
4015 break;
4016 default:
4017 UNREACHABLE();
4018 }
4022 XMMRegister input_reg = ToInt32x4Register(instr->value());
4023 Register result = ToRegister(instr->result());
4024 if (select == 0x0) {
4025 __ movd(result, input_reg);
4026 } else {
4027 if (CpuFeatures::IsSupported(SSE4_1)) {
4028 CpuFeatureScope scope(masm(), SSE4_1);
4029 __ extractps(result, input_reg, select);
4030 } else {
4031 XMMRegister xmm_scratch = xmm0;
4032 __ pshufd(xmm_scratch, input_reg, select);
4033 __ movd(result, xmm_scratch);
4034 }
4035 }
4038 Label false_value, done;
4039 __ testl(result, result);
4040 __ j(zero, &false_value, Label::kNear);
4041 __ LoadRoot(result, Heap::kTrueValueRootIndex);
4042 __ jmp(&done, Label::kNear);
4043 __ bind(&false_value);
4044 __ LoadRoot(result, Heap::kFalseValueRootIndex);
4045 __ bind(&done);
4046 break;
4047 }
4048 default:
4049 UNREACHABLE();
4050 }
4051 }
4056 void LCodeGen::DoBinarySIMDOperation(LBinarySIMDOperation* instr) {
4057 uint8_t imm8 = 0; // Lane selector for the "with" operations below.
4058 switch (instr->op()) {
4059 case kFloat32x4Add:
4060 case kFloat32x4Sub:
4061 case kFloat32x4Mul:
4062 case kFloat32x4Div:
4063 case kFloat32x4Min:
4064 case kFloat32x4Max: {
4065 ASSERT(instr->left()->Equals(instr->result()));
4066 ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
4067 ASSERT(instr->hydrogen()->right()->representation().IsFloat32x4());
4068 XMMRegister left_reg = ToFloat32x4Register(instr->left());
4069 XMMRegister right_reg = ToFloat32x4Register(instr->right());
4070 switch (instr->op()) {
4071 case kFloat32x4Add:
4072 __ addps(left_reg, right_reg);
4073 break;
4074 case kFloat32x4Sub:
4075 __ subps(left_reg, right_reg);
4076 break;
4077 case kFloat32x4Mul:
4078 __ mulps(left_reg, right_reg);
4079 break;
4080 case kFloat32x4Div:
4081 __ divps(left_reg, right_reg);
4082 break;
4083 case kFloat32x4Min:
4084 __ minps(left_reg, right_reg);
4085 break;
4086 case kFloat32x4Max:
4087 __ maxps(left_reg, right_reg);
4088 break;
4089 default:
4090 UNREACHABLE();
4091 }
4092 break;
4093 }
4095 case kFloat32x4Scale: {
4096 ASSERT(instr->left()->Equals(instr->result()));
4097 ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
4098 ASSERT(instr->hydrogen()->right()->representation().IsDouble());
4099 XMMRegister left_reg = ToFloat32x4Register(instr->left());
4100 XMMRegister right_reg = ToDoubleRegister(instr->right());
4101 XMMRegister scratch_reg = xmm0;
4102 __ xorps(scratch_reg, scratch_reg);
4103 __ cvtsd2ss(scratch_reg, right_reg);
4104 __ shufps(scratch_reg, scratch_reg, 0x0);
4105 __ mulps(left_reg, scratch_reg);
4106 break;
4107 }
4108 case kFloat32x4Shuffle: {
4109 ASSERT(instr->left()->Equals(instr->result()));
4110 ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
4111 if (instr->hydrogen()->right()->IsConstant() &&
4112 HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
4113 int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
4114 uint8_t select = static_cast<uint8_t>(value & 0xFF);
4115 XMMRegister left_reg = ToFloat32x4Register(instr->left());
4116 __ shufps(left_reg, left_reg, select);
4117 } else {
4119 Comment(";;; deoptimize: non-constant selector for shuffle");
4120 DeoptimizeIf(no_condition, instr->environment());
4121 }
4122 break;
4123 }
4124 case kInt32x4Shuffle: {
4125 ASSERT(instr->left()->Equals(instr->result()));
4126 ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
4127 if (instr->hydrogen()->right()->IsConstant() &&
4128 HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
4129 int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
4130 uint8_t select = static_cast<uint8_t>(value & 0xFF);
4131 XMMRegister left_reg = ToInt32x4Register(instr->left());
4132 __ pshufd(left_reg, left_reg, select);
4133 } else {
4135 Comment(";;; deoptimize: non-constant selector for shuffle");
4136 DeoptimizeIf(no_condition, instr->environment());
4137 }
4138 break;
4139 }
4140 case kInt32x4ShiftLeft:
4141 case kInt32x4ShiftRight:
4142 case kInt32x4ShiftRightArithmetic: {
4143 ASSERT(instr->left()->Equals(instr->result()));
4144 ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
4145 if (instr->hydrogen()->right()->IsConstant() &&
4146 HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
4147 int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
4148 uint8_t shift = static_cast<uint8_t>(value & 0xFF);
4149 XMMRegister left_reg = ToInt32x4Register(instr->left());
4150 switch (instr->op()) {
4151 case kInt32x4ShiftLeft:
4152 __ pslld(left_reg, shift);
4153 break;
4154 case kInt32x4ShiftRight:
4155 __ psrld(left_reg, shift);
4156 break;
4157 case kInt32x4ShiftRightArithmetic:
4158 __ psrad(left_reg, shift);
4159 break;
4160 default:
4161 UNREACHABLE();
4162 }
4163 } else {
4165 XMMRegister left_reg = ToInt32x4Register(instr->left());
4166 Register shift = ToRegister(instr->right());
4167 XMMRegister xmm_scratch = double_scratch0();
4168 __ movd(xmm_scratch, shift);
4169 switch (instr->op()) {
4170 case kInt32x4ShiftLeft:
4171 __ pslld(left_reg, xmm_scratch);
4172 break;
4173 case kInt32x4ShiftRight:
4174 __ psrld(left_reg, xmm_scratch);
4175 break;
4176 case kInt32x4ShiftRightArithmetic:
4177 __ psrad(left_reg, xmm_scratch);
4178 break;
4179 default:
4180 UNREACHABLE();
4181 }
4182 }
4183 break;
4184 }
4185 case kFloat32x4LessThan:
4186 case kFloat32x4LessThanOrEqual:
4187 case kFloat32x4Equal:
4188 case kFloat32x4NotEqual:
4189 case kFloat32x4GreaterThanOrEqual:
4190 case kFloat32x4GreaterThan: {
4191 ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
4192 ASSERT(instr->hydrogen()->right()->representation().IsFloat32x4());
4193 XMMRegister left_reg = ToFloat32x4Register(instr->left());
4194 XMMRegister right_reg = ToFloat32x4Register(instr->right());
4195 XMMRegister result_reg = ToInt32x4Register(instr->result());
4196 switch (instr->op()) {
4197 case kFloat32x4LessThan:
4198 if (result_reg.is(left_reg)) {
4199 __ cmpltps(result_reg, right_reg);
4200 } else if (result_reg.is(right_reg)) {
4201 __ cmpnltps(result_reg, left_reg);
4202 } else {
4203 __ movaps(result_reg, left_reg);
4204 __ cmpltps(result_reg, right_reg);
4205 }
4206 break;
4207 case kFloat32x4LessThanOrEqual:
4208 if (result_reg.is(left_reg)) {
4209 __ cmpleps(result_reg, right_reg);
4210 } else if (result_reg.is(right_reg)) {
4211 __ cmpnleps(result_reg, left_reg);
4212 } else {
4213 __ movaps(result_reg, left_reg);
4214 __ cmpleps(result_reg, right_reg);
4215 }
4216 break;
4217 case kFloat32x4Equal:
4218 if (result_reg.is(left_reg)) {
4219 __ cmpeqps(result_reg, right_reg);
4220 } else if (result_reg.is(right_reg)) {
4221 __ cmpeqps(result_reg, left_reg);
4222 } else {
4223 __ movaps(result_reg, left_reg);
4224 __ cmpeqps(result_reg, right_reg);
4225 }
4226 break;
4227 case kFloat32x4NotEqual:
4228 if (result_reg.is(left_reg)) {
4229 __ cmpneqps(result_reg, right_reg);
4230 } else if (result_reg.is(right_reg)) {
4231 __ cmpneqps(result_reg, left_reg);
4232 } else {
4233 __ movaps(result_reg, left_reg);
4234 __ cmpneqps(result_reg, right_reg);
4235 }
4236 break;
4237 case kFloat32x4GreaterThanOrEqual:
4238 if (result_reg.is(left_reg)) {
4239 __ cmpnltps(result_reg, right_reg);
4240 } else if (result_reg.is(right_reg)) {
4241 __ cmpltps(result_reg, left_reg);
4242 } else {
4243 __ movaps(result_reg, left_reg);
4244 __ cmpnltps(result_reg, right_reg);
4245 }
4246 break;
4247 case kFloat32x4GreaterThan:
4248 if (result_reg.is(left_reg)) {
4249 __ cmpnleps(result_reg, right_reg);
4250 } else if (result_reg.is(right_reg)) {
4251 __ cmpleps(result_reg, left_reg);
4252 } else {
4253 __ movaps(result_reg, left_reg);
4254 __ cmpnleps(result_reg, right_reg);
4255 }
4256 break;
4257 default:
4258 UNREACHABLE();
4259 }
4260 break;
4261 }
4262 case kInt32x4And:
4263 case kInt32x4Or:
4264 case kInt32x4Xor:
4265 case kInt32x4Add:
4266 case kInt32x4Sub:
4267 case kInt32x4Mul:
4269 case kInt32x4GreaterThan:
4270 case kInt32x4Equal:
4271 case kInt32x4LessThan: {
4272 ASSERT(instr->left()->Equals(instr->result()));
4273 ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
4274 ASSERT(instr->hydrogen()->right()->representation().IsInt32x4());
4275 XMMRegister left_reg = ToInt32x4Register(instr->left());
4276 XMMRegister right_reg = ToInt32x4Register(instr->right());
4277 switch (instr->op()) {
4278 case kInt32x4And:
4279 __ andps(left_reg, right_reg);
4280 break;
4281 case kInt32x4Or:
4282 __ orps(left_reg, right_reg);
4283 break;
4284 case kInt32x4Xor:
4285 __ xorps(left_reg, right_reg);
4286 break;
4287 case kInt32x4Add:
4288 __ paddd(left_reg, right_reg);
4289 break;
4290 case kInt32x4Sub:
4291 __ psubd(left_reg, right_reg);
4292 break;
4293 case kInt32x4Mul:
4294 if (CpuFeatures::IsSupported(SSE4_1)) {
4295 CpuFeatureScope scope(masm(), SSE4_1);
4296 __ pmulld(left_reg, right_reg);
4297 } else {
4298 // The algorithm is from http://stackoverflow.com/questions/10500766/sse-multiplication-of-4-32-bit-integers
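// Lanewise, the SSE2 fallback below computes, for a = left and b = right:
//   pmuludq a, b        ; 64-bit products a0*b0 and a2*b2 (even lanes)
//   psrldq by 4 bytes   ; align the odd lanes (a1,a3 / b1,b3) at even slots
//   pmuludq on copies   ; 64-bit products a1*b1 and a3*b3
//   pshufd ..., 8       ; keep only the low dword of each 64-bit product
//   punpackldq          ; interleave even and odd products
// Keeping only the low 32 bits of each product gives the same wrap-around
// result pmulld (SSE4.1) would produce; right_reg is clobbered, which is
// acceptable here since only left_reg carries the result.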
4299 XMMRegister xmm_scratch = xmm0;
4300 __ movaps(xmm_scratch, left_reg);
4301 __ pmuludq(left_reg, right_reg);
4302 __ psrldq(xmm_scratch, 4);
4303 __ psrldq(right_reg, 4);
4304 __ pmuludq(xmm_scratch, right_reg);
4305 __ pshufd(left_reg, left_reg, 8);
4306 __ pshufd(xmm_scratch, xmm_scratch, 8);
4307 __ punpackldq(left_reg, xmm_scratch);
4308 }
4309 break;
4310 case kInt32x4GreaterThan:
4311 __ pcmpgtd(left_reg, right_reg);
4312 break;
4313 case kInt32x4Equal:
4314 __ pcmpeqd(left_reg, right_reg);
4315 break;
4316 case kInt32x4LessThan: {
4317 XMMRegister xmm_scratch = xmm0;
4318 __ movaps(xmm_scratch, right_reg);
4319 __ pcmpgtd(xmm_scratch, left_reg);
4320 __ movaps(left_reg, xmm_scratch);
4321 break;
4322 }
4323 default:
4324 UNREACHABLE();
4325 }
4326 break;
4327 }
4329 case kFloat32x4WithW:
4330 imm8++;
4331 case kFloat32x4WithZ:
4332 imm8++;
4333 case kFloat32x4WithY:
4334 imm8++;
4335 case kFloat32x4WithX: {
4336 ASSERT(instr->left()->Equals(instr->result()));
4337 ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
4338 ASSERT(instr->hydrogen()->right()->representation().IsDouble());
4339 XMMRegister left_reg = ToFloat32x4Register(instr->left());
4340 XMMRegister right_reg = ToDoubleRegister(instr->right());
4341 XMMRegister xmm_scratch = xmm0;
4342 __ xorps(xmm_scratch, xmm_scratch);
4343 __ cvtsd2ss(xmm_scratch, right_reg);
4344 if (CpuFeatures::IsSupported(SSE4_1)) {
4346 CpuFeatureScope scope(masm(), SSE4_1);
4347 __ insertps(left_reg, xmm_scratch, imm8);
4348 } else {
4349 __ subq(rsp, Immediate(kFloat32x4Size));
4350 __ movups(Operand(rsp, 0), left_reg);
4351 __ movss(Operand(rsp, imm8 * kFloatSize), xmm_scratch);
4352 __ movups(left_reg, Operand(rsp, 0));
4353 __ addq(rsp, Immediate(kFloat32x4Size));
4354 }
4355 break;
4356 }
4357 case kInt32x4WithW:
4358 imm8++;
4359 case kInt32x4WithZ:
4360 imm8++;
4361 case kInt32x4WithY:
4362 imm8++;
4363 case kInt32x4WithX: {
4364 ASSERT(instr->left()->Equals(instr->result()));
4365 ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
4366 ASSERT(instr->hydrogen()->right()->representation().IsInteger32());
4367 XMMRegister left_reg = ToInt32x4Register(instr->left());
4368 Register right_reg = ToRegister(instr->right());
4369 if (CpuFeatures::IsSupported(SSE4_1)) {
4370 CpuFeatureScope scope(masm(), SSE4_1);
4371 __ pinsrd(left_reg, right_reg, imm8);
4372 } else {
4373 __ subq(rsp, Immediate(kInt32x4Size));
4374 __ movdqu(Operand(rsp, 0), left_reg);
4375 __ movl(Operand(rsp, imm8 * kFloatSize), right_reg);
4376 __ movdqu(left_reg, Operand(rsp, 0));
4377 __ addq(rsp, Immediate(kInt32x4Size));
4378 }
4379 break;
4380 }
4381 case kInt32x4WithFlagW:
4382 imm8++;
4383 case kInt32x4WithFlagZ:
4384 imm8++;
4385 case kInt32x4WithFlagY:
4386 imm8++;
4387 case kInt32x4WithFlagX: {
4388 ASSERT(instr->left()->Equals(instr->result()));
4389 ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
4390 ASSERT(instr->hydrogen()->right()->representation().IsTagged());
4391 HType type = instr->hydrogen()->right()->type();
4392 XMMRegister left_reg = ToInt32x4Register(instr->left());
4393 Register right_reg = ToRegister(instr->right());
4394 Label load_false_value, done;
4395 if (type.IsBoolean()) {
4396 __ subq(rsp, Immediate(kInt32x4Size));
4397 __ movups(Operand(rsp, 0), left_reg);
4398 __ CompareRoot(right_reg, Heap::kTrueValueRootIndex);
4399 __ j(not_equal, &load_false_value, Label::kNear);
4400 } else {
4401 Comment(";;; deoptimize: other types for int32x4.withFlagX/Y/Z/W.");
4402 DeoptimizeIf(no_condition, instr->environment());
4403 }
4406 __ movl(Operand(rsp, imm8 * kFloatSize), Immediate(0xFFFFFFFF));
4407 __ jmp(&done, Label::kNear);
4408 __ bind(&load_false_value);
4409 __ movl(Operand(rsp, imm8 * kFloatSize), Immediate(0x0));
4410 __ bind(&done);
4411 __ movups(left_reg, Operand(rsp, 0));
4412 __ addq(rsp, Immediate(kInt32x4Size));
4413 break;
4414 }
4415 default:
4416 UNREACHABLE();
4417 }
4418 }
4422 void LCodeGen::DoTernarySIMDOperation(LTernarySIMDOperation* instr) {
4423 switch (instr->op()) {
4424 case kInt32x4Select: {
4425 ASSERT(instr->hydrogen()->first()->representation().IsInt32x4());
4426 ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
4427 ASSERT(instr->hydrogen()->third()->representation().IsFloat32x4());
4429 XMMRegister mask_reg = ToInt32x4Register(instr->first());
4430 XMMRegister left_reg = ToFloat32x4Register(instr->second());
4431 XMMRegister right_reg = ToFloat32x4Register(instr->third());
4432 XMMRegister result_reg = ToFloat32x4Register(instr->result());
4433 XMMRegister temp_reg = xmm0;
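// The blend below evaluates the classic bitwise select
//   result = (mask & trueValue) | (~mask & falseValue)
// using only and/or/not on the 128-bit lanes; the register shuffling that
// follows merely avoids clobbering whichever input aliases result_reg.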
4436 __ movaps(temp_reg, mask_reg);
4437 // temp_reg = ~mask.
4438 __ notps(temp_reg);
4439 // temp_reg = temp_reg & falseValue.
4440 __ andps(temp_reg, right_reg);
4442 if (!result_reg.is(mask_reg)) {
4443 if (result_reg.is(left_reg)) {
4444 // result_reg = result_reg & trueValue.
4445 __ andps(result_reg, mask_reg);
4446 // out = result_reg | temp_reg.
4447 __ orps(result_reg, temp_reg);
4448 } else {
4449 __ movaps(result_reg, mask_reg);
4450 // result_reg = result_reg & trueValue.
4451 __ andps(result_reg, left_reg);
4452 // out = result_reg | temp_reg.
4453 __ orps(result_reg, temp_reg);
4454 }
4455 } else {
4456 // result_reg = result_reg & trueValue.
4457 __ andps(result_reg, left_reg);
4458 // out = result_reg | temp_reg.
4459 __ orps(result_reg, temp_reg);
4460 }
4461 break;
4462 }
4463 case kFloat32x4ShuffleMix: {
4464 ASSERT(instr->first()->Equals(instr->result()));
4465 ASSERT(instr->hydrogen()->first()->representation().IsFloat32x4());
4466 ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
4467 ASSERT(instr->hydrogen()->third()->representation().IsInteger32());
4468 if (instr->hydrogen()->third()->IsConstant() &&
4469 HConstant::cast(instr->hydrogen()->third())->HasInteger32Value()) {
4470 int32_t value = ToInteger32(LConstantOperand::cast(instr->third()));
4471 uint8_t select = static_cast<uint8_t>(value & 0xFF);
4472 XMMRegister first_reg = ToFloat32x4Register(instr->first());
4473 XMMRegister second_reg = ToFloat32x4Register(instr->second());
4474 __ shufps(first_reg, second_reg, select);
4475 } else {
4477 Comment(";;; deoptimize: non-constant selector for shuffle");
4478 DeoptimizeIf(no_condition, instr->environment());
4479 }
4480 break;
4481 }
4482 case kFloat32x4Clamp: {
4483 ASSERT(instr->first()->Equals(instr->result()));
4484 ASSERT(instr->hydrogen()->first()->representation().IsFloat32x4());
4485 ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
4486 ASSERT(instr->hydrogen()->third()->representation().IsFloat32x4());
4488 XMMRegister value_reg = ToFloat32x4Register(instr->first());
4489 XMMRegister lower_reg = ToFloat32x4Register(instr->second());
4490 XMMRegister upper_reg = ToFloat32x4Register(instr->third());
4491 __ minps(value_reg, upper_reg);
4492 __ maxps(value_reg, lower_reg);
4493 break;
4494 }
4495 default:
4496 UNREACHABLE();
4497 }
4498 }
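// clamp(v, lo, hi) is composed from the two bounds as max(min(v, hi), lo);
// minps/maxps apply that lanewise across the four float lanes above.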
4502 void LCodeGen::DoQuarternarySIMDOperation(LQuarternarySIMDOperation* instr) {
4503 switch (instr->op()) {
4504 case kFloat32x4Constructor: {
4505 ASSERT(instr->hydrogen()->x()->representation().IsDouble());
4506 ASSERT(instr->hydrogen()->y()->representation().IsDouble());
4507 ASSERT(instr->hydrogen()->z()->representation().IsDouble());
4508 ASSERT(instr->hydrogen()->w()->representation().IsDouble());
4509 XMMRegister x_reg = ToDoubleRegister(instr->x());
4510 XMMRegister y_reg = ToDoubleRegister(instr->y());
4511 XMMRegister z_reg = ToDoubleRegister(instr->z());
4512 XMMRegister w_reg = ToDoubleRegister(instr->w());
4513 XMMRegister result_reg = ToFloat32x4Register(instr->result());
4514 __ subq(rsp, Immediate(kFloat32x4Size));
4515 __ xorps(xmm0, xmm0);
4516 __ cvtsd2ss(xmm0, x_reg);
4517 __ movss(Operand(rsp, 0 * kFloatSize), xmm0);
4518 __ xorps(xmm0, xmm0);
4519 __ cvtsd2ss(xmm0, y_reg);
4520 __ movss(Operand(rsp, 1 * kFloatSize), xmm0);
4521 __ xorps(xmm0, xmm0);
4522 __ cvtsd2ss(xmm0, z_reg);
4523 __ movss(Operand(rsp, 2 * kFloatSize), xmm0);
4524 __ xorps(xmm0, xmm0);
4525 __ cvtsd2ss(xmm0, w_reg);
4526 __ movss(Operand(rsp, 3 * kFloatSize), xmm0);
4527 __ movups(result_reg, Operand(rsp, 0 * kFloatSize));
4528 __ addq(rsp, Immediate(kFloat32x4Size));
4529 break;
4530 }
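// Without a general lane-insert instruction, the constructors assemble the
// vector in memory: convert/store each scalar into a stack slot, then reload
// all 16 bytes at once with movups. The int32x4 case below uses the same
// stack round-trip with plain 32-bit stores.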
4531 case kInt32x4Constructor: {
4532 ASSERT(instr->hydrogen()->x()->representation().IsInteger32());
4533 ASSERT(instr->hydrogen()->y()->representation().IsInteger32());
4534 ASSERT(instr->hydrogen()->z()->representation().IsInteger32());
4535 ASSERT(instr->hydrogen()->w()->representation().IsInteger32());
4536 Register x_reg = ToRegister(instr->x());
4537 Register y_reg = ToRegister(instr->y());
4538 Register z_reg = ToRegister(instr->z());
4539 Register w_reg = ToRegister(instr->w());
4540 XMMRegister result_reg = ToInt32x4Register(instr->result());
4541 __ subq(rsp, Immediate(kInt32x4Size));
4542 __ movl(Operand(rsp, 0 * kInt32Size), x_reg);
4543 __ movl(Operand(rsp, 1 * kInt32Size), y_reg);
4544 __ movl(Operand(rsp, 2 * kInt32Size), z_reg);
4545 __ movl(Operand(rsp, 3 * kInt32Size), w_reg);
4546 __ movups(result_reg, Operand(rsp, 0 * kInt32Size));
4547 __ addq(rsp, Immediate(kInt32x4Size));
4548 break;
4549 }
4550 case kInt32x4Bool: {
4551 ASSERT(instr->hydrogen()->x()->representation().IsTagged());
4552 ASSERT(instr->hydrogen()->y()->representation().IsTagged());
4553 ASSERT(instr->hydrogen()->z()->representation().IsTagged());
4554 ASSERT(instr->hydrogen()->w()->representation().IsTagged());
4555 HType x_type = instr->hydrogen()->x()->type();
4556 HType y_type = instr->hydrogen()->y()->type();
4557 HType z_type = instr->hydrogen()->z()->type();
4558 HType w_type = instr->hydrogen()->w()->type();
4559 if (!x_type.IsBoolean() || !y_type.IsBoolean() ||
4560 !z_type.IsBoolean() || !w_type.IsBoolean()) {
4561 Comment(";;; deoptimize: other types for int32x4.bool.");
4562 DeoptimizeIf(no_condition, instr->environment());
4563 }
4565 XMMRegister result_reg = ToInt32x4Register(instr->result());
4566 Register x_reg = ToRegister(instr->x());
4567 Register y_reg = ToRegister(instr->y());
4568 Register z_reg = ToRegister(instr->z());
4569 Register w_reg = ToRegister(instr->w());
4570 Label load_false_x, done_x, load_false_y, done_y,
4571 load_false_z, done_z, load_false_w, done_w;
4572 __ subq(rsp, Immediate(kInt32x4Size));
4574 __ CompareRoot(x_reg, Heap::kTrueValueRootIndex);
4575 __ j(not_equal, &load_false_x, Label::kNear);
4576 __ movl(Operand(rsp, 0 * kInt32Size), Immediate(-1));
4577 __ jmp(&done_x, Label::kNear);
4578 __ bind(&load_false_x);
4579 __ movl(Operand(rsp, 0 * kInt32Size), Immediate(0x0));
4580 __ bind(&done_x);
4582 __ CompareRoot(y_reg, Heap::kTrueValueRootIndex);
4583 __ j(not_equal, &load_false_y, Label::kNear);
4584 __ movl(Operand(rsp, 1 * kInt32Size), Immediate(-1));
4585 __ jmp(&done_y, Label::kNear);
4586 __ bind(&load_false_y);
4587 __ movl(Operand(rsp, 1 * kInt32Size), Immediate(0x0));
4588 __ bind(&done_y);
4590 __ CompareRoot(z_reg, Heap::kTrueValueRootIndex);
4591 __ j(not_equal, &load_false_z, Label::kNear);
4592 __ movl(Operand(rsp, 2 * kInt32Size), Immediate(-1));
4593 __ jmp(&done_z, Label::kNear);
4594 __ bind(&load_false_z);
4595 __ movl(Operand(rsp, 2 * kInt32Size), Immediate(0x0));
4596 __ bind(&done_z);
4598 __ CompareRoot(w_reg, Heap::kTrueValueRootIndex);
4599 __ j(not_equal, &load_false_w, Label::kNear);
4600 __ movl(Operand(rsp, 3 * kInt32Size), Immediate(-1));
4601 __ jmp(&done_w, Label::kNear);
4602 __ bind(&load_false_w);
4603 __ movl(Operand(rsp, 3 * kInt32Size), Immediate(0x0));
4604 __ bind(&done_w);
4606 __ movups(result_reg, Operand(rsp, 0));
4607 __ addq(rsp, Immediate(kInt32x4Size));
4608 break;
4609 }
4610 default:
4611 UNREACHABLE();
4612 }
4613 }
4617 void LCodeGen::DoPower(LPower* instr) {
4618 Representation exponent_type = instr->hydrogen()->right()->representation();
4619 // Having marked this as a call, we can use any registers.
4620 // Just make sure that the input/output registers are the expected ones.
4622 Register exponent = rdx;
4623 ASSERT(!instr->right()->IsRegister() ||
4624 ToRegister(instr->right()).is(exponent));
4625 ASSERT(!instr->right()->IsDoubleRegister() ||
4626 ToDoubleRegister(instr->right()).is(xmm1));
4627 ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
4628 ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
4630 if (exponent_type.IsSmi()) {
4631 MathPowStub stub(MathPowStub::TAGGED);
4632 __ CallStub(&stub);
4633 } else if (exponent_type.IsTagged()) {
4634 Label no_deopt;
4635 __ JumpIfSmi(exponent, &no_deopt, Label::kNear);
4636 __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
4637 DeoptimizeIf(not_equal, instr->environment());
4638 __ bind(&no_deopt);
4639 MathPowStub stub(MathPowStub::TAGGED);
4640 __ CallStub(&stub);
4641 } else if (exponent_type.IsInteger32()) {
4642 MathPowStub stub(MathPowStub::INTEGER);
4643 __ CallStub(&stub);
4644 } else {
4645 ASSERT(exponent_type.IsDouble());
4646 MathPowStub stub(MathPowStub::DOUBLE);
4647 __ CallStub(&stub);
4648 }
4649 }
4652 void LCodeGen::DoMathExp(LMathExp* instr) {
4653 XMMRegister input = ToDoubleRegister(instr->value());
4654 XMMRegister result = ToDoubleRegister(instr->result());
4655 XMMRegister temp0 = double_scratch0();
4656 Register temp1 = ToRegister(instr->temp1());
4657 Register temp2 = ToRegister(instr->temp2());
4659 MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
4660 }
4663 void LCodeGen::DoMathLog(LMathLog* instr) {
4664 ASSERT(instr->value()->Equals(instr->result()));
4665 XMMRegister input_reg = ToDoubleRegister(instr->value());
4666 XMMRegister xmm_scratch = double_scratch0();
4667 Label positive, done, zero;
4668 __ xorps(xmm_scratch, xmm_scratch);
4669 __ ucomisd(input_reg, xmm_scratch);
4670 __ j(above, &positive, Label::kNear);
4671 __ j(not_carry, &zero, Label::kNear);
4672 ExternalReference nan =
4673 ExternalReference::address_of_canonical_non_hole_nan();
4674 Operand nan_operand = masm()->ExternalOperand(nan);
4675 __ movsd(input_reg, nan_operand);
4676 __ jmp(&done, Label::kNear);
4677 __ bind(&zero);
4678 ExternalReference ninf =
4679 ExternalReference::address_of_negative_infinity();
4680 Operand ninf_operand = masm()->ExternalOperand(ninf);
4681 __ movsd(input_reg, ninf_operand);
4682 __ jmp(&done, Label::kNear);
4683 __ bind(&positive);
4684 __ fldln2();
4685 __ subp(rsp, Immediate(kDoubleSize));
4686 __ movsd(Operand(rsp, 0), input_reg);
4687 __ fld_d(Operand(rsp, 0));
4688 __ fyl2x();
4689 __ fstp_d(Operand(rsp, 0));
4690 __ movsd(input_reg, Operand(rsp, 0));
4691 __ addp(rsp, Immediate(kDoubleSize));
4692 __ bind(&done);
4693 }
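// The x87 sequence above computes ln(x) via fyl2x, which evaluates
// ST1 * log2(ST0): fldln2 pushes the ln(2) constant, so the result is
// ln(2) * log2(x) == ln(x). The value is spilled through the stack
// because there is no direct x87 <-> XMM move.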
4696 void LCodeGen::DoMathClz32(LMathClz32* instr) {
4697 Register input = ToRegister(instr->value());
4698 Register result = ToRegister(instr->result());
4699 Label not_zero_input;
4700 __ bsrl(result, input);
4702 __ j(not_zero, &not_zero_input);
4703 __ Set(result, 63); // 63 ^ 31 == 32; bsrl leaves result undefined for 0.
4705 __ bind(&not_zero_input);
4706 __ xorl(result, Immediate(31)); // For x in [0..31], 31 ^ x == 31 - x.
4707 }
4710 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
4711 ASSERT(ToRegister(instr->context()).is(rsi));
4712 ASSERT(ToRegister(instr->function()).is(rdi));
4713 ASSERT(instr->HasPointerMap());
4715 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
4716 if (known_function.is_null()) {
4717 LPointerMap* pointers = instr->pointer_map();
4718 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
4719 ParameterCount count(instr->arity());
4720 __ InvokeFunction(rdi, count, CALL_FUNCTION, generator);
4721 } else {
4722 CallKnownFunction(known_function,
4723 instr->hydrogen()->formal_parameter_count(),
4724 instr->arity(),
4725 instr,
4726 RDI_CONTAINS_TARGET);
4727 }
4728 }
4731 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4732 ASSERT(ToRegister(instr->context()).is(rsi));
4733 ASSERT(ToRegister(instr->function()).is(rdi));
4734 ASSERT(ToRegister(instr->result()).is(rax));
4736 int arity = instr->arity();
4737 CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
4738 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4739 }
4742 void LCodeGen::DoCallNew(LCallNew* instr) {
4743 ASSERT(ToRegister(instr->context()).is(rsi));
4744 ASSERT(ToRegister(instr->constructor()).is(rdi));
4745 ASSERT(ToRegister(instr->result()).is(rax));
4747 __ Set(rax, instr->arity());
4748 // No cell in rbx for construct type feedback in optimized code.
4749 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
4750 CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
4751 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4752 }
4755 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4756 ASSERT(ToRegister(instr->context()).is(rsi));
4757 ASSERT(ToRegister(instr->constructor()).is(rdi));
4758 ASSERT(ToRegister(instr->result()).is(rax));
4760 __ Set(rax, instr->arity());
4761 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
4762 ElementsKind kind = instr->hydrogen()->elements_kind();
4763 AllocationSiteOverrideMode override_mode =
4764 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4765 ? DISABLE_ALLOCATION_SITES
4766 : DONT_OVERRIDE;
4768 if (instr->arity() == 0) {
4769 ArrayNoArgumentConstructorStub stub(kind, override_mode);
4770 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4771 } else if (instr->arity() == 1) {
4772 Label done;
4773 if (IsFastPackedElementsKind(kind)) {
4774 Label packed_case;
4775 // A non-zero length in the first argument requires the holey variant
4776 // of the stub, so inspect it here.
4777 __ movp(rcx, Operand(rsp, 0));
4778 __ testp(rcx, rcx);
4779 __ j(zero, &packed_case, Label::kNear);
4781 ElementsKind holey_kind = GetHoleyElementsKind(kind);
4782 ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
4783 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4784 __ jmp(&done, Label::kNear);
4785 __ bind(&packed_case);
4786 }
4788 ArraySingleArgumentConstructorStub stub(kind, override_mode);
4789 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4790 __ bind(&done);
4791 } else {
4792 ArrayNArgumentsConstructorStub stub(kind, override_mode);
4793 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4794 }
4795 }
4798 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4799 ASSERT(ToRegister(instr->context()).is(rsi));
4800 CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
4801 }
4804 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4805 Register function = ToRegister(instr->function());
4806 Register code_object = ToRegister(instr->code_object());
4807 __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
4808 __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
4809 }
4812 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4813 Register result = ToRegister(instr->result());
4814 Register base = ToRegister(instr->base_object());
4815 if (instr->offset()->IsConstantOperand()) {
4816 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4817 __ leap(result, Operand(base, ToInteger32(offset)));
4818 } else {
4819 Register offset = ToRegister(instr->offset());
4820 __ leap(result, Operand(base, offset, times_1, 0));
4821 }
4822 }
4825 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4826 HStoreNamedField* hinstr = instr->hydrogen();
4827 Representation representation = instr->representation();
4829 HObjectAccess access = hinstr->access();
4830 int offset = access.offset();
4832 if (access.IsExternalMemory()) {
4833 ASSERT(!hinstr->NeedsWriteBarrier());
4834 Register value = ToRegister(instr->value());
4835 if (instr->object()->IsConstantOperand()) {
4836 ASSERT(value.is(rax));
4837 LConstantOperand* object = LConstantOperand::cast(instr->object());
4838 __ store_rax(ToExternalReference(object));
4839 } else {
4840 Register object = ToRegister(instr->object());
4841 __ Store(MemOperand(object, offset), value, representation);
4842 }
4843 return;
4844 }
4846 Register object = ToRegister(instr->object());
4847 Handle<Map> transition = instr->transition();
4848 SmiCheck check_needed = hinstr->value()->IsHeapObject()
4849 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4851 ASSERT(!(representation.IsSmi() &&
4852 instr->value()->IsConstantOperand() &&
4853 !IsInteger32Constant(LConstantOperand::cast(instr->value()))));
4854 if (representation.IsHeapObject()) {
4855 if (instr->value()->IsConstantOperand()) {
4856 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4857 if (chunk_->LookupConstant(operand_value)->HasSmiValue()) {
4858 DeoptimizeIf(no_condition, instr->environment());
4859 }
4860 } else {
4861 if (!hinstr->value()->type().IsHeapObject()) {
4862 Register value = ToRegister(instr->value());
4863 Condition cc = masm()->CheckSmi(value);
4864 DeoptimizeIf(cc, instr->environment());
4866 // We know that value is a smi now, so we can omit the check below.
4867 check_needed = OMIT_SMI_CHECK;
4868 }
4869 }
4870 } else if (representation.IsDouble()) {
4871 ASSERT(transition.is_null());
4872 ASSERT(access.IsInobject());
4873 ASSERT(!hinstr->NeedsWriteBarrier());
4874 XMMRegister value = ToDoubleRegister(instr->value());
4875 __ movsd(FieldOperand(object, offset), value);
4876 return;
4877 }
4879 if (!transition.is_null()) {
4880 if (!hinstr->NeedsWriteBarrierForMap()) {
4881 __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
4882 } else {
4883 Register temp = ToRegister(instr->temp());
4884 __ Move(kScratchRegister, transition);
4885 __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
4886 // Update the write barrier for the map field.
4887 __ RecordWriteField(object,
4888 HeapObject::kMapOffset,
4889 kScratchRegister,
4890 temp,
4891 kSaveFPRegs,
4892 OMIT_REMEMBERED_SET,
4893 OMIT_SMI_CHECK);
4894 }
4895 }
4898 Register write_register = object;
4899 if (!access.IsInobject()) {
4900 write_register = ToRegister(instr->temp());
4901 __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
4902 }
4904 if (representation.IsSmi() &&
4905 hinstr->value()->representation().IsInteger32()) {
4906 ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4908 Register scratch = kScratchRegister;
4909 __ Load(scratch, FieldOperand(write_register, offset), representation);
4910 __ AssertSmi(scratch);
4912 // Store int value directly to upper half of the smi.
4913 STATIC_ASSERT(kSmiTag == 0);
4914 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
4915 offset += kPointerSize / 2;
4916 representation = Representation::Integer32();
4917 }
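// On x64 a smi stores its 32-bit payload in the upper half of the word
// (value shifted left by 32, tag in the lower half), so an initialized smi
// field can be updated by overwriting just the upper four bytes with the
// raw int32 and no tagging instruction is needed; the STATIC_ASSERTs above
// guard exactly this layout. Roughly, and only under that layout:
//
//   // *(int32_t*)(field_address + 4) = int32_value;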
4919 Operand operand = FieldOperand(write_register, offset);
4921 if (instr->value()->IsRegister()) {
4922 Register value = ToRegister(instr->value());
4923 __ Store(operand, value, representation);
4924 } else {
4925 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4926 if (IsInteger32Constant(operand_value)) {
4927 ASSERT(!hinstr->NeedsWriteBarrier());
4928 int32_t value = ToInteger32(operand_value);
4929 if (representation.IsSmi()) {
4930 __ Move(operand, Smi::FromInt(value));
4931 } else {
4933 __ movl(operand, Immediate(value));
4934 }
4935 } else {
4937 Handle<Object> handle_value = ToHandle(operand_value);
4938 ASSERT(!hinstr->NeedsWriteBarrier());
4939 __ Move(operand, handle_value);
4940 }
4941 }
4943 if (hinstr->NeedsWriteBarrier()) {
4944 Register value = ToRegister(instr->value());
4945 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
4946 // Update the write barrier for the object for in-object properties.
4947 __ RecordWriteField(write_register,
4948 offset,
4949 value,
4950 temp,
4951 kSaveFPRegs,
4952 EMIT_REMEMBERED_SET,
4953 check_needed);
4954 }
4955 }
4958 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4959 ASSERT(ToRegister(instr->context()).is(rsi));
4960 ASSERT(ToRegister(instr->object()).is(rdx));
4961 ASSERT(ToRegister(instr->value()).is(rax));
4963 __ Move(rcx, instr->hydrogen()->name());
4964 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4965 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4966 }
4969 void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
4970 if (FLAG_debug_code && check->hydrogen()->skip_check()) {
4971 Label done;
4972 __ j(NegateCondition(cc), &done, Label::kNear);
4973 __ int3();
4974 __ bind(&done);
4975 } else {
4976 DeoptimizeIf(cc, check->environment());
4977 }
4978 }
4981 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4982 HBoundsCheck* hinstr = instr->hydrogen();
4983 if (hinstr->skip_check()) return;
4985 Representation representation = hinstr->length()->representation();
4986 ASSERT(representation.Equals(hinstr->index()->representation()));
4987 ASSERT(representation.IsSmiOrInteger32());
4989 if (instr->length()->IsRegister()) {
4990 Register reg = ToRegister(instr->length());
4992 if (instr->index()->IsConstantOperand()) {
4993 int32_t constant_index =
4994 ToInteger32(LConstantOperand::cast(instr->index()));
4995 if (representation.IsSmi()) {
4996 __ Cmp(reg, Smi::FromInt(constant_index));
4998 __ cmpl(reg, Immediate(constant_index));
4999 }
5000 } else {
5001 Register reg2 = ToRegister(instr->index());
5002 if (representation.IsSmi()) {
5003 __ cmpp(reg, reg2);
5004 } else {
5005 __ cmpl(reg, reg2);
5006 }
5007 }
5008 } else {
5009 Operand length = ToOperand(instr->length());
5010 if (instr->index()->IsConstantOperand()) {
5011 int32_t constant_index =
5012 ToInteger32(LConstantOperand::cast(instr->index()));
5013 if (representation.IsSmi()) {
5014 __ Cmp(length, Smi::FromInt(constant_index));
5016 __ cmpl(length, Immediate(constant_index));
5017 }
5018 } else {
5019 if (representation.IsSmi()) {
5020 __ cmpp(length, ToRegister(instr->index()));
5022 __ cmpl(length, ToRegister(instr->index()));
5023 }
5024 }
5025 }
5026 Condition condition = hinstr->allow_equality() ? below : below_equal;
5027 ApplyCheckIf(condition, instr);
5028 }
5031 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
5032 ElementsKind elements_kind = instr->elements_kind();
5033 LOperand* key = instr->key();
5034 if (!key->IsConstantOperand()) {
5035 HandleExternalArrayOpRequiresPreScale(key, elements_kind);
5036 }
5037 int base_offset = instr->is_fixed_typed_array()
5038 ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
5039 : 0;
5040 Operand operand(BuildFastArrayOperand(
5041 instr->elements(),
5042 key,
5043 elements_kind,
5044 base_offset,
5045 instr->additional_index()));
5047 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
5048 elements_kind == FLOAT32_ELEMENTS) {
5049 XMMRegister value(ToDoubleRegister(instr->value()));
5050 __ cvtsd2ss(value, value);
5051 __ movss(operand, value);
5052 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
5053 elements_kind == FLOAT64_ELEMENTS) {
5054 __ movsd(operand, ToDoubleRegister(instr->value()));
5055 } else if (IsSIMD128ElementsKind(elements_kind)) {
5056 __ movups(operand, ToSIMD128Register(instr->value()));
5058 Register value(ToRegister(instr->value()));
5059 switch (elements_kind) {
5060 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
5061 case EXTERNAL_INT8_ELEMENTS:
5062 case EXTERNAL_UINT8_ELEMENTS:
5063 case INT8_ELEMENTS:
5064 case UINT8_ELEMENTS:
5065 case UINT8_CLAMPED_ELEMENTS:
5066 __ movb(operand, value);
5067 break;
5068 case EXTERNAL_INT16_ELEMENTS:
5069 case EXTERNAL_UINT16_ELEMENTS:
5070 case INT16_ELEMENTS:
5071 case UINT16_ELEMENTS:
5072 __ movw(operand, value);
5073 break;
5074 case EXTERNAL_INT32_ELEMENTS:
5075 case EXTERNAL_UINT32_ELEMENTS:
5076 case INT32_ELEMENTS:
5077 case UINT32_ELEMENTS:
5078 __ movl(operand, value);
5079 break;
5080 case EXTERNAL_FLOAT32_ELEMENTS:
5081 case EXTERNAL_FLOAT32x4_ELEMENTS:
5082 case EXTERNAL_INT32x4_ELEMENTS:
5083 case EXTERNAL_FLOAT64_ELEMENTS:
5084 case FLOAT32_ELEMENTS:
5085 case FLOAT64_ELEMENTS:
5086 case FLOAT32x4_ELEMENTS:
5087 case INT32x4_ELEMENTS:
5088 case FAST_ELEMENTS:
5089 case FAST_SMI_ELEMENTS:
5090 case FAST_DOUBLE_ELEMENTS:
5091 case FAST_HOLEY_ELEMENTS:
5092 case FAST_HOLEY_SMI_ELEMENTS:
5093 case FAST_HOLEY_DOUBLE_ELEMENTS:
5094 case DICTIONARY_ELEMENTS:
5095 case SLOPPY_ARGUMENTS_ELEMENTS:
5096 UNREACHABLE();
5097 break;
5098 }
5099 }
5100 }
5103 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
5104 XMMRegister value = ToDoubleRegister(instr->value());
5105 LOperand* key = instr->key();
5106 if (instr->NeedsCanonicalization()) {
5107 Label have_value;
5109 __ ucomisd(value, value);
5110 __ j(parity_odd, &have_value, Label::kNear); // Jump if value is not NaN.
5112 __ Set(kScratchRegister, BitCast<uint64_t>(
5113 FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
5114 __ movq(value, kScratchRegister);
5116 __ bind(&have_value);
5117 }
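// ucomisd of a register against itself sets the parity flag exactly when
// the value is NaN (unordered compare), so parity_odd means "not NaN".
// All NaN payloads are then collapsed to one canonical bit pattern so that
// the hole marker NaN used by FAST_DOUBLE arrays stays unambiguous.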
5119 Operand double_store_operand = BuildFastArrayOperand(
5120 instr->elements(),
5121 key,
5122 FAST_DOUBLE_ELEMENTS,
5123 FixedDoubleArray::kHeaderSize - kHeapObjectTag,
5124 instr->additional_index());
5126 __ movsd(double_store_operand, value);
5127 }
5130 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
5131 HStoreKeyed* hinstr = instr->hydrogen();
5132 LOperand* key = instr->key();
5133 int offset = FixedArray::kHeaderSize - kHeapObjectTag;
5134 Representation representation = hinstr->value()->representation();
5136 if (representation.IsInteger32()) {
5137 ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
5138 ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
5140 Register scratch = kScratchRegister;
5141 __ Load(scratch,
5142 BuildFastArrayOperand(instr->elements(),
5143 key,
5144 FAST_ELEMENTS,
5145 offset,
5146 instr->additional_index()),
5147 Representation::Smi());
5148 __ AssertSmi(scratch);
5150 // Store int value directly to upper half of the smi.
5151 STATIC_ASSERT(kSmiTag == 0);
5152 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
5153 offset += kPointerSize / 2;
5154 }
5156 Operand operand =
5157 BuildFastArrayOperand(instr->elements(),
5158 key,
5159 FAST_ELEMENTS,
5160 offset,
5161 instr->additional_index());
5163 if (instr->value()->IsRegister()) {
5164 __ Store(operand, ToRegister(instr->value()), representation);
5166 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
5167 if (IsInteger32Constant(operand_value)) {
5168 int32_t value = ToInteger32(operand_value);
5169 if (representation.IsSmi()) {
5170 __ Move(operand, Smi::FromInt(value));
5171 } else {
5173 __ movl(operand, Immediate(value));
5174 }
5175 } else {
5176 Handle<Object> handle_value = ToHandle(operand_value);
5177 __ Move(operand, handle_value);
5178 }
5179 }
5181 if (hinstr->NeedsWriteBarrier()) {
5182 Register elements = ToRegister(instr->elements());
5183 ASSERT(instr->value()->IsRegister());
5184 Register value = ToRegister(instr->value());
5185 ASSERT(!key->IsConstantOperand());
5186 SmiCheck check_needed = hinstr->value()->IsHeapObject()
5187 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
5188 // Compute address of modified element and store it into key register.
5189 Register key_reg(ToRegister(key));
5190 __ leap(key_reg, operand);
5191 __ RecordWrite(elements,
5192 key_reg,
5193 value,
5194 kSaveFPRegs,
5195 EMIT_REMEMBERED_SET,
5196 check_needed);
5197 }
5198 }
5201 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
5202 if (instr->is_typed_elements()) {
5203 DoStoreKeyedExternalArray(instr);
5204 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
5205 DoStoreKeyedFixedDoubleArray(instr);
5206 } else {
5207 DoStoreKeyedFixedArray(instr);
5208 }
5209 }
5212 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
5213 ASSERT(ToRegister(instr->context()).is(rsi));
5214 ASSERT(ToRegister(instr->object()).is(rdx));
5215 ASSERT(ToRegister(instr->key()).is(rcx));
5216 ASSERT(ToRegister(instr->value()).is(rax));
5218 Handle<Code> ic = instr->strict_mode() == STRICT
5219 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
5220 : isolate()->builtins()->KeyedStoreIC_Initialize();
5221 CallCode(ic, RelocInfo::CODE_TARGET, instr);
5222 }
5225 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
5226 Register object_reg = ToRegister(instr->object());
5228 Handle<Map> from_map = instr->original_map();
5229 Handle<Map> to_map = instr->transitioned_map();
5230 ElementsKind from_kind = instr->from_kind();
5231 ElementsKind to_kind = instr->to_kind();
5233 Label not_applicable;
5234 __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
5235 __ j(not_equal, &not_applicable);
5236 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
5237 Register new_map_reg = ToRegister(instr->new_map_temp());
5238 __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
5239 __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
5241 ASSERT_NE(instr->temp(), NULL);
5242 __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
5243 ToRegister(instr->temp()), kDontSaveFPRegs);
5244 } else {
5245 ASSERT(ToRegister(instr->context()).is(rsi));
5246 PushSafepointRegistersScope scope(this);
5247 if (!object_reg.is(rax)) {
5248 __ movp(rax, object_reg);
5249 }
5250 __ Move(rbx, to_map);
5251 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
5252 TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
5253 __ CallStub(&stub);
5254 RecordSafepointWithRegisters(
5255 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5256 }
5257 __ bind(&not_applicable);
5258 }
5261 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
5262 Register object = ToRegister(instr->object());
5263 Register temp = ToRegister(instr->temp());
5264 Label no_memento_found;
5265 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
5266 DeoptimizeIf(equal, instr->environment());
5267 __ bind(&no_memento_found);
5268 }
5271 void LCodeGen::DoStringAdd(LStringAdd* instr) {
5272 ASSERT(ToRegister(instr->context()).is(rsi));
5273 ASSERT(ToRegister(instr->left()).is(rdx));
5274 ASSERT(ToRegister(instr->right()).is(rax));
5275 StringAddStub stub(instr->hydrogen()->flags(),
5276 instr->hydrogen()->pretenure_flag());
5277 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
5278 }
5281 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
5282 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
5283 public:
5284 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
5285 : LDeferredCode(codegen), instr_(instr) { }
5286 virtual void Generate() V8_OVERRIDE {
5287 codegen()->DoDeferredStringCharCodeAt(instr_);
5288 }
5289 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5290 private:
5291 LStringCharCodeAt* instr_;
5292 };
5294 DeferredStringCharCodeAt* deferred =
5295 new(zone()) DeferredStringCharCodeAt(this, instr);
5297 StringCharLoadGenerator::Generate(masm(),
5298 ToRegister(instr->string()),
5299 ToRegister(instr->index()),
5300 ToRegister(instr->result()),
5301 deferred->entry());
5302 __ bind(deferred->exit());
5303 }
5306 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
5307 Register string = ToRegister(instr->string());
5308 Register result = ToRegister(instr->result());
5310 // TODO(3095996): Get rid of this. For now, we need to make the
5311 // result register contain a valid pointer because it is already
5312 // contained in the register pointer map.
5313 __ Set(result, 0);
5315 PushSafepointRegistersScope scope(this);
5317 // Push the index as a smi. This is safe because of the checks in
5318 // DoStringCharCodeAt above.
5319 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
5320 if (instr->index()->IsConstantOperand()) {
5321 int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
5322 __ Push(Smi::FromInt(const_index));
5324 Register index = ToRegister(instr->index());
5325 __ Integer32ToSmi(index, index);
5326 __ Push(index);
5327 }
5328 CallRuntimeFromDeferred(
5329 Runtime::kHiddenStringCharCodeAt, 2, instr, instr->context());
5331 __ SmiToInteger32(rax, rax);
5332 __ StoreToSafepointRegisterSlot(result, rax);
5333 }
5336 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
5337 class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
5338 public:
5339 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
5340 : LDeferredCode(codegen), instr_(instr) { }
5341 virtual void Generate() V8_OVERRIDE {
5342 codegen()->DoDeferredStringCharFromCode(instr_);
5343 }
5344 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5345 private:
5346 LStringCharFromCode* instr_;
5347 };
5349 DeferredStringCharFromCode* deferred =
5350 new(zone()) DeferredStringCharFromCode(this, instr);
5352 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
5353 Register char_code = ToRegister(instr->char_code());
5354 Register result = ToRegister(instr->result());
5355 ASSERT(!char_code.is(result));
5357 __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
5358 __ j(above, deferred->entry());
5359 __ movsxlq(char_code, char_code);
5360 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
5361 __ movp(result, FieldOperand(result,
5362 char_code, times_pointer_size,
5363 FixedArray::kHeaderSize));
5364 __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
5365 __ j(equal, deferred->entry());
5366 __ bind(deferred->exit());
5367 }
5370 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
5371 Register char_code = ToRegister(instr->char_code());
5372 Register result = ToRegister(instr->result());
5374 // TODO(3095996): Get rid of this. For now, we need to make the
5375 // result register contain a valid pointer because it is already
5376 // contained in the register pointer map.
5377 __ Set(result, 0);
5379 PushSafepointRegistersScope scope(this);
5380 __ Integer32ToSmi(char_code, char_code);
5381 __ Push(char_code);
5382 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
5383 __ StoreToSafepointRegisterSlot(result, rax);
5384 }
5387 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
5388 LOperand* input = instr->value();
5389 ASSERT(input->IsRegister() || input->IsStackSlot());
5390 LOperand* output = instr->result();
5391 ASSERT(output->IsDoubleRegister());
5392 if (input->IsRegister()) {
5393 __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
5394 } else {
5395 __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
5396 }
5397 }
5400 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
5401 LOperand* input = instr->value();
5402 LOperand* output = instr->result();
5403 LOperand* temp = instr->temp();
5405 __ LoadUint32(ToDoubleRegister(output),
5406 ToRegister(input),
5407 ToDoubleRegister(temp));
5408 }
5411 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
5412 LOperand* input = instr->value();
5413 ASSERT(input->IsRegister() && input->Equals(instr->result()));
5414 Register reg = ToRegister(input);
5416 __ Integer32ToSmi(reg, reg);
5417 }
5420 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
5421 class DeferredNumberTagU V8_FINAL : public LDeferredCode {
5422 public:
5423 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
5424 : LDeferredCode(codegen), instr_(instr) { }
5425 virtual void Generate() V8_OVERRIDE {
5426 codegen()->DoDeferredNumberTagU(instr_);
5427 }
5428 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5429 private:
5430 LNumberTagU* instr_;
5431 };
5433 LOperand* input = instr->value();
5434 ASSERT(input->IsRegister() && input->Equals(instr->result()));
5435 Register reg = ToRegister(input);
5437 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
5438 __ cmpl(reg, Immediate(Smi::kMaxValue));
5439 __ j(above, deferred->entry());
5440 __ Integer32ToSmi(reg, reg);
5441 __ bind(deferred->exit());
5442 }
5445 void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
5446 Label done, slow;
5447 Register reg = ToRegister(instr->value());
5448 Register tmp = ToRegister(instr->temp1());
5449 XMMRegister temp_xmm = ToDoubleRegister(instr->temp2());
5451 // Load value into temp_xmm which will be preserved across potential call to
5452 // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
5453 // XMM registers on x64).
5454 XMMRegister xmm_scratch = double_scratch0();
5455 __ LoadUint32(temp_xmm, reg, xmm_scratch);
5457 if (FLAG_inline_new) {
5458 __ AllocateHeapNumber(reg, tmp, &slow);
5459 __ jmp(&done, Label::kNear);
5460 }
5462 // Slow case: Call the runtime system to do the number allocation.
5463 __ bind(&slow);
5464 {
5465 // Put a valid pointer value in the stack slot where the result
5466 // register is stored, as this register is in the pointer map, but contains
5467 // an integer value.
5468 __ StoreToSafepointRegisterSlot(reg, Immediate(0));
5470 // Preserve the value of all registers.
5471 PushSafepointRegistersScope scope(this);
5473 // NumberTagU uses the context from the frame, rather than
5474 // the environment's HContext or HInlinedContext value.
5475 // They only call Runtime::kHiddenAllocateHeapNumber.
5476 // The corresponding HChange instructions are added in a phase that does
5477 // not have easy access to the local context.
5478 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
5479 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
5480 RecordSafepointWithRegisters(
5481 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5482 __ StoreToSafepointRegisterSlot(reg, rax);
5483 }
5485 // Done. Put the value in temp_xmm into the value of the allocated heap
5486 // number.
5487 __ bind(&done);
5488 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
5489 }
5492 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
5493 class DeferredNumberTagD V8_FINAL : public LDeferredCode {
5494 public:
5495 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
5496 : LDeferredCode(codegen), instr_(instr) { }
5497 virtual void Generate() V8_OVERRIDE {
5498 codegen()->DoDeferredNumberTagD(instr_);
5499 }
5500 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5501 private:
5502 LNumberTagD* instr_;
5503 };
5505 XMMRegister input_reg = ToDoubleRegister(instr->value());
5506 Register reg = ToRegister(instr->result());
5507 Register tmp = ToRegister(instr->temp());
5509 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
5510 if (FLAG_inline_new) {
5511 __ AllocateHeapNumber(reg, tmp, deferred->entry());
5512 } else {
5513 __ jmp(deferred->entry());
5514 }
5515 __ bind(deferred->exit());
5516 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
5517 }
5520 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
5521 // TODO(3095996): Get rid of this. For now, we need to make the
5522 // result register contain a valid pointer because it is already
5523 // contained in the register pointer map.
5524 Register reg = ToRegister(instr->result());
5525 __ Move(reg, Smi::FromInt(0));
5527 {
5528 PushSafepointRegistersScope scope(this);
5529 // NumberTagD uses the context from the frame, rather than
5530 // the environment's HContext or HInlinedContext value.
5531 // They only call Runtime::kHiddenAllocateHeapNumber.
5532 // The corresponding HChange instructions are added in a phase that does
5533 // not have easy access to the local context.
5534 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
5535 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
5536 RecordSafepointWithRegisters(
5537 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5538 __ movp(kScratchRegister, rax);
5539 }
5540 __ movp(reg, kScratchRegister);
5541 }
5544 void LCodeGen::DoDeferredSIMD128ToTagged(LSIMD128ToTagged* instr,
5545 Runtime::FunctionId id) {
5546 // TODO(3095996): Get rid of this. For now, we need to make the
5547 // result register contain a valid pointer because it is already
5548 // contained in the register pointer map.
5549 Register reg = ToRegister(instr->result());
5550 __ Move(reg, Smi::FromInt(0));
5552 {
5553 PushSafepointRegistersScope scope(this);
5554 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
5555 __ CallRuntimeSaveDoubles(id);
5556 RecordSafepointWithRegisters(
5557 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5558 __ movp(kScratchRegister, rax);
5559 }
5560 __ movp(reg, kScratchRegister);
5561 }
5564 template<class T>
5565 void LCodeGen::HandleSIMD128ToTagged(LSIMD128ToTagged* instr) {
5566 class DeferredSIMD128ToTagged V8_FINAL : public LDeferredCode {
5567 public:
5568 DeferredSIMD128ToTagged(LCodeGen* codegen,
5569 LSIMD128ToTagged* instr,
5570 Runtime::FunctionId id)
5571 : LDeferredCode(codegen), instr_(instr), id_(id) { }
5572 virtual void Generate() V8_OVERRIDE {
5573 codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
5574 }
5575 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5576 private:
5577 LSIMD128ToTagged* instr_;
5578 Runtime::FunctionId id_;
5579 };
5581 XMMRegister input_reg = ToSIMD128Register(instr->value());
5582 Register reg = ToRegister(instr->result());
5583 Register tmp = ToRegister(instr->temp());
5585 DeferredSIMD128ToTagged* deferred =
5586 new(zone()) DeferredSIMD128ToTagged(this, instr,
5587 static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()));
5588 if (FLAG_inline_new) {
5589 __ AllocateSIMDHeapObject(T::kSize, reg, tmp, deferred->entry(),
5590 static_cast<Heap::RootListIndex>(T::kMapRootIndex()));
5591 } else {
5592 __ jmp(deferred->entry());
5593 }
5594 __ bind(deferred->exit());
5595 __ movups(FieldOperand(reg, T::kValueOffset), input_reg);
5596 }
5599 void LCodeGen::DoSIMD128ToTagged(LSIMD128ToTagged* instr) {
5600 if (instr->value()->IsFloat32x4Register()) {
5601 HandleSIMD128ToTagged<Float32x4>(instr);
5602 } else {
5603 ASSERT(instr->value()->IsInt32x4Register());
5604 HandleSIMD128ToTagged<Int32x4>(instr);
5605 }
5606 }
5609 void LCodeGen::DoSmiTag(LSmiTag* instr) {
5610 HChange* hchange = instr->hydrogen();
5611 Register input = ToRegister(instr->value());
5612 Register output = ToRegister(instr->result());
5613 if (hchange->CheckFlag(HValue::kCanOverflow) &&
5614 hchange->value()->CheckFlag(HValue::kUint32)) {
5615 __ testl(input, input);
5616 DeoptimizeIf(sign, instr->environment());
5617 }
5618 __ Integer32ToSmi(output, input);
5619 if (hchange->CheckFlag(HValue::kCanOverflow) &&
5620 !hchange->value()->CheckFlag(HValue::kUint32)) {
5621 DeoptimizeIf(overflow, instr->environment());
5622 }
5623 }
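// A kUint32 value only fits in a smi if it also fits in a signed int32,
// i.e. if bit 31 is clear; testl sets SF from that bit, and the deopt on
// `sign` above rejects values >= 2^31. In effect:
//
//   if ((int32_t)value < 0) deopt();   // uint32 too large for a smi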
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  ASSERT(instr->value()->Equals(instr->result()));
  Register input = ToRegister(instr->value());
  if (instr->needs_check()) {
    Condition is_smi = __ CheckSmi(input);
    DeoptimizeIf(NegateCondition(is_smi), instr->environment());
  } else {
    __ AssertSmi(input);
  }
  __ SmiToInteger32(input, input);
}


void LCodeGen::EmitNumberUntagD(Register input_reg,
                                XMMRegister result_reg,
                                bool can_convert_undefined_to_nan,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env,
                                NumberUntagDMode mode) {
  Label convert, load_smi, done;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);

    // Heap number map check.
    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);

    // On x64 it is safe to load at the heap number offset before evaluating
    // the map check, since all heap objects are at least two words long.
    __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));

    if (can_convert_undefined_to_nan) {
      __ j(not_equal, &convert, Label::kNear);
    } else {
      DeoptimizeIf(not_equal, env);
    }

    if (deoptimize_on_minus_zero) {
      XMMRegister xmm_scratch = double_scratch0();
      __ xorps(xmm_scratch, xmm_scratch);
      __ ucomisd(xmm_scratch, result_reg);
      __ j(not_equal, &done, Label::kNear);
      __ movmskpd(kScratchRegister, result_reg);
      __ testq(kScratchRegister, Immediate(1));
      DeoptimizeIf(not_zero, env);
    }
    __ jmp(&done, Label::kNear);

    if (can_convert_undefined_to_nan) {
      __ bind(&convert);

      // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
      __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
      DeoptimizeIf(not_equal, env);

      __ xorps(result_reg, result_reg);
      __ divsd(result_reg, result_reg);
      __ jmp(&done, Label::kNear);
    }
  } else {
    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
  }

  // Smi to XMM conversion.
  __ bind(&load_smi);
  __ SmiToInteger32(kScratchRegister, input_reg);
  __ Cvtlsi2sd(result_reg, kScratchRegister);
  __ bind(&done);
}


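// Computing NaN as 0/0 (the xorps + divsd pair above) avoids materializing
// a NaN constant: per IEEE 754, dividing +0.0 by +0.0 produces a quiet NaN,
// which is all that "undefined converts to NaN" requires here.

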
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
  Register input_reg = ToRegister(instr->value());

  if (instr->truncating()) {
    Label no_heap_number, check_bools, check_false;

    // Heap number map check.
    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &no_heap_number, Label::kNear);
    __ TruncateHeapNumberToI(input_reg, input_reg);
    __ jmp(done);

    __ bind(&no_heap_number);
    // Check for oddballs: for truncating conversions, undefined and false
    // convert to zero, and true converts to one.
    __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
    __ j(not_equal, &check_bools, Label::kNear);
    __ Set(input_reg, 0);
    __ jmp(done);

    __ bind(&check_bools);
    __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
    __ j(not_equal, &check_false, Label::kNear);
    __ Set(input_reg, 1);
    __ jmp(done);

    __ bind(&check_false);
    __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
    __ RecordComment("Deferred TaggedToI: cannot truncate");
    DeoptimizeIf(not_equal, instr->environment());
    __ Set(input_reg, 0);
    __ jmp(done);
  } else {
    Label bailout;
    XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
    __ TaggedToI(input_reg, input_reg, xmm_temp,
        instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);

    __ jmp(done);
    __ bind(&bailout);
    DeoptimizeIf(no_condition, instr->environment());
  }
}


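// The truncating path above implements the ECMAScript ToInt32 oddball cases
// inline: undefined and false become 0, true becomes 1, and heap numbers go
// through TruncateHeapNumberToI; anything else (strings, other objects)
// deoptimizes rather than truncating.

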
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredTaggedToI(instr_, done());
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));
  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiToInteger32(input_reg, input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
    __ JumpIfNotSmi(input_reg, deferred->entry());
    __ SmiToInteger32(input_reg, input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  XMMRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->can_convert_undefined_to_nan(),
                   instr->hydrogen()->deoptimize_on_minus_zero(),
                   instr->environment(),
                   mode);
}


template<class T>
void LCodeGen::HandleTaggedToSIMD128(LTaggedToSIMD128* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsSIMD128Register());

  Register input_reg = ToRegister(input);
  XMMRegister result_reg = ToSIMD128Register(result);

  Condition cc = masm()->CheckSmi(input_reg);
  DeoptimizeIf(cc, instr->environment());
  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                 static_cast<Heap::RootListIndex>(T::kMapRootIndex()));
  DeoptimizeIf(not_equal, instr->environment());
  __ movups(result_reg, FieldOperand(input_reg, T::kValueOffset));
}


void LCodeGen::DoTaggedToSIMD128(LTaggedToSIMD128* instr) {
  if (instr->representation().IsFloat32x4()) {
    HandleTaggedToSIMD128<Float32x4>(instr);
  } else {
    ASSERT(instr->representation().IsInt32x4());
    HandleTaggedToSIMD128<Int32x4>(instr);
  }
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, input_reg);
  } else {
    Label bailout, done;
    XMMRegister xmm_scratch = double_scratch0();
    __ DoubleToI(result_reg, input_reg, xmm_scratch,
        instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);

    __ jmp(&done, Label::kNear);
    __ bind(&bailout);
    DeoptimizeIf(no_condition, instr->environment());
    __ bind(&done);
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  Label bailout, done;
  XMMRegister xmm_scratch = double_scratch0();
  __ DoubleToI(result_reg, input_reg, xmm_scratch,
      instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);

  __ jmp(&done, Label::kNear);
  __ bind(&bailout);
  DeoptimizeIf(no_condition, instr->environment());
  __ bind(&done);

  __ Integer32ToSmi(result_reg, result_reg);
  DeoptimizeIf(overflow, instr->environment());
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  Condition cc = masm()->CheckSmi(ToRegister(input));
  DeoptimizeIf(NegateCondition(cc), instr->environment());
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    LOperand* input = instr->value();
    Condition cc = masm()->CheckSmi(ToRegister(input));
    DeoptimizeIf(cc, instr->environment());
  }
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());

  __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
            Immediate(static_cast<int8_t>(first)));

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr->environment());
    } else {
      DeoptimizeIf(below, instr->environment());
      // Omit the check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
                Immediate(static_cast<int8_t>(last)));
        DeoptimizeIf(above, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
               Immediate(mask));
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
    } else {
      __ movzxbl(kScratchRegister,
                 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
      __ andb(kScratchRegister, Immediate(mask));
      __ cmpb(kScratchRegister, Immediate(tag));
      DeoptimizeIf(not_equal, instr->environment());
    }
  }
}


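// The power-of-two fast path above works because a single-bit mask can be
// tested in place: with (hypothetical) mask 0x10 and tag 0x10, the check
// compiles to one testb plus a deopt-if-zero, whereas a composite mask needs
// the full movzx/and/cmp sequence before comparing against the tag.

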
void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  __ Cmp(reg, instr->hydrogen()->object().handle());
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ Push(object);
    __ Set(rsi, 0);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);

    // A Smi result (zero smi tag) means the migration failed.
    __ testp(rax, Immediate(kSmiTagMask));
  }
  DeoptimizeIf(zero, instr->environment());
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->CanOmitMapChecks()) return;

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->has_migration_target()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
  Label success;
  for (int i = 0; i < map_set.size() - 1; i++) {
    Handle<Map> map = map_set.at(i).handle();
    __ CompareMap(reg, map);
    __ j(equal, &success, Label::kNear);
  }

  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
  __ CompareMap(reg, map);
  if (instr->hydrogen()->has_migration_target()) {
    __ j(not_equal, deferred->entry());
  } else {
    DeoptimizeIf(not_equal, instr->environment());
  }

  __ bind(&success);
}


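// When the map set has a migration target, a failing check jumps to the
// deferred code, which calls Runtime::kTryMigrateInstance; a failed
// migration (Smi result) deoptimizes, while a successful one falls through
// to the check_maps label and re-runs the comparison loop.

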
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
  XMMRegister xmm_scratch = double_scratch0();
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register input_reg = ToRegister(instr->unclamped());
  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
  XMMRegister xmm_scratch = double_scratch0();
  Label is_smi, done, heap_number;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  __ JumpIfSmi(input_reg, &is_smi, dist);

  // Check for a heap number.
  __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ Cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr->environment());
  __ xorl(input_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // Heap number case.
  __ bind(&heap_number);
  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // Smi case.
  __ bind(&is_smi);
  __ SmiToInteger32(input_reg, input_reg);
  __ ClampUint8(input_reg);

  __ bind(&done);
}


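// ClampDoubleToUint8 is expected to round the double to the nearest integer
// and clamp the result to [0, 255]; together with the undefined -> 0 path
// above, this matches the semantics of clamped typed-array element stores
// (e.g. Uint8ClampedArray).

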
void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ movq(result_reg, value_reg);
    __ shr(result_reg, Immediate(32));
  } else {
    __ movd(result_reg, value_reg);
  }
}


void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  XMMRegister result_reg = ToDoubleRegister(instr->result());
  XMMRegister xmm_scratch = double_scratch0();
  __ movd(result_reg, hi_reg);
  __ psllq(result_reg, 32);
  __ movd(xmm_scratch, lo_reg);
  __ orps(result_reg, xmm_scratch);
}


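// The double is assembled bitwise: movd places the hi word in the low 32
// bits, psllq shifts it into the high half, and orps merges in the lo word.
// For illustration, hi = 0x3FF00000 and lo = 0 reconstitute the bit pattern
// of 1.0.

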
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate V8_FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredAllocate(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
    } else {
      __ jmp(deferred->entry());
    }
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ movl(temp, Immediate((size / kPointerSize) - 1));
    } else {
      temp = ToRegister(instr->size());
      __ sar(temp, Immediate(kPointerSizeLog2));
      __ decl(temp);
    }
    Label loop;
    __ bind(&loop);
    __ Move(FieldOperand(result, temp, times_pointer_size, 0),
        isolate()->factory()->one_pointer_filler_map());
    __ decl(temp);
    __ j(not_zero, &loop);
  }
}


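// Prefilling with the one-pointer filler map keeps the heap iterable: if a
// GC walks the space before the caller initializes the new object, every
// word already parses as a valid (filler) object.

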
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    __ Integer32ToSmi(size, size);
    __ Push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Push(Smi::FromInt(size));
  }

  int flags = 0;
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(rax));
  __ Push(rax);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  Label materialized;
  // Registers will be used as follows:
  // rcx = literals array.
  // rbx = regexp literal.
  // rax = regexp literal clone.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ Move(rcx, instr->hydrogen()->literals());
  __ movp(rbx, FieldOperand(rcx, literal_offset));
  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &materialized, Label::kNear);

  // Create the regexp literal using a runtime function.
  // The result will be in rax.
  __ Push(rcx);
  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
  __ Push(instr->hydrogen()->pattern());
  __ Push(instr->hydrogen()->flags());
  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
  __ movp(rbx, rax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated, Label::kNear);

  __ bind(&runtime_allocate);
  __ Push(rbx);
  __ Push(Smi::FromInt(size));
  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
  __ Pop(rbx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll the copy loop once for better throughput.)
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ movp(rdx, FieldOperand(rbx, i));
    __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
    __ movp(FieldOperand(rax, i), rdx);
    __ movp(FieldOperand(rax, i + kPointerSize), rcx);
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
    __ movp(FieldOperand(rax, size - kPointerSize), rdx);
  }
}


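// The copy loop moves two pointer-sized words per iteration; since `size`
// is a compile-time constant here, the trailing `if` statically picks up
// the odd word when the object size is not a multiple of 2 * kPointerSize.

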
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  // Use the fast-case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
                            instr->hydrogen()->is_generator());
    __ Move(rbx, instr->hydrogen()->shared_info());
    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  } else {
    __ Push(rsi);
    __ Push(instr->hydrogen()->shared_info());
    __ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
                            Heap::kFalseValueRootIndex);
    CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  LOperand* input = instr->value();
  EmitPushTaggedOperand(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  ASSERT(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    __ Push(ToHandle(LConstantOperand::cast(operand)));
  } else if (operand->IsRegister()) {
    __ Push(ToRegister(operand));
  } else {
    __ Push(ToOperand(operand));
  }
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Condition final_branch_condition = EmitTypeofIs(instr, input);
  if (final_branch_condition != no_condition) {
    EmitBranch(instr, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Handle<String> type_name = instr->type_literal();
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);
  int next_block = GetNextEmittedBlock();

  Label::Distance true_distance = left_block == next_block ? Label::kNear
                                                           : Label::kFar;
  Label::Distance false_distance = right_block == next_block ? Label::kNear
                                                             : Label::kFar;
  Condition final_branch_condition = no_condition;
  if (type_name->Equals(heap()->number_string())) {
    __ JumpIfSmi(input, true_label, true_distance);
    __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);

    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->float32x4_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FLOAT32x4_TYPE, input);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->int32x4_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, INT32x4_TYPE, input);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->string_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label, false_distance);
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else if (type_name->Equals(heap()->symbol_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = equal;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for undetectable objects => true.
    __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = not_zero;

  } else if (type_name->Equals(heap()->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label, true_distance);
    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->object_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    if (!FLAG_harmony_typeof) {
      __ CompareRoot(input, Heap::kNullValueRootIndex);
      __ j(equal, true_label, true_distance);
    }
    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
    __ j(below, false_label, false_distance);
    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ j(above, false_label, false_distance);
    // Check for undetectable objects => false.
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else {
    __ jmp(false_label, false_distance);
  }

  return final_branch_condition;
}


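// Two details worth noting in the dispatch above: undetectable objects
// (document.all-style hosts) must answer "undefined" rather than "object"
// or "string", hence the Map::kIsUndetectable tests; and an unrecognized
// type literal jumps straight to the false label and returns no_condition,
// in which case the caller emits no branch at all.

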
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->temp());

  EmitIsConstructCall(temp);
  EmitBranch(instr, equal);
}


void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ movp(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(not_equal, &check_frame_marker, Label::kNear);
  __ movp(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Smi::FromInt(StackFrame::CONSTRUCT));
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


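// Worked example with hypothetical offsets: if last_lazy_deopt_pc_ is 100,
// space_needed is 5, and the current pc is 102, three bytes of Nop padding
// are emitted (100 + 5 - 102 = 3), so the patcher always has room to
// overwrite the previous lazy-bailout site.

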
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons
  // (the needed return address), even though the implementation of LAZY and
  // EAGER is now identical. When LAZY is eventually completely folded into
  // EAGER, remove the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(no_condition, instr->environment(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
  RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(above_equal, &done, Label::kNear);

    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(rsi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform the stack overflow check before jumping, if this goto needs it.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(equal, instr->environment());

  Register null_value = rdi;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmpp(rax, null_value);
  DeoptimizeIf(equal, instr->environment());

  Condition cc = masm()->CheckSmi(rax);
  DeoptimizeIf(cc, instr->environment());

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
  DeoptimizeIf(below_equal, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);

  __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ Push(rax);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kMetaMapRootIndex);
  DeoptimizeIf(not_equal, instr->environment());
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Cmp(result, Smi::FromInt(0));
  __ j(not_equal, &load_cache, Label::kNear);
  __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ movp(result,
          FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ movp(result,
          FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  Condition cc = masm()->CheckSmi(result);
  DeoptimizeIf(cc, instr->environment());
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmpp(ToRegister(instr->map()),
          FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  Label out_of_object, done;
  __ SmiToInteger32(index, index);
  __ cmpl(index, Immediate(0));
  __ j(less, &out_of_object, Label::kNear);
  __ movp(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ negl(index);
  // The index is now equal to the out-of-object property index plus 1.
  __ movp(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}


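// The incoming field index uses a signed encoding: non-negative values
// address in-object slots relative to JSObject::kHeaderSize, while an
// out-of-object property index i is encoded as -(i + 1). After the negl
// above the register holds i + 1, which the final operand corrects with
// the "- kPointerSize" adjustment.

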
} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64