// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "v8.h"

#if V8_TARGET_ARCH_X64

#include "x64/lithium-codegen-x64.h"
#include "code-stubs.h"
#include "stub-cache.h"
#include "hydrogen-osr.h"

namespace v8 {
namespace internal {

// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }

  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}

  virtual void AfterCall() const V8_OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateJumpTable() &&
      GenerateSafepointTable();
}

void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
}

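// Note on the helper below: large frames are committed a page at a time, and
// writing one word into each page below rsp forces the OS to map the page
// before the generated code stores into it out of order. This appears to
// matter mainly on Windows, where the stack grows only via sequential
// guard-page faults, hence the _MSC_VER guard.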
#ifdef _MSC_VER
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ movp(Operand(rsp, offset), rax);
  }
}
#endif

void LCodeGen::SaveCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ movsd(MemOperand(rsp, count * kDoubleSize),
             XMMRegister::FromAllocationIndex(save_iterator.Current()));
    save_iterator.Advance();
    count++;
  }
}

void LCodeGen::RestoreCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
             MemOperand(rsp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}

bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ int3();
    }
#endif

    // Sloppy mode functions need to replace the receiver with the global proxy
    // when called as functions (without an explicit receiver object).
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      Label ok;
      StackArgumentsAccessor args(rsp, scope()->num_parameters());
      __ movp(rcx, args.GetReceiverOperand());

      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
      __ j(not_equal, &ok, Label::kNear);

      __ movp(rcx, GlobalObjectOperand());
      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));

      __ movp(args.GetReceiverOperand(), rcx);

      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    ASSERT(!frame_is_built_);
    frame_is_built_ = true;
    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
    info()->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      MakeSureStackPagesMapped(slots * kPointerSize);
#endif
      __ Push(rax);
      __ Set(rax, slots);
      __ movq(kScratchRegister, kSlotsZapValue);
      Label loop;
      __ bind(&loop);
      __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
              kScratchRegister);
      __ decl(rax);
      __ j(not_zero, &loop);
      __ Pop(rax);
    } else {
      __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      MakeSureStackPagesMapped(slots * kPointerSize);
#endif
    }

    if (info()->saves_caller_doubles()) {
      SaveCallerDoubles();
    }
  }

  // Possibly allocate a local context.
  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is still in rdi.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
    } else {
      __ Push(rdi);
      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in rax. It replaces the context passed to us.
    // It's saved in the stack and kept live in rsi.
    __ movp(rsi, rax);
    __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);

    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ movp(rax, Operand(rbp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ movp(Operand(rsi, context_offset), rax);
        // Update the write barrier. This clobbers rax and rbx.
        __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}

void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 0);
  __ subp(rsp, Immediate(slots * kPointerSize));
}

void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}

void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
  if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() &&
      instr->hydrogen_value()->representation().IsInteger32() &&
      instr->result()->IsRegister()) {
    __ AssertZeroExtended(ToRegister(instr->result()));
  }

  if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
    if (instr->result()->IsRegister()) {
      Register result_reg = ToRegister(instr->result());
      __ movsxlq(result_reg, result_reg);
    } else {
      // Sign extend the 32-bit result in the stack slots.
      ASSERT(instr->result()->IsStackSlot());
      Operand src = ToOperand(instr->result());
      __ movsxlq(kScratchRegister, src);
      __ movq(src, kScratchRegister);
    }
  }
}

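// Deopt jumps that cannot simply call the deoptimizer entry directly (because
// a frame must be built first, or caller doubles restored) are funneled
// through the jump table emitted below; DeoptimizeIf() is what adds entries.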
bool LCodeGen::GenerateJumpTable() {
  Label needs_frame;
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  for (int i = 0; i < jump_table_.length(); i++) {
    __ bind(&jump_table_[i].label);
    Address entry = jump_table_[i].address;
    Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (jump_table_[i].needs_frame) {
      ASSERT(!info()->saves_caller_doubles());
      __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ movp(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
        __ pushq(rbp);
        __ movp(rbp, rsp);
        __ Push(rsi);
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ Move(rsi, Smi::FromInt(StackFrame::STUB));
        __ Push(rsi);
        __ movp(rsi, MemOperand(rsp, kPointerSize));
        __ call(kScratchRegister);
      }
    } else {
      if (info()->saves_caller_doubles()) {
        ASSERT(info()->IsStub());
        RestoreCallerDoubles();
      }
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
  return !is_aborted();
}

bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that rsi isn't trashed.
        __ pushq(rbp);  // Caller's frame pointer.
        __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
        __ Push(Smi::FromInt(StackFrame::STUB));
        __ leap(rbp, Operand(rsp, 2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        frame_is_built_ = false;
        __ movp(rsp, rbp);
        __ popq(rbp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}

bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}

Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToSIMD128Register(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


XMMRegister LCodeGen::ToFloat32x4Register(LOperand* op) const {
  ASSERT(op->IsFloat32x4Register());
  return ToSIMD128Register(op->index());
}


XMMRegister LCodeGen::ToFloat64x2Register(LOperand* op) const {
  ASSERT(op->IsFloat64x2Register());
  return ToSIMD128Register(op->index());
}


XMMRegister LCodeGen::ToInt32x4Register(LOperand* op) const {
  ASSERT(op->IsInt32x4Register());
  return ToSIMD128Register(op->index());
}


XMMRegister LCodeGen::ToSIMD128Register(LOperand* op) const {
  ASSERT(op->IsFloat32x4Register() || op->IsFloat64x2Register() ||
         op->IsInt32x4Register());
  return ToSIMD128Register(op->index());
}

bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
      chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
}


bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}

static int ArgumentsOffsetWithoutFrame(int index) {
  ASSERT(index < 0);
  return -(index + 1) * kPointerSize + kPCOnStackSize;
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  // Does not handle registers. In X64 assembler, plain registers are not
  // representable as an Operand.
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot() ||
         op->IsFloat32x4StackSlot() || op->IsFloat64x2StackSlot() ||
         op->IsInt32x4StackSlot());
  if (NeedsEagerFrame()) {
    return Operand(rbp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack pointer.
    return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}

void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}

void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsFloat32x4StackSlot()) {
    translation->StoreSIMD128StackSlot(op->index(),
                                       Translation::FLOAT32x4_STACK_SLOT);
  } else if (op->IsFloat64x2StackSlot()) {
    translation->StoreSIMD128StackSlot(op->index(),
                                       Translation::FLOAT64x2_STACK_SLOT);
  } else if (op->IsInt32x4StackSlot()) {
    translation->StoreSIMD128StackSlot(op->index(),
                                       Translation::INT32x4_STACK_SLOT);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsFloat32x4Register()) {
    XMMRegister reg = ToFloat32x4Register(op);
    translation->StoreSIMD128Register(reg, Translation::FLOAT32x4_REGISTER);
  } else if (op->IsFloat64x2Register()) {
    XMMRegister reg = ToFloat64x2Register(op);
    translation->StoreSIMD128Register(reg, Translation::FLOAT64x2_REGISTER);
  } else if (op->IsInt32x4Register()) {
    XMMRegister reg = ToInt32x4Register(op);
    translation->StoreSIMD128Register(reg, Translation::INT32x4_REGISTER);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               int argc) {
  ASSERT(instr != NULL);
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
}

void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);
  ASSERT(instr->HasPointerMap());

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
}

void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(rsi)) {
      __ movp(rsi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ movp(rsi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}

void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, environment->zone());
  }
}

void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ pushfq();
    __ Push(rax);
    Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
    __ movl(rax, count_operand);
    __ subl(rax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ movl(rax, Immediate(FLAG_deopt_every_n_times));
    __ movl(count_operand, rax);
    __ Pop(rax);
    __ popfq();
    ASSERT(frame_is_built_);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ movl(count_operand, rax);
    __ Pop(rax);
    __ popfq();
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) {
      __ j(NegateCondition(cc), &done, Label::kNear);
    }
    __ int3();
    __ bind(&done);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (cc == no_condition && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        jump_table_.last().address != entry ||
        jump_table_.last().needs_frame != !frame_is_built_ ||
        jump_table_.last().bailout_type != bailout_type) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}

void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(cc, environment, bailout_type);
}

void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}

int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}

void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode, int argc) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), argc, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(kind == expected_safepoint_kind_);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();

  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}

void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}

void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}

void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}

void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->result()).is(rax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}

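// Illustrative example of the mask computed below: for divisor == -4 the
// mask is -(-4 + 1) == 3, so a dividend of -7 is negated to 7, masked to 3,
// and negated back to -3, matching JavaScript's -7 % -4 == -3.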
void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ testl(dividend, dividend);
    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
    // Note that this is correct even for kMinInt operands.
    __ negl(dividend);
    __ andl(dividend, Immediate(mask));
    __ negl(dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(zero, instr->environment());
    }
    __ jmp(&done, Label::kNear);
  }

  __ bind(&dividend_is_not_negative);
  __ andl(dividend, Immediate(mask));
  __ bind(&done);
}

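// TruncatingDiv() (in the x64 macro assembler) computes the truncated
// quotient dividend / Abs(divisor) via a magic-number multiplication and
// leaves it in rdx; the function below then reconstructs the remainder as
// dividend - quotient * Abs(divisor).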
void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(ToRegister(instr->result()).is(rax));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr->environment());
    return;
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  __ imull(rdx, rdx, Immediate(Abs(divisor)));
  __ movl(rax, dividend);
  __ subl(rax, rdx);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ j(not_zero, &remainder_not_zero, Label::kNear);
    __ cmpl(dividend, Immediate(0));
    DeoptimizeIf(less, instr->environment());
    __ bind(&remainder_not_zero);
  }
}

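// Reminder on the idivl idiom used below: cdq sign-extends eax into edx, and
// idivl then divides the 64-bit value edx:eax by its operand, leaving the
// quotient in eax and the remainder in edx.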
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();

  Register left_reg = ToRegister(instr->left());
  ASSERT(left_reg.is(rax));
  Register right_reg = ToRegister(instr->right());
  ASSERT(!right_reg.is(rax));
  ASSERT(!right_reg.is(rdx));
  Register result_reg = ToRegister(instr->result());
  ASSERT(result_reg.is(rdx));

  Label done;
  // Check for x % 0, idiv would signal a divide error. We have to
  // deopt in this case because we can't return a NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(right_reg, right_reg);
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for kMinInt % -1, idiv would signal a divide error. We
  // have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ cmpl(left_reg, Immediate(kMinInt));
    __ j(not_zero, &no_overflow_possible, Label::kNear);
    __ cmpl(right_reg, Immediate(-1));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ Set(result_reg, 0);
      __ jmp(&done, Label::kNear);
    }
    __ bind(&no_overflow_possible);
  }

  // Sign extend dividend in eax into edx:eax, since we are using only the low
  // 32 bits of the values.
  __ cdq();

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive_left;
    __ testl(left_reg, left_reg);
    __ j(not_sign, &positive_left, Label::kNear);
    __ idivl(right_reg);
    __ testl(result_reg, result_reg);
    DeoptimizeIf(zero, instr->environment());
    __ jmp(&done, Label::kNear);
    __ bind(&positive_left);
  }
  __ idivl(right_reg);
  __ bind(&done);
}

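// A plain arithmetic shift already floors: e.g. -7 >> 2 == -2 == floor(-7/4),
// whereas truncating division would give -1. That is why positive power-of-2
// divisors need no fixup below, while negative ones negate first and then
// handle the kMinInt and -0 edge cases.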
void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(dividend.is(ToRegister(instr->result())));

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  if (divisor == 1) return;
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ sarl(dividend, Immediate(shift));
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ negl(dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(zero, instr->environment());
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ sarl(dividend, Immediate(shift));
    return;
  }

  // Note that we could emit branch-free code, but that would need one more
  // register.
  if (divisor == -1) {
    DeoptimizeIf(overflow, instr->environment());
    return;
  }

  Label not_kmin_int, done;
  __ j(no_overflow, &not_kmin_int, Label::kNear);
  __ movl(dividend, Immediate(kMinInt / divisor));
  __ jmp(&done, Label::kNear);
  __ bind(&not_kmin_int);
  __ sarl(dividend, Immediate(shift));
  __ bind(&done);
}

void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(ToRegister(instr->result()).is(rdx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr->environment());
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr->environment());
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(dividend, Abs(divisor));
    if (divisor < 0) __ negl(rdx);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp3());
  ASSERT(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
  Label needs_adjustment, done;
  __ cmpl(dividend, Immediate(0));
  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ negl(rdx);
  __ jmp(&done, Label::kNear);
  __ bind(&needs_adjustment);
  __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
  __ TruncatingDiv(temp, Abs(divisor));
  if (divisor < 0) __ negl(rdx);
  __ decl(rdx);
  __ bind(&done);
}

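// The fixup at the end of the function below implements
//   result += (remainder ^ divisor) >> 31;
// i.e. it subtracts 1 from the truncated quotient exactly when the remainder
// is non-zero and dividend and divisor have opposite signs, which turns
// truncating division into flooring division.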
// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());
  ASSERT(dividend.is(rax));
  ASSERT(remainder.is(rdx));
  ASSERT(result.is(rax));
  ASSERT(!divisor.is(rax));
  ASSERT(!divisor.is(rdx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(divisor, divisor);
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ testl(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ testl(divisor, divisor);
    DeoptimizeIf(sign, instr->environment());
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmpl(dividend, Immediate(kMinInt));
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmpl(divisor, Immediate(-1));
    DeoptimizeIf(zero, instr->environment());
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to rdx (= remainder).
  __ cdq();
  __ idivl(divisor);

  Label done;
  __ testl(remainder, remainder);
  __ j(zero, &done, Label::kNear);
  __ xorl(remainder, divisor);
  __ sarl(remainder, Immediate(31));
  __ addl(result, remainder);
  __ bind(&done);
}

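// The shift sequence below adds a bias of 2^shift - 1 to negative dividends
// before the arithmetic shift, converting floor to truncation. Worked
// example for dividend == -7, shift == 2: sar by 31 gives -1, shr by
// (32 - 2) extracts 3, -7 + 3 == -4, and -4 >> 2 == -1 == trunc(-7 / 4).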
void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
  ASSERT(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr->environment());
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmpl(dividend, Immediate(kMinInt));
    DeoptimizeIf(zero, instr->environment());
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ testl(dividend, Immediate(mask));
    DeoptimizeIf(not_zero, instr->environment());
  }
  __ Move(result, dividend);
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift > 0) {
    // The arithmetic shift is always OK, the 'if' is an optimization only.
    if (shift > 1) __ sarl(result, Immediate(31));
    __ shrl(result, Immediate(32 - shift));
    __ addl(result, dividend);
    __ sarl(result, Immediate(shift));
  }
  if (divisor < 0) __ negl(result);
}

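// As in DoModByConstI, the quotient produced by TruncatingDiv() lands in rdx.
// When not all uses truncate, the code below multiplies the quotient back and
// deopts unless it equals the dividend exactly, i.e. unless the division was
// remainder-free.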
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(ToRegister(instr->result()).is(rdx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr->environment());
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr->environment());
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ negl(rdx);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ movl(rax, rdx);
    __ imull(rax, rax, Immediate(divisor));
    __ subl(rax, dividend);
    DeoptimizeIf(not_equal, instr->environment());
  }
}

// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  ASSERT(dividend.is(rax));
  ASSERT(remainder.is(rdx));
  ASSERT(ToRegister(instr->result()).is(rax));
  ASSERT(!divisor.is(rax));
  ASSERT(!divisor.is(rdx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(divisor, divisor);
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ testl(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ testl(divisor, divisor);
    DeoptimizeIf(sign, instr->environment());
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmpl(dividend, Immediate(kMinInt));
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmpl(divisor, Immediate(-1));
    DeoptimizeIf(zero, instr->environment());
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to rdx (= remainder).
  __ cdq();
  __ idivl(divisor);

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ testl(remainder, remainder);
    DeoptimizeIf(not_zero, instr->environment());
  }
}

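// When a -0 bailout is possible, the function below first copies the original
// left operand into kScratchRegister: after the multiply overwrites 'left',
// the saved value (or the sign of the other operand) is what decides whether
// a zero result should really have been -0.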
void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ movp(kScratchRegister, left);
    } else {
      __ movl(kScratchRegister, left);
    }
  }

  bool can_overflow =
      instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  if (right->IsConstantOperand()) {
    int32_t right_value = ToInteger32(LConstantOperand::cast(right));
    if (right_value == -1) {
      __ negl(left);
    } else if (right_value == 0) {
      __ xorl(left, left);
    } else if (right_value == 2) {
      __ addl(left, left);
    } else if (!can_overflow) {
      // If the multiplication is known to not overflow, we
      // can use operations that don't set the overflow flag
      // correctly.
      switch (right_value) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ leal(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shll(left, Immediate(2));
          break;
        case 5:
          __ leal(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shll(left, Immediate(3));
          break;
        case 9:
          __ leal(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shll(left, Immediate(4));
          break;
        default:
          __ imull(left, left, Immediate(right_value));
          break;
      }
    } else {
      __ imull(left, left, Immediate(right_value));
    }
  } else if (right->IsStackSlot()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger64(left, left);
      __ imulp(left, ToOperand(right));
    } else {
      __ imull(left, ToOperand(right));
    }
  } else {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger64(left, left);
      __ imulp(left, ToRegister(right));
    } else {
      __ imull(left, ToRegister(right));
    }
  }

  if (can_overflow) {
    DeoptimizeIf(overflow, instr->environment());
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ testp(left, left);
    } else {
      __ testl(left, left);
    }
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      // Constant can't be represented as Smi due to immediate size limit.
      ASSERT(!instr->hydrogen_value()->representation().IsSmi());
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        DeoptimizeIf(no_condition, instr->environment());
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        __ cmpl(kScratchRegister, Immediate(0));
        DeoptimizeIf(less, instr->environment());
      }
    } else if (right->IsStackSlot()) {
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ orp(kScratchRegister, ToOperand(right));
      } else {
        __ orl(kScratchRegister, ToOperand(right));
      }
      DeoptimizeIf(sign, instr->environment());
    } else {
      // Test the non-zero operand for negative sign.
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ orp(kScratchRegister, ToRegister(right));
      } else {
        __ orl(kScratchRegister, ToRegister(right));
      }
      DeoptimizeIf(sign, instr->environment());
    }
    __ bind(&done);
  }
}

void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    int32_t right_operand = ToInteger32(LConstantOperand::cast(right));
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_OR:
        __ orl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_XOR:
        if (right_operand == int32_t(~0)) {
          __ notl(ToRegister(left));
        } else {
          __ xorl(ToRegister(left), Immediate(right_operand));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else if (right->IsStackSlot()) {
    switch (instr->op()) {
      case Token::BIT_AND:
        if (instr->IsInteger32()) {
          __ andl(ToRegister(left), ToOperand(right));
        } else {
          __ andp(ToRegister(left), ToOperand(right));
        }
        break;
      case Token::BIT_OR:
        if (instr->IsInteger32()) {
          __ orl(ToRegister(left), ToOperand(right));
        } else {
          __ orp(ToRegister(left), ToOperand(right));
        }
        break;
      case Token::BIT_XOR:
        if (instr->IsInteger32()) {
          __ xorl(ToRegister(left), ToOperand(right));
        } else {
          __ xorp(ToRegister(left), ToOperand(right));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    ASSERT(right->IsRegister());
    switch (instr->op()) {
      case Token::BIT_AND:
        if (instr->IsInteger32()) {
          __ andl(ToRegister(left), ToRegister(right));
        } else {
          __ andp(ToRegister(left), ToRegister(right));
        }
        break;
      case Token::BIT_OR:
        if (instr->IsInteger32()) {
          __ orl(ToRegister(left), ToRegister(right));
        } else {
          __ orp(ToRegister(left), ToRegister(right));
        }
        break;
      case Token::BIT_XOR:
        if (instr->IsInteger32()) {
          __ xorl(ToRegister(left), ToRegister(right));
        } else {
          __ xorp(ToRegister(left), ToRegister(right));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}

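// SHR is the one shift that can deopt: with a shift count of zero a logical
// shift leaves the sign bit intact, so a negative int32 input would denote a
// uint32 above kMaxInt that has no int32 representation; hence the 'negative'
// deopt checks in the function below.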
void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    ASSERT(ToRegister(right).is(rcx));

    switch (instr->op()) {
      case Token::ROR:
        __ rorl_cl(ToRegister(left));
        break;
      case Token::SAR:
        __ sarl_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shrl_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr->environment());
        }
        break;
      case Token::SHL:
        __ shll_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int32_t value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ rorl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sarl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr->environment());
        } else {
          __ shrl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi()) {
            __ shlp(ToRegister(left), Immediate(shift_count));
          } else {
            __ shll(ToRegister(left), Immediate(shift_count));
          }
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ subl(ToRegister(left),
            Immediate(ToInteger32(LConstantOperand::cast(right))));
  } else if (right->IsRegister()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ subp(ToRegister(left), ToRegister(right));
    } else {
      __ subl(ToRegister(left), ToRegister(right));
    }
  } else {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ subp(ToRegister(left), ToOperand(right));
    } else {
      __ subl(ToRegister(left), ToOperand(right));
    }
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}

void LCodeGen::DoConstantI(LConstantI* instr) {
  Register dst = ToRegister(instr->result());
  if (instr->value() == 0) {
    __ xorl(dst, dst);
  } else {
    __ movl(dst, Immediate(instr->value()));
  }
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Move(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  XMMRegister res = ToDoubleRegister(instr->result());
  double v = instr->value();
  uint64_t int_val = BitCast<uint64_t, double>(v);
  // Use xor to produce +0.0 in a fast and compact way, but avoid doing so
  // if the constant is -0.0.
  if (int_val == 0) {
    __ xorps(res, res);
  } else {
    Register tmp = ToRegister(instr->temp());
    __ Set(tmp, int_val);
    __ movq(res, tmp);
  }
}

void LCodeGen::DoConstantE(LConstantE* instr) {
  __ LoadAddress(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value(isolate());
  __ Move(ToRegister(instr->result()), value);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}

void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Smi* index = instr->index();
  Label runtime, done, not_date_object;
  ASSERT(object.is(result));
  ASSERT(object.is(rax));

  Condition cc = masm()->CheckSmi(object);
  DeoptimizeIf(cc, instr->environment());
  __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
  DeoptimizeIf(not_equal, instr->environment());

  if (index->value() == 0) {
    __ movp(result, FieldOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      Operand stamp_operand = __ ExternalOperand(stamp);
      __ movp(kScratchRegister, stamp_operand);
      __ cmpp(kScratchRegister, FieldOperand(object,
                                             JSDate::kCacheStampOffset));
      __ j(not_equal, &runtime, Label::kNear);
      __ movp(result, FieldOperand(object, JSDate::kValueOffset +
                                           kPointerSize * index->value()));
      __ jmp(&done, Label::kNear);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2);
    __ movp(arg_reg_1, object);
    __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}

Operand LCodeGen::BuildSeqStringOperand(Register string,
                                        LOperand* index,
                                        String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldOperand(string, SeqString::kHeaderSize + offset);
  }
  return FieldOperand(
      string, ToRegister(index),
      encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
      SeqString::kHeaderSize);
}

void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register result = ToRegister(instr->result());
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    __ Push(string);
    __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
    __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));

    __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
                              ? one_byte_seq_type : two_byte_seq_type));
    __ Check(equal, kUnexpectedStringType);
    __ Pop(string);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ movzxbl(result, operand);
  } else {
    __ movzxwl(result, operand);
  }
}

void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    Register value = ToRegister(instr->value());
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (instr->value()->IsConstantOperand()) {
    int value = ToInteger32(LConstantOperand::cast(instr->value()));
    ASSERT_LE(0, value);
    if (encoding == String::ONE_BYTE_ENCODING) {
      ASSERT_LE(value, String::kMaxOneByteCharCode);
      __ movb(operand, Immediate(value));
    } else {
      ASSERT_LE(value, String::kMaxUtf16CodeUnit);
      __ movw(operand, Immediate(value));
    }
  } else {
    Register value = ToRegister(instr->value());
    if (encoding == String::ONE_BYTE_ENCODING) {
      __ movb(operand, value);
    } else {
      __ movw(operand, value);
    }
  }
}

void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();

  Representation target_rep = instr->hydrogen()->representation();
  bool is_p = target_rep.IsSmi() || target_rep.IsExternal();

  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
    if (right->IsConstantOperand()) {
      ASSERT(!target_rep.IsSmi());  // No support for smi-immediates.
      int32_t offset = ToInteger32(LConstantOperand::cast(right));
      if (is_p) {
        __ leap(ToRegister(instr->result()),
                MemOperand(ToRegister(left), offset));
      } else {
        __ leal(ToRegister(instr->result()),
                MemOperand(ToRegister(left), offset));
      }
    } else {
      Operand address(ToRegister(left), ToRegister(right), times_1, 0);
      if (is_p) {
        __ leap(ToRegister(instr->result()), address);
      } else {
        __ leal(ToRegister(instr->result()), address);
      }
    }
  } else {
    if (right->IsConstantOperand()) {
      ASSERT(!target_rep.IsSmi());  // No support for smi-immediates.
      if (is_p) {
        __ addp(ToRegister(left),
                Immediate(ToInteger32(LConstantOperand::cast(right))));
      } else {
        __ addl(ToRegister(left),
                Immediate(ToInteger32(LConstantOperand::cast(right))));
      }
    } else if (right->IsRegister()) {
      if (is_p) {
        __ addp(ToRegister(left), ToRegister(right));
      } else {
        __ addl(ToRegister(left), ToRegister(right));
      }
    } else {
      if (is_p) {
        __ addp(ToRegister(left), ToOperand(right));
      } else {
        __ addl(ToRegister(left), ToOperand(right));
      }
    }
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      DeoptimizeIf(overflow, instr->environment());
    }
  }
}

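// The double path below is careful about IEEE corner cases: ucomisd sets the
// parity flag for NaN operands, and when both inputs are zeros, min uses orps
// so that -0 wins while max uses addsd so that +0 wins (+0 + -0 == +0).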
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Label return_left;
    Condition condition = (operation == HMathMinMax::kMathMin)
        ? less_equal
        : greater_equal;
    Register left_reg = ToRegister(left);
    if (right->IsConstantOperand()) {
      Immediate right_imm =
          Immediate(ToInteger32(LConstantOperand::cast(right)));
      ASSERT(!instr->hydrogen_value()->representation().IsSmi());
      __ cmpl(left_reg, right_imm);
      __ j(condition, &return_left, Label::kNear);
      __ movp(left_reg, right_imm);
    } else if (right->IsRegister()) {
      Register right_reg = ToRegister(right);
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ cmpp(left_reg, right_reg);
      } else {
        __ cmpl(left_reg, right_reg);
      }
      __ j(condition, &return_left, Label::kNear);
      __ movp(left_reg, right_reg);
    } else {
      Operand right_op = ToOperand(right);
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ cmpp(left_reg, right_op);
      } else {
        __ cmpl(left_reg, right_op);
      }
      __ j(condition, &return_left, Label::kNear);
      __ movp(left_reg, right_op);
    }
    __ bind(&return_left);
  } else {
    ASSERT(instr->hydrogen()->representation().IsDouble());
    Label check_nan_left, check_zero, return_left, return_right;
    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
    XMMRegister left_reg = ToDoubleRegister(left);
    XMMRegister right_reg = ToDoubleRegister(right);
    __ ucomisd(left_reg, right_reg);
    __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
    __ j(equal, &check_zero, Label::kNear);  // left == right.
    __ j(condition, &return_left, Label::kNear);
    __ jmp(&return_right, Label::kNear);

    __ bind(&check_zero);
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(left_reg, xmm_scratch);
    __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
    // At this point, both left and right are either 0 or -0.
    if (operation == HMathMinMax::kMathMin) {
      __ orps(left_reg, right_reg);
    } else {
      // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
      __ addsd(left_reg, right_reg);
    }
    __ jmp(&return_left, Label::kNear);

    __ bind(&check_nan_left);
    __ ucomisd(left_reg, left_reg);  // NaN check.
    __ j(parity_even, &return_left, Label::kNear);
    __ bind(&return_right);
    __ movaps(left_reg, right_reg);

    __ bind(&return_left);
  }
}

2007 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2008 XMMRegister left = ToDoubleRegister(instr->left());
2009 XMMRegister right = ToDoubleRegister(instr->right());
2010 XMMRegister result = ToDoubleRegister(instr->result());
2011 // All operations except MOD are computed in-place.
2012 ASSERT(instr->op() == Token::MOD || left.is(result));
2013 switch (instr->op()) {
2014 case Token::ADD:
2015 __ addsd(left, right);
2016 break;
2017 case Token::SUB:
2018 __ subsd(left, right);
2019 break;
2020 case Token::MUL:
2021 __ mulsd(left, right);
2022 break;
2023 case Token::DIV:
2024 __ divsd(left, right);
2025 // Don't delete this mov. It may improve performance on some CPUs
2026 // when there is a mulsd depending on the result.
2027 __ movaps(left, left);
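// For reference: movaps xmm,xmm is a plain register copy, but on some CPUs
// it starts a fresh dependency chain, so a later mulsd that consumes the
// quotient does not have to wait on the full latency of the divsd above.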
2028 break;
2029 case Token::MOD: {
2030 XMMRegister xmm_scratch = double_scratch0();
2031 __ PrepareCallCFunction(2);
2032 __ movaps(xmm_scratch, left);
2033 ASSERT(right.is(xmm1));
2034 __ CallCFunction(
2035 ExternalReference::mod_two_doubles_operation(isolate()), 2);
2036 __ movaps(result, xmm_scratch);
2037 break;
2038 }
2039 default:
2040 UNREACHABLE();
2041 break;
2042 }
2046 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2047 ASSERT(ToRegister(instr->context()).is(rsi));
2048 ASSERT(ToRegister(instr->left()).is(rdx));
2049 ASSERT(ToRegister(instr->right()).is(rax));
2050 ASSERT(ToRegister(instr->result()).is(rax));
2052 BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
2053 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2057 template<class InstrType>
2058 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2059 int left_block = instr->TrueDestination(chunk_);
2060 int right_block = instr->FalseDestination(chunk_);
2062 int next_block = GetNextEmittedBlock();
2064 if (right_block == left_block || cc == no_condition) {
2065 EmitGoto(left_block);
2066 } else if (left_block == next_block) {
2067 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
2068 } else if (right_block == next_block) {
2069 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2071 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2073 __ jmp(chunk_->GetAssemblyLabel(right_block));
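// For reference, the three cases above in order: identical or unconditional
// targets collapse to a goto; a true target that is the fall-through block
// needs only the inverted jump to the false block; a false target that is
// the fall-through needs only the plain conditional jump; otherwise both a
// jcc and an unconditional jmp are emitted.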
2079 template<class InstrType>
2080 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2081 int false_block = instr->FalseDestination(chunk_);
2082 __ j(cc, chunk_->GetAssemblyLabel(false_block));
2086 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2091 void LCodeGen::DoBranch(LBranch* instr) {
2092 Representation r = instr->hydrogen()->value()->representation();
2093 if (r.IsInteger32()) {
2094 ASSERT(!info()->IsStub());
2095 Register reg = ToRegister(instr->value());
2096 __ testl(reg, reg);
2097 EmitBranch(instr, not_zero);
2098 } else if (r.IsSmi()) {
2099 ASSERT(!info()->IsStub());
2100 Register reg = ToRegister(instr->value());
2101 __ testp(reg, reg);
2102 EmitBranch(instr, not_zero);
2103 } else if (r.IsDouble()) {
2104 ASSERT(!info()->IsStub());
2105 XMMRegister reg = ToDoubleRegister(instr->value());
2106 XMMRegister xmm_scratch = double_scratch0();
2107 __ xorps(xmm_scratch, xmm_scratch);
2108 __ ucomisd(reg, xmm_scratch);
2109 EmitBranch(instr, not_equal);
2110 } else if (r.IsSIMD128()) {
2111 ASSERT(!info()->IsStub());
2112 EmitBranch(instr, no_condition);
2113 } else {
2114 ASSERT(r.IsTagged());
2115 Register reg = ToRegister(instr->value());
2116 HType type = instr->hydrogen()->value()->type();
2117 if (type.IsBoolean()) {
2118 ASSERT(!info()->IsStub());
2119 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2120 EmitBranch(instr, equal);
2121 } else if (type.IsSmi()) {
2122 ASSERT(!info()->IsStub());
2123 __ SmiCompare(reg, Smi::FromInt(0));
2124 EmitBranch(instr, not_equal);
2125 } else if (type.IsJSArray()) {
2126 ASSERT(!info()->IsStub());
2127 EmitBranch(instr, no_condition);
2128 } else if (type.IsSIMD128()) {
2129 ASSERT(!info()->IsStub());
2130 EmitBranch(instr, no_condition);
2131 } else if (type.IsHeapNumber()) {
2132 ASSERT(!info()->IsStub());
2133 XMMRegister xmm_scratch = double_scratch0();
2134 __ xorps(xmm_scratch, xmm_scratch);
2135 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2136 EmitBranch(instr, not_equal);
2137 } else if (type.IsString()) {
2138 ASSERT(!info()->IsStub());
2139 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2140 EmitBranch(instr, not_equal);
2142 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2143 // Avoid deopts in the case where we've never executed this path before.
2144 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2146 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2147 // undefined -> false.
2148 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2149 __ j(equal, instr->FalseLabel(chunk_));
2151 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2153 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2154 __ j(equal, instr->TrueLabel(chunk_));
2156 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2157 __ j(equal, instr->FalseLabel(chunk_));
2159 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2161 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2162 __ j(equal, instr->FalseLabel(chunk_));
2165 if (expected.Contains(ToBooleanStub::SMI)) {
2166 // Smis: 0 -> false, all other -> true.
2167 __ Cmp(reg, Smi::FromInt(0));
2168 __ j(equal, instr->FalseLabel(chunk_));
2169 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2170 } else if (expected.NeedsMap()) {
2171 // If we need a map later and have a Smi -> deopt.
2172 __ testb(reg, Immediate(kSmiTagMask));
2173 DeoptimizeIf(zero, instr->environment());
2176 const Register map = kScratchRegister;
2177 if (expected.NeedsMap()) {
2178 __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
2180 if (expected.CanBeUndetectable()) {
2181 // Undetectable -> false.
2182 __ testb(FieldOperand(map, Map::kBitFieldOffset),
2183 Immediate(1 << Map::kIsUndetectable));
2184 __ j(not_zero, instr->FalseLabel(chunk_));
2188 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2189 // spec object -> true.
2190 __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2191 __ j(above_equal, instr->TrueLabel(chunk_));
2194 if (expected.Contains(ToBooleanStub::STRING)) {
2195 // String value -> false iff empty.
2196 Label not_string;
2197 __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2198 __ j(above_equal, &not_string, Label::kNear);
2199 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2200 __ j(not_zero, instr->TrueLabel(chunk_));
2201 __ jmp(instr->FalseLabel(chunk_));
2202 __ bind(&not_string);
2205 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2206 // Symbol value -> true.
2207 __ CmpInstanceType(map, SYMBOL_TYPE);
2208 __ j(equal, instr->TrueLabel(chunk_));
2211 if (expected.Contains(ToBooleanStub::FLOAT32x4)) {
2212 // Float32x4 value -> true.
2213 __ CmpInstanceType(map, FLOAT32x4_TYPE);
2214 __ j(equal, instr->TrueLabel(chunk_));
2217 if (expected.Contains(ToBooleanStub::FLOAT64x2)) {
2218 // Float64x2 value -> true.
2219 __ CmpInstanceType(map, FLOAT64x2_TYPE);
2220 __ j(equal, instr->TrueLabel(chunk_));
2223 if (expected.Contains(ToBooleanStub::INT32x4)) {
2224 // Int32x4 value -> true.
2225 __ CmpInstanceType(map, INT32x4_TYPE);
2226 __ j(equal, instr->TrueLabel(chunk_));
2229 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2230 // heap number -> false iff +0, -0, or NaN.
2231 Label not_heap_number;
2232 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2233 __ j(not_equal, &not_heap_number, Label::kNear);
2234 XMMRegister xmm_scratch = double_scratch0();
2235 __ xorps(xmm_scratch, xmm_scratch);
2236 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2237 __ j(zero, instr->FalseLabel(chunk_));
2238 __ jmp(instr->TrueLabel(chunk_));
2239 __ bind(&not_heap_number);
2242 if (!expected.IsGeneric()) {
2243 // We've seen something for the first time -> deopt.
2244 // This can only happen if we are not generic already.
2245 DeoptimizeIf(no_condition, instr->environment());
2252 void LCodeGen::EmitGoto(int block) {
2253 if (!IsNextEmittedBlock(block)) {
2254 __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
2259 void LCodeGen::DoGoto(LGoto* instr) {
2260 EmitGoto(instr->block_id());
2264 inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2265 Condition cond = no_condition;
2266 switch (op) {
2267 case Token::EQ:
2268 case Token::EQ_STRICT:
2269 cond = equal;
2270 break;
2271 case Token::NE:
2272 case Token::NE_STRICT:
2273 cond = not_equal;
2274 break;
2275 case Token::LT:
2276 cond = is_unsigned ? below : less;
2277 break;
2278 case Token::GT:
2279 cond = is_unsigned ? above : greater;
2280 break;
2281 case Token::LTE:
2282 cond = is_unsigned ? below_equal : less_equal;
2283 break;
2284 case Token::GTE:
2285 cond = is_unsigned ? above_equal : greater_equal;
2286 break;
2287 case Token::IN:
2288 case Token::INSTANCEOF:
2289 default:
2290 UNREACHABLE();
2291 }
2292 return cond;
2293 }
2296 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2297 LOperand* left = instr->left();
2298 LOperand* right = instr->right();
2299 Condition cc = TokenToCondition(instr->op(), instr->is_double());
2301 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2302 // We can statically evaluate the comparison.
2303 double left_val = ToDouble(LConstantOperand::cast(left));
2304 double right_val = ToDouble(LConstantOperand::cast(right));
2305 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2306 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2307 EmitGoto(next_block);
2309 if (instr->is_double()) {
2310 // Don't base result on EFLAGS when a NaN is involved. Instead
2311 // jump to the false block.
2312 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
2313 __ j(parity_even, instr->FalseLabel(chunk_));
2314 } else {
2315 int32_t value;
2316 if (right->IsConstantOperand()) {
2317 value = ToInteger32(LConstantOperand::cast(right));
2318 if (instr->hydrogen_value()->representation().IsSmi()) {
2319 __ Cmp(ToRegister(left), Smi::FromInt(value));
2321 __ cmpl(ToRegister(left), Immediate(value));
2323 } else if (left->IsConstantOperand()) {
2324 value = ToInteger32(LConstantOperand::cast(left));
2325 if (instr->hydrogen_value()->representation().IsSmi()) {
2326 if (right->IsRegister()) {
2327 __ Cmp(ToRegister(right), Smi::FromInt(value));
2329 __ Cmp(ToOperand(right), Smi::FromInt(value));
2331 } else if (right->IsRegister()) {
2332 __ cmpl(ToRegister(right), Immediate(value));
2334 __ cmpl(ToOperand(right), Immediate(value));
2336 // We transposed the operands. Reverse the condition.
2337 cc = ReverseCondition(cc);
2338 } else if (instr->hydrogen_value()->representation().IsSmi()) {
2339 if (right->IsRegister()) {
2340 __ cmpp(ToRegister(left), ToRegister(right));
2342 __ cmpp(ToRegister(left), ToOperand(right));
2345 if (right->IsRegister()) {
2346 __ cmpl(ToRegister(left), ToRegister(right));
2348 __ cmpl(ToRegister(left), ToOperand(right));
2352 EmitBranch(instr, cc);
2357 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2358 Register left = ToRegister(instr->left());
2360 if (instr->right()->IsConstantOperand()) {
2361 Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2362 __ Cmp(left, right);
2364 Register right = ToRegister(instr->right());
2365 __ cmpp(left, right);
2367 EmitBranch(instr, equal);
2371 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2372 if (instr->hydrogen()->representation().IsTagged()) {
2373 Register input_reg = ToRegister(instr->object());
2374 __ Cmp(input_reg, factory()->the_hole_value());
2375 EmitBranch(instr, equal);
2376 return;
2377 }
2379 XMMRegister input_reg = ToDoubleRegister(instr->object());
2380 __ ucomisd(input_reg, input_reg);
2381 EmitFalseBranch(instr, parity_odd);
2383 __ subp(rsp, Immediate(kDoubleSize));
2384 __ movsd(MemOperand(rsp, 0), input_reg);
2385 __ addp(rsp, Immediate(kDoubleSize));
2387 int offset = sizeof(kHoleNanUpper32);
2388 __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
2389 EmitBranch(instr, equal);
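// For reference: the hole is a NaN with a fixed bit pattern. The parity
// check above already sent every ordinary (non-NaN) double to the false
// branch, so comparing the spilled value's upper 32 bits against
// kHoleNanUpper32 is enough to tell the hole apart from a computed NaN.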
2393 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2394 Representation rep = instr->hydrogen()->value()->representation();
2395 ASSERT(!rep.IsInteger32());
2397 if (rep.IsDouble()) {
2398 XMMRegister value = ToDoubleRegister(instr->value());
2399 XMMRegister xmm_scratch = double_scratch0();
2400 __ xorps(xmm_scratch, xmm_scratch);
2401 __ ucomisd(xmm_scratch, value);
2402 EmitFalseBranch(instr, not_equal);
2403 __ movmskpd(kScratchRegister, value);
2404 __ testl(kScratchRegister, Immediate(1));
2405 EmitBranch(instr, not_zero);
2406 } else {
2407 Register value = ToRegister(instr->value());
2408 Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2409 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
2410 __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
2411 Immediate(0x1));
2412 EmitFalseBranch(instr, no_overflow);
2413 __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
2414 Immediate(0x00000000));
2415 EmitBranch(instr, equal);
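// For reference: the upper word of -0.0 is 0x80000000, i.e. INT32_MIN.
// cmpl(x, 1) computes x - 1, which sets the overflow flag only for
// x == INT32_MIN, so the no_overflow false-branch above rejects every other
// upper word; the mantissa word is then additionally required to be zero.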
2420 Condition LCodeGen::EmitIsObject(Register input,
2421 Label* is_not_object,
2422 Label* is_object) {
2423 ASSERT(!input.is(kScratchRegister));
2425 __ JumpIfSmi(input, is_not_object);
2427 __ CompareRoot(input, Heap::kNullValueRootIndex);
2428 __ j(equal, is_object);
2430 __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
2431 // Undetectable objects behave like undefined.
2432 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
2433 Immediate(1 << Map::kIsUndetectable));
2434 __ j(not_zero, is_not_object);
2436 __ movzxbl(kScratchRegister,
2437 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
2438 __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2439 __ j(below, is_not_object);
2440 __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2441 return below_equal;
2442 }
2445 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2446 Register reg = ToRegister(instr->value());
2448 Condition true_cond = EmitIsObject(
2449 reg, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2451 EmitBranch(instr, true_cond);
2455 Condition LCodeGen::EmitIsString(Register input,
2456 Register temp1,
2457 Label* is_not_string,
2458 SmiCheck check_needed = INLINE_SMI_CHECK) {
2459 if (check_needed == INLINE_SMI_CHECK) {
2460 __ JumpIfSmi(input, is_not_string);
2463 Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2464 return cond;
2465 }
2469 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2470 Register reg = ToRegister(instr->value());
2471 Register temp = ToRegister(instr->temp());
2473 SmiCheck check_needed =
2474 instr->hydrogen()->value()->IsHeapObject()
2475 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2477 Condition true_cond = EmitIsString(
2478 reg, temp, instr->FalseLabel(chunk_), check_needed);
2480 EmitBranch(instr, true_cond);
2484 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2485 Condition is_smi;
2486 if (instr->value()->IsRegister()) {
2487 Register input = ToRegister(instr->value());
2488 is_smi = masm()->CheckSmi(input);
2490 Operand input = ToOperand(instr->value());
2491 is_smi = masm()->CheckSmi(input);
2493 EmitBranch(instr, is_smi);
2497 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2498 Register input = ToRegister(instr->value());
2499 Register temp = ToRegister(instr->temp());
2501 if (!instr->hydrogen()->value()->IsHeapObject()) {
2502 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2504 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2505 __ testb(FieldOperand(temp, Map::kBitFieldOffset),
2506 Immediate(1 << Map::kIsUndetectable));
2507 EmitBranch(instr, not_zero);
2511 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2512 ASSERT(ToRegister(instr->context()).is(rsi));
2513 Token::Value op = instr->op();
2515 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2516 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2518 Condition condition = TokenToCondition(op, false);
2519 __ testp(rax, rax);
2521 EmitBranch(instr, condition);
2525 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2526 InstanceType from = instr->from();
2527 InstanceType to = instr->to();
2528 if (from == FIRST_TYPE) return to;
2529 ASSERT(from == to || to == LAST_TYPE);
2530 return from;
2531 }
2534 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2535 InstanceType from = instr->from();
2536 InstanceType to = instr->to();
2537 if (from == to) return equal;
2538 if (to == LAST_TYPE) return above_equal;
2539 if (from == FIRST_TYPE) return below_equal;
2540 UNREACHABLE();
2541 return equal;
2542 }
2545 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2546 Register input = ToRegister(instr->value());
2548 if (!instr->hydrogen()->value()->IsHeapObject()) {
2549 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2552 __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
2553 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2557 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2558 Register input = ToRegister(instr->value());
2559 Register result = ToRegister(instr->result());
2561 __ AssertString(input);
2563 __ movl(result, FieldOperand(input, String::kHashFieldOffset));
2564 ASSERT(String::kHashShift >= kSmiTagSize);
2565 __ IndexFromHash(result, result);
2569 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2570 LHasCachedArrayIndexAndBranch* instr) {
2571 Register input = ToRegister(instr->value());
2573 __ testl(FieldOperand(input, String::kHashFieldOffset),
2574 Immediate(String::kContainsCachedArrayIndexMask));
2575 EmitBranch(instr, equal);
2579 // Branches to a label or falls through with the answer in the z flag.
2580 // Trashes the temp register.
2581 void LCodeGen::EmitClassOfTest(Label* is_true,
2582 Label* is_false,
2583 Handle<String> class_name,
2584 Register input,
2585 Register temp,
2586 Register temp2) {
2587 ASSERT(!input.is(temp));
2588 ASSERT(!input.is(temp2));
2589 ASSERT(!temp.is(temp2));
2591 __ JumpIfSmi(input, is_false);
2593 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
2594 // Assuming the following assertions, we can use the same compares to test
2595 // for both being a function type and being in the object type range.
2596 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2597 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2598 FIRST_SPEC_OBJECT_TYPE + 1);
2599 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2600 LAST_SPEC_OBJECT_TYPE - 1);
2601 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2602 __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2603 __ j(below, is_false);
2604 __ j(equal, is_true);
2605 __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2606 __ j(equal, is_true);
2607 } else {
2608 // Faster code path to avoid two compares: subtract lower bound from the
2609 // actual type and do an unsigned compare with the width of the type range.
2610 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2611 __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2612 __ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2613 __ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2614 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2615 __ j(above, is_false);
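// For reference: subtracting the lower bound makes any type below it wrap
// around to a huge unsigned value, so the single unsigned "above" test
// rejects both sides of the range at once, e.g. FIRST - 1 becomes an
// all-ones pattern after the subtraction.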
2618 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2619 // Check if the constructor in the map is a function.
2620 __ movp(temp, FieldOperand(temp, Map::kConstructorOffset));
2622 // Objects with a non-function constructor have class 'Object'.
2623 __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
2624 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
2625 __ j(not_equal, is_true);
2626 } else {
2627 __ j(not_equal, is_false);
2628 }
2630 // temp now contains the constructor function. Grab the
2631 // instance class name from there.
2632 __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2633 __ movp(temp, FieldOperand(temp,
2634 SharedFunctionInfo::kInstanceClassNameOffset));
2635 // The class name we are testing against is internalized since it's a literal.
2636 // The name in the constructor is internalized because of the way the context
2637 // is booted. This routine isn't expected to work for random API-created
2638 // classes and it doesn't have to because you can't access it with natives
2639 // syntax. Since both sides are internalized it is sufficient to use an
2640 // identity comparison.
2641 ASSERT(class_name->IsInternalizedString());
2642 __ Cmp(temp, class_name);
2643 // End with the answer in the z flag.
2647 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2648 Register input = ToRegister(instr->value());
2649 Register temp = ToRegister(instr->temp());
2650 Register temp2 = ToRegister(instr->temp2());
2651 Handle<String> class_name = instr->hydrogen()->class_name();
2653 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2654 class_name, input, temp, temp2);
2656 EmitBranch(instr, equal);
2660 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2661 Register reg = ToRegister(instr->value());
2663 __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2664 EmitBranch(instr, equal);
2668 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2669 ASSERT(ToRegister(instr->context()).is(rsi));
2670 InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
2671 __ Push(ToRegister(instr->left()));
2672 __ Push(ToRegister(instr->right()));
2673 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2674 Label true_value, done;
2675 __ testp(rax, rax);
2676 __ j(zero, &true_value, Label::kNear);
2677 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2678 __ jmp(&done, Label::kNear);
2679 __ bind(&true_value);
2680 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2681 __ bind(&done);
2682 }
2685 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2686 class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
2687 public:
2688 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2689 LInstanceOfKnownGlobal* instr)
2690 : LDeferredCode(codegen), instr_(instr) { }
2691 virtual void Generate() V8_OVERRIDE {
2692 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2694 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
2695 Label* map_check() { return &map_check_; }
2696 private:
2697 LInstanceOfKnownGlobal* instr_;
2698 Label map_check_;
2699 };
2701 ASSERT(ToRegister(instr->context()).is(rsi));
2702 DeferredInstanceOfKnownGlobal* deferred;
2703 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2705 Label done, false_result;
2706 Register object = ToRegister(instr->value());
2708 // A Smi is not an instance of anything.
2709 __ JumpIfSmi(object, &false_result, Label::kNear);
2711 // This is the inlined call site instanceof cache. The two occurrences of the
2712 // hole value will be patched to the last map/result pair generated by the
2713 // instanceof stub.
2714 Label cache_miss;
2715 // Use a temp register to avoid memory operands with variable lengths.
2716 Register map = ToRegister(instr->temp());
2717 __ movp(map, FieldOperand(object, HeapObject::kMapOffset));
2718 __ bind(deferred->map_check()); // Label for calculating code patching.
2719 Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
2720 __ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
2721 __ cmpp(map, Operand(kScratchRegister, 0));
2722 __ j(not_equal, &cache_miss, Label::kNear);
2723 // Patched to load either true or false.
2724 __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
2726 // Check that the code size between patch label and patch sites is invariant.
2727 Label end_of_patched_code;
2728 __ bind(&end_of_patched_code);
2731 __ jmp(&done, Label::kNear);
2733 // The inlined call site cache did not match. Check for null and string
2734 // before calling the deferred code.
2735 __ bind(&cache_miss); // Null is not an instance of anything.
2736 __ CompareRoot(object, Heap::kNullValueRootIndex);
2737 __ j(equal, &false_result, Label::kNear);
2739 // String values are not instances of anything.
2740 __ JumpIfNotString(object, kScratchRegister, deferred->entry());
2742 __ bind(&false_result);
2743 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2745 __ bind(deferred->exit());
2746 __ bind(&done);
2747 }
2750 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2751 Label* map_check) {
2752 {
2753 PushSafepointRegistersScope scope(this);
2754 InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
2755 InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
2756 InstanceofStub stub(isolate(), flags);
2758 __ Push(ToRegister(instr->value()));
2759 __ Push(instr->function());
2761 static const int kAdditionalDelta = 10;
2762 int delta =
2763 masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
2765 __ PushImm32(delta);
2767 // We are pushing three values on the stack but recording a
2768 // safepoint with two arguments because stub is going to
2769 // remove the third argument from the stack before jumping
2770 // to instanceof builtin on the slow path.
2771 CallCodeGeneric(stub.GetCode(),
2772 RelocInfo::CODE_TARGET,
2773 instr,
2774 RECORD_SAFEPOINT_WITH_REGISTERS,
2775 2);
2776 ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
2777 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2778 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2779 // Move result to a register that survives the end of the
2780 // PushSafepointRegisterScope.
2781 __ movp(kScratchRegister, rax);
2782 }
2783 __ testp(kScratchRegister, kScratchRegister);
2784 Label load_false;
2785 Label done;
2786 __ j(not_zero, &load_false, Label::kNear);
2787 __ LoadRoot(rax, Heap::kTrueValueRootIndex);
2788 __ jmp(&done, Label::kNear);
2789 __ bind(&load_false);
2790 __ LoadRoot(rax, Heap::kFalseValueRootIndex);
2791 __ bind(&done);
2792 }
2795 void LCodeGen::DoCmpT(LCmpT* instr) {
2796 ASSERT(ToRegister(instr->context()).is(rsi));
2797 Token::Value op = instr->op();
2799 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2800 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2802 Condition condition = TokenToCondition(op, false);
2803 Label true_value, done;
2804 __ testp(rax, rax);
2805 __ j(condition, &true_value, Label::kNear);
2806 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2807 __ jmp(&done, Label::kNear);
2808 __ bind(&true_value);
2809 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2810 __ bind(&done);
2811 }
2814 void LCodeGen::DoReturn(LReturn* instr) {
2815 if (FLAG_trace && info()->IsOptimizing()) {
2816 // Preserve the return value on the stack and rely on the runtime call
2817 // to return the value in the same register. We're leaving the code
2818 // managed by the register allocator and tearing down the frame, so it's
2819 // safe to write to the context register.
2820 __ Push(rax);
2821 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
2822 __ CallRuntime(Runtime::kTraceExit, 1);
2824 if (info()->saves_caller_doubles()) {
2825 RestoreCallerDoubles();
2827 int no_frame_start = -1;
2828 if (NeedsEagerFrame()) {
2829 __ movp(rsp, rbp);
2830 __ popq(rbp);
2831 no_frame_start = masm_->pc_offset();
2832 }
2833 if (instr->has_constant_parameter_count()) {
2834 __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
2835 rcx);
2836 } else {
2837 Register reg = ToRegister(instr->parameter_count());
2838 // The argument count parameter is a smi.
2839 __ SmiToInteger32(reg, reg);
2840 Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
2841 __ PopReturnAddressTo(return_addr_reg);
2842 __ shlp(reg, Immediate(kPointerSizeLog2));
2843 __ addp(rsp, reg);
2844 __ jmp(return_addr_reg);
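// For reference: with a dynamic parameter count a fixed "ret n" cannot be
// used, so the sequence above pops the return address into a scratch
// register, advances rsp past count * kPointerSize of arguments, and then
// jumps back through the scratch register.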
2846 if (no_frame_start != -1) {
2847 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2852 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2853 Register result = ToRegister(instr->result());
2854 __ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
2855 if (instr->hydrogen()->RequiresHoleCheck()) {
2856 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2857 DeoptimizeIf(equal, instr->environment());
2862 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2863 ASSERT(ToRegister(instr->context()).is(rsi));
2864 ASSERT(ToRegister(instr->global_object()).is(rax));
2865 ASSERT(ToRegister(instr->result()).is(rax));
2867 __ Move(rcx, instr->name());
2868 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
2869 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
2870 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2874 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2875 Register value = ToRegister(instr->value());
2876 Handle<Cell> cell_handle = instr->hydrogen()->cell().handle();
2878 // If the cell we are storing to contains the hole it could have
2879 // been deleted from the property dictionary. In that case, we need
2880 // to update the property details in the property dictionary to mark
2881 // it as no longer deleted. We deoptimize in that case.
2882 if (instr->hydrogen()->RequiresHoleCheck()) {
2883 // We have a temp because CompareRoot might clobber kScratchRegister.
2884 Register cell = ToRegister(instr->temp());
2885 ASSERT(!value.is(cell));
2886 __ Move(cell, cell_handle, RelocInfo::CELL);
2887 __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
2888 DeoptimizeIf(equal, instr->environment());
2890 __ movp(Operand(cell, 0), value);
2891 } else {
2892 // Store the value.
2893 __ Move(kScratchRegister, cell_handle, RelocInfo::CELL);
2894 __ movp(Operand(kScratchRegister, 0), value);
2895 }
2896 // Cells are always rescanned, so no write barrier here.
2900 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2901 Register context = ToRegister(instr->context());
2902 Register result = ToRegister(instr->result());
2903 __ movp(result, ContextOperand(context, instr->slot_index()));
2904 if (instr->hydrogen()->RequiresHoleCheck()) {
2905 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2906 if (instr->hydrogen()->DeoptimizesOnHole()) {
2907 DeoptimizeIf(equal, instr->environment());
2908 } else {
2909 Label is_not_hole;
2910 __ j(not_equal, &is_not_hole, Label::kNear);
2911 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2912 __ bind(&is_not_hole);
2918 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2919 Register context = ToRegister(instr->context());
2920 Register value = ToRegister(instr->value());
2922 Operand target = ContextOperand(context, instr->slot_index());
2924 Label skip_assignment;
2925 if (instr->hydrogen()->RequiresHoleCheck()) {
2926 __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
2927 if (instr->hydrogen()->DeoptimizesOnHole()) {
2928 DeoptimizeIf(equal, instr->environment());
2929 } else {
2930 __ j(not_equal, &skip_assignment);
2931 }
2932 }
2933 __ movp(target, value);
2935 if (instr->hydrogen()->NeedsWriteBarrier()) {
2936 SmiCheck check_needed =
2937 instr->hydrogen()->value()->IsHeapObject()
2938 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2939 int offset = Context::SlotOffset(instr->slot_index());
2940 Register scratch = ToRegister(instr->temp());
2941 __ RecordWriteContextSlot(context,
2942 offset,
2943 value,
2944 scratch,
2945 kSaveFPRegs,
2946 EMIT_REMEMBERED_SET,
2947 check_needed);
2948 }
2950 __ bind(&skip_assignment);
2954 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2955 HObjectAccess access = instr->hydrogen()->access();
2956 int offset = access.offset();
2958 if (access.IsExternalMemory()) {
2959 Register result = ToRegister(instr->result());
2960 if (instr->object()->IsConstantOperand()) {
2961 ASSERT(result.is(rax));
2962 __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
2964 Register object = ToRegister(instr->object());
2965 __ Load(result, MemOperand(object, offset), access.representation());
2966 }
2967 return;
2968 }
2970 Register object = ToRegister(instr->object());
2971 if (instr->hydrogen()->representation().IsDouble()) {
2972 XMMRegister result = ToDoubleRegister(instr->result());
2973 __ movsd(result, FieldOperand(object, offset));
2974 return;
2975 }
2977 Register result = ToRegister(instr->result());
2978 if (!access.IsInobject()) {
2979 __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
2980 object = result;
2981 }
2983 Representation representation = access.representation();
2984 if (representation.IsSmi() && SmiValuesAre32Bits() &&
2985 instr->hydrogen()->representation().IsInteger32()) {
2986 if (FLAG_debug_code) {
2987 Register scratch = kScratchRegister;
2988 __ Load(scratch, FieldOperand(object, offset), representation);
2989 __ AssertSmi(scratch);
2990 }
2992 // Read int value directly from upper half of the smi.
2993 STATIC_ASSERT(kSmiTag == 0);
2994 ASSERT(kSmiTagSize + kSmiShiftSize == 32);
2995 offset += kPointerSize / 2;
2996 representation = Representation::Integer32();
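// For reference: with 32-bit smis on x64 the payload lives in the upper
// half of the word (value << 32), e.g. the smi 5 is 0x0000000500000000, so
// a 4-byte load at offset + kPointerSize / 2 yields the int32 directly.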
2998 __ Load(result, FieldOperand(object, offset), representation);
3002 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3003 ASSERT(ToRegister(instr->context()).is(rsi));
3004 ASSERT(ToRegister(instr->object()).is(rax));
3005 ASSERT(ToRegister(instr->result()).is(rax));
3007 __ Move(rcx, instr->name());
3008 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
3009 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3013 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3014 Register function = ToRegister(instr->function());
3015 Register result = ToRegister(instr->result());
3017 // Check that the function really is a function.
3018 __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
3019 DeoptimizeIf(not_equal, instr->environment());
3021 // Check whether the function has an instance prototype.
3023 __ testb(FieldOperand(result, Map::kBitFieldOffset),
3024 Immediate(1 << Map::kHasNonInstancePrototype));
3025 __ j(not_zero, &non_instance, Label::kNear);
3027 // Get the prototype or initial map from the function.
3029 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3031 // Check that the function has a prototype or an initial map.
3032 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3033 DeoptimizeIf(equal, instr->environment());
3035 // If the function does not have an initial map, we're done.
3037 __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
3038 __ j(not_equal, &done, Label::kNear);
3040 // Get the prototype from the initial map.
3041 __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
3042 __ jmp(&done, Label::kNear);
3044 // Non-instance prototype: Fetch prototype from constructor field
3045 // in the function's map.
3046 __ bind(&non_instance);
3047 __ movp(result, FieldOperand(result, Map::kConstructorOffset));
3054 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3055 Register result = ToRegister(instr->result());
3056 __ LoadRoot(result, instr->index());
3060 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3061 Register arguments = ToRegister(instr->arguments());
3062 Register result = ToRegister(instr->result());
3064 if (instr->length()->IsConstantOperand() &&
3065 instr->index()->IsConstantOperand()) {
3066 int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3067 int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3068 if (const_index >= 0 && const_index < const_length) {
3069 StackArgumentsAccessor args(arguments, const_length,
3070 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3071 __ movp(result, args.GetArgumentOperand(const_index));
3072 } else if (FLAG_debug_code) {
3076 Register length = ToRegister(instr->length());
3077 // There are two words between the frame pointer and the last argument.
3078 // Subtracting from length accounts for one of them; add one more.
3079 if (instr->index()->IsRegister()) {
3080 __ subl(length, ToRegister(instr->index()));
3082 __ subl(length, ToOperand(instr->index()));
3084 StackArgumentsAccessor args(arguments, length,
3085 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3086 __ movp(result, args.GetArgumentOperand(0));
3091 void LCodeGen::HandleExternalArrayOpRequiresPreScale(
3092 LOperand* key,
3093 ElementsKind elements_kind) {
3094 if (ExternalArrayOpRequiresPreScale(elements_kind)) {
3095 int pre_shift_size = ElementsKindToShiftSize(elements_kind) -
3096 static_cast<int>(maximal_scale_factor);
3097 ASSERT(pre_shift_size > 0);
3098 __ shll(ToRegister(key), Immediate(pre_shift_size));
3103 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3104 ElementsKind elements_kind = instr->elements_kind();
3105 LOperand* key = instr->key();
3106 if (!key->IsConstantOperand()) {
3107 HandleExternalArrayOpRequiresPreScale(key, elements_kind);
3108 }
3109 int base_offset = instr->is_fixed_typed_array()
3110 ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
3111 : 0;
3112 Operand operand(BuildFastArrayOperand(
3113 instr->elements(),
3114 key,
3115 elements_kind,
3116 base_offset,
3117 instr->additional_index()));
3119 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3120 elements_kind == FLOAT32_ELEMENTS) {
3121 XMMRegister result(ToDoubleRegister(instr->result()));
3122 __ movss(result, operand);
3123 __ cvtss2sd(result, result);
3124 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3125 elements_kind == FLOAT64_ELEMENTS) {
3126 __ movsd(ToDoubleRegister(instr->result()), operand);
3127 } else if (IsSIMD128ElementsKind(elements_kind)) {
3128 __ movups(ToSIMD128Register(instr->result()), operand);
3130 Register result(ToRegister(instr->result()));
3131 switch (elements_kind) {
3132 case EXTERNAL_INT8_ELEMENTS:
3133 case INT8_ELEMENTS:
3134 __ movsxbl(result, operand);
3135 break;
3136 case EXTERNAL_UINT8_ELEMENTS:
3137 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3138 case UINT8_ELEMENTS:
3139 case UINT8_CLAMPED_ELEMENTS:
3140 __ movzxbl(result, operand);
3141 break;
3142 case EXTERNAL_INT16_ELEMENTS:
3143 case INT16_ELEMENTS:
3144 __ movsxwl(result, operand);
3145 break;
3146 case EXTERNAL_UINT16_ELEMENTS:
3147 case UINT16_ELEMENTS:
3148 __ movzxwl(result, operand);
3149 break;
3150 case EXTERNAL_INT32_ELEMENTS:
3151 case INT32_ELEMENTS:
3152 __ movl(result, operand);
3153 break;
3154 case EXTERNAL_UINT32_ELEMENTS:
3155 case UINT32_ELEMENTS:
3156 __ movl(result, operand);
3157 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3158 __ testl(result, result);
3159 DeoptimizeIf(negative, instr->environment());
3160 }
3161 break;
3162 case EXTERNAL_FLOAT32_ELEMENTS:
3163 case EXTERNAL_FLOAT64_ELEMENTS:
3164 case EXTERNAL_FLOAT32x4_ELEMENTS:
3165 case EXTERNAL_FLOAT64x2_ELEMENTS:
3166 case EXTERNAL_INT32x4_ELEMENTS:
3167 case FLOAT32_ELEMENTS:
3168 case FLOAT64_ELEMENTS:
3169 case FLOAT32x4_ELEMENTS:
3170 case FLOAT64x2_ELEMENTS:
3171 case INT32x4_ELEMENTS:
3172 case FAST_ELEMENTS:
3173 case FAST_SMI_ELEMENTS:
3174 case FAST_DOUBLE_ELEMENTS:
3175 case FAST_HOLEY_ELEMENTS:
3176 case FAST_HOLEY_SMI_ELEMENTS:
3177 case FAST_HOLEY_DOUBLE_ELEMENTS:
3178 case DICTIONARY_ELEMENTS:
3179 case SLOPPY_ARGUMENTS_ELEMENTS:
3180 UNREACHABLE();
3181 break;
3182 }
3183 }
3184 }
3187 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3188 XMMRegister result(ToDoubleRegister(instr->result()));
3189 LOperand* key = instr->key();
3190 if (instr->hydrogen()->RequiresHoleCheck()) {
3191 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
3192 sizeof(kHoleNanLower32);
3193 Operand hole_check_operand = BuildFastArrayOperand(
3194 instr->elements(),
3195 key,
3196 FAST_DOUBLE_ELEMENTS,
3197 offset,
3198 instr->additional_index());
3199 __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
3200 DeoptimizeIf(equal, instr->environment());
3203 Operand double_load_operand = BuildFastArrayOperand(
3204 instr->elements(),
3205 key,
3206 FAST_DOUBLE_ELEMENTS,
3207 FixedDoubleArray::kHeaderSize - kHeapObjectTag,
3208 instr->additional_index());
3209 __ movsd(result, double_load_operand);
3213 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3214 HLoadKeyed* hinstr = instr->hydrogen();
3215 Register result = ToRegister(instr->result());
3216 LOperand* key = instr->key();
3217 bool requires_hole_check = hinstr->RequiresHoleCheck();
3218 int offset = FixedArray::kHeaderSize - kHeapObjectTag;
3219 Representation representation = hinstr->representation();
3221 if (representation.IsInteger32() && SmiValuesAre32Bits() &&
3222 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3223 ASSERT(!requires_hole_check);
3224 if (FLAG_debug_code) {
3225 Register scratch = kScratchRegister;
3226 __ Load(scratch,
3227 BuildFastArrayOperand(instr->elements(),
3228 key,
3229 FAST_ELEMENTS,
3230 offset,
3231 instr->additional_index()),
3232 Representation::Smi());
3233 __ AssertSmi(scratch);
3234 }
3235 // Read int value directly from upper half of the smi.
3236 STATIC_ASSERT(kSmiTag == 0);
3237 ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3238 offset += kPointerSize / 2;
3239 }
3241 __ Load(result,
3242 BuildFastArrayOperand(instr->elements(),
3243 key,
3244 hinstr->elements_kind(),
3245 offset,
3246 instr->additional_index()),
3247 representation);
3249 // Check for the hole value.
3250 if (requires_hole_check) {
3251 if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3252 Condition smi = __ CheckSmi(result);
3253 DeoptimizeIf(NegateCondition(smi), instr->environment());
3255 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3256 DeoptimizeIf(equal, instr->environment());
3262 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3263 if (instr->is_typed_elements()) {
3264 DoLoadKeyedExternalArray(instr);
3265 } else if (instr->hydrogen()->representation().IsDouble()) {
3266 DoLoadKeyedFixedDoubleArray(instr);
3267 } else {
3268 DoLoadKeyedFixedArray(instr);
3269 }
3270 }
3273 Operand LCodeGen::BuildFastArrayOperand(
3274 LOperand* elements_pointer,
3275 LOperand* key,
3276 ElementsKind elements_kind,
3277 uint32_t offset,
3278 uint32_t additional_index) {
3279 Register elements_pointer_reg = ToRegister(elements_pointer);
3280 int shift_size = ElementsKindToShiftSize(elements_kind);
3281 if (key->IsConstantOperand()) {
3282 int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
3283 if (constant_value & 0xF0000000) {
3284 Abort(kArrayIndexConstantValueTooBig);
3285 }
3287 return Operand(elements_pointer_reg,
3288 ((constant_value + additional_index) << shift_size)
3289 + offset);
3290 } else {
3291 if (ExternalArrayOpRequiresPreScale(elements_kind)) {
3292 // Make sure the key is pre-scaled against maximal_scale_factor.
3293 shift_size = static_cast<int>(maximal_scale_factor);
3295 ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
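// For reference: for a register key this builds the scaled addressing mode
// [base + key * 2^shift_size + disp], so the element access becomes a
// single memory operand with no separate index arithmetic.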
3296 return Operand(elements_pointer_reg,
3297 ToRegister(key),
3298 scale_factor,
3299 offset + (additional_index << shift_size));
3300 }
3301 }
3304 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3305 ASSERT(ToRegister(instr->context()).is(rsi));
3306 ASSERT(ToRegister(instr->object()).is(rdx));
3307 ASSERT(ToRegister(instr->key()).is(rax));
3309 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3310 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3314 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3315 Register result = ToRegister(instr->result());
3317 if (instr->hydrogen()->from_inlined()) {
3318 __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
3319 } else {
3320 // Check for arguments adapter frame.
3321 Label done, adapted;
3322 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3323 __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
3324 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3325 __ j(equal, &adapted, Label::kNear);
3327 // No arguments adaptor frame.
3328 __ movp(result, rbp);
3329 __ jmp(&done, Label::kNear);
3331 // Arguments adaptor frame present.
3332 __ bind(&adapted);
3333 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3335 // Result is the frame pointer for the frame if not adapted and for the real
3336 // frame below the adaptor frame if adapted.
3337 __ bind(&done);
3338 }
3342 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3343 Register result = ToRegister(instr->result());
3345 Label done;
3347 // If no arguments adaptor frame, the number of arguments is fixed.
3348 if (instr->elements()->IsRegister()) {
3349 __ cmpp(rbp, ToRegister(instr->elements()));
3350 } else {
3351 __ cmpp(rbp, ToOperand(instr->elements()));
3352 }
3353 __ movl(result, Immediate(scope()->num_parameters()));
3354 __ j(equal, &done, Label::kNear);
3356 // Arguments adaptor frame present. Get argument length from there.
3357 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3358 __ SmiToInteger32(result,
3359 Operand(result,
3360 ArgumentsAdaptorFrameConstants::kLengthOffset));
3362 // Argument length is in result register.
3363 __ bind(&done);
3364 }
3367 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3368 Register receiver = ToRegister(instr->receiver());
3369 Register function = ToRegister(instr->function());
3371 // If the receiver is null or undefined, we have to pass the global
3372 // object as a receiver to normal functions. Values have to be
3373 // passed unchanged to builtins and strict-mode functions.
3374 Label global_object, receiver_ok;
3375 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3377 if (!instr->hydrogen()->known_function()) {
3378 // Do not transform the receiver to object for strict mode
3379 // functions.
3380 __ movp(kScratchRegister,
3381 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3382 __ testb(FieldOperand(kScratchRegister,
3383 SharedFunctionInfo::kStrictModeByteOffset),
3384 Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
3385 __ j(not_equal, &receiver_ok, dist);
3387 // Do not transform the receiver to object for builtins.
3388 __ testb(FieldOperand(kScratchRegister,
3389 SharedFunctionInfo::kNativeByteOffset),
3390 Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
3391 __ j(not_equal, &receiver_ok, dist);
3394 // Normal function. Replace undefined or null with global receiver.
3395 __ CompareRoot(receiver, Heap::kNullValueRootIndex);
3396 __ j(equal, &global_object, Label::kNear);
3397 __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
3398 __ j(equal, &global_object, Label::kNear);
3400 // The receiver should be a JS object.
3401 Condition is_smi = __ CheckSmi(receiver);
3402 DeoptimizeIf(is_smi, instr->environment());
3403 __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
3404 DeoptimizeIf(below, instr->environment());
3406 __ jmp(&receiver_ok, Label::kNear);
3407 __ bind(&global_object);
3408 __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
3409 __ movp(receiver,
3410 Operand(receiver,
3411 Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
3412 __ movp(receiver,
3413 FieldOperand(receiver, GlobalObject::kGlobalReceiverOffset));
3415 __ bind(&receiver_ok);
3419 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3420 Register receiver = ToRegister(instr->receiver());
3421 Register function = ToRegister(instr->function());
3422 Register length = ToRegister(instr->length());
3423 Register elements = ToRegister(instr->elements());
3424 ASSERT(receiver.is(rax)); // Used for parameter count.
3425 ASSERT(function.is(rdi)); // Required by InvokeFunction.
3426 ASSERT(ToRegister(instr->result()).is(rax));
3428 // Copy the arguments to this function possibly from the
3429 // adaptor frame below it.
3430 const uint32_t kArgumentsLimit = 1 * KB;
3431 __ cmpp(length, Immediate(kArgumentsLimit));
3432 DeoptimizeIf(above, instr->environment());
3434 __ Push(receiver);
3435 __ movp(receiver, length);
3437 // Loop through the arguments pushing them onto the execution
3438 // stack.
3439 Label invoke, loop;
3440 // length is a small non-negative integer, due to the test above.
3441 __ testl(length, length);
3442 __ j(zero, &invoke, Label::kNear);
3443 __ bind(&loop);
3444 StackArgumentsAccessor args(elements, length,
3445 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3446 __ Push(args.GetArgumentOperand(0));
3447 __ decl(length);
3448 __ j(not_zero, &loop);
3450 // Invoke the function.
3451 __ bind(&invoke);
3452 ASSERT(instr->HasPointerMap());
3453 LPointerMap* pointers = instr->pointer_map();
3454 SafepointGenerator safepoint_generator(
3455 this, pointers, Safepoint::kLazyDeopt);
3456 ParameterCount actual(rax);
3457 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3461 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3462 LOperand* argument = instr->value();
3463 EmitPushTaggedOperand(argument);
3467 void LCodeGen::DoDrop(LDrop* instr) {
3468 __ Drop(instr->count());
3472 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3473 Register result = ToRegister(instr->result());
3474 __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
3478 void LCodeGen::DoContext(LContext* instr) {
3479 Register result = ToRegister(instr->result());
3480 if (info()->IsOptimizing()) {
3481 __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
3482 } else {
3483 // If there is no frame, the context must be in rsi.
3484 ASSERT(result.is(rsi));
3489 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3490 ASSERT(ToRegister(instr->context()).is(rsi));
3491 __ Push(rsi); // The context is the first argument.
3492 __ Push(instr->hydrogen()->pairs());
3493 __ Push(Smi::FromInt(instr->hydrogen()->flags()));
3494 CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
3498 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3499 int formal_parameter_count,
3500 int arity,
3501 LInstruction* instr,
3502 RDIState rdi_state) {
3503 bool dont_adapt_arguments =
3504 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3505 bool can_invoke_directly =
3506 dont_adapt_arguments || formal_parameter_count == arity;
3508 LPointerMap* pointers = instr->pointer_map();
3510 if (can_invoke_directly) {
3511 if (rdi_state == RDI_UNINITIALIZED) {
3512 __ Move(rdi, function);
3513 }
3515 // Change context.
3516 __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3518 // Set rax to arguments count if adaptation is not needed. Assumes that rax
3519 // is available to write to at this point.
3520 if (dont_adapt_arguments) {
3521 __ Set(rax, arity);
3522 }
3524 // Invoke function.
3525 if (function.is_identical_to(info()->closure())) {
3526 __ CallSelf();
3527 } else {
3528 __ Call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3529 }
3531 // Set up deoptimization.
3532 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
3533 } else {
3534 // We need to adapt arguments.
3535 SafepointGenerator generator(
3536 this, pointers, Safepoint::kLazyDeopt);
3537 ParameterCount count(arity);
3538 ParameterCount expected(formal_parameter_count);
3539 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3544 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3545 ASSERT(ToRegister(instr->result()).is(rax));
3547 LPointerMap* pointers = instr->pointer_map();
3548 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3550 if (instr->target()->IsConstantOperand()) {
3551 LConstantOperand* target = LConstantOperand::cast(instr->target());
3552 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3553 generator.BeforeCall(__ CallSize(code));
3554 __ call(code, RelocInfo::CODE_TARGET);
3555 } else {
3556 ASSERT(instr->target()->IsRegister());
3557 Register target = ToRegister(instr->target());
3558 generator.BeforeCall(__ CallSize(target));
3559 __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3560 __ call(target);
3561 }
3562 generator.AfterCall();
3566 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3567 ASSERT(ToRegister(instr->function()).is(rdi));
3568 ASSERT(ToRegister(instr->result()).is(rax));
3570 if (instr->hydrogen()->pass_argument_count()) {
3571 __ Set(rax, instr->arity());
3572 }
3574 // Change context.
3575 __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3577 LPointerMap* pointers = instr->pointer_map();
3578 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3580 bool is_self_call = false;
3581 if (instr->hydrogen()->function()->IsConstant()) {
3582 Handle<JSFunction> jsfun = Handle<JSFunction>::null();
3583 HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3584 jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
3585 is_self_call = jsfun.is_identical_to(info()->closure());
3586 }
3588 if (is_self_call) {
3589 __ CallSelf();
3590 } else {
3591 Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
3592 generator.BeforeCall(__ CallSize(target));
3593 __ call(target);
3594 }
3595 generator.AfterCall();
3599 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3600 Register input_reg = ToRegister(instr->value());
3601 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3602 Heap::kHeapNumberMapRootIndex);
3603 DeoptimizeIf(not_equal, instr->environment());
3605 Label slow, allocated, done;
3606 Register tmp = input_reg.is(rax) ? rcx : rax;
3607 Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
3609 // Preserve the value of all registers.
3610 PushSafepointRegistersScope scope(this);
3612 __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3613 // Check the sign of the argument. If the argument is positive, just
3614 // return it. We do not need to patch the stack since |input| and
3615 // |result| are the same register and |input| will be restored
3616 // unchanged by popping safepoint registers.
3617 __ testl(tmp, Immediate(HeapNumber::kSignMask));
3618 __ j(zero, &done);
3620 __ AllocateHeapNumber(tmp, tmp2, &slow);
3621 __ jmp(&allocated, Label::kNear);
3623 // Slow case: Call the runtime system to do the number allocation.
3624 __ bind(&slow);
3625 CallRuntimeFromDeferred(
3626 Runtime::kHiddenAllocateHeapNumber, 0, instr, instr->context());
3627 // Set the pointer to the new heap number in tmp.
3628 if (!tmp.is(rax)) __ movp(tmp, rax);
3629 // Restore input_reg after call to runtime.
3630 __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3632 __ bind(&allocated);
3633 __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
3634 __ shlq(tmp2, Immediate(1));
3635 __ shrq(tmp2, Immediate(1));
3636 __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
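// For reference: the shlq/shrq pair above clears only the sign bit of the
// raw double bits, computing the absolute value without any FPU work, e.g.
// -1.5 (0xBFF8000000000000) becomes 1.5 (0x3FF8000000000000).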
3637 __ StoreToSafepointRegisterSlot(input_reg, tmp);
3639 __ bind(&done);
3640 }
3643 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3644 Register input_reg = ToRegister(instr->value());
3645 __ testl(input_reg, input_reg);
3646 Label is_positive;
3647 __ j(not_sign, &is_positive, Label::kNear);
3648 __ negl(input_reg); // Sets flags.
3649 DeoptimizeIf(negative, instr->environment());
3650 __ bind(&is_positive);
3654 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
3655 Register input_reg = ToRegister(instr->value());
3656 __ testp(input_reg, input_reg);
3657 Label is_positive;
3658 __ j(not_sign, &is_positive, Label::kNear);
3659 __ negp(input_reg); // Sets flags.
3660 DeoptimizeIf(negative, instr->environment());
3661 __ bind(&is_positive);
3665 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3666 // Class for deferred case.
3667 class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
3668 public:
3669 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3670 : LDeferredCode(codegen), instr_(instr) { }
3671 virtual void Generate() V8_OVERRIDE {
3672 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3674 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3675 private:
3676 LMathAbs* instr_;
3677 };
3679 ASSERT(instr->value()->Equals(instr->result()));
3680 Representation r = instr->hydrogen()->value()->representation();
3682 if (r.IsDouble()) {
3683 XMMRegister scratch = double_scratch0();
3684 XMMRegister input_reg = ToDoubleRegister(instr->value());
3685 __ xorps(scratch, scratch);
3686 __ subsd(scratch, input_reg);
3687 __ andps(input_reg, scratch);
3688 } else if (r.IsInteger32()) {
3689 EmitIntegerMathAbs(instr);
3690 } else if (r.IsSmi()) {
3691 EmitSmiMathAbs(instr);
3692 } else { // Tagged case.
3693 DeferredMathAbsTaggedHeapNumber* deferred =
3694 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3695 Register input_reg = ToRegister(instr->value());
3697 __ JumpIfNotSmi(input_reg, deferred->entry());
3698 EmitSmiMathAbs(instr);
3699 __ bind(deferred->exit());
3704 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3705 XMMRegister xmm_scratch = double_scratch0();
3706 Register output_reg = ToRegister(instr->result());
3707 XMMRegister input_reg = ToDoubleRegister(instr->value());
3709 if (CpuFeatures::IsSupported(SSE4_1)) {
3710 CpuFeatureScope scope(masm(), SSE4_1);
3711 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3712 // Deoptimize if minus zero.
3713 __ movq(output_reg, input_reg);
3714 __ subq(output_reg, Immediate(1));
3715 DeoptimizeIf(overflow, instr->environment());
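// For reference: movq copies the raw double bits into a GP register. Only
// -0.0 has bit pattern 0x8000000000000000 == INT64_MIN, and x - 1 overflows
// exactly for x == INT64_MIN, so this deoptimizes precisely on minus zero.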
3716 }
3717 __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
3718 __ cvttsd2si(output_reg, xmm_scratch);
3719 __ cmpl(output_reg, Immediate(0x1));
3720 DeoptimizeIf(overflow, instr->environment());
3721 } else {
3722 Label negative_sign, done;
3723 // Deoptimize on unordered.
3724 __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3725 __ ucomisd(input_reg, xmm_scratch);
3726 DeoptimizeIf(parity_even, instr->environment());
3727 __ j(below, &negative_sign, Label::kNear);
3729 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3730 // Check for negative zero.
3731 Label positive_sign;
3732 __ j(above, &positive_sign, Label::kNear);
3733 __ movmskpd(output_reg, input_reg);
3734 __ testq(output_reg, Immediate(1));
3735 DeoptimizeIf(not_zero, instr->environment());
3736 __ Set(output_reg, 0);
3737 __ jmp(&done, Label::kNear);
3738 __ bind(&positive_sign);
3739 }
3741 // Use truncating instruction (OK because input is positive).
3742 __ cvttsd2si(output_reg, input_reg);
3743 // Overflow is signalled with minint.
3744 __ cmpl(output_reg, Immediate(0x1));
3745 DeoptimizeIf(overflow, instr->environment());
3746 __ jmp(&done, Label::kNear);
3748 // Non-zero negative reaches here.
3749 __ bind(&negative_sign);
3750 // Truncate, then compare and compensate.
3751 __ cvttsd2si(output_reg, input_reg);
3752 __ Cvtlsi2sd(xmm_scratch, output_reg);
3753 __ ucomisd(input_reg, xmm_scratch);
3754 __ j(equal, &done, Label::kNear);
3755 __ subl(output_reg, Immediate(1));
3756 DeoptimizeIf(overflow, instr->environment());
3758 __ bind(&done);
3759 }
3760 }
3763 void LCodeGen::DoMathRound(LMathRound* instr) {
3764 const XMMRegister xmm_scratch = double_scratch0();
3765 Register output_reg = ToRegister(instr->result());
3766 XMMRegister input_reg = ToDoubleRegister(instr->value());
3767 XMMRegister input_temp = ToDoubleRegister(instr->temp());
3768 static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
3769 static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
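// For reference: 0x3FE0000000000000 is the IEEE-754 encoding of 0.5
// (sign 0, biased exponent 0x3FE, empty mantissa, i.e. 1.0 * 2^-1), and
// 0xBFE0000000000000 is the same magnitude with the sign bit set, -0.5.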
3771 Label done, round_to_zero, below_one_half;
3772 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3773 __ movq(kScratchRegister, one_half);
3774 __ movq(xmm_scratch, kScratchRegister);
3775 __ ucomisd(xmm_scratch, input_reg);
3776 __ j(above, &below_one_half, Label::kNear);
3778 // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
3779 __ addsd(xmm_scratch, input_reg);
3780 __ cvttsd2si(output_reg, xmm_scratch);
3781 // Overflow is signalled with minint.
3782 __ cmpl(output_reg, Immediate(0x1));
3783 __ RecordComment("D2I conversion overflow");
3784 DeoptimizeIf(overflow, instr->environment());
3785 __ jmp(&done, dist);
3787 __ bind(&below_one_half);
3788 __ movq(kScratchRegister, minus_one_half);
3789 __ movq(xmm_scratch, kScratchRegister);
3790 __ ucomisd(xmm_scratch, input_reg);
3791 __ j(below_equal, &round_to_zero, Label::kNear);
3793 // CVTTSD2SI rounds towards zero, so we use ceil(x - (-0.5)) and then
3794 // compare and compensate.
3795 __ movq(input_temp, input_reg); // Do not alter input_reg.
3796 __ subsd(input_temp, xmm_scratch);
3797 __ cvttsd2si(output_reg, input_temp);
3798 // Catch minint due to overflow, and to prevent overflow when compensating.
3799 __ cmpl(output_reg, Immediate(0x1));
3800 __ RecordComment("D2I conversion overflow");
3801 DeoptimizeIf(overflow, instr->environment());
3803 __ Cvtlsi2sd(xmm_scratch, output_reg);
3804 __ ucomisd(xmm_scratch, input_temp);
3805 __ j(equal, &done, dist);
3806 __ subl(output_reg, Immediate(1));
3807 // No overflow because we already ruled out minint.
3808 __ jmp(&done, dist);
3810 __ bind(&round_to_zero);
3811 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
3812 // we can ignore the difference between a result of -0 and +0.
3813 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3814 __ movq(output_reg, input_reg);
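// The raw bits of the double are tested: bit 63 (the sign) is set for -0.0
// and for every negative input in [-0.5, 0) that reaches this path.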
3815 __ testq(output_reg, output_reg);
3816 __ RecordComment("Minus zero");
3817 DeoptimizeIf(negative, instr->environment());
3818 }
3819 __ Set(output_reg, 0);
3820 __ bind(&done);
3821 }
3824 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3825 XMMRegister input_reg = ToDoubleRegister(instr->value());
3826 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3827 __ sqrtsd(input_reg, input_reg);
3828 }
3831 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3832 XMMRegister xmm_scratch = double_scratch0();
3833 XMMRegister input_reg = ToDoubleRegister(instr->value());
3834 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3835 Label done, sqrt;
3836 // Note that according to ECMA-262 15.8.2.13:
3837 // Math.pow(-Infinity, 0.5) == Infinity
3838 // Math.sqrt(-Infinity) == NaN
3840 // Check base for -Infinity. According to IEEE-754, double-precision
3841 // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
3842 __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
3843 __ movq(xmm_scratch, kScratchRegister);
3844 __ ucomisd(xmm_scratch, input_reg);
3845 // Comparing -Infinity with NaN results in "unordered", which sets the
3846 // zero flag as if both were equal. However, it also sets the carry flag.
3847 __ j(not_equal, &sqrt, Label::kNear);
3848 __ j(carry, &sqrt, Label::kNear);
3849 // If input is -Infinity, return Infinity.
3850 __ xorps(input_reg, input_reg);
3851 __ subsd(input_reg, xmm_scratch);
3852 __ jmp(&done, Label::kNear);
3854 __ bind(&sqrt);
3856 __ xorps(xmm_scratch, xmm_scratch);
3857 __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
3858 __ sqrtsd(input_reg, input_reg);
3859 __ bind(&done);
3860 }
3863 void LCodeGen::DoNullarySIMDOperation(LNullarySIMDOperation* instr) {
3864 switch (instr->op()) {
3865 case kFloat32x4Zero: {
3866 XMMRegister result_reg = ToFloat32x4Register(instr->result());
3867 __ xorps(result_reg, result_reg);
3868 return;
3869 }
3870 case kFloat64x2Zero: {
3871 XMMRegister result_reg = ToFloat64x2Register(instr->result());
3872 __ xorpd(result_reg, result_reg);
3873 return;
3874 }
3875 case kInt32x4Zero: {
3876 XMMRegister result_reg = ToInt32x4Register(instr->result());
3877 __ xorps(result_reg, result_reg);
3878 return;
3879 }
3880 default:
3881 UNREACHABLE();
3882 }
3883 }
3887 void LCodeGen::DoUnarySIMDOperation(LUnarySIMDOperation* instr) {
3888 uint8_t select = 0;
3889 switch (instr->op()) {
3890 case kSIMD128Change: {
3891 Comment(";;; deoptimize: cannot perform representation change"
3892 " for float32x4 or int32x4");
3893 DeoptimizeIf(no_condition, instr->environment());
3894 return;
3895 }
3896 case kFloat32x4Abs:
3897 case kFloat32x4Neg:
3898 case kFloat32x4Reciprocal:
3899 case kFloat32x4ReciprocalSqrt:
3900 case kFloat32x4Sqrt: {
3901 ASSERT(instr->value()->Equals(instr->result()));
3902 ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
3903 XMMRegister input_reg = ToFloat32x4Register(instr->value());
3904 switch (instr->op()) {
3905 case kFloat32x4Abs:
3906 __ absps(input_reg);
3907 break;
3908 case kFloat32x4Neg:
3909 __ negateps(input_reg);
3910 break;
3911 case kFloat32x4Reciprocal:
3912 __ rcpps(input_reg, input_reg);
3913 break;
3914 case kFloat32x4ReciprocalSqrt:
3915 __ rsqrtps(input_reg, input_reg);
3916 break;
3917 case kFloat32x4Sqrt:
3918 __ sqrtps(input_reg, input_reg);
3919 break;
3920 default:
3921 UNREACHABLE();
3922 }
3923 return;
3924 }
3926 case kFloat64x2Abs:
3927 case kFloat64x2Neg:
3928 case kFloat64x2Sqrt: {
3929 ASSERT(instr->value()->Equals(instr->result()));
3930 ASSERT(instr->hydrogen()->value()->representation().IsFloat64x2());
3931 XMMRegister input_reg = ToFloat64x2Register(instr->value());
3932 switch (instr->op()) {
3933 case kFloat64x2Abs:
3934 __ abspd(input_reg);
3935 break;
3936 case kFloat64x2Neg:
3937 __ negatepd(input_reg);
3938 break;
3939 case kFloat64x2Sqrt:
3940 __ sqrtpd(input_reg, input_reg);
3941 break;
3942 default:
3943 UNREACHABLE();
3944 }
3945 return;
3946 }
3948 case kInt32x4Not:
3949 case kInt32x4Neg: {
3950 ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
3951 XMMRegister input_reg = ToInt32x4Register(instr->value());
3952 switch (instr->op()) {
3953 case kInt32x4Not:
3954 __ notps(input_reg);
3955 break;
3956 case kInt32x4Neg:
3957 __ pnegd(input_reg);
3958 break;
3959 default:
3960 UNREACHABLE();
3961 }
3962 return;
3963 }
3965 case kFloat32x4BitsToInt32x4:
3966 case kFloat32x4ToInt32x4: {
3967 ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
3968 XMMRegister input_reg = ToFloat32x4Register(instr->value());
3969 XMMRegister result_reg = ToInt32x4Register(instr->result());
3970 if (instr->op() == kFloat32x4BitsToInt32x4) {
3971 if (!result_reg.is(input_reg)) {
3972 __ movaps(result_reg, input_reg);
3973 }
3974 } else {
3975 ASSERT(instr->op() == kFloat32x4ToInt32x4);
3976 __ cvtps2dq(result_reg, input_reg);
3977 }
3978 return;
3979 }
3980 case kInt32x4BitsToFloat32x4:
3981 case kInt32x4ToFloat32x4: {
3982 ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
3983 XMMRegister input_reg = ToInt32x4Register(instr->value());
3984 XMMRegister result_reg = ToFloat32x4Register(instr->result());
3985 if (instr->op() == kInt32x4BitsToFloat32x4) {
3986 if (!result_reg.is(input_reg)) {
3987 __ movaps(result_reg, input_reg);
3988 }
3989 } else {
3990 ASSERT(instr->op() == kInt32x4ToFloat32x4);
3991 __ cvtdq2ps(result_reg, input_reg);
3992 }
3993 return;
3994 }
3995 case kFloat32x4Splat: {
3996 ASSERT(instr->hydrogen()->value()->representation().IsDouble());
3997 XMMRegister input_reg = ToDoubleRegister(instr->value());
3998 XMMRegister result_reg = ToFloat32x4Register(instr->result());
3999 XMMRegister xmm_scratch = xmm0;
4000 __ xorps(xmm_scratch, xmm_scratch);
4001 __ cvtsd2ss(xmm_scratch, input_reg);
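// shufps with selector 0x0 broadcasts lane 0 into all four lanes.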
4002 __ shufps(xmm_scratch, xmm_scratch, 0x0);
4003 __ movaps(result_reg, xmm_scratch);
4004 return;
4005 }
4006 case kInt32x4Splat: {
4007 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4008 Register input_reg = ToRegister(instr->value());
4009 XMMRegister result_reg = ToInt32x4Register(instr->result());
4010 __ movd(result_reg, input_reg);
4011 __ shufps(result_reg, result_reg, 0x0);
4012 return;
4013 }
4014 case kInt32x4GetSignMask: {
4015 ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
4016 XMMRegister input_reg = ToInt32x4Register(instr->value());
4017 Register result = ToRegister(instr->result());
4018 __ movmskps(result, input_reg);
4019 return;
4020 }
4021 case kFloat32x4GetSignMask: {
4022 ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
4023 XMMRegister input_reg = ToFloat32x4Register(instr->value());
4024 Register result = ToRegister(instr->result());
4025 __ movmskps(result, input_reg);
4026 return;
4027 }
4028 case kFloat32x4GetW:
4029 select++;
4030 case kFloat32x4GetZ:
4031 select++;
4032 case kFloat32x4GetY:
4033 select++;
4034 case kFloat32x4GetX: {
4035 ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
4036 XMMRegister input_reg = ToFloat32x4Register(instr->value());
4037 XMMRegister result = ToDoubleRegister(instr->result());
4038 XMMRegister xmm_scratch = result.is(input_reg) ? xmm0 : result;
4040 if (select == 0x0) {
4041 __ xorps(xmm_scratch, xmm_scratch);
4042 __ cvtss2sd(xmm_scratch, input_reg);
4043 if (!xmm_scratch.is(result)) {
4044 __ movaps(result, xmm_scratch);
4045 }
4046 } else {
4047 __ pshufd(xmm_scratch, input_reg, select);
4048 if (!xmm_scratch.is(result)) {
4049 __ xorps(result, result);
4050 }
4051 __ cvtss2sd(result, xmm_scratch);
4052 }
4053 return;
4054 }
4055 case kFloat64x2GetSignMask: {
4056 ASSERT(instr->hydrogen()->value()->representation().IsFloat64x2());
4057 XMMRegister input_reg = ToFloat64x2Register(instr->value());
4058 Register result = ToRegister(instr->result());
4059 __ movmskpd(result, input_reg);
4060 return;
4061 }
4062 case kFloat64x2GetX: {
4063 ASSERT(instr->hydrogen()->value()->representation().IsFloat64x2());
4064 XMMRegister input_reg = ToFloat64x2Register(instr->value());
4065 XMMRegister result = ToDoubleRegister(instr->result());
4067 if (!input_reg.is(result)) {
4068 __ movaps(result, input_reg);
4069 }
4070 return;
4071 }
4072 case kFloat64x2GetY: {
4073 ASSERT(instr->hydrogen()->value()->representation().IsFloat64x2());
4074 XMMRegister input_reg = ToFloat64x2Register(instr->value());
4075 XMMRegister result = ToDoubleRegister(instr->result());
4077 if (!input_reg.is(result)) {
4078 __ movaps(result, input_reg);
4079 }
4080 __ shufpd(result, input_reg, 0x1);
4081 return;
4082 }
4087 case kInt32x4GetFlagX:
4088 case kInt32x4GetFlagY:
4089 case kInt32x4GetFlagZ:
4090 case kInt32x4GetFlagW: {
4091 ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
4093 switch (instr->op()) {
4094 case kInt32x4GetFlagX:
4095 select = 0x0;
4096 break;
4098 case kInt32x4GetFlagY:
4099 select = 0x1;
4100 break;
4103 case kInt32x4GetFlagZ:
4104 select = 0x2;
4105 break;
4108 case kInt32x4GetFlagW:
4109 select = 0x3;
4110 break;
4111 default:
4112 UNREACHABLE();
4113 }
4117 XMMRegister input_reg = ToInt32x4Register(instr->value());
4118 Register result = ToRegister(instr->result());
4119 if (select == 0x0) {
4120 __ movd(result, input_reg);
4121 } else {
4122 if (CpuFeatures::IsSupported(SSE4_1)) {
4123 CpuFeatureScope scope(masm(), SSE4_1);
4124 __ extractps(result, input_reg, select);
4125 } else {
4126 XMMRegister xmm_scratch = xmm0;
4127 __ pshufd(xmm_scratch, input_reg, select);
4128 __ movd(result, xmm_scratch);
4129 }
4130 }
4133 Label false_value, done;
4134 __ testl(result, result);
4135 __ j(zero, &false_value, Label::kNear);
4136 __ LoadRoot(result, Heap::kTrueValueRootIndex);
4137 __ jmp(&done, Label::kNear);
4138 __ bind(&false_value);
4139 __ LoadRoot(result, Heap::kFalseValueRootIndex);
4140 __ bind(&done);
4141 return;
4142 }
4143 default:
4144 UNREACHABLE();
4145 }
4146 }
4151 void LCodeGen::DoBinarySIMDOperation(LBinarySIMDOperation* instr) {
4152 uint8_t imm8 = 0; // Lane selector for the "with" operations.
4153 switch (instr->op()) {
4154 case kFloat32x4Add:
4155 case kFloat32x4Sub:
4156 case kFloat32x4Mul:
4157 case kFloat32x4Div:
4158 case kFloat32x4Min:
4159 case kFloat32x4Max: {
4160 ASSERT(instr->left()->Equals(instr->result()));
4161 ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
4162 ASSERT(instr->hydrogen()->right()->representation().IsFloat32x4());
4163 XMMRegister left_reg = ToFloat32x4Register(instr->left());
4164 XMMRegister right_reg = ToFloat32x4Register(instr->right());
4165 switch (instr->op()) {
4166 case kFloat32x4Add:
4167 __ addps(left_reg, right_reg);
4168 break;
4169 case kFloat32x4Sub:
4170 __ subps(left_reg, right_reg);
4171 break;
4172 case kFloat32x4Mul:
4173 __ mulps(left_reg, right_reg);
4174 break;
4175 case kFloat32x4Div:
4176 __ divps(left_reg, right_reg);
4177 break;
4178 case kFloat32x4Min:
4179 __ minps(left_reg, right_reg);
4180 break;
4181 case kFloat32x4Max:
4182 __ maxps(left_reg, right_reg);
4183 break;
4184 default:
4185 UNREACHABLE();
4186 }
4187 return;
4188 }
4190 case kFloat32x4Scale: {
4191 ASSERT(instr->left()->Equals(instr->result()));
4192 ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
4193 ASSERT(instr->hydrogen()->right()->representation().IsDouble());
4194 XMMRegister left_reg = ToFloat32x4Register(instr->left());
4195 XMMRegister right_reg = ToDoubleRegister(instr->right());
4196 XMMRegister scratch_reg = xmm0;
4197 __ xorps(scratch_reg, scratch_reg);
4198 __ cvtsd2ss(scratch_reg, right_reg);
4199 __ shufps(scratch_reg, scratch_reg, 0x0);
4200 __ mulps(left_reg, scratch_reg);
4201 return;
4202 }
4203 case kFloat64x2Add:
4204 case kFloat64x2Sub:
4205 case kFloat64x2Mul:
4206 case kFloat64x2Div:
4207 case kFloat64x2Min:
4208 case kFloat64x2Max: {
4209 ASSERT(instr->left()->Equals(instr->result()));
4210 ASSERT(instr->hydrogen()->left()->representation().IsFloat64x2());
4211 ASSERT(instr->hydrogen()->right()->representation().IsFloat64x2());
4212 XMMRegister left_reg = ToFloat64x2Register(instr->left());
4213 XMMRegister right_reg = ToFloat64x2Register(instr->right());
4214 switch (instr->op()) {
4215 case kFloat64x2Add:
4216 __ addpd(left_reg, right_reg);
4217 break;
4218 case kFloat64x2Sub:
4219 __ subpd(left_reg, right_reg);
4220 break;
4221 case kFloat64x2Mul:
4222 __ mulpd(left_reg, right_reg);
4223 break;
4224 case kFloat64x2Div:
4225 __ divpd(left_reg, right_reg);
4226 break;
4227 case kFloat64x2Min:
4228 __ minpd(left_reg, right_reg);
4229 break;
4230 case kFloat64x2Max:
4231 __ maxpd(left_reg, right_reg);
4232 break;
4233 default:
4234 UNREACHABLE();
4235 }
4236 return;
4237 }
4239 case kFloat64x2Scale: {
4240 ASSERT(instr->left()->Equals(instr->result()));
4241 ASSERT(instr->hydrogen()->left()->representation().IsFloat64x2());
4242 ASSERT(instr->hydrogen()->right()->representation().IsDouble());
4243 XMMRegister left_reg = ToFloat64x2Register(instr->left());
4244 XMMRegister right_reg = ToDoubleRegister(instr->right());
4245 __ shufpd(right_reg, right_reg, 0x0);
4246 __ mulpd(left_reg, right_reg);
4247 return;
4248 }
4249 case kFloat32x4Shuffle: {
4250 ASSERT(instr->left()->Equals(instr->result()));
4251 ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
4252 if (instr->hydrogen()->right()->IsConstant() &&
4253 HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
4254 int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
4255 uint8_t select = static_cast<uint8_t>(value & 0xFF);
4256 XMMRegister left_reg = ToFloat32x4Register(instr->left());
4257 __ shufps(left_reg, left_reg, select);
4258 return;
4259 } else {
4260 Comment(";;; deoptimize: non-constant selector for shuffle");
4261 DeoptimizeIf(no_condition, instr->environment());
4262 }
4263 return;
4264 }
4265 case kInt32x4Shuffle: {
4266 ASSERT(instr->left()->Equals(instr->result()));
4267 ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
4268 if (instr->hydrogen()->right()->IsConstant() &&
4269 HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
4270 int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
4271 uint8_t select = static_cast<uint8_t>(value & 0xFF);
4272 XMMRegister left_reg = ToInt32x4Register(instr->left());
4273 __ pshufd(left_reg, left_reg, select);
4274 return;
4275 } else {
4276 Comment(";;; deoptimize: non-constant selector for shuffle");
4277 DeoptimizeIf(no_condition, instr->environment());
4278 }
4279 return;
4280 }
4281 case kInt32x4ShiftLeft:
4282 case kInt32x4ShiftRight:
4283 case kInt32x4ShiftRightArithmetic: {
4284 ASSERT(instr->left()->Equals(instr->result()));
4285 ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
4286 if (instr->hydrogen()->right()->IsConstant() &&
4287 HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
4288 int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
4289 uint8_t shift = static_cast<uint8_t>(value & 0xFF);
4290 XMMRegister left_reg = ToInt32x4Register(instr->left());
4291 switch (instr->op()) {
4292 case kInt32x4ShiftLeft:
4293 __ pslld(left_reg, shift);
4294 break;
4295 case kInt32x4ShiftRight:
4296 __ psrld(left_reg, shift);
4297 break;
4298 case kInt32x4ShiftRightArithmetic:
4299 __ psrad(left_reg, shift);
4300 break;
4301 default:
4302 UNREACHABLE();
4303 }
4304 } else {
4306 XMMRegister left_reg = ToInt32x4Register(instr->left());
4307 Register shift = ToRegister(instr->right());
4308 XMMRegister xmm_scratch = double_scratch0();
4309 __ movd(xmm_scratch, shift);
4310 switch (instr->op()) {
4311 case kInt32x4ShiftLeft:
4312 __ pslld(left_reg, xmm_scratch);
4313 break;
4314 case kInt32x4ShiftRight:
4315 __ psrld(left_reg, xmm_scratch);
4316 break;
4317 case kInt32x4ShiftRightArithmetic:
4318 __ psrad(left_reg, xmm_scratch);
4319 break;
4320 default:
4321 UNREACHABLE();
4322 }
4323 }
4324 return;
4325 }
4326 case kFloat32x4LessThan:
4327 case kFloat32x4LessThanOrEqual:
4328 case kFloat32x4Equal:
4329 case kFloat32x4NotEqual:
4330 case kFloat32x4GreaterThanOrEqual:
4331 case kFloat32x4GreaterThan: {
4332 ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
4333 ASSERT(instr->hydrogen()->right()->representation().IsFloat32x4());
4334 XMMRegister left_reg = ToFloat32x4Register(instr->left());
4335 XMMRegister right_reg = ToFloat32x4Register(instr->right());
4336 XMMRegister result_reg = ToInt32x4Register(instr->result());
4337 switch (instr->op()) {
4338 case kFloat32x4LessThan:
4339 if (result_reg.is(left_reg)) {
4340 __ cmpltps(result_reg, right_reg);
4341 } else if (result_reg.is(right_reg)) {
4342 __ cmpnleps(result_reg, left_reg); // !(right <= left), i.e. left < right.
4343 } else {
4344 __ movaps(result_reg, left_reg);
4345 __ cmpltps(result_reg, right_reg);
4346 }
4347 break;
4348 case kFloat32x4LessThanOrEqual:
4349 if (result_reg.is(left_reg)) {
4350 __ cmpleps(result_reg, right_reg);
4351 } else if (result_reg.is(right_reg)) {
4352 __ cmpnltps(result_reg, left_reg); // !(right < left), i.e. left <= right.
4353 } else {
4354 __ movaps(result_reg, left_reg);
4355 __ cmpleps(result_reg, right_reg);
4356 }
4357 break;
4358 case kFloat32x4Equal:
4359 if (result_reg.is(left_reg)) {
4360 __ cmpeqps(result_reg, right_reg);
4361 } else if (result_reg.is(right_reg)) {
4362 __ cmpeqps(result_reg, left_reg);
4363 } else {
4364 __ movaps(result_reg, left_reg);
4365 __ cmpeqps(result_reg, right_reg);
4366 }
4367 break;
4368 case kFloat32x4NotEqual:
4369 if (result_reg.is(left_reg)) {
4370 __ cmpneqps(result_reg, right_reg);
4371 } else if (result_reg.is(right_reg)) {
4372 __ cmpneqps(result_reg, left_reg);
4373 } else {
4374 __ movaps(result_reg, left_reg);
4375 __ cmpneqps(result_reg, right_reg);
4376 }
4377 break;
4378 case kFloat32x4GreaterThanOrEqual:
4379 if (result_reg.is(left_reg)) {
4380 __ cmpnltps(result_reg, right_reg);
4381 } else if (result_reg.is(right_reg)) {
4382 __ cmpleps(result_reg, left_reg); // right <= left, i.e. left >= right.
4383 } else {
4384 __ movaps(result_reg, left_reg);
4385 __ cmpnltps(result_reg, right_reg);
4386 }
4387 break;
4388 case kFloat32x4GreaterThan:
4389 if (result_reg.is(left_reg)) {
4390 __ cmpnleps(result_reg, right_reg);
4391 } else if (result_reg.is(right_reg)) {
4392 __ cmpltps(result_reg, left_reg); // right < left, i.e. left > right.
4393 } else {
4394 __ movaps(result_reg, left_reg);
4395 __ cmpnleps(result_reg, right_reg);
4396 }
4397 break;
4398 default:
4399 UNREACHABLE();
4400 }
4401 return;
4402 }
4404 case kInt32x4And:
4405 case kInt32x4Or:
4406 case kInt32x4Xor:
4407 case kInt32x4Add:
4408 case kInt32x4Sub:
4409 case kInt32x4Mul:
4410 case kInt32x4GreaterThan:
4411 case kInt32x4Equal:
4412 case kInt32x4LessThan: {
4413 ASSERT(instr->left()->Equals(instr->result()));
4414 ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
4415 ASSERT(instr->hydrogen()->right()->representation().IsInt32x4());
4416 XMMRegister left_reg = ToInt32x4Register(instr->left());
4417 XMMRegister right_reg = ToInt32x4Register(instr->right());
4418 switch (instr->op()) {
4419 case kInt32x4And:
4420 __ andps(left_reg, right_reg);
4421 break;
4422 case kInt32x4Or:
4423 __ orps(left_reg, right_reg);
4424 break;
4425 case kInt32x4Xor:
4426 __ xorps(left_reg, right_reg);
4427 break;
4428 case kInt32x4Add:
4429 __ paddd(left_reg, right_reg);
4430 break;
4431 case kInt32x4Sub:
4432 __ psubd(left_reg, right_reg);
4433 break;
4434 case kInt32x4Mul:
4435 if (CpuFeatures::IsSupported(SSE4_1)) {
4436 CpuFeatureScope scope(masm(), SSE4_1);
4437 __ pmulld(left_reg, right_reg);
4438 } else {
4439 // The algorithm is from http://stackoverflow.com/questions/10500766/sse-multiplication-of-4-32-bit-integers
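// pmuludq multiplies dwords 0 and 2 into two 64-bit products; shifting both
// operands right by 4 bytes exposes dwords 1 and 3 for a second pmuludq.
// pshufd with selector 8 (0b00001000) packs the low halves of the products
// into lanes 0 and 1, and the final interleave restores lane order.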
4440 XMMRegister xmm_scratch = xmm0;
4441 __ movaps(xmm_scratch, left_reg);
4442 __ pmuludq(left_reg, right_reg);
4443 __ psrldq(xmm_scratch, 4);
4444 __ psrldq(right_reg, 4);
4445 __ pmuludq(xmm_scratch, right_reg);
4446 __ pshufd(left_reg, left_reg, 8);
4447 __ pshufd(xmm_scratch, xmm_scratch, 8);
4448 __ punpackldq(left_reg, xmm_scratch);
4449 }
4450 break;
4451 case kInt32x4GreaterThan:
4452 __ pcmpgtd(left_reg, right_reg);
4453 break;
4454 case kInt32x4Equal:
4455 __ pcmpeqd(left_reg, right_reg);
4456 break;
4457 case kInt32x4LessThan: {
4458 XMMRegister xmm_scratch = xmm0;
4459 __ movaps(xmm_scratch, right_reg);
4460 __ pcmpgtd(xmm_scratch, left_reg);
4461 __ movaps(left_reg, xmm_scratch);
4462 break;
4463 }
4464 default:
4465 UNREACHABLE();
4466 }
4467 return;
4468 }
4470 case kFloat32x4WithW:
4471 imm8++;
4472 case kFloat32x4WithZ:
4473 imm8++;
4474 case kFloat32x4WithY:
4475 imm8++;
4476 case kFloat32x4WithX: {
4477 ASSERT(instr->left()->Equals(instr->result()));
4478 ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
4479 ASSERT(instr->hydrogen()->right()->representation().IsDouble());
4480 XMMRegister left_reg = ToFloat32x4Register(instr->left());
4481 XMMRegister right_reg = ToDoubleRegister(instr->right());
4482 XMMRegister xmm_scratch = xmm0;
4483 __ xorps(xmm_scratch, xmm_scratch);
4484 __ cvtsd2ss(xmm_scratch, right_reg);
4485 if (CpuFeatures::IsSupported(SSE4_1)) {
4487 CpuFeatureScope scope(masm(), SSE4_1);
4488 __ insertps(left_reg, xmm_scratch, imm8);
4489 } else {
4490 __ subq(rsp, Immediate(kFloat32x4Size));
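// Without SSE4.1 the lane insert is done through memory: spill the vector,
// overwrite the selected lane in the stack slot, and reload the whole vector.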
4491 __ movups(Operand(rsp, 0), left_reg);
4492 __ movss(Operand(rsp, imm8 * kFloatSize), xmm_scratch);
4493 __ movups(left_reg, Operand(rsp, 0));
4494 __ addq(rsp, Immediate(kFloat32x4Size));
4495 }
4496 return;
4497 }
4498 case kFloat64x2WithX: {
4499 ASSERT(instr->left()->Equals(instr->result()));
4500 ASSERT(instr->hydrogen()->left()->representation().IsFloat64x2());
4501 ASSERT(instr->hydrogen()->right()->representation().IsDouble());
4502 XMMRegister left_reg = ToFloat64x2Register(instr->left());
4503 XMMRegister right_reg = ToDoubleRegister(instr->right());
4504 __ subq(rsp, Immediate(kFloat64x2Size));
4505 __ movups(Operand(rsp, 0), left_reg);
4506 __ movsd(Operand(rsp, 0 * kDoubleSize), right_reg);
4507 __ movups(left_reg, Operand(rsp, 0));
4508 __ addq(rsp, Immediate(kFloat64x2Size));
4509 return;
4510 }
4511 case kFloat64x2WithY: {
4512 ASSERT(instr->left()->Equals(instr->result()));
4513 ASSERT(instr->hydrogen()->left()->representation().IsFloat64x2());
4514 ASSERT(instr->hydrogen()->right()->representation().IsDouble());
4515 XMMRegister left_reg = ToFloat64x2Register(instr->left());
4516 XMMRegister right_reg = ToDoubleRegister(instr->right());
4517 __ subq(rsp, Immediate(kFloat64x2Size));
4518 __ movups(Operand(rsp, 0), left_reg);
4519 __ movsd(Operand(rsp, 1 * kDoubleSize), right_reg);
4520 __ movups(left_reg, Operand(rsp, 0));
4521 __ addq(rsp, Immediate(kFloat64x2Size));
4522 return;
4523 }
4524 case kFloat64x2Constructor: {
4525 ASSERT(instr->hydrogen()->left()->representation().IsDouble());
4526 ASSERT(instr->hydrogen()->right()->representation().IsDouble());
4527 XMMRegister left_reg = ToDoubleRegister(instr->left());
4528 XMMRegister right_reg = ToDoubleRegister(instr->right());
4529 XMMRegister result_reg = ToFloat64x2Register(instr->result());
4530 __ subq(rsp, Immediate(kFloat64x2Size));
4531 __ movsd(Operand(rsp, 0 * kDoubleSize), left_reg);
4532 __ movsd(Operand(rsp, 1 * kDoubleSize), right_reg);
4533 __ movups(result_reg, Operand(rsp, 0));
4534 __ addq(rsp, Immediate(kFloat64x2Size));
4535 return;
4536 }
4537 case kInt32x4WithW:
4538 imm8++;
4539 case kInt32x4WithZ:
4540 imm8++;
4541 case kInt32x4WithY:
4542 imm8++;
4543 case kInt32x4WithX: {
4544 ASSERT(instr->left()->Equals(instr->result()));
4545 ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
4546 ASSERT(instr->hydrogen()->right()->representation().IsInteger32());
4547 XMMRegister left_reg = ToInt32x4Register(instr->left());
4548 Register right_reg = ToRegister(instr->right());
4549 if (CpuFeatures::IsSupported(SSE4_1)) {
4550 CpuFeatureScope scope(masm(), SSE4_1);
4551 __ pinsrd(left_reg, right_reg, imm8);
4552 } else {
4553 __ subq(rsp, Immediate(kInt32x4Size));
4554 __ movdqu(Operand(rsp, 0), left_reg);
4555 __ movl(Operand(rsp, imm8 * kFloatSize), right_reg);
4556 __ movdqu(left_reg, Operand(rsp, 0));
4557 __ addq(rsp, Immediate(kInt32x4Size));
4558 }
4559 return;
4560 }
4561 case kInt32x4WithFlagW:
4562 imm8++;
4563 case kInt32x4WithFlagZ:
4564 imm8++;
4565 case kInt32x4WithFlagY:
4566 imm8++;
4567 case kInt32x4WithFlagX: {
4568 ASSERT(instr->left()->Equals(instr->result()));
4569 ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
4570 ASSERT(instr->hydrogen()->right()->representation().IsTagged());
4571 HType type = instr->hydrogen()->right()->type();
4572 XMMRegister left_reg = ToInt32x4Register(instr->left());
4573 Register right_reg = ToRegister(instr->right());
4574 Label load_false_value, done;
4575 if (type.IsBoolean()) {
4576 __ subq(rsp, Immediate(kInt32x4Size));
4577 __ movups(Operand(rsp, 0), left_reg);
4578 __ CompareRoot(right_reg, Heap::kTrueValueRootIndex);
4579 __ j(not_equal, &load_false_value, Label::kNear);
4580 } else {
4581 Comment(";;; deoptimize: other types for int32x4.withFlagX/Y/Z/W.");
4582 DeoptimizeIf(no_condition, instr->environment());
4583 }
4586 __ movl(Operand(rsp, imm8 * kFloatSize), Immediate(0xFFFFFFFF));
4587 __ jmp(&done, Label::kNear);
4588 __ bind(&load_false_value);
4589 __ movl(Operand(rsp, imm8 * kFloatSize), Immediate(0x0));
4590 __ bind(&done);
4591 __ movups(left_reg, Operand(rsp, 0));
4592 __ addq(rsp, Immediate(kInt32x4Size));
4593 return;
4594 }
4595 default:
4596 UNREACHABLE();
4597 }
4598 }
4602 void LCodeGen::DoTernarySIMDOperation(LTernarySIMDOperation* instr) {
4603 switch (instr->op()) {
4604 case kInt32x4Select: {
4605 ASSERT(instr->hydrogen()->first()->representation().IsInt32x4());
4606 ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
4607 ASSERT(instr->hydrogen()->third()->representation().IsFloat32x4());
4609 XMMRegister mask_reg = ToInt32x4Register(instr->first());
4610 XMMRegister left_reg = ToFloat32x4Register(instr->second());
4611 XMMRegister right_reg = ToFloat32x4Register(instr->third());
4612 XMMRegister result_reg = ToFloat32x4Register(instr->result());
4613 XMMRegister temp_reg = xmm0;
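// Bitwise select: result = (mask & trueValue) | (~mask & falseValue).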
4616 __ movaps(temp_reg, mask_reg);
4617 // temp_reg = ~temp_reg (invert the mask).
4618 __ notps(temp_reg);
4619 // temp_reg = temp_reg & falseValue.
4620 __ andps(temp_reg, right_reg);
4622 if (!result_reg.is(mask_reg)) {
4623 if (result_reg.is(left_reg)) {
4624 // result_reg = result_reg & trueValue.
4625 __ andps(result_reg, mask_reg);
4626 // out = result_reg | temp_reg.
4627 __ orps(result_reg, temp_reg);
4628 } else {
4629 __ movaps(result_reg, mask_reg);
4630 // result_reg = result_reg & trueValue.
4631 __ andps(result_reg, left_reg);
4632 // out = result_reg | temp_reg.
4633 __ orps(result_reg, temp_reg);
4634 }
4635 } else {
4636 // result_reg = result_reg & trueValue.
4637 __ andps(result_reg, left_reg);
4638 // out = result_reg | temp_reg.
4639 __ orps(result_reg, temp_reg);
4640 }
4641 return;
4642 }
4643 case kFloat32x4ShuffleMix: {
4644 ASSERT(instr->first()->Equals(instr->result()));
4645 ASSERT(instr->hydrogen()->first()->representation().IsFloat32x4());
4646 ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
4647 ASSERT(instr->hydrogen()->third()->representation().IsInteger32());
4648 if (instr->hydrogen()->third()->IsConstant() &&
4649 HConstant::cast(instr->hydrogen()->third())->HasInteger32Value()) {
4650 int32_t value = ToInteger32(LConstantOperand::cast(instr->third()));
4651 uint8_t select = static_cast<uint8_t>(value & 0xFF);
4652 XMMRegister first_reg = ToFloat32x4Register(instr->first());
4653 XMMRegister second_reg = ToFloat32x4Register(instr->second());
4654 __ shufps(first_reg, second_reg, select);
4655 return;
4656 } else {
4657 Comment(";;; deoptimize: non-constant selector for shuffle");
4658 DeoptimizeIf(no_condition, instr->environment());
4659 }
4660 return;
4661 }
4662 case kFloat32x4Clamp: {
4663 ASSERT(instr->first()->Equals(instr->result()));
4664 ASSERT(instr->hydrogen()->first()->representation().IsFloat32x4());
4665 ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
4666 ASSERT(instr->hydrogen()->third()->representation().IsFloat32x4());
4668 XMMRegister value_reg = ToFloat32x4Register(instr->first());
4669 XMMRegister lower_reg = ToFloat32x4Register(instr->second());
4670 XMMRegister upper_reg = ToFloat32x4Register(instr->third());
4671 __ minps(value_reg, upper_reg);
4672 __ maxps(value_reg, lower_reg);
4673 return;
4674 }
4675 case kFloat64x2Clamp: {
4676 ASSERT(instr->first()->Equals(instr->result()));
4677 ASSERT(instr->hydrogen()->first()->representation().IsFloat64x2());
4678 ASSERT(instr->hydrogen()->second()->representation().IsFloat64x2());
4679 ASSERT(instr->hydrogen()->third()->representation().IsFloat64x2());
4681 XMMRegister value_reg = ToFloat64x2Register(instr->first());
4682 XMMRegister lower_reg = ToFloat64x2Register(instr->second());
4683 XMMRegister upper_reg = ToFloat64x2Register(instr->third());
4684 __ minpd(value_reg, upper_reg);
4685 __ maxpd(value_reg, lower_reg);
4686 return;
4687 }
4688 default:
4689 UNREACHABLE();
4690 }
4691 }
4695 void LCodeGen::DoQuarternarySIMDOperation(LQuarternarySIMDOperation* instr) {
4696 switch (instr->op()) {
4697 case kFloat32x4Constructor: {
4698 ASSERT(instr->hydrogen()->x()->representation().IsDouble());
4699 ASSERT(instr->hydrogen()->y()->representation().IsDouble());
4700 ASSERT(instr->hydrogen()->z()->representation().IsDouble());
4701 ASSERT(instr->hydrogen()->w()->representation().IsDouble());
4702 XMMRegister x_reg = ToDoubleRegister(instr->x());
4703 XMMRegister y_reg = ToDoubleRegister(instr->y());
4704 XMMRegister z_reg = ToDoubleRegister(instr->z());
4705 XMMRegister w_reg = ToDoubleRegister(instr->w());
4706 XMMRegister result_reg = ToFloat32x4Register(instr->result());
4707 __ subq(rsp, Immediate(kFloat32x4Size));
4708 __ xorps(xmm0, xmm0);
4709 __ cvtsd2ss(xmm0, x_reg);
4710 __ movss(Operand(rsp, 0 * kFloatSize), xmm0);
4711 __ xorps(xmm0, xmm0);
4712 __ cvtsd2ss(xmm0, y_reg);
4713 __ movss(Operand(rsp, 1 * kFloatSize), xmm0);
4714 __ xorps(xmm0, xmm0);
4715 __ cvtsd2ss(xmm0, z_reg);
4716 __ movss(Operand(rsp, 2 * kFloatSize), xmm0);
4717 __ xorps(xmm0, xmm0);
4718 __ cvtsd2ss(xmm0, w_reg);
4719 __ movss(Operand(rsp, 3 * kFloatSize), xmm0);
4720 __ movups(result_reg, Operand(rsp, 0 * kFloatSize));
4721 __ addq(rsp, Immediate(kFloat32x4Size));
4722 return;
4723 }
4724 case kInt32x4Constructor: {
4725 ASSERT(instr->hydrogen()->x()->representation().IsInteger32());
4726 ASSERT(instr->hydrogen()->y()->representation().IsInteger32());
4727 ASSERT(instr->hydrogen()->z()->representation().IsInteger32());
4728 ASSERT(instr->hydrogen()->w()->representation().IsInteger32());
4729 Register x_reg = ToRegister(instr->x());
4730 Register y_reg = ToRegister(instr->y());
4731 Register z_reg = ToRegister(instr->z());
4732 Register w_reg = ToRegister(instr->w());
4733 XMMRegister result_reg = ToInt32x4Register(instr->result());
4734 __ subq(rsp, Immediate(kInt32x4Size));
4735 __ movl(Operand(rsp, 0 * kInt32Size), x_reg);
4736 __ movl(Operand(rsp, 1 * kInt32Size), y_reg);
4737 __ movl(Operand(rsp, 2 * kInt32Size), z_reg);
4738 __ movl(Operand(rsp, 3 * kInt32Size), w_reg);
4739 __ movups(result_reg, Operand(rsp, 0 * kInt32Size));
4740 __ addq(rsp, Immediate(kInt32x4Size));
4741 return;
4742 }
4743 case kInt32x4Bool: {
4744 ASSERT(instr->hydrogen()->x()->representation().IsTagged());
4745 ASSERT(instr->hydrogen()->y()->representation().IsTagged());
4746 ASSERT(instr->hydrogen()->z()->representation().IsTagged());
4747 ASSERT(instr->hydrogen()->w()->representation().IsTagged());
4748 HType x_type = instr->hydrogen()->x()->type();
4749 HType y_type = instr->hydrogen()->y()->type();
4750 HType z_type = instr->hydrogen()->z()->type();
4751 HType w_type = instr->hydrogen()->w()->type();
4752 if (!x_type.IsBoolean() || !y_type.IsBoolean() ||
4753 !z_type.IsBoolean() || !w_type.IsBoolean()) {
4754 Comment(";;; deoptimize: other types for int32x4.bool.");
4755 DeoptimizeIf(no_condition, instr->environment());
4756 }
4758 XMMRegister result_reg = ToInt32x4Register(instr->result());
4759 Register x_reg = ToRegister(instr->x());
4760 Register y_reg = ToRegister(instr->y());
4761 Register z_reg = ToRegister(instr->z());
4762 Register w_reg = ToRegister(instr->w());
4763 Label load_false_x, done_x, load_false_y, done_y,
4764 load_false_z, done_z, load_false_w, done_w;
4765 __ subq(rsp, Immediate(kInt32x4Size));
4767 __ CompareRoot(x_reg, Heap::kTrueValueRootIndex);
4768 __ j(not_equal, &load_false_x, Label::kNear);
4769 __ movl(Operand(rsp, 0 * kInt32Size), Immediate(-1));
4770 __ jmp(&done_x, Label::kNear);
4771 __ bind(&load_false_x);
4772 __ movl(Operand(rsp, 0 * kInt32Size), Immediate(0x0));
4773 __ bind(&done_x);
4775 __ CompareRoot(y_reg, Heap::kTrueValueRootIndex);
4776 __ j(not_equal, &load_false_y, Label::kNear);
4777 __ movl(Operand(rsp, 1 * kInt32Size), Immediate(-1));
4778 __ jmp(&done_y, Label::kNear);
4779 __ bind(&load_false_y);
4780 __ movl(Operand(rsp, 1 * kInt32Size), Immediate(0x0));
4781 __ bind(&done_y);
4783 __ CompareRoot(z_reg, Heap::kTrueValueRootIndex);
4784 __ j(not_equal, &load_false_z, Label::kNear);
4785 __ movl(Operand(rsp, 2 * kInt32Size), Immediate(-1));
4786 __ jmp(&done_z, Label::kNear);
4787 __ bind(&load_false_z);
4788 __ movl(Operand(rsp, 2 * kInt32Size), Immediate(0x0));
4789 __ bind(&done_z);
4791 __ CompareRoot(w_reg, Heap::kTrueValueRootIndex);
4792 __ j(not_equal, &load_false_w, Label::kNear);
4793 __ movl(Operand(rsp, 3 * kInt32Size), Immediate(-1));
4794 __ jmp(&done_w, Label::kNear);
4795 __ bind(&load_false_w);
4796 __ movl(Operand(rsp, 3 * kInt32Size), Immediate(0x0));
4797 __ bind(&done_w);
4799 __ movups(result_reg, Operand(rsp, 0));
4800 __ addq(rsp, Immediate(kInt32x4Size));
4801 return;
4802 }
4803 default:
4804 UNREACHABLE();
4805 }
4806 }
4810 void LCodeGen::DoPower(LPower* instr) {
4811 Representation exponent_type = instr->hydrogen()->right()->representation();
4812 // Having marked this as a call, we can use any registers.
4813 // Just make sure that the input/output registers are the expected ones.
4815 Register exponent = rdx;
4816 ASSERT(!instr->right()->IsRegister() ||
4817 ToRegister(instr->right()).is(exponent));
4818 ASSERT(!instr->right()->IsDoubleRegister() ||
4819 ToDoubleRegister(instr->right()).is(xmm1));
4820 ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
4821 ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
4823 if (exponent_type.IsSmi()) {
4824 MathPowStub stub(isolate(), MathPowStub::TAGGED);
4825 __ CallStub(&stub);
4826 } else if (exponent_type.IsTagged()) {
4827 Label no_deopt;
4828 __ JumpIfSmi(exponent, &no_deopt, Label::kNear);
4829 __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
4830 DeoptimizeIf(not_equal, instr->environment());
4831 __ bind(&no_deopt);
4832 MathPowStub stub(isolate(), MathPowStub::TAGGED);
4833 __ CallStub(&stub);
4834 } else if (exponent_type.IsInteger32()) {
4835 MathPowStub stub(isolate(), MathPowStub::INTEGER);
4836 __ CallStub(&stub);
4837 } else {
4838 ASSERT(exponent_type.IsDouble());
4839 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
4840 __ CallStub(&stub);
4841 }
4842 }
4845 void LCodeGen::DoMathExp(LMathExp* instr) {
4846 XMMRegister input = ToDoubleRegister(instr->value());
4847 XMMRegister result = ToDoubleRegister(instr->result());
4848 XMMRegister temp0 = double_scratch0();
4849 Register temp1 = ToRegister(instr->temp1());
4850 Register temp2 = ToRegister(instr->temp2());
4852 MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
4853 }
4856 void LCodeGen::DoMathLog(LMathLog* instr) {
4857 ASSERT(instr->value()->Equals(instr->result()));
4858 XMMRegister input_reg = ToDoubleRegister(instr->value());
4859 XMMRegister xmm_scratch = double_scratch0();
4860 Label positive, done, zero;
4861 __ xorps(xmm_scratch, xmm_scratch);
4862 __ ucomisd(input_reg, xmm_scratch);
4863 __ j(above, &positive, Label::kNear);
4864 __ j(not_carry, &zero, Label::kNear);
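// After comparing input with +0.0: "above" means input > 0; if not above and
// the carry flag is clear, input == 0 (log(0) is -Infinity); otherwise the
// input is negative or NaN, so fall through and load the canonical NaN.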
4865 ExternalReference nan =
4866 ExternalReference::address_of_canonical_non_hole_nan();
4867 Operand nan_operand = masm()->ExternalOperand(nan);
4868 __ movsd(input_reg, nan_operand);
4869 __ jmp(&done, Label::kNear);
4870 __ bind(&zero);
4871 ExternalReference ninf =
4872 ExternalReference::address_of_negative_infinity();
4873 Operand ninf_operand = masm()->ExternalOperand(ninf);
4874 __ movsd(input_reg, ninf_operand);
4875 __ jmp(&done, Label::kNear);
4876 __ bind(&positive);
4877 __ fldln2();
4878 __ subp(rsp, Immediate(kDoubleSize));
4879 __ movsd(Operand(rsp, 0), input_reg);
4880 __ fld_d(Operand(rsp, 0));
4881 __ fyl2x();
4882 __ fstp_d(Operand(rsp, 0));
4883 __ movsd(input_reg, Operand(rsp, 0));
4884 __ addp(rsp, Immediate(kDoubleSize));
4885 __ bind(&done);
4886 }
4889 void LCodeGen::DoMathClz32(LMathClz32* instr) {
4890 Register input = ToRegister(instr->value());
4891 Register result = ToRegister(instr->result());
4892 Label not_zero_input;
4893 __ bsrl(result, input);
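// bsrl returns the index of the highest set bit and sets the zero flag when
// the input is 0. E.g. input == 1: bsrl gives 0, and 31 ^ 0 == 31 leading zeros.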
4895 __ j(not_zero, &not_zero_input);
4896 __ Set(result, 63); // 63^31 == 32
4898 __ bind(&not_zero_input);
4899 __ xorl(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
4900 }
4903 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
4904 ASSERT(ToRegister(instr->context()).is(rsi));
4905 ASSERT(ToRegister(instr->function()).is(rdi));
4906 ASSERT(instr->HasPointerMap());
4908 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
4909 if (known_function.is_null()) {
4910 LPointerMap* pointers = instr->pointer_map();
4911 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
4912 ParameterCount count(instr->arity());
4913 __ InvokeFunction(rdi, count, CALL_FUNCTION, generator);
4914 } else {
4915 CallKnownFunction(known_function,
4916 instr->hydrogen()->formal_parameter_count(),
4917 instr->arity(),
4918 instr,
4919 RDI_CONTAINS_TARGET);
4920 }
4921 }
4924 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4925 ASSERT(ToRegister(instr->context()).is(rsi));
4926 ASSERT(ToRegister(instr->function()).is(rdi));
4927 ASSERT(ToRegister(instr->result()).is(rax));
4929 int arity = instr->arity();
4930 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
4931 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4932 }
4935 void LCodeGen::DoCallNew(LCallNew* instr) {
4936 ASSERT(ToRegister(instr->context()).is(rsi));
4937 ASSERT(ToRegister(instr->constructor()).is(rdi));
4938 ASSERT(ToRegister(instr->result()).is(rax));
4940 __ Set(rax, instr->arity());
4941 // No cell in rbx for construct type feedback in optimized code.
4942 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
4943 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
4944 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4945 }
4948 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4949 ASSERT(ToRegister(instr->context()).is(rsi));
4950 ASSERT(ToRegister(instr->constructor()).is(rdi));
4951 ASSERT(ToRegister(instr->result()).is(rax));
4953 __ Set(rax, instr->arity());
4954 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
4955 ElementsKind kind = instr->hydrogen()->elements_kind();
4956 AllocationSiteOverrideMode override_mode =
4957 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4958 ? DISABLE_ALLOCATION_SITES
4959 : DONT_OVERRIDE;
4961 if (instr->arity() == 0) {
4962 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
4963 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4964 } else if (instr->arity() == 1) {
4965 Label done;
4966 if (IsFastPackedElementsKind(kind)) {
4967 Label packed_case;
4968 // We might need a holey-kind stub instead;
4969 // look at the first argument to decide.
4970 __ movp(rcx, Operand(rsp, 0));
4971 __ testp(rcx, rcx);
4972 __ j(zero, &packed_case, Label::kNear);
4974 ElementsKind holey_kind = GetHoleyElementsKind(kind);
4975 ArraySingleArgumentConstructorStub stub(isolate(),
4976 holey_kind,
4977 override_mode);
4978 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4979 __ jmp(&done, Label::kNear);
4980 __ bind(&packed_case);
4981 }
4983 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
4984 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4985 __ bind(&done);
4986 } else {
4987 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
4988 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4989 }
4990 }
4993 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4994 ASSERT(ToRegister(instr->context()).is(rsi));
4995 CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
4996 }
4999 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
5000 Register function = ToRegister(instr->function());
5001 Register code_object = ToRegister(instr->code_object());
5002 __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
5003 __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
5004 }
5007 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
5008 Register result = ToRegister(instr->result());
5009 Register base = ToRegister(instr->base_object());
5010 if (instr->offset()->IsConstantOperand()) {
5011 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
5012 __ leap(result, Operand(base, ToInteger32(offset)));
5013 } else {
5014 Register offset = ToRegister(instr->offset());
5015 __ leap(result, Operand(base, offset, times_1, 0));
5016 }
5017 }
5020 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
5021 HStoreNamedField* hinstr = instr->hydrogen();
5022 Representation representation = instr->representation();
5024 HObjectAccess access = hinstr->access();
5025 int offset = access.offset();
5027 if (access.IsExternalMemory()) {
5028 ASSERT(!hinstr->NeedsWriteBarrier());
5029 Register value = ToRegister(instr->value());
5030 if (instr->object()->IsConstantOperand()) {
5031 ASSERT(value.is(rax));
5032 LConstantOperand* object = LConstantOperand::cast(instr->object());
5033 __ store_rax(ToExternalReference(object));
5034 } else {
5035 Register object = ToRegister(instr->object());
5036 __ Store(MemOperand(object, offset), value, representation);
5037 }
5038 return;
5039 }
5041 Register object = ToRegister(instr->object());
5042 SmiCheck check_needed = hinstr->value()->IsHeapObject()
5043 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
5045 ASSERT(!(representation.IsSmi() &&
5046 instr->value()->IsConstantOperand() &&
5047 !IsInteger32Constant(LConstantOperand::cast(instr->value()))));
5048 if (representation.IsHeapObject()) {
5049 if (instr->value()->IsConstantOperand()) {
5050 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
5051 if (chunk_->LookupConstant(operand_value)->HasSmiValue()) {
5052 DeoptimizeIf(no_condition, instr->environment());
5053 }
5054 } else {
5055 if (!hinstr->value()->type().IsHeapObject()) {
5056 Register value = ToRegister(instr->value());
5057 Condition cc = masm()->CheckSmi(value);
5058 DeoptimizeIf(cc, instr->environment());
5060 // We know now that value is not a smi, so we can omit the check below.
5061 check_needed = OMIT_SMI_CHECK;
5062 }
5063 }
5064 } else if (representation.IsDouble()) {
5065 ASSERT(access.IsInobject());
5066 ASSERT(!hinstr->has_transition());
5067 ASSERT(!hinstr->NeedsWriteBarrier());
5068 XMMRegister value = ToDoubleRegister(instr->value());
5069 __ movsd(FieldOperand(object, offset), value);
5070 return;
5071 }
5073 if (hinstr->has_transition()) {
5074 Handle<Map> transition = hinstr->transition_map();
5075 AddDeprecationDependency(transition);
5076 if (!hinstr->NeedsWriteBarrierForMap()) {
5077 __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
5078 } else {
5079 Register temp = ToRegister(instr->temp());
5080 __ Move(kScratchRegister, transition);
5081 __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
5082 // Update the write barrier for the map field.
5083 __ RecordWriteField(object,
5084 HeapObject::kMapOffset,
5085 kScratchRegister,
5086 temp,
5087 kSaveFPRegs,
5088 OMIT_REMEMBERED_SET,
5089 OMIT_SMI_CHECK);
5090 }
5091 }
5093 // Do the store.
5094 Register write_register = object;
5095 if (!access.IsInobject()) {
5096 write_register = ToRegister(instr->temp());
5097 __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
5098 }
5100 if (representation.IsSmi() && SmiValuesAre32Bits() &&
5101 hinstr->value()->representation().IsInteger32()) {
5102 ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
5103 if (FLAG_debug_code) {
5104 Register scratch = kScratchRegister;
5105 __ Load(scratch, FieldOperand(write_register, offset), representation);
5106 __ AssertSmi(scratch);
5107 }
5108 // Store int value directly to upper half of the smi.
5109 STATIC_ASSERT(kSmiTag == 0);
5110 ASSERT(kSmiTagSize + kSmiShiftSize == 32);
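// With 32-bit smis a tagged smi is the integer shifted left by 32 bits, so
// the upper half of the word is exactly the untagged payload.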
5111 offset += kPointerSize / 2;
5112 representation = Representation::Integer32();
5113 }
5115 Operand operand = FieldOperand(write_register, offset);
5117 if (instr->value()->IsRegister()) {
5118 Register value = ToRegister(instr->value());
5119 __ Store(operand, value, representation);
5120 } else {
5121 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
5122 if (IsInteger32Constant(operand_value)) {
5123 ASSERT(!hinstr->NeedsWriteBarrier());
5124 int32_t value = ToInteger32(operand_value);
5125 if (representation.IsSmi()) {
5126 __ Move(operand, Smi::FromInt(value));
5127 } else {
5129 __ movl(operand, Immediate(value));
5130 }
5132 } else {
5133 Handle<Object> handle_value = ToHandle(operand_value);
5134 ASSERT(!hinstr->NeedsWriteBarrier());
5135 __ Move(operand, handle_value);
5136 }
5137 }
5139 if (hinstr->NeedsWriteBarrier()) {
5140 Register value = ToRegister(instr->value());
5141 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
5142 // Update the write barrier for the object for in-object properties.
5143 __ RecordWriteField(write_register,
5144 offset,
5145 value,
5146 temp,
5147 kSaveFPRegs,
5148 EMIT_REMEMBERED_SET,
5149 check_needed);
5150 }
5151 }
5154 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
5155 ASSERT(ToRegister(instr->context()).is(rsi));
5156 ASSERT(ToRegister(instr->object()).is(rdx));
5157 ASSERT(ToRegister(instr->value()).is(rax));
5159 __ Move(rcx, instr->hydrogen()->name());
5160 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
5161 CallCode(ic, RelocInfo::CODE_TARGET, instr);
5162 }
5165 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
5166 Representation representation = instr->hydrogen()->length()->representation();
5167 ASSERT(representation.Equals(instr->hydrogen()->index()->representation()));
5168 ASSERT(representation.IsSmiOrInteger32());
5170 Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal;
5171 if (instr->length()->IsConstantOperand()) {
5172 int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
5173 Register index = ToRegister(instr->index());
5174 if (representation.IsSmi()) {
5175 __ Cmp(index, Smi::FromInt(length));
5176 } else {
5177 __ cmpl(index, Immediate(length));
5178 }
5179 cc = ReverseCondition(cc);
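// The constant had to be the right-hand operand above, so index and length
// traded places in the comparison; reversing the condition compensates.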
5180 } else if (instr->index()->IsConstantOperand()) {
5181 int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
5182 if (instr->length()->IsRegister()) {
5183 Register length = ToRegister(instr->length());
5184 if (representation.IsSmi()) {
5185 __ Cmp(length, Smi::FromInt(index));
5186 } else {
5187 __ cmpl(length, Immediate(index));
5188 }
5189 } else {
5190 Operand length = ToOperand(instr->length());
5191 if (representation.IsSmi()) {
5192 __ Cmp(length, Smi::FromInt(index));
5193 } else {
5194 __ cmpl(length, Immediate(index));
5195 }
5196 }
5197 } else {
5198 Register index = ToRegister(instr->index());
5199 if (instr->length()->IsRegister()) {
5200 Register length = ToRegister(instr->length());
5201 if (representation.IsSmi()) {
5202 __ cmpp(length, index);
5203 } else {
5204 __ cmpl(length, index);
5205 }
5206 } else {
5207 Operand length = ToOperand(instr->length());
5208 if (representation.IsSmi()) {
5209 __ cmpp(length, index);
5210 } else {
5211 __ cmpl(length, index);
5212 }
5213 }
5215 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
5216 Label done;
5217 __ j(NegateCondition(cc), &done, Label::kNear);
5218 __ int3();
5219 __ bind(&done);
5220 } else {
5221 DeoptimizeIf(cc, instr->environment());
5222 }
5223 }
5226 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
5227 ElementsKind elements_kind = instr->elements_kind();
5228 LOperand* key = instr->key();
5229 if (!key->IsConstantOperand()) {
5230 HandleExternalArrayOpRequiresPreScale(key, elements_kind);
5231 }
5232 int base_offset = instr->is_fixed_typed_array()
5233 ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
5234 : 0;
5235 Operand operand(BuildFastArrayOperand(
5236 instr->elements(),
5237 key,
5238 elements_kind,
5239 base_offset,
5240 instr->additional_index()));
5242 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
5243 elements_kind == FLOAT32_ELEMENTS) {
5244 XMMRegister value(ToDoubleRegister(instr->value()));
5245 __ cvtsd2ss(value, value);
5246 __ movss(operand, value);
5247 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
5248 elements_kind == FLOAT64_ELEMENTS) {
5249 __ movsd(operand, ToDoubleRegister(instr->value()));
5250 } else if (IsSIMD128ElementsKind(elements_kind)) {
5251 __ movups(operand, ToSIMD128Register(instr->value()));
5253 Register value(ToRegister(instr->value()));
5254 switch (elements_kind) {
5255 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
5256 case EXTERNAL_INT8_ELEMENTS:
5257 case EXTERNAL_UINT8_ELEMENTS:
5258 case INT8_ELEMENTS:
5259 case UINT8_ELEMENTS:
5260 case UINT8_CLAMPED_ELEMENTS:
5261 __ movb(operand, value);
5262 break;
5263 case EXTERNAL_INT16_ELEMENTS:
5264 case EXTERNAL_UINT16_ELEMENTS:
5265 case INT16_ELEMENTS:
5266 case UINT16_ELEMENTS:
5267 __ movw(operand, value);
5268 break;
5269 case EXTERNAL_INT32_ELEMENTS:
5270 case EXTERNAL_UINT32_ELEMENTS:
5271 case INT32_ELEMENTS:
5272 case UINT32_ELEMENTS:
5273 __ movl(operand, value);
5274 break;
5275 case EXTERNAL_FLOAT32_ELEMENTS:
5276 case EXTERNAL_FLOAT32x4_ELEMENTS:
5277 case EXTERNAL_FLOAT64x2_ELEMENTS:
5278 case EXTERNAL_INT32x4_ELEMENTS:
5279 case EXTERNAL_FLOAT64_ELEMENTS:
5280 case FLOAT32_ELEMENTS:
5281 case FLOAT64_ELEMENTS:
5282 case FLOAT32x4_ELEMENTS:
5283 case FLOAT64x2_ELEMENTS:
5284 case INT32x4_ELEMENTS:
5285 case FAST_ELEMENTS:
5286 case FAST_SMI_ELEMENTS:
5287 case FAST_DOUBLE_ELEMENTS:
5288 case FAST_HOLEY_ELEMENTS:
5289 case FAST_HOLEY_SMI_ELEMENTS:
5290 case FAST_HOLEY_DOUBLE_ELEMENTS:
5291 case DICTIONARY_ELEMENTS:
5292 case SLOPPY_ARGUMENTS_ELEMENTS:
5293 UNREACHABLE();
5294 break;
5295 }
5296 }
5297 }
5300 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
5301 XMMRegister value = ToDoubleRegister(instr->value());
5302 LOperand* key = instr->key();
5303 if (instr->NeedsCanonicalization()) {
5304 Label have_value;
5306 __ ucomisd(value, value);
5307 __ j(parity_odd, &have_value, Label::kNear); // NaN.
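// ucomisd of a value against itself is unordered only for NaN (parity flag
// set); parity_odd therefore means the value needs no canonicalization.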
5309 __ Set(kScratchRegister, BitCast<uint64_t>(
5310 FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
5311 __ movq(value, kScratchRegister);
5313 __ bind(&have_value);
5314 }
5316 Operand double_store_operand = BuildFastArrayOperand(
5317 instr->elements(),
5318 key,
5319 FAST_DOUBLE_ELEMENTS,
5320 FixedDoubleArray::kHeaderSize - kHeapObjectTag,
5321 instr->additional_index());
5323 __ movsd(double_store_operand, value);
5324 }
5327 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
5328 HStoreKeyed* hinstr = instr->hydrogen();
5329 LOperand* key = instr->key();
5330 int offset = FixedArray::kHeaderSize - kHeapObjectTag;
5331 Representation representation = hinstr->value()->representation();
5333 if (representation.IsInteger32() && SmiValuesAre32Bits()) {
5334 ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
5335 ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
5336 if (FLAG_debug_code) {
5337 Register scratch = kScratchRegister;
5338 __ Load(scratch,
5339 BuildFastArrayOperand(instr->elements(),
5340 key,
5341 FAST_ELEMENTS,
5342 offset,
5343 instr->additional_index()),
5344 Representation::Smi());
5345 __ AssertSmi(scratch);
5346 }
5347 // Store int value directly to upper half of the smi.
5348 STATIC_ASSERT(kSmiTag == 0);
5349 ASSERT(kSmiTagSize + kSmiShiftSize == 32);
5350 offset += kPointerSize / 2;
5351 }
5353 Operand operand =
5354 BuildFastArrayOperand(instr->elements(),
5355 key,
5356 FAST_ELEMENTS,
5357 offset,
5358 instr->additional_index());
5360 if (instr->value()->IsRegister()) {
5361 __ Store(operand, ToRegister(instr->value()), representation);
5362 } else {
5363 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
5364 if (IsInteger32Constant(operand_value)) {
5365 int32_t value = ToInteger32(operand_value);
5366 if (representation.IsSmi()) {
5367 __ Move(operand, Smi::FromInt(value));
5368 } else {
5370 __ movl(operand, Immediate(value));
5371 }
5372 } else {
5373 Handle<Object> handle_value = ToHandle(operand_value);
5374 __ Move(operand, handle_value);
5375 }
5376 }
5378 if (hinstr->NeedsWriteBarrier()) {
5379 Register elements = ToRegister(instr->elements());
5380 ASSERT(instr->value()->IsRegister());
5381 Register value = ToRegister(instr->value());
5382 ASSERT(!key->IsConstantOperand());
5383 SmiCheck check_needed = hinstr->value()->IsHeapObject()
5384 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
5385 // Compute address of modified element and store it into key register.
5386 Register key_reg(ToRegister(key));
5387 __ leap(key_reg, operand);
5388 __ RecordWrite(elements,
5389 key_reg,
5390 value,
5391 kSaveFPRegs,
5392 EMIT_REMEMBERED_SET,
5393 check_needed);
5394 }
5395 }
5398 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
5399 if (instr->is_typed_elements()) {
5400 DoStoreKeyedExternalArray(instr);
5401 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
5402 DoStoreKeyedFixedDoubleArray(instr);
5403 } else {
5404 DoStoreKeyedFixedArray(instr);
5405 }
5406 }
5409 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
5410 ASSERT(ToRegister(instr->context()).is(rsi));
5411 ASSERT(ToRegister(instr->object()).is(rdx));
5412 ASSERT(ToRegister(instr->key()).is(rcx));
5413 ASSERT(ToRegister(instr->value()).is(rax));
5415 Handle<Code> ic = instr->strict_mode() == STRICT
5416 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
5417 : isolate()->builtins()->KeyedStoreIC_Initialize();
5418 CallCode(ic, RelocInfo::CODE_TARGET, instr);
5419 }
5422 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
5423 Register object_reg = ToRegister(instr->object());
5425 Handle<Map> from_map = instr->original_map();
5426 Handle<Map> to_map = instr->transitioned_map();
5427 ElementsKind from_kind = instr->from_kind();
5428 ElementsKind to_kind = instr->to_kind();
5430 Label not_applicable;
5431 __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
5432 __ j(not_equal, &not_applicable);
5433 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
5434 Register new_map_reg = ToRegister(instr->new_map_temp());
5435 __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
5436 __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
5438 ASSERT_NE(instr->temp(), NULL);
5439 __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
5440 ToRegister(instr->temp()), kDontSaveFPRegs);
5441 } else {
5442 ASSERT(object_reg.is(rax));
5443 ASSERT(ToRegister(instr->context()).is(rsi));
5444 PushSafepointRegistersScope scope(this);
5445 __ Move(rbx, to_map);
5446 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
5447 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
5448 __ CallStub(&stub);
5449 RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
5450 }
5451 __ bind(&not_applicable);
5452 }
5455 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
5456 Register object = ToRegister(instr->object());
5457 Register temp = ToRegister(instr->temp());
5458 Label no_memento_found;
5459 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
5460 DeoptimizeIf(equal, instr->environment());
5461 __ bind(&no_memento_found);
5462 }
5465 void LCodeGen::DoStringAdd(LStringAdd* instr) {
5466 ASSERT(ToRegister(instr->context()).is(rsi));
5467 ASSERT(ToRegister(instr->left()).is(rdx));
5468 ASSERT(ToRegister(instr->right()).is(rax));
5469 StringAddStub stub(isolate(),
5470 instr->hydrogen()->flags(),
5471 instr->hydrogen()->pretenure_flag());
5472 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5473 }
5476 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
5477 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
5478 public:
5479 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
5480 : LDeferredCode(codegen), instr_(instr) { }
5481 virtual void Generate() V8_OVERRIDE {
5482 codegen()->DoDeferredStringCharCodeAt(instr_);
5483 }
5484 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5485 private:
5486 LStringCharCodeAt* instr_;
5487 };
5489 DeferredStringCharCodeAt* deferred =
5490 new(zone()) DeferredStringCharCodeAt(this, instr);
5492 StringCharLoadGenerator::Generate(masm(),
5493 ToRegister(instr->string()),
5494 ToRegister(instr->index()),
5495 ToRegister(instr->result()),
5496 deferred->entry());
5497 __ bind(deferred->exit());
5498 }
5501 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
5502 Register string = ToRegister(instr->string());
5503 Register result = ToRegister(instr->result());
5505 // TODO(3095996): Get rid of this. For now, we need to make the
5506 // result register contain a valid pointer because it is already
5507 // contained in the register pointer map.
5508 __ Set(result, 0);
5510 PushSafepointRegistersScope scope(this);
5512 // Push the index as a smi. This is safe because of the checks in
5513 // DoStringCharCodeAt above.
5514 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
5515 if (instr->index()->IsConstantOperand()) {
5516 int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
5517 __ Push(Smi::FromInt(const_index));
5519 Register index = ToRegister(instr->index());
5520 __ Integer32ToSmi(index, index);
5521 __ Push(index);
5522 }
5523 CallRuntimeFromDeferred(
5524 Runtime::kHiddenStringCharCodeAt, 2, instr, instr->context());
5525 __ AssertSmi(rax);
5526 __ SmiToInteger32(rax, rax);
5527 __ StoreToSafepointRegisterSlot(result, rax);
5528 }
5531 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
5532 class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
5533 public:
5534 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
5535 : LDeferredCode(codegen), instr_(instr) { }
5536 virtual void Generate() V8_OVERRIDE {
5537 codegen()->DoDeferredStringCharFromCode(instr_);
5538 }
5539 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5540 private:
5541 LStringCharFromCode* instr_;
5542 };
5544 DeferredStringCharFromCode* deferred =
5545 new(zone()) DeferredStringCharFromCode(this, instr);
5547 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
5548 Register char_code = ToRegister(instr->char_code());
5549 Register result = ToRegister(instr->result());
5550 ASSERT(!char_code.is(result));
5552 __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
5553 __ j(above, deferred->entry());
5554 __ movsxlq(char_code, char_code);
5555 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
5556 __ movp(result, FieldOperand(result,
5557 char_code, times_pointer_size,
5558 FixedArray::kHeaderSize));
5559 __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
5560 __ j(equal, deferred->entry());
5561 __ bind(deferred->exit());
5562 }
5565 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
5566 Register char_code = ToRegister(instr->char_code());
5567 Register result = ToRegister(instr->result());
5569 // TODO(3095996): Get rid of this. For now, we need to make the
5570 // result register contain a valid pointer because it is already
5571 // contained in the register pointer map.
5572 __ Set(result, 0);
5574 PushSafepointRegistersScope scope(this);
5575 __ Integer32ToSmi(char_code, char_code);
5576 __ Push(char_code);
5577 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
5578 __ StoreToSafepointRegisterSlot(result, rax);
5579 }
5582 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
5583 LOperand* input = instr->value();
5584 ASSERT(input->IsRegister() || input->IsStackSlot());
5585 LOperand* output = instr->result();
5586 ASSERT(output->IsDoubleRegister());
5587 if (input->IsRegister()) {
5588 __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
5589 } else {
5590 __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
5591 }
5592 }
5595 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
5596 LOperand* input = instr->value();
5597 LOperand* output = instr->result();
5598 LOperand* temp = instr->temp();
5600 __ LoadUint32(ToDoubleRegister(output),
5601 ToRegister(input),
5602 ToDoubleRegister(temp));
5603 }
5606 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
5607 LOperand* input = instr->value();
5608 ASSERT(input->IsRegister() && input->Equals(instr->result()));
5609 Register reg = ToRegister(input);
5611 __ Integer32ToSmi(reg, reg);
5612 }
5615 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
5616 class DeferredNumberTagU V8_FINAL : public LDeferredCode {
5617 public:
5618 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
5619 : LDeferredCode(codegen), instr_(instr) { }
5620 virtual void Generate() V8_OVERRIDE {
5621 codegen()->DoDeferredNumberTagU(instr_);
5622 }
5623 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5624 private:
5625 LNumberTagU* instr_;
5626 };
5628 LOperand* input = instr->value();
5629 ASSERT(input->IsRegister() && input->Equals(instr->result()));
5630 Register reg = ToRegister(input);
5632 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
5633 __ cmpl(reg, Immediate(Smi::kMaxValue));
5634 __ j(above, deferred->entry());
5635 __ Integer32ToSmi(reg, reg);
5636 __ bind(deferred->exit());
5637 }
5640 void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
5641 Label done, slow;
5642 Register reg = ToRegister(instr->value());
5643 Register tmp = ToRegister(instr->temp1());
5644 XMMRegister temp_xmm = ToDoubleRegister(instr->temp2());
5646 // Load value into temp_xmm which will be preserved across potential call to
5647 // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
5648 // XMM registers on x64).
5649 XMMRegister xmm_scratch = double_scratch0();
5650 __ LoadUint32(temp_xmm, reg, xmm_scratch);
5652 if (FLAG_inline_new) {
5653 __ AllocateHeapNumber(reg, tmp, &slow);
5654 __ jmp(&done, Label::kNear);
5657 // Slow case: Call the runtime system to do the number allocation.
5660 // Put a valid pointer value in the stack slot where the result
5661 // register is stored, as this register is in the pointer map, but contains
5662 // an integer value.
5665 // Preserve the value of all registers.
5666 PushSafepointRegistersScope scope(this);
5668 // NumberTagU uses the context from the frame, rather than
5669 // the environment's HContext or HInlinedContext value.
5670 // They only call Runtime::kHiddenAllocateHeapNumber.
5671 // The corresponding HChange instructions are added in a phase that does
5672 // not have easy access to the local context.
5673 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
5674 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
5675 RecordSafepointWithRegisters(
5676 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5677 __ StoreToSafepointRegisterSlot(reg, rax);
5680 // Done. Put the value in temp_xmm into the value of the allocated heap
5683 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagD(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  XMMRegister input_reg = ToDoubleRegister(instr->value());
  Register reg = ToRegister(instr->result());
  Register tmp = ToRegister(instr->temp());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}


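// Slow path for NumberTagD: allocate the HeapNumber cell in the runtime. The
// double value itself is stored by the caller once the deferred code returns.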
void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Move(reg, Smi::FromInt(0));

  {
    PushSafepointRegistersScope scope(this);
    // NumberTagD uses the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kHiddenAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ movp(kScratchRegister, rax);
  }
  __ movp(reg, kScratchRegister);
}


void LCodeGen::DoDeferredSIMD128ToTagged(LSIMD128ToTagged* instr,
                                         Runtime::FunctionId id) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Move(reg, Smi::FromInt(0));

  {
    PushSafepointRegistersScope scope(this);
    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(id);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ movp(kScratchRegister, rax);
  }
  __ movp(reg, kScratchRegister);
}


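// Boxes a SIMD128 value held in an XMM register into a heap object of type T
// (Float32x4, Float64x2 or Int32x4). T supplies the instance size, map root
// index and runtime allocator id; the 16-byte payload is stored with movups
// once the object exists.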
template<class T>
void LCodeGen::HandleSIMD128ToTagged(LSIMD128ToTagged* instr) {
  class DeferredSIMD128ToTagged V8_FINAL : public LDeferredCode {
   public:
    DeferredSIMD128ToTagged(LCodeGen* codegen,
                            LSIMD128ToTagged* instr,
                            Runtime::FunctionId id)
        : LDeferredCode(codegen), instr_(instr), id_(id) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LSIMD128ToTagged* instr_;
    Runtime::FunctionId id_;
  };

  XMMRegister input_reg = ToSIMD128Register(instr->value());
  Register reg = ToRegister(instr->result());
  Register tmp = ToRegister(instr->temp());

  DeferredSIMD128ToTagged* deferred =
      new(zone()) DeferredSIMD128ToTagged(this, instr,
          static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()));
  if (FLAG_inline_new) {
    __ AllocateSIMDHeapObject(T::kSize, reg, tmp, deferred->entry(),
        static_cast<Heap::RootListIndex>(T::kMapRootIndex()));
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ movups(FieldOperand(reg, T::kValueOffset), input_reg);
}


void LCodeGen::DoSIMD128ToTagged(LSIMD128ToTagged* instr) {
  if (instr->value()->IsFloat32x4Register()) {
    HandleSIMD128ToTagged<Float32x4>(instr);
  } else if (instr->value()->IsFloat64x2Register()) {
    HandleSIMD128ToTagged<Float64x2>(instr);
  } else {
    ASSERT(instr->value()->IsInt32x4Register());
    HandleSIMD128ToTagged<Int32x4>(instr);
  }
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    __ testl(input, input);
    DeoptimizeIf(sign, instr->environment());
  }
  __ Integer32ToSmi(output, input);
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  ASSERT(instr->value()->Equals(instr->result()));
  Register input = ToRegister(instr->value());
  if (instr->needs_check()) {
    Condition is_smi = __ CheckSmi(input);
    DeoptimizeIf(NegateCondition(is_smi), instr->environment());
  } else {
    __ AssertSmi(input);
  }
  __ SmiToInteger32(input, input);
}


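// Unboxes a tagged number into an XMM register. Depending on |mode| the input
// is known to be a smi, or may be any tagged value, in which case a heap
// number map check is emitted (optionally converting undefined to NaN and
// deoptimizing on -0.0).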
void LCodeGen::EmitNumberUntagD(Register input_reg,
                                XMMRegister result_reg,
                                bool can_convert_undefined_to_nan,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env,
                                NumberUntagDMode mode) {
  Label convert, load_smi, done;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);

    // Heap number map check.
    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);

    // On x64 it is safe to load at heap number offset before evaluating the map
    // check, since all heap objects are at least two words long.
    __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));

    if (can_convert_undefined_to_nan) {
      __ j(not_equal, &convert, Label::kNear);
    } else {
      DeoptimizeIf(not_equal, env);
    }

    if (deoptimize_on_minus_zero) {
      XMMRegister xmm_scratch = double_scratch0();
      __ xorps(xmm_scratch, xmm_scratch);
      __ ucomisd(xmm_scratch, result_reg);
      __ j(not_equal, &done, Label::kNear);
      __ movmskpd(kScratchRegister, result_reg);
      __ testq(kScratchRegister, Immediate(1));
      DeoptimizeIf(not_zero, env);
    }
    __ jmp(&done, Label::kNear);

    if (can_convert_undefined_to_nan) {
      __ bind(&convert);

      // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
      __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
      DeoptimizeIf(not_equal, env);

      __ xorps(result_reg, result_reg);
      __ divsd(result_reg, result_reg);
      __ jmp(&done, Label::kNear);
    }
  } else {
    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
  }

  // Smi to XMM conversion
  __ bind(&load_smi);
  __ SmiToInteger32(kScratchRegister, input_reg);
  __ Cvtlsi2sd(result_reg, kScratchRegister);
  __ bind(&done);
}


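// Slow path for TaggedToI, reached when the input is not a smi. Truncating
// conversions accept heap numbers and the oddballs undefined/true/false;
// non-truncating conversions require a heap number whose value converts
// exactly, and deoptimize otherwise.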
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
  Register input_reg = ToRegister(instr->value());

  if (instr->truncating()) {
    Label no_heap_number, check_bools, check_false;

    // Heap number map check.
    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &no_heap_number, Label::kNear);
    __ TruncateHeapNumberToI(input_reg, input_reg);
    __ jmp(done);

    __ bind(&no_heap_number);
    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
    __ j(not_equal, &check_bools, Label::kNear);
    __ Set(input_reg, 0);
    __ jmp(done);

    __ bind(&check_bools);
    __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
    __ j(not_equal, &check_false, Label::kNear);
    __ Set(input_reg, 1);
    __ jmp(done);

    __ bind(&check_false);
    __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
    __ RecordComment("Deferred TaggedToI: cannot truncate");
    DeoptimizeIf(not_equal, instr->environment());
    __ Set(input_reg, 0);
  } else {
    Label bailout;
    XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
    __ TaggedToI(input_reg, input_reg, xmm_temp,
        instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);

    __ jmp(done);
    __ bind(&bailout);
    DeoptimizeIf(no_condition, instr->environment());
  }
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredTaggedToI(instr_, done());
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));
  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiToInteger32(input_reg, input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
    __ JumpIfNotSmi(input_reg, deferred->entry());
    __ SmiToInteger32(input_reg, input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  XMMRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->can_convert_undefined_to_nan(),
                   instr->hydrogen()->deoptimize_on_minus_zero(),
                   instr->environment(),
                   mode);
}


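// Unboxes a tagged SIMD128 value of type T: deoptimizes unless the input is a
// heap object with T's map, then loads the 16-byte payload with movups.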
template<class T>
void LCodeGen::HandleTaggedToSIMD128(LTaggedToSIMD128* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsSIMD128Register());

  Register input_reg = ToRegister(input);
  XMMRegister result_reg = ToSIMD128Register(result);

  Condition cc = masm()->CheckSmi(input_reg);
  DeoptimizeIf(cc, instr->environment());
  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                 static_cast<Heap::RootListIndex>(T::kMapRootIndex()));
  DeoptimizeIf(not_equal, instr->environment());
  __ movups(result_reg, FieldOperand(input_reg, T::kValueOffset));
}


void LCodeGen::DoTaggedToSIMD128(LTaggedToSIMD128* instr) {
  if (instr->representation().IsFloat32x4()) {
    HandleTaggedToSIMD128<Float32x4>(instr);
  } else if (instr->representation().IsFloat64x2()) {
    HandleTaggedToSIMD128<Float64x2>(instr);
  } else {
    ASSERT(instr->representation().IsInt32x4());
    HandleTaggedToSIMD128<Int32x4>(instr);
  }
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, input_reg);
  } else {
    Label bailout, done;
    XMMRegister xmm_scratch = double_scratch0();
    __ DoubleToI(result_reg, input_reg, xmm_scratch,
        instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);

    __ jmp(&done, Label::kNear);
    __ bind(&bailout);
    DeoptimizeIf(no_condition, instr->environment());
    __ bind(&done);
  }
}


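// Like DoDoubleToI above, but always takes the exact (non-truncating) path
// and additionally smi-tags the result, deoptimizing if tagging overflows.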
void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  Label bailout, done;
  XMMRegister xmm_scratch = double_scratch0();
  __ DoubleToI(result_reg, input_reg, xmm_scratch,
      instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);

  __ jmp(&done, Label::kNear);
  __ bind(&bailout);
  DeoptimizeIf(no_condition, instr->environment());
  __ bind(&done);

  __ Integer32ToSmi(result_reg, result_reg);
  DeoptimizeIf(overflow, instr->environment());
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  Condition cc = masm()->CheckSmi(ToRegister(input));
  DeoptimizeIf(NegateCondition(cc), instr->environment());
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    LOperand* input = instr->value();
    Condition cc = masm()->CheckSmi(ToRegister(input));
    DeoptimizeIf(cc, instr->environment());
  }
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());

  __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
            Immediate(static_cast<int8_t>(first)));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr->environment());
    } else {
      DeoptimizeIf(below, instr->environment());
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
                Immediate(static_cast<int8_t>(last)));
        DeoptimizeIf(above, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
               Immediate(mask));
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
    } else {
      __ movzxbl(kScratchRegister,
                 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
      __ andb(kScratchRegister, Immediate(mask));
      __ cmpb(kScratchRegister, Immediate(tag));
      DeoptimizeIf(not_equal, instr->environment());
    }
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  __ Cmp(reg, instr->hydrogen()->object().handle());
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ Push(object);
    __ Set(rsi, 0);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);

    __ testp(rax, Immediate(kSmiTagMask));
  }
  DeoptimizeIf(zero, instr->environment());
}


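// Emits the map checks for CheckMaps. Pure stability checks emit no code and
// only register stability dependencies on the maps. If a migration target
// exists, a failing check first attempts instance migration (above) instead
// of deoptimizing immediately.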
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(reg, map);
    __ j(equal, &success, Label::kNear);
  }

  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(reg, map);
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ j(not_equal, deferred->entry());
  } else {
    DeoptimizeIf(not_equal, instr->environment());
  }

  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
  XMMRegister xmm_scratch = double_scratch0();
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}


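// Clamps a tagged value to uint8: smis and heap numbers are clamped
// arithmetically, undefined is converted to zero, and anything else
// deoptimizes.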
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register input_reg = ToRegister(instr->unclamped());
  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
  XMMRegister xmm_scratch = double_scratch0();
  Label is_smi, done, heap_number;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  __ JumpIfSmi(input_reg, &is_smi, dist);

  // Check for heap number
  __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ Cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr->environment());
  __ xorl(input_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // Heap number
  __ bind(&heap_number);
  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // smi
  __ bind(&is_smi);
  __ SmiToInteger32(input_reg, input_reg);
  __ ClampUint8(input_reg);

  __ bind(&done);
}


void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ movq(result_reg, value_reg);
    __ shrq(result_reg, Immediate(32));
  } else {
    __ movd(result_reg, value_reg);
  }
}


void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  XMMRegister result_reg = ToDoubleRegister(instr->result());
  XMMRegister xmm_scratch = double_scratch0();
  __ movd(result_reg, hi_reg);
  __ psllq(result_reg, 32);
  __ movd(xmm_scratch, lo_reg);
  __ orps(result_reg, xmm_scratch);
}


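// Inline allocation fast path. Alignment and pretenuring flags are baked in
// statically from the hydrogen instruction; allocation failure jumps to the
// deferred runtime path below.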
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate V8_FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredAllocate(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
    } else {
      __ jmp(deferred->entry());
    }
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ movl(temp, Immediate((size / kPointerSize) - 1));
    } else {
      temp = ToRegister(instr->size());
      __ sarp(temp, Immediate(kPointerSizeLog2));
      __ decl(temp);
    }
    Label loop;
    __ bind(&loop);
    __ Move(FieldOperand(result, temp, times_pointer_size, 0),
            isolate()->factory()->one_pointer_filler_map());
    __ decl(temp);
    __ j(not_zero, &loop);
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    __ Integer32ToSmi(size, size);
    __ Push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Push(Smi::FromInt(size));
  }

  int flags = 0;
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(rax));
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


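// Materializes a regexp literal: loads the cached literal from the literals
// array, creating it via the runtime on first use, then makes a shallow copy
// of the JSRegExp object for this site.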
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  Label materialized;
  // Registers will be used as follows:
  // rcx = literals array.
  // rbx = regexp literal.
  // rax = regexp literal clone.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ Move(rcx, instr->hydrogen()->literals());
  __ movp(rbx, FieldOperand(rcx, literal_offset));
  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &materialized, Label::kNear);

  // Create regexp literal using runtime function
  // Result will be in rax.
  __ Push(rcx);
  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
  __ Push(instr->hydrogen()->pattern());
  __ Push(instr->hydrogen()->flags());
  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
  __ movp(rbx, rax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated, Label::kNear);

  __ bind(&runtime_allocate);
  __ Push(rbx);
  __ Push(Smi::FromInt(size));
  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
  __ Pop(rbx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ movp(rdx, FieldOperand(rbx, i));
    __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
    __ movp(FieldOperand(rax, i), rdx);
    __ movp(FieldOperand(rax, i + kPointerSize), rcx);
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
    __ movp(FieldOperand(rax, size - kPointerSize), rdx);
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(),
                            instr->hydrogen()->strict_mode(),
                            instr->hydrogen()->is_generator());
    __ Move(rbx, instr->hydrogen()->shared_info());
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ Push(rsi);
    __ Push(instr->hydrogen()->shared_info());
    __ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
                            Heap::kFalseValueRootIndex);
    CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  LOperand* input = instr->value();
  EmitPushTaggedOperand(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  ASSERT(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    __ Push(ToHandle(LConstantOperand::cast(operand)));
  } else if (operand->IsRegister()) {
    __ Push(ToRegister(operand));
  } else {
    __ Push(ToOperand(operand));
  }
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Condition final_branch_condition = EmitTypeofIs(instr, input);
  if (final_branch_condition != no_condition) {
    EmitBranch(instr, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Handle<String> type_name = instr->type_literal();
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);
  int next_block = GetNextEmittedBlock();

  Label::Distance true_distance = left_block == next_block ? Label::kNear
                                                           : Label::kFar;
  Label::Distance false_distance = right_block == next_block ? Label::kNear
                                                              : Label::kFar;
  Condition final_branch_condition = no_condition;
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label, true_distance);
    __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);

    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->float32x4_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FLOAT32x4_TYPE, input);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->float64x2_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FLOAT64x2_TYPE, input);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->int32x4_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, INT32x4_TYPE, input);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label, false_distance);
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = equal;

  } else if (FLAG_harmony_typeof &&
             String::Equals(type_name, factory->null_string())) {
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for undetectable objects => true.
    __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = not_zero;

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label, true_distance);
    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    if (!FLAG_harmony_typeof) {
      __ CompareRoot(input, Heap::kNullValueRootIndex);
      __ j(equal, true_label, true_distance);
    }
    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
    __ j(below, false_label, false_distance);
    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ j(above, false_label, false_distance);
    // Check for undetectable objects => false.
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else {
    __ jmp(false_label, false_distance);
  }

  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->temp());

  EmitIsConstructCall(temp);
  EmitBranch(instr, equal);
}


void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ movp(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(not_equal, &check_frame_marker, Label::kNear);
  __ movp(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Smi::FromInt(StackFrame::CONSTRUCT));
}


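// Pads the code with nops so that the lazy deoptimization call can later be
// patched in after the most recent lazy-bailout point without overwriting
// other code.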
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(no_condition, instr->environment(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
  RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(above_equal, &done, Label::kNear);

    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(rsi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


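// Prepares for a for-in loop: deoptimizes on undefined, null, smis and
// proxies, then uses the enum cache when the whole prototype chain has one;
// otherwise calls Runtime::kGetPropertyNamesFast and deoptimizes unless the
// result is a map, i.e. an enum cache is available after all.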
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(equal, instr->environment());

  Register null_value = rdi;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmpp(rax, null_value);
  DeoptimizeIf(equal, instr->environment());

  Condition cc = masm()->CheckSmi(rax);
  DeoptimizeIf(cc, instr->environment());

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
  DeoptimizeIf(below_equal, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);

  __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ Push(rax);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kMetaMapRootIndex);
  DeoptimizeIf(not_equal, instr->environment());
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Cmp(result, Smi::FromInt(0));
  __ j(not_equal, &load_cache, Label::kNear);
  __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ movp(result,
          FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ movp(result,
          FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  Condition cc = masm()->CheckSmi(result);
  DeoptimizeIf(cc, instr->environment());
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmpp(ToRegister(instr->map()),
          FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object);
  __ Push(index);
  __ xorp(rsi, rsi);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(object, rax);
}


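// Loads a field by its smi-encoded index. The lowest payload bit marks a
// mutable heap number that must be boxed in the deferred path; the remaining
// bits form the field index, with negative indices addressing the
// out-of-object properties array.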
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          object_(object),
          index_(index) {
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LLoadFieldByIndex* instr_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);

  Label out_of_object, done;
  __ Move(kScratchRegister, Smi::FromInt(1));
  __ testp(index, kScratchRegister);
  __ j(not_zero, deferred->entry());

  __ sarp(index, Immediate(1));

  __ SmiToInteger32(index, index);
  __ cmpl(index, Immediate(0));
  __ j(less, &out_of_object, Label::kNear);
  __ movp(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ negl(index);
  // Index is now equal to out of object property index plus 1.
  __ movp(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64