// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"

#include "src/x64/lithium-codegen-x64.h"
#include "src/code-stubs.h"
#include "src/stub-cache.h"
#include "src/hydrogen-osr.h"

namespace v8 {
namespace internal {

// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}

  virtual void AfterCall() const V8_OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateJumpTable() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);
}


void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ movp(Operand(rsp, offset), rax);
  }
}


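// Editor's note: an illustrative sketch, not part of the original file. The
// OS commits stack pages lazily, one guard page at a time, so after a large
// rsp adjustment every page in the newly exposed area must be touched in
// order before arbitrary slots are written. The helper name and plain-C++
// form below are the editor's own; the generator emits the equivalent movp
// loop above.
static void TouchStackPagesSketch(volatile char* new_stack_top, int bytes) {
  const int kPageSizeSketch = 4 * 1024;
  // Walk from the old stack pointer down towards the new one, touching one
  // byte per page so each guard page is hit in sequence.
  for (int offset = bytes - kPageSizeSketch; offset > 0;
       offset -= kPageSizeSketch) {
    new_stack_top[offset] = 0;
  }
}

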
void LCodeGen::SaveCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Save clobbered caller double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ movsd(MemOperand(rsp, count * kDoubleSize),
             XMMRegister::FromAllocationIndex(save_iterator.Current()));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Restore clobbered caller double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
             MemOperand(rsp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ int3();
    }
#endif

    // Sloppy mode functions need to replace the receiver with the global proxy
    // when called as functions (without an explicit receiver object).
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      Label ok;
      StackArgumentsAccessor args(rsp, scope()->num_parameters());
      __ movp(rcx, args.GetReceiverOperand());

      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
      __ j(not_equal, &ok, Label::kNear);

      __ movp(rcx, GlobalObjectOperand());
      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));

      __ movp(args.GetReceiverOperand(), rcx);

      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    ASSERT(!frame_is_built_);
    frame_is_built_ = true;
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->IsCodePreAgingActive());
    }
    info()->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      MakeSureStackPagesMapped(slots * kPointerSize);
#endif
      __ Push(rax);
      __ Set(rax, slots);
      __ movq(kScratchRegister, kSlotsZapValue);
      Label loop;
      __ bind(&loop);
      __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
              kScratchRegister);
      __ decl(rax);
      __ j(not_zero, &loop);
      __ Pop(rax);
    } else {
      __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      MakeSureStackPagesMapped(slots * kPointerSize);
#endif
    }

    if (info()->saves_caller_doubles()) {
      SaveCallerDoubles();
    }
  }

  // Possibly allocate a local context.
  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is still in rdi.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ Push(rdi);
      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in rax.  It replaces the context passed to us.
    // It's saved in the stack and kept live in rsi.
    __ movp(rsi, rax);
    __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);

    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ movp(rax, Operand(rbp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ movp(Operand(rsi, context_offset), rax);
        // Update the write barrier. This clobbers rax and rbx.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 0);
  __ subp(rsp, Immediate(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
  if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() &&
      instr->hydrogen_value()->representation().IsInteger32() &&
      instr->result()->IsRegister()) {
    __ AssertZeroExtended(ToRegister(instr->result()));
  }

  if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
    // We sign extend the dehoisted key at the definition point when the pointer
    // size is 64-bit. For x32 port, we sign extend the dehoisted key at the use
    // points and MustSignExtendResult is always false. We can't use
    // STATIC_ASSERT here as the pointer size is 32-bit for x32.
    ASSERT(kPointerSize == kInt64Size);
    if (instr->result()->IsRegister()) {
      Register result_reg = ToRegister(instr->result());
      __ movsxlq(result_reg, result_reg);
    } else {
      // Sign extend the 32bit result in the stack slots.
      ASSERT(instr->result()->IsStackSlot());
      Operand src = ToOperand(instr->result());
      __ movsxlq(kScratchRegister, src);
      __ movq(src, kScratchRegister);
    }
  }
}


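// Editor's note: a minimal sketch (editor's own, unused) of what the movsxlq
// above does. A dehoisted key occupies a 64-bit register or slot but is
// logically a 32-bit value; sign extending at the definition point makes
// subsequent 64-bit address arithmetic see negative keys correctly.
static int64_t SignExtendResultSketch(int32_t result32) {
  return static_cast<int64_t>(result32);  // movsxlq reg, reg
}

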
bool LCodeGen::GenerateJumpTable() {
  Label needs_frame;
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  for (int i = 0; i < jump_table_.length(); i++) {
    __ bind(&jump_table_[i].label);
    Address entry = jump_table_[i].address;
    Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (jump_table_[i].needs_frame) {
      ASSERT(!info()->saves_caller_doubles());
      __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ movp(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
        __ pushq(rbp);
        __ movp(rbp, rsp);
        __ Push(rsi);
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ Move(rsi, Smi::FromInt(StackFrame::STUB));
        __ Push(rsi);
        __ movp(rsi, MemOperand(rsp, kPointerSize));
        __ call(kScratchRegister);
      }
    } else {
      if (info()->saves_caller_doubles()) {
        ASSERT(info()->IsStub());
        RestoreCallerDoubles();
      }
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that rsi isn't trashed.
        __ pushq(rbp);  // Caller's frame pointer.
        __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
        __ Push(Smi::FromInt(StackFrame::STUB));
        __ leap(rbp, Operand(rsp, 2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        frame_is_built_ = false;
        __ movp(rsp, rbp);
        __ popq(rbp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToSIMD128Register(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


XMMRegister LCodeGen::ToFloat32x4Register(LOperand* op) const {
  ASSERT(op->IsFloat32x4Register());
  return ToSIMD128Register(op->index());
}


XMMRegister LCodeGen::ToFloat64x2Register(LOperand* op) const {
  ASSERT(op->IsFloat64x2Register());
  return ToSIMD128Register(op->index());
}


XMMRegister LCodeGen::ToInt32x4Register(LOperand* op) const {
  ASSERT(op->IsInt32x4Register());
  return ToSIMD128Register(op->index());
}


XMMRegister LCodeGen::ToSIMD128Register(LOperand* op) const {
  ASSERT(op->IsFloat32x4Register() || op->IsFloat64x2Register() ||
         op->IsInt32x4Register());
  return ToSIMD128Register(op->index());
}


bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
         chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
}


bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  ASSERT(SmiValuesAre31Bits() && r.IsSmiOrTagged());
  return static_cast<int32_t>(reinterpret_cast<intptr_t>(Smi::FromInt(value)));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


static int ArgumentsOffsetWithoutFrame(int index) {
  ASSERT(index < 0);
  return -(index + 1) * kPointerSize + kPCOnStackSize;
}


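// Editor's note (editor's own worked example, not original text): with the
// x64 values kPointerSize == 8 and kPCOnStackSize == 8, index -1 maps to
// -(-1 + 1) * 8 + 8 == 8 and index -2 to 16; i.e. when no frame was built,
// the arguments are found just above the return address at rsp.

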
Operand LCodeGen::ToOperand(LOperand* op) const {
  // Does not handle registers. In X64 assembler, plain registers are not
  // representable as an Operand.
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot() ||
         op->IsFloat32x4StackSlot() || op->IsFloat64x2StackSlot() ||
         op->IsInt32x4StackSlot());
  if (NeedsEagerFrame()) {
    return Operand(rbp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsFloat32x4StackSlot()) {
    translation->StoreSIMD128StackSlot(op->index(),
                                       Translation::FLOAT32x4_STACK_SLOT);
  } else if (op->IsFloat64x2StackSlot()) {
    translation->StoreSIMD128StackSlot(op->index(),
                                       Translation::FLOAT64x2_STACK_SLOT);
  } else if (op->IsInt32x4StackSlot()) {
    translation->StoreSIMD128StackSlot(op->index(),
                                       Translation::INT32x4_STACK_SLOT);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsFloat32x4Register()) {
    XMMRegister reg = ToFloat32x4Register(op);
    translation->StoreSIMD128Register(reg, Translation::FLOAT32x4_REGISTER);
  } else if (op->IsFloat64x2Register()) {
    XMMRegister reg = ToFloat64x2Register(op);
    translation->StoreSIMD128Register(reg, Translation::FLOAT64x2_REGISTER);
  } else if (op->IsInt32x4Register()) {
    XMMRegister reg = ToInt32x4Register(op);
    translation->StoreSIMD128Register(reg, Translation::INT32x4_REGISTER);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               int argc) {
  ASSERT(instr != NULL);
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);
  ASSERT(instr->HasPointerMap());

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(rsi)) {
      __ movp(rsi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ movp(rsi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, environment->zone());
  }
}


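// Editor's note (editor's own example, not original text): for an environment
// chain  inner(JS_FUNCTION) -> adaptor(ARGUMENTS_ADAPTOR) -> outer(JS_FUNCTION)
// the loop above yields frame_count == 3 and jsframe_count == 2; every
// environment contributes a translation frame, but only JS_FUNCTION frames
// are counted as JavaScript frames for the deoptimizer.

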
void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ pushfq();
    __ Push(rax);
    Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
    __ movl(rax, count_operand);
    __ subl(rax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ movl(rax, Immediate(FLAG_deopt_every_n_times));
    __ movl(count_operand, rax);
    __ Pop(rax);
    __ popfq();
    ASSERT(frame_is_built_);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ movl(count_operand, rax);
    __ Pop(rax);
    __ popfq();
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) {
      __ j(NegateCondition(cc), &done, Label::kNear);
    }
    __ int3();
    __ bind(&done);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (cc == no_condition && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        jump_table_.last().address != entry ||
        jump_table_.last().needs_frame != !frame_is_built_ ||
        jump_table_.last().bailout_type != bailout_type) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}


void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(cc, environment, bailout_type);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode, int argc) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), argc, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(kind == expected_safepoint_kind_);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();

  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->result()).is(rax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ testl(dividend, dividend);
    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
    // Note that this is correct even for kMinInt operands.
    __ negl(dividend);
    __ andl(dividend, Immediate(mask));
    __ negl(dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(zero, instr->environment());
    }
    __ jmp(&done, Label::kNear);
  }

  __ bind(&dividend_is_not_negative);
  __ andl(dividend, Immediate(mask));
  __ bind(&done);
}


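// Editor's note: illustrative sketches (editor's own, unused) of the two
// variants discussed above, for divisor == +/-2^k and mask == Abs(divisor)-1.
// Both assume two's-complement wrap-around for the kMinInt negation, which is
// what the generated machine code provides.
static int32_t ModPowerOf2BranchingSketch(int32_t dividend, int32_t mask) {
  if (dividend < 0) {
    return -((-dividend) & mask);  // negl; andl; negl
  }
  return dividend & mask;          // the heavily favored positive path
}

static int32_t ModPowerOf2BranchFreeSketch(int32_t dividend, int32_t mask) {
  int32_t sign = dividend >> 31;         // 0 for positive, -1 for negative
  int32_t abs = (dividend ^ sign) - sign;
  return ((abs & mask) ^ sign) - sign;   // result sign follows the dividend
}

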
void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(ToRegister(instr->result()).is(rax));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr->environment());
    return;
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  __ imull(rdx, rdx, Immediate(Abs(divisor)));
  __ movl(rax, dividend);
  __ subl(rax, rdx);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ j(not_zero, &remainder_not_zero, Label::kNear);
    __ cmpl(dividend, Immediate(0));
    DeoptimizeIf(less, instr->environment());
    __ bind(&remainder_not_zero);
  }
}


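// Editor's note: a sketch (editor's own, unused) of the identity computed by
// the TruncatingDiv/imull/movl/subl sequence above. TruncatingDiv leaves
// trunc(dividend / Abs(divisor)) in rdx via a magic-number multiplication:
static int32_t ModByConstSketch(int32_t dividend, int32_t divisor) {
  int32_t abs_divisor = divisor < 0 ? -divisor : divisor;
  int32_t quotient = dividend / abs_divisor;   // what TruncatingDiv produces
  return dividend - quotient * abs_divisor;    // sign follows the dividend
}

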
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();

  Register left_reg = ToRegister(instr->left());
  ASSERT(left_reg.is(rax));
  Register right_reg = ToRegister(instr->right());
  ASSERT(!right_reg.is(rax));
  ASSERT(!right_reg.is(rdx));
  Register result_reg = ToRegister(instr->result());
  ASSERT(result_reg.is(rdx));

  Label done;
  // Check for x % 0, idiv would signal a divide error. We have to
  // deopt in this case because we can't return a NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(right_reg, right_reg);
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for kMinInt % -1, idiv would signal a divide error. We
  // have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ cmpl(left_reg, Immediate(kMinInt));
    __ j(not_zero, &no_overflow_possible, Label::kNear);
    __ cmpl(right_reg, Immediate(-1));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ Set(result_reg, 0);
      __ jmp(&done, Label::kNear);
    }
    __ bind(&no_overflow_possible);
  }

  // Sign extend dividend in eax into edx:eax, since we are using only the low
  // 32 bits of the values.
  __ cdq();

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive_left;
    __ testl(left_reg, left_reg);
    __ j(not_sign, &positive_left, Label::kNear);
    __ idivl(right_reg);
    __ testl(result_reg, result_reg);
    DeoptimizeIf(zero, instr->environment());
    __ jmp(&done, Label::kNear);
    __ bind(&positive_left);
  }
  __ idivl(right_reg);
  __ bind(&done);
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(dividend.is(ToRegister(instr->result())));

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  if (divisor == 1) return;
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ sarl(dividend, Immediate(shift));
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ negl(dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(zero, instr->environment());
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(overflow, instr->environment());
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ sarl(dividend, Immediate(shift));
    return;
  }

  Label not_kmin_int, done;
  __ j(no_overflow, &not_kmin_int, Label::kNear);
  __ movl(dividend, Immediate(kMinInt / divisor));
  __ jmp(&done, Label::kNear);
  __ bind(&not_kmin_int);
  __ sarl(dividend, Immediate(shift));
  __ bind(&done);
}


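// Editor's note: a sketch (editor's own, unused) of the negative-divisor path
// above. Negating first and then shifting arithmetically computes
// floor(dividend / -2^shift), and the overflow branch materializes exactly
// the kMinInt special case:
static int32_t FlooringDivByNegPowerOf2Sketch(int32_t dividend, int shift,
                                              int32_t divisor) {
  if (dividend == kMinInt) return kMinInt / divisor;  // negation overflows
  return (-dividend) >> shift;  // arithmetic shift floors by 2^shift
}

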
void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(ToRegister(instr->result()).is(rdx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr->environment());
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr->environment());
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(dividend, Abs(divisor));
    if (divisor < 0) __ negl(rdx);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp3());
  ASSERT(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
  Label needs_adjustment, done;
  __ cmpl(dividend, Immediate(0));
  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ negl(rdx);
  __ jmp(&done, Label::kNear);
  __ bind(&needs_adjustment);
  __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
  __ TruncatingDiv(temp, Abs(divisor));
  if (divisor < 0) __ negl(rdx);
  __ decl(rdx);
  __ bind(&done);
}


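// Editor's note: a sketch (editor's own, unused) of the adjustment above.
// When the dividend's sign makes truncation differ from flooring, nudging the
// dividend one unit towards the divisor's sign and subtracting one afterwards
// (the leal and decl above) turns a truncating division into a flooring one:
static int32_t FlooringDivByConstSketch(int32_t dividend, int32_t divisor) {
  bool truncation_is_flooring =
      (divisor > 0 && dividend >= 0) || (divisor < 0 && dividend <= 0);
  if (truncation_is_flooring) return dividend / divisor;
  int32_t temp = dividend + (divisor > 0 ? 1 : -1);
  return temp / divisor - 1;
}

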
// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());
  ASSERT(dividend.is(rax));
  ASSERT(remainder.is(rdx));
  ASSERT(result.is(rax));
  ASSERT(!divisor.is(rax));
  ASSERT(!divisor.is(rdx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(divisor, divisor);
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ testl(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ testl(divisor, divisor);
    DeoptimizeIf(sign, instr->environment());
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmpl(dividend, Immediate(kMinInt));
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmpl(divisor, Immediate(-1));
    DeoptimizeIf(zero, instr->environment());
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to rdx (= remainder).
  __ cdq();
  __ idivl(divisor);

  Label done;
  __ testl(remainder, remainder);
  __ j(zero, &done, Label::kNear);
  __ xorl(remainder, divisor);
  __ sarl(remainder, Immediate(31));
  __ addl(result, remainder);
  __ bind(&done);
}


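// Editor's note: a sketch (editor's own, unused) of the three-instruction
// fixup above. idivl produces a truncated quotient; flooring needs one less
// exactly when the remainder is non-zero and dividend and divisor have
// opposite signs. Since the remainder carries the dividend's sign,
// (remainder ^ divisor) >> 31 is -1 in precisely that case and 0 otherwise:
static int32_t FlooringFixupSketch(int32_t truncated_quotient,
                                   int32_t remainder, int32_t divisor) {
  if (remainder == 0) return truncated_quotient;
  return truncated_quotient + ((remainder ^ divisor) >> 31);
}

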
void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
  ASSERT(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr->environment());
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmpl(dividend, Immediate(kMinInt));
    DeoptimizeIf(zero, instr->environment());
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ testl(dividend, Immediate(mask));
    DeoptimizeIf(not_zero, instr->environment());
  }
  __ Move(result, dividend);
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift > 0) {
    // The arithmetic shift is always OK, the 'if' is an optimization only.
    if (shift > 1) __ sarl(result, Immediate(31));
    __ shrl(result, Immediate(32 - shift));
    __ addl(result, dividend);
    __ sarl(result, Immediate(shift));
  }
  if (divisor < 0) __ negl(result);
}


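// Editor's note: a sketch (editor's own, unused) of the bias trick above. An
// arithmetic shift floors, but this division must truncate towards zero, so
// 2^shift - 1 is added to negative dividends first; the sarl/shrl pair
// materializes that bias from the sign bit without a branch:
static int32_t TruncatingDivByPowerOf2Sketch(int32_t dividend, int shift) {
  uint32_t sign_mask = static_cast<uint32_t>(dividend >> 31);  // 0 or ~0u
  int32_t bias = static_cast<int32_t>(sign_mask >> (32 - shift));
  return (dividend + bias) >> shift;  // assumes 1 <= shift <= 31
}

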
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(ToRegister(instr->result()).is(rdx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr->environment());
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr->environment());
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ negl(rdx);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ movl(rax, rdx);
    __ imull(rax, rax, Immediate(divisor));
    __ subl(rax, dividend);
    DeoptimizeIf(not_equal, instr->environment());
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  ASSERT(dividend.is(rax));
  ASSERT(remainder.is(rdx));
  ASSERT(ToRegister(instr->result()).is(rax));
  ASSERT(!divisor.is(rax));
  ASSERT(!divisor.is(rdx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(divisor, divisor);
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ testl(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ testl(divisor, divisor);
    DeoptimizeIf(sign, instr->environment());
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmpl(dividend, Immediate(kMinInt));
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmpl(divisor, Immediate(-1));
    DeoptimizeIf(zero, instr->environment());
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to rdx (= remainder).
  __ cdq();
  __ idivl(divisor);

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ testl(remainder, remainder);
    DeoptimizeIf(not_zero, instr->environment());
  }
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ movp(kScratchRegister, left);
    } else {
      __ movl(kScratchRegister, left);
    }
  }

  bool can_overflow =
      instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  if (right->IsConstantOperand()) {
    int32_t right_value = ToInteger32(LConstantOperand::cast(right));
    if (right_value == -1) {
      __ negl(left);
    } else if (right_value == 0) {
      __ xorl(left, left);
    } else if (right_value == 2) {
      __ addl(left, left);
    } else if (!can_overflow) {
      // If the multiplication is known to not overflow, we
      // can use operations that don't set the overflow flag
      // correctly.
      switch (right_value) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ leal(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shll(left, Immediate(2));
          break;
        case 5:
          __ leal(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shll(left, Immediate(3));
          break;
        case 9:
          __ leal(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shll(left, Immediate(4));
          break;
        default:
          __ imull(left, left, Immediate(right_value));
          break;
      }
    } else {
      __ imull(left, left, Immediate(right_value));
    }
  } else if (right->IsStackSlot()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger64(left, left);
      __ imulp(left, ToOperand(right));
    } else {
      __ imull(left, ToOperand(right));
    }
  } else {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger64(left, left);
      __ imulp(left, ToRegister(right));
    } else {
      __ imull(left, ToRegister(right));
    }
  }

  if (can_overflow) {
    DeoptimizeIf(overflow, instr->environment());
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ testp(left, left);
    } else {
      __ testl(left, left);
    }
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      // Constant can't be represented as 32-bit Smi due to immediate size
      // limit.
      ASSERT(SmiValuesAre32Bits()
          ? !instr->hydrogen_value()->representation().IsSmi()
          : SmiValuesAre31Bits());
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        DeoptimizeIf(no_condition, instr->environment());
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        __ cmpl(kScratchRegister, Immediate(0));
        DeoptimizeIf(less, instr->environment());
      }
    } else if (right->IsStackSlot()) {
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ orp(kScratchRegister, ToOperand(right));
      } else {
        __ orl(kScratchRegister, ToOperand(right));
      }
      DeoptimizeIf(sign, instr->environment());
    } else {
      // Test the non-zero operand for negative sign.
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ orp(kScratchRegister, ToRegister(right));
      } else {
        __ orl(kScratchRegister, ToRegister(right));
      }
      DeoptimizeIf(sign, instr->environment());
    }
    __ bind(&done);
  }
}


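// Editor's note (editor's own summary, not original text): lea computes
// base + index * {1, 2, 4, 8} + disp in a single instruction and, unlike
// imull, does not set the overflow flag. That is why the strength reduction
// above is only applied when the multiplication is known not to overflow:
//   x * 3  ->  leal x, [x + x*2]        x * 4  ->  shll x, 2
//   x * 5  ->  leal x, [x + x*4]        x * 8  ->  shll x, 3
//   x * 9  ->  leal x, [x + x*8]        x * 16 ->  shll x, 4

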
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    int32_t right_operand =
        ToRepresentation(LConstantOperand::cast(right),
                         instr->hydrogen()->right()->representation());
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_OR:
        __ orl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_XOR:
        if (right_operand == int32_t(~0)) {
          __ notl(ToRegister(left));
        } else {
          __ xorl(ToRegister(left), Immediate(right_operand));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else if (right->IsStackSlot()) {
    switch (instr->op()) {
      case Token::BIT_AND:
        if (instr->IsInteger32()) {
          __ andl(ToRegister(left), ToOperand(right));
        } else {
          __ andp(ToRegister(left), ToOperand(right));
        }
        break;
      case Token::BIT_OR:
        if (instr->IsInteger32()) {
          __ orl(ToRegister(left), ToOperand(right));
        } else {
          __ orp(ToRegister(left), ToOperand(right));
        }
        break;
      case Token::BIT_XOR:
        if (instr->IsInteger32()) {
          __ xorl(ToRegister(left), ToOperand(right));
        } else {
          __ xorp(ToRegister(left), ToOperand(right));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    ASSERT(right->IsRegister());
    switch (instr->op()) {
      case Token::BIT_AND:
        if (instr->IsInteger32()) {
          __ andl(ToRegister(left), ToRegister(right));
        } else {
          __ andp(ToRegister(left), ToRegister(right));
        }
        break;
      case Token::BIT_OR:
        if (instr->IsInteger32()) {
          __ orl(ToRegister(left), ToRegister(right));
        } else {
          __ orp(ToRegister(left), ToRegister(right));
        }
        break;
      case Token::BIT_XOR:
        if (instr->IsInteger32()) {
          __ xorl(ToRegister(left), ToRegister(right));
        } else {
          __ xorp(ToRegister(left), ToRegister(right));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    ASSERT(ToRegister(right).is(rcx));

    switch (instr->op()) {
      case Token::ROR:
        __ rorl_cl(ToRegister(left));
        break;
      case Token::SAR:
        __ sarl_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shrl_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr->environment());
        }
        break;
      case Token::SHL:
        __ shll_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int32_t value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ rorl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sarl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ shrl(ToRegister(left), Immediate(shift_count));
        } else if (instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr->environment());
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi()) {
            if (SmiValuesAre32Bits()) {
              __ shlp(ToRegister(left), Immediate(shift_count));
            } else {
              ASSERT(SmiValuesAre31Bits());
              if (instr->can_deopt()) {
                if (shift_count != 1) {
                  __ shll(ToRegister(left), Immediate(shift_count - 1));
                }
                __ Integer32ToSmi(ToRegister(left), ToRegister(left));
                DeoptimizeIf(overflow, instr->environment());
              } else {
                __ shll(ToRegister(left), Immediate(shift_count));
              }
            }
          } else {
            __ shll(ToRegister(left), Immediate(shift_count));
          }
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    int32_t right_operand =
        ToRepresentation(LConstantOperand::cast(right),
                         instr->hydrogen()->right()->representation());
    __ subl(ToRegister(left), Immediate(right_operand));
  } else if (right->IsRegister()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ subp(ToRegister(left), ToRegister(right));
    } else {
      __ subl(ToRegister(left), ToRegister(right));
    }
  } else {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ subp(ToRegister(left), ToOperand(right));
    } else {
      __ subl(ToRegister(left), ToOperand(right));
    }
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  Register dst = ToRegister(instr->result());
  if (instr->value() == 0) {
    __ xorl(dst, dst);
  } else {
    __ movl(dst, Immediate(instr->value()));
  }
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Move(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  XMMRegister res = ToDoubleRegister(instr->result());
  double v = instr->value();
  uint64_t int_val = BitCast<uint64_t, double>(v);
  // Use xor to produce +0.0 in a fast and compact way, but avoid doing so
  // if the constant is -0.0.
  if (int_val == 0) {
    __ xorps(res, res);
  } else {
    Register tmp = ToRegister(instr->temp());
    __ Set(tmp, int_val);
    __ movq(res, tmp);
  }
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ LoadAddress(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ Move(ToRegister(instr->result()), object);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Smi* index = instr->index();
  Label runtime, done, not_date_object;
  ASSERT(object.is(result));
  ASSERT(object.is(rax));

  Condition cc = masm()->CheckSmi(object);
  DeoptimizeIf(cc, instr->environment());
  __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
  DeoptimizeIf(not_equal, instr->environment());

  if (index->value() == 0) {
    __ movp(result, FieldOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      Operand stamp_operand = __ ExternalOperand(stamp);
      __ movp(kScratchRegister, stamp_operand);
      __ cmpp(kScratchRegister, FieldOperand(object,
                                             JSDate::kCacheStampOffset));
      __ j(not_equal, &runtime, Label::kNear);
      __ movp(result, FieldOperand(object, JSDate::kValueOffset +
                                           kPointerSize * index->value()));
      __ jmp(&done, Label::kNear);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2);
    __ movp(arg_reg_1, object);
    __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}


Operand LCodeGen::BuildSeqStringOperand(Register string,
                                        LOperand* index,
                                        String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldOperand(string, SeqString::kHeaderSize + offset);
  }
  return FieldOperand(
      string, ToRegister(index),
      encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
      SeqString::kHeaderSize);
}


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register result = ToRegister(instr->result());
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    __ Push(string);
    __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
    __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));

    __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
                              ? one_byte_seq_type : two_byte_seq_type));
    __ Check(equal, kUnexpectedStringType);
    __ Pop(string);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ movzxbl(result, operand);
  } else {
    __ movzxwl(result, operand);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    Register value = ToRegister(instr->value());
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (instr->value()->IsConstantOperand()) {
    int value = ToInteger32(LConstantOperand::cast(instr->value()));
    ASSERT_LE(0, value);
    if (encoding == String::ONE_BYTE_ENCODING) {
      ASSERT_LE(value, String::kMaxOneByteCharCode);
      __ movb(operand, Immediate(value));
    } else {
      ASSERT_LE(value, String::kMaxUtf16CodeUnit);
      __ movw(operand, Immediate(value));
    }
  } else {
    Register value = ToRegister(instr->value());
    if (encoding == String::ONE_BYTE_ENCODING) {
      __ movb(operand, value);
    } else {
      __ movw(operand, value);
    }
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();

  Representation target_rep = instr->hydrogen()->representation();
  bool is_p = target_rep.IsSmi() || target_rep.IsExternal();

  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
    if (right->IsConstantOperand()) {
      // No support for smi-immediates for 32-bit SMI.
      ASSERT(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
      int32_t offset =
          ToRepresentation(LConstantOperand::cast(right),
                           instr->hydrogen()->right()->representation());
      if (is_p) {
        __ leap(ToRegister(instr->result()),
                MemOperand(ToRegister(left), offset));
      } else {
        __ leal(ToRegister(instr->result()),
                MemOperand(ToRegister(left), offset));
      }
    } else {
      Operand address(ToRegister(left), ToRegister(right), times_1, 0);
      if (is_p) {
        __ leap(ToRegister(instr->result()), address);
      } else {
        __ leal(ToRegister(instr->result()), address);
      }
    }
  } else {
    if (right->IsConstantOperand()) {
      // No support for smi-immediates for 32-bit SMI.
      ASSERT(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
      int32_t right_operand =
          ToRepresentation(LConstantOperand::cast(right),
                           instr->hydrogen()->right()->representation());
      if (is_p) {
        __ addp(ToRegister(left), Immediate(right_operand));
      } else {
        __ addl(ToRegister(left), Immediate(right_operand));
      }
    } else if (right->IsRegister()) {
      if (is_p) {
        __ addp(ToRegister(left), ToRegister(right));
      } else {
        __ addl(ToRegister(left), ToRegister(right));
      }
    } else {
      if (is_p) {
        __ addp(ToRegister(left), ToOperand(right));
      } else {
        __ addl(ToRegister(left), ToOperand(right));
      }
    }
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      DeoptimizeIf(overflow, instr->environment());
    }
  }
}


1987 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1988 LOperand* left = instr->left();
1989 LOperand* right = instr->right();
1990 ASSERT(left->Equals(instr->result()));
1991 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1992 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1994 Condition condition = (operation == HMathMinMax::kMathMin)
1997 Register left_reg = ToRegister(left);
1998 if (right->IsConstantOperand()) {
1999 Immediate right_imm = Immediate(
2000 ToRepresentation(LConstantOperand::cast(right),
2001 instr->hydrogen()->right()->representation()));
2002 ASSERT(SmiValuesAre32Bits()
2003 ? !instr->hydrogen()->representation().IsSmi()
2004 : SmiValuesAre31Bits());
2005 __ cmpl(left_reg, right_imm);
2006 __ j(condition, &return_left, Label::kNear);
2007 __ movp(left_reg, right_imm);
2008 } else if (right->IsRegister()) {
2009 Register right_reg = ToRegister(right);
2010 if (instr->hydrogen_value()->representation().IsSmi()) {
2011 __ cmpp(left_reg, right_reg);
2012 } else {
2013 __ cmpl(left_reg, right_reg);
2014 }
2015 __ j(condition, &return_left, Label::kNear);
2016 __ movp(left_reg, right_reg);
2017 } else {
2018 Operand right_op = ToOperand(right);
2019 if (instr->hydrogen_value()->representation().IsSmi()) {
2020 __ cmpp(left_reg, right_op);
2021 } else {
2022 __ cmpl(left_reg, right_op);
2023 }
2024 __ j(condition, &return_left, Label::kNear);
2025 __ movp(left_reg, right_op);
2026 }
2027 __ bind(&return_left);
2028 } else {
2029 ASSERT(instr->hydrogen()->representation().IsDouble());
2030 Label check_nan_left, check_zero, return_left, return_right;
2031 Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
2032 XMMRegister left_reg = ToDoubleRegister(left);
2033 XMMRegister right_reg = ToDoubleRegister(right);
2034 __ ucomisd(left_reg, right_reg);
2035 __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
2036 __ j(equal, &check_zero, Label::kNear); // left == right.
2037 __ j(condition, &return_left, Label::kNear);
2038 __ jmp(&return_right, Label::kNear);
2040 __ bind(&check_zero);
2041 XMMRegister xmm_scratch = double_scratch0();
2042 __ xorps(xmm_scratch, xmm_scratch);
2043 __ ucomisd(left_reg, xmm_scratch);
2044 __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
2045 // At this point, both left and right are either 0 or -0.
2046 if (operation == HMathMinMax::kMathMin) {
2047 __ orps(left_reg, right_reg);
2048 } else {
2049 // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
2050 __ addsd(left_reg, right_reg);
2051 }
2052 __ jmp(&return_left, Label::kNear);
2054 __ bind(&check_nan_left);
2055 __ ucomisd(left_reg, left_reg); // NaN check.
2056 __ j(parity_even, &return_left, Label::kNear);
2057 __ bind(&return_right);
2058 __ movaps(left_reg, right_reg);
2060 __ bind(&return_left);
2061 }
2062 }
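// Worked example for the zero check above (IEEE-754 bit patterns, shown for
// illustration): +0.0 is 0x0000000000000000 and -0.0 is 0x8000000000000000.
// For min, orps of the two zeros sets the sign bit if either operand is -0,
// producing -0, the correct minimum. For max, addsd(+0, -0) is +0 under the
// default rounding mode, the correct maximum, which is why the comment notes
// that addsd and andsd behave the same on this input set.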
2065 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2066 XMMRegister left = ToDoubleRegister(instr->left());
2067 XMMRegister right = ToDoubleRegister(instr->right());
2068 XMMRegister result = ToDoubleRegister(instr->result());
2069 // All operations except MOD are computed in-place.
2070 ASSERT(instr->op() == Token::MOD || left.is(result));
2071 switch (instr->op()) {
2072 case Token::ADD:
2073 __ addsd(left, right);
2074 break;
2075 case Token::SUB:
2076 __ subsd(left, right);
2077 break;
2078 case Token::MUL:
2079 __ mulsd(left, right);
2080 break;
2081 case Token::DIV:
2082 __ divsd(left, right);
2083 // Don't delete this mov. It may improve performance on some CPUs
2084 // when there is a mulsd depending on the result.
2085 __ movaps(left, left);
2086 break;
2087 case Token::MOD: {
2088 XMMRegister xmm_scratch = double_scratch0();
2089 __ PrepareCallCFunction(2);
2090 __ movaps(xmm_scratch, left);
2091 ASSERT(right.is(xmm1));
2092 __ CallCFunction(
2093 ExternalReference::mod_two_doubles_operation(isolate()), 2);
2094 __ movaps(result, xmm_scratch);
2095 break;
2096 }
2097 default:
2098 UNREACHABLE();
2099 break;
2100 }
2101 }
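// The MOD case above goes through a C call: on x64 the first two double
// arguments travel in xmm0 and xmm1, so the dividend is moved into
// double_scratch0() (assumed here to be xmm0) and the divisor is asserted to
// already sit in xmm1. The runtime routine computes the JavaScript '%' of
// two doubles, which matches C fmod semantics (truncated division, sign of
// the dividend).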
2104 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2105 ASSERT(ToRegister(instr->context()).is(rsi));
2106 ASSERT(ToRegister(instr->left()).is(rdx));
2107 ASSERT(ToRegister(instr->right()).is(rax));
2108 ASSERT(ToRegister(instr->result()).is(rax));
2110 BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
2111 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2112 }
2115 template<class InstrType>
2116 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2117 int left_block = instr->TrueDestination(chunk_);
2118 int right_block = instr->FalseDestination(chunk_);
2120 int next_block = GetNextEmittedBlock();
2122 if (right_block == left_block || cc == no_condition) {
2123 EmitGoto(left_block);
2124 } else if (left_block == next_block) {
2125 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
2126 } else if (right_block == next_block) {
2127 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2128 } else {
2129 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2130 if (cc != always) {
2131 __ jmp(chunk_->GetAssemblyLabel(right_block));
2132 }
2133 }
2134 }
2137 template<class InstrType>
2138 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2139 int false_block = instr->FalseDestination(chunk_);
2140 __ j(cc, chunk_->GetAssemblyLabel(false_block));
2141 }
2144 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2145 __ int3();
2146 }
2149 void LCodeGen::DoBranch(LBranch* instr) {
2150 Representation r = instr->hydrogen()->value()->representation();
2151 if (r.IsInteger32()) {
2152 ASSERT(!info()->IsStub());
2153 Register reg = ToRegister(instr->value());
2154 __ testl(reg, reg);
2155 EmitBranch(instr, not_zero);
2156 } else if (r.IsSmi()) {
2157 ASSERT(!info()->IsStub());
2158 Register reg = ToRegister(instr->value());
2159 __ testp(reg, reg);
2160 EmitBranch(instr, not_zero);
2161 } else if (r.IsDouble()) {
2162 ASSERT(!info()->IsStub());
2163 XMMRegister reg = ToDoubleRegister(instr->value());
2164 XMMRegister xmm_scratch = double_scratch0();
2165 __ xorps(xmm_scratch, xmm_scratch);
2166 __ ucomisd(reg, xmm_scratch);
2167 EmitBranch(instr, not_equal);
2168 } else if (r.IsSIMD128()) {
2169 ASSERT(!info()->IsStub());
2170 EmitBranch(instr, no_condition);
2171 } else {
2172 ASSERT(r.IsTagged());
2173 Register reg = ToRegister(instr->value());
2174 HType type = instr->hydrogen()->value()->type();
2175 if (type.IsBoolean()) {
2176 ASSERT(!info()->IsStub());
2177 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2178 EmitBranch(instr, equal);
2179 } else if (type.IsSmi()) {
2180 ASSERT(!info()->IsStub());
2181 __ SmiCompare(reg, Smi::FromInt(0));
2182 EmitBranch(instr, not_equal);
2183 } else if (type.IsJSArray()) {
2184 ASSERT(!info()->IsStub());
2185 EmitBranch(instr, no_condition);
2186 } else if (type.IsHeapNumber()) {
2187 ASSERT(!info()->IsStub());
2188 XMMRegister xmm_scratch = double_scratch0();
2189 __ xorps(xmm_scratch, xmm_scratch);
2190 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2191 EmitBranch(instr, not_equal);
2192 } else if (type.IsString()) {
2193 ASSERT(!info()->IsStub());
2194 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2195 EmitBranch(instr, not_equal);
2196 } else {
2197 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2198 // Avoid deopts in the case where we've never executed this path before.
2199 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2201 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2202 // undefined -> false.
2203 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2204 __ j(equal, instr->FalseLabel(chunk_));
2205 }
2206 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2207 // true -> true.
2208 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2209 __ j(equal, instr->TrueLabel(chunk_));
2210 // false -> false.
2211 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2212 __ j(equal, instr->FalseLabel(chunk_));
2213 }
2214 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2215 // 'null' -> false.
2216 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2217 __ j(equal, instr->FalseLabel(chunk_));
2218 }
2220 if (expected.Contains(ToBooleanStub::SMI)) {
2221 // Smis: 0 -> false, all other -> true.
2222 __ Cmp(reg, Smi::FromInt(0));
2223 __ j(equal, instr->FalseLabel(chunk_));
2224 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2225 } else if (expected.NeedsMap()) {
2226 // If we need a map later and have a Smi -> deopt.
2227 __ testb(reg, Immediate(kSmiTagMask));
2228 DeoptimizeIf(zero, instr->environment());
2229 }
2231 const Register map = kScratchRegister;
2232 if (expected.NeedsMap()) {
2233 __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
2235 if (expected.CanBeUndetectable()) {
2236 // Undetectable -> false.
2237 __ testb(FieldOperand(map, Map::kBitFieldOffset),
2238 Immediate(1 << Map::kIsUndetectable));
2239 __ j(not_zero, instr->FalseLabel(chunk_));
2240 }
2241 }
2243 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2244 // spec object -> true.
2245 __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2246 __ j(above_equal, instr->TrueLabel(chunk_));
2247 }
2249 if (expected.Contains(ToBooleanStub::STRING)) {
2250 // String value -> false iff empty.
2251 Label not_string;
2252 __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2253 __ j(above_equal, &not_string, Label::kNear);
2254 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2255 __ j(not_zero, instr->TrueLabel(chunk_));
2256 __ jmp(instr->FalseLabel(chunk_));
2257 __ bind(&not_string);
2258 }
2260 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2261 // Symbol value -> true.
2262 __ CmpInstanceType(map, SYMBOL_TYPE);
2263 __ j(equal, instr->TrueLabel(chunk_));
2264 }
2266 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2267 // heap number -> false iff +0, -0, or NaN.
2268 Label not_heap_number;
2269 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2270 __ j(not_equal, &not_heap_number, Label::kNear);
2271 XMMRegister xmm_scratch = double_scratch0();
2272 __ xorps(xmm_scratch, xmm_scratch);
2273 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2274 __ j(zero, instr->FalseLabel(chunk_));
2275 __ jmp(instr->TrueLabel(chunk_));
2276 __ bind(&not_heap_number);
2277 }
2279 if (!expected.IsGeneric()) {
2280 // We've seen something for the first time -> deopt.
2281 // This can only happen if we are not generic already.
2282 DeoptimizeIf(no_condition, instr->environment());
2283 }
2284 }
2285 }
2286 }
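// For reference, the ToBoolean semantics the checks above implement:
// undefined, null, false, +0, -0, NaN and the empty string are falsy;
// everything else, including every non-undetectable object, is truthy.
// expected_input_types() narrows the emitted checks to the value types seen
// so far, so a value of a type never observed on this path deoptimizes
// (the final no_condition deopt) instead of taking a possibly wrong branch.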
2289 void LCodeGen::EmitGoto(int block) {
2290 if (!IsNextEmittedBlock(block)) {
2291 __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
2292 }
2293 }
2296 void LCodeGen::DoGoto(LGoto* instr) {
2297 EmitGoto(instr->block_id());
2298 }
2301 inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2302 Condition cond = no_condition;
2303 switch (op) {
2304 case Token::EQ:
2305 case Token::EQ_STRICT:
2306 cond = equal;
2307 break;
2308 case Token::NE:
2309 case Token::NE_STRICT:
2310 cond = not_equal;
2311 break;
2312 case Token::LT:
2313 cond = is_unsigned ? below : less;
2314 break;
2315 case Token::GT:
2316 cond = is_unsigned ? above : greater;
2317 break;
2318 case Token::LTE:
2319 cond = is_unsigned ? below_equal : less_equal;
2320 break;
2321 case Token::GTE:
2322 cond = is_unsigned ? above_equal : greater_equal;
2323 break;
2324 case Token::IN:
2325 case Token::INSTANCEOF:
2326 default:
2327 UNREACHABLE();
2328 }
2329 return cond;
2330 }
2333 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2334 LOperand* left = instr->left();
2335 LOperand* right = instr->right();
2336 bool is_unsigned =
2337 instr->is_double() ||
2338 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2339 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2340 Condition cc = TokenToCondition(instr->op(), is_unsigned);
2342 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2343 // We can statically evaluate the comparison.
2344 double left_val = ToDouble(LConstantOperand::cast(left));
2345 double right_val = ToDouble(LConstantOperand::cast(right));
2346 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2347 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2348 EmitGoto(next_block);
2349 } else {
2350 if (instr->is_double()) {
2351 // Don't base result on EFLAGS when a NaN is involved. Instead
2352 // jump to the false block.
2353 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
2354 __ j(parity_even, instr->FalseLabel(chunk_));
2355 } else {
2356 int32_t value;
2357 if (right->IsConstantOperand()) {
2358 value = ToInteger32(LConstantOperand::cast(right));
2359 if (instr->hydrogen_value()->representation().IsSmi()) {
2360 __ Cmp(ToRegister(left), Smi::FromInt(value));
2361 } else {
2362 __ cmpl(ToRegister(left), Immediate(value));
2363 }
2364 } else if (left->IsConstantOperand()) {
2365 value = ToInteger32(LConstantOperand::cast(left));
2366 if (instr->hydrogen_value()->representation().IsSmi()) {
2367 if (right->IsRegister()) {
2368 __ Cmp(ToRegister(right), Smi::FromInt(value));
2369 } else {
2370 __ Cmp(ToOperand(right), Smi::FromInt(value));
2371 }
2372 } else if (right->IsRegister()) {
2373 __ cmpl(ToRegister(right), Immediate(value));
2374 } else {
2375 __ cmpl(ToOperand(right), Immediate(value));
2376 }
2377 // We commuted the operands, so commute the condition.
2378 cc = CommuteCondition(cc);
2379 } else if (instr->hydrogen_value()->representation().IsSmi()) {
2380 if (right->IsRegister()) {
2381 __ cmpp(ToRegister(left), ToRegister(right));
2382 } else {
2383 __ cmpp(ToRegister(left), ToOperand(right));
2384 }
2385 } else {
2386 if (right->IsRegister()) {
2387 __ cmpl(ToRegister(left), ToRegister(right));
2388 } else {
2389 __ cmpl(ToRegister(left), ToOperand(right));
2390 }
2391 }
2393 EmitBranch(instr, cc);
2394 }
2395 }
2398 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2399 Register left = ToRegister(instr->left());
2401 if (instr->right()->IsConstantOperand()) {
2402 Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2403 __ Cmp(left, right);
2404 } else {
2405 Register right = ToRegister(instr->right());
2406 __ cmpp(left, right);
2407 }
2408 EmitBranch(instr, equal);
2409 }
2412 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2413 if (instr->hydrogen()->representation().IsTagged()) {
2414 Register input_reg = ToRegister(instr->object());
2415 __ Cmp(input_reg, factory()->the_hole_value());
2416 EmitBranch(instr, equal);
2417 return;
2418 }
2420 XMMRegister input_reg = ToDoubleRegister(instr->object());
2421 __ ucomisd(input_reg, input_reg);
2422 EmitFalseBranch(instr, parity_odd);
2424 __ subp(rsp, Immediate(kDoubleSize));
2425 __ movsd(MemOperand(rsp, 0), input_reg);
2426 __ addp(rsp, Immediate(kDoubleSize));
2428 int offset = sizeof(kHoleNanUpper32);
2429 __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
2430 EmitBranch(instr, equal);
2431 }
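// How the double path above works: the hole is encoded as one specific NaN
// bit pattern, so a value that is not a NaN at all (parity_odd after the
// self-compare) takes the false branch immediately. Otherwise the raw 64-bit
// value is spilled just below rsp and only its upper 32 bits are compared
// with kHoleNanUpper32; that half-word is assumed sufficient to tell the
// hole pattern apart from NaNs produced by ordinary arithmetic.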
2434 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2435 Representation rep = instr->hydrogen()->value()->representation();
2436 ASSERT(!rep.IsInteger32());
2438 if (rep.IsDouble()) {
2439 XMMRegister value = ToDoubleRegister(instr->value());
2440 XMMRegister xmm_scratch = double_scratch0();
2441 __ xorps(xmm_scratch, xmm_scratch);
2442 __ ucomisd(xmm_scratch, value);
2443 EmitFalseBranch(instr, not_equal);
2444 __ movmskpd(kScratchRegister, value);
2445 __ testl(kScratchRegister, Immediate(1));
2446 EmitBranch(instr, not_zero);
2447 } else {
2448 Register value = ToRegister(instr->value());
2449 Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2450 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
2451 __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
2452 Immediate(0x1));
2453 EmitFalseBranch(instr, no_overflow);
2454 __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
2455 Immediate(0x00000000));
2456 EmitBranch(instr, equal);
2457 }
2458 }
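// The heap-number branch above uses a flags trick: -0.0 has the bit pattern
// 0x8000000000000000, so its upper word equals INT32_MIN (0x80000000).
// cmpl(x, 1) computes x - 1, which signed-overflows only for x == INT32_MIN,
// so EmitFalseBranch(instr, no_overflow) rejects every other exponent word.
// The mantissa compare against zero then separates -0 from tiny negative
// denormals, which share the 0x80000000 upper word but have nonzero low bits.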
2461 Condition LCodeGen::EmitIsObject(Register input,
2462 Label* is_not_object,
2463 Label* is_object) {
2464 ASSERT(!input.is(kScratchRegister));
2466 __ JumpIfSmi(input, is_not_object);
2468 __ CompareRoot(input, Heap::kNullValueRootIndex);
2469 __ j(equal, is_object);
2471 __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
2472 // Undetectable objects behave like undefined.
2473 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
2474 Immediate(1 << Map::kIsUndetectable));
2475 __ j(not_zero, is_not_object);
2477 __ movzxbl(kScratchRegister,
2478 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
2479 __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2480 __ j(below, is_not_object);
2481 __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2482 return below_equal;
2483 }
2486 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2487 Register reg = ToRegister(instr->value());
2489 Condition true_cond = EmitIsObject(
2490 reg, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2492 EmitBranch(instr, true_cond);
2493 }
2496 Condition LCodeGen::EmitIsString(Register input,
2497 Register temp1,
2498 Label* is_not_string,
2499 SmiCheck check_needed = INLINE_SMI_CHECK) {
2500 if (check_needed == INLINE_SMI_CHECK) {
2501 __ JumpIfSmi(input, is_not_string);
2502 }
2504 Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2505 return cond;
2506 }
2510 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2511 Register reg = ToRegister(instr->value());
2512 Register temp = ToRegister(instr->temp());
2514 SmiCheck check_needed =
2515 instr->hydrogen()->value()->type().IsHeapObject()
2516 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2518 Condition true_cond = EmitIsString(
2519 reg, temp, instr->FalseLabel(chunk_), check_needed);
2521 EmitBranch(instr, true_cond);
2522 }
2525 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2526 Condition is_smi;
2527 if (instr->value()->IsRegister()) {
2528 Register input = ToRegister(instr->value());
2529 is_smi = masm()->CheckSmi(input);
2530 } else {
2531 Operand input = ToOperand(instr->value());
2532 is_smi = masm()->CheckSmi(input);
2533 }
2534 EmitBranch(instr, is_smi);
2535 }
2538 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2539 Register input = ToRegister(instr->value());
2540 Register temp = ToRegister(instr->temp());
2542 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2543 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2544 }
2545 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2546 __ testb(FieldOperand(temp, Map::kBitFieldOffset),
2547 Immediate(1 << Map::kIsUndetectable));
2548 EmitBranch(instr, not_zero);
2549 }
2552 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2553 ASSERT(ToRegister(instr->context()).is(rsi));
2554 Token::Value op = instr->op();
2556 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2557 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2559 Condition condition = TokenToCondition(op, false);
2560 __ testp(rax, rax);
2562 EmitBranch(instr, condition);
2563 }
2566 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2567 InstanceType from = instr->from();
2568 InstanceType to = instr->to();
2569 if (from == FIRST_TYPE) return to;
2570 ASSERT(from == to || to == LAST_TYPE);
2571 return from;
2572 }
2575 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2576 InstanceType from = instr->from();
2577 InstanceType to = instr->to();
2578 if (from == to) return equal;
2579 if (to == LAST_TYPE) return above_equal;
2580 if (from == FIRST_TYPE) return below_equal;
2581 UNREACHABLE();
2582 return equal;
2583 }
2586 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2587 Register input = ToRegister(instr->value());
2589 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2590 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2591 }
2593 __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
2594 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2595 }
2598 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2599 Register input = ToRegister(instr->value());
2600 Register result = ToRegister(instr->result());
2602 __ AssertString(input);
2604 __ movl(result, FieldOperand(input, String::kHashFieldOffset));
2605 ASSERT(String::kHashShift >= kSmiTagSize);
2606 __ IndexFromHash(result, result);
2607 }
2610 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2611 LHasCachedArrayIndexAndBranch* instr) {
2612 Register input = ToRegister(instr->value());
2614 __ testl(FieldOperand(input, String::kHashFieldOffset),
2615 Immediate(String::kContainsCachedArrayIndexMask));
2616 EmitBranch(instr, equal);
2617 }
2620 // Branches to a label or falls through with the answer in the z flag.
2621 // Trashes the temp register.
2622 void LCodeGen::EmitClassOfTest(Label* is_true,
2623 Label* is_false,
2624 Handle<String> class_name,
2625 Register input,
2626 Register temp,
2627 Register temp2) {
2628 ASSERT(!input.is(temp));
2629 ASSERT(!input.is(temp2));
2630 ASSERT(!temp.is(temp2));
2632 __ JumpIfSmi(input, is_false);
2634 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
2635 // Assuming the following assertions, we can use the same compares to test
2636 // for both being a function type and being in the object type range.
2637 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2638 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2639 FIRST_SPEC_OBJECT_TYPE + 1);
2640 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2641 LAST_SPEC_OBJECT_TYPE - 1);
2642 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2643 __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2644 __ j(below, is_false);
2645 __ j(equal, is_true);
2646 __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2647 __ j(equal, is_true);
2648 } else {
2649 // Faster code path to avoid two compares: subtract lower bound from the
2650 // actual type and do a signed compare with the width of the type range.
2651 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2652 __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2653 __ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2654 __ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2655 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2656 __ j(above, is_false);
2657 }
2659 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2660 // Check if the constructor in the map is a function.
2661 __ movp(temp, FieldOperand(temp, Map::kConstructorOffset));
2663 // Objects with a non-function constructor have class 'Object'.
2664 __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
2665 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
2666 __ j(not_equal, is_true);
2667 } else {
2668 __ j(not_equal, is_false);
2669 }
2671 // temp now contains the constructor function. Grab the
2672 // instance class name from there.
2673 __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2674 __ movp(temp, FieldOperand(temp,
2675 SharedFunctionInfo::kInstanceClassNameOffset));
2676 // The class name we are testing against is internalized since it's a literal.
2677 // The name in the constructor is internalized because of the way the context
2678 // is booted. This routine isn't expected to work for random API-created
2679 // classes and it doesn't have to because you can't access it with natives
2680 // syntax. Since both sides are internalized it is sufficient to use an
2681 // identity comparison.
2682 ASSERT(class_name->IsInternalizedString());
2683 __ Cmp(temp, class_name);
2684 // End with the answer in the z flag.
2685 }
2688 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2689 Register input = ToRegister(instr->value());
2690 Register temp = ToRegister(instr->temp());
2691 Register temp2 = ToRegister(instr->temp2());
2692 Handle<String> class_name = instr->hydrogen()->class_name();
2694 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2695 class_name, input, temp, temp2);
2697 EmitBranch(instr, equal);
2698 }
2701 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2702 Register reg = ToRegister(instr->value());
2704 __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2705 EmitBranch(instr, equal);
2706 }
2709 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2710 ASSERT(ToRegister(instr->context()).is(rsi));
2711 InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
2712 __ Push(ToRegister(instr->left()));
2713 __ Push(ToRegister(instr->right()));
2714 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2715 Label true_value, done;
2716 __ testp(rax, rax);
2717 __ j(zero, &true_value, Label::kNear);
2718 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2719 __ jmp(&done, Label::kNear);
2720 __ bind(&true_value);
2721 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2722 __ bind(&done);
2723 }
2726 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2727 class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
2728 public:
2729 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2730 LInstanceOfKnownGlobal* instr)
2731 : LDeferredCode(codegen), instr_(instr) { }
2732 virtual void Generate() V8_OVERRIDE {
2733 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2734 }
2735 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
2736 Label* map_check() { return &map_check_; }
2737 private:
2738 LInstanceOfKnownGlobal* instr_;
2739 Label map_check_;
2740 };
2742 ASSERT(ToRegister(instr->context()).is(rsi));
2743 DeferredInstanceOfKnownGlobal* deferred;
2744 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2746 Label done, false_result;
2747 Register object = ToRegister(instr->value());
2749 // A Smi is not an instance of anything.
2750 __ JumpIfSmi(object, &false_result, Label::kNear);
2752 // This is the inlined call site instanceof cache. The two occurrences of the
2753 // hole value will be patched to the last map/result pair generated by the
2754 // instanceof stub.
2755 Label cache_miss;
2756 // Use a temp register to avoid memory operands with variable lengths.
2757 Register map = ToRegister(instr->temp());
2758 __ movp(map, FieldOperand(object, HeapObject::kMapOffset));
2759 __ bind(deferred->map_check()); // Label for calculating code patching.
2760 Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
2761 __ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
2762 __ cmpp(map, Operand(kScratchRegister, 0));
2763 __ j(not_equal, &cache_miss, Label::kNear);
2764 // Patched to load either true or false.
2765 __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
2767 // Check that the code size between patch label and patch sites is invariant.
2768 Label end_of_patched_code;
2769 __ bind(&end_of_patched_code);
2770 ASSERT(true);
2772 __ jmp(&done, Label::kNear);
2774 // The inlined call site cache did not match. Check for null and string
2775 // before calling the deferred code.
2776 __ bind(&cache_miss); // Null is not an instance of anything.
2777 __ CompareRoot(object, Heap::kNullValueRootIndex);
2778 __ j(equal, &false_result, Label::kNear);
2780 // String values are not instances of anything.
2781 __ JumpIfNotString(object, kScratchRegister, deferred->entry());
2783 __ bind(&false_result);
2784 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2786 __ bind(deferred->exit());
2787 __ bind(&done);
2788 }
2791 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2792 Label* map_check) {
2793 {
2794 PushSafepointRegistersScope scope(this);
2795 InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
2796 InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
2797 InstanceofStub stub(isolate(), flags);
2799 __ Push(ToRegister(instr->value()));
2800 __ Push(instr->function());
2802 static const int kAdditionalDelta = 10;
2803 int delta =
2804 masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
2806 __ PushImm32(delta);
2808 // We are pushing three values on the stack but recording a
2809 // safepoint with two arguments because stub is going to
2810 // remove the third argument from the stack before jumping
2811 // to instanceof builtin on the slow path.
2812 CallCodeGeneric(stub.GetCode(),
2813 RelocInfo::CODE_TARGET,
2814 instr,
2815 RECORD_SAFEPOINT_WITH_REGISTERS,
2816 2);
2817 ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
2818 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2819 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2820 // Move result to a register that survives the end of the
2821 // PushSafepointRegisterScope.
2822 __ movp(kScratchRegister, rax);
2823 }
2824 __ testp(kScratchRegister, kScratchRegister);
2825 Label load_false;
2826 Label done;
2827 __ j(not_zero, &load_false, Label::kNear);
2828 __ LoadRoot(rax, Heap::kTrueValueRootIndex);
2829 __ jmp(&done, Label::kNear);
2830 __ bind(&load_false);
2831 __ LoadRoot(rax, Heap::kFalseValueRootIndex);
2832 __ bind(&done);
2833 }
2836 void LCodeGen::DoCmpT(LCmpT* instr) {
2837 ASSERT(ToRegister(instr->context()).is(rsi));
2838 Token::Value op = instr->op();
2840 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2841 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2843 Condition condition = TokenToCondition(op, false);
2844 Label true_value, done;
2845 __ testp(rax, rax);
2846 __ j(condition, &true_value, Label::kNear);
2847 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2848 __ jmp(&done, Label::kNear);
2849 __ bind(&true_value);
2850 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2851 __ bind(&done);
2852 }
2855 void LCodeGen::DoReturn(LReturn* instr) {
2856 if (FLAG_trace && info()->IsOptimizing()) {
2857 // Preserve the return value on the stack and rely on the runtime call
2858 // to return the value in the same register. We're leaving the code
2859 // managed by the register allocator and tearing down the frame, it's
2860 // safe to write to the context register.
2861 __ Push(rax);
2862 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
2863 __ CallRuntime(Runtime::kTraceExit, 1);
2864 }
2865 if (info()->saves_caller_doubles()) {
2866 RestoreCallerDoubles();
2867 }
2868 int no_frame_start = -1;
2869 if (NeedsEagerFrame()) {
2870 __ movp(rsp, rbp);
2871 __ popq(rbp);
2872 no_frame_start = masm_->pc_offset();
2873 }
2874 if (instr->has_constant_parameter_count()) {
2875 __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
2876 rcx);
2877 } else {
2878 Register reg = ToRegister(instr->parameter_count());
2879 // The argument count parameter is a smi.
2880 __ SmiToInteger32(reg, reg);
2881 Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
2882 __ PopReturnAddressTo(return_addr_reg);
2883 __ shlp(reg, Immediate(kPointerSizeLog2));
2884 __ addp(rsp, reg);
2885 __ jmp(return_addr_reg);
2886 }
2887 if (no_frame_start != -1) {
2888 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2889 }
2890 }
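// Example of the epilogue above (illustrative count): with a constant
// parameter count of 2, Ret drops (2 + 1) * kPointerSize = 24 bytes, the
// extra slot being the receiver. In the dynamic case the smi count is
// untagged, scaled by 8 via shlp(kPointerSizeLog2), and added to rsp while
// the return address is parked in rcx or rbx; the final jmp through that
// register returns to the caller with the arguments already popped.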
2893 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2894 Register result = ToRegister(instr->result());
2895 __ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
2896 if (instr->hydrogen()->RequiresHoleCheck()) {
2897 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2898 DeoptimizeIf(equal, instr->environment());
2899 }
2900 }
2903 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2904 ASSERT(ToRegister(instr->context()).is(rsi));
2905 ASSERT(ToRegister(instr->global_object()).is(rax));
2906 ASSERT(ToRegister(instr->result()).is(rax));
2908 __ Move(rcx, instr->name());
2909 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
2910 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
2911 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2912 }
2915 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2916 Register value = ToRegister(instr->value());
2917 Handle<Cell> cell_handle = instr->hydrogen()->cell().handle();
2919 // If the cell we are storing to contains the hole it could have
2920 // been deleted from the property dictionary. In that case, we need
2921 // to update the property details in the property dictionary to mark
2922 // it as no longer deleted. We deoptimize in that case.
2923 if (instr->hydrogen()->RequiresHoleCheck()) {
2924 // We have a temp because CompareRoot might clobber kScratchRegister.
2925 Register cell = ToRegister(instr->temp());
2926 ASSERT(!value.is(cell));
2927 __ Move(cell, cell_handle, RelocInfo::CELL);
2928 __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
2929 DeoptimizeIf(equal, instr->environment());
2930 // Store the value.
2931 __ movp(Operand(cell, 0), value);
2932 } else {
2933 // Store the value.
2934 __ Move(kScratchRegister, cell_handle, RelocInfo::CELL);
2935 __ movp(Operand(kScratchRegister, 0), value);
2936 }
2937 // Cells are always rescanned, so no write barrier here.
2938 }
2941 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2942 Register context = ToRegister(instr->context());
2943 Register result = ToRegister(instr->result());
2944 __ movp(result, ContextOperand(context, instr->slot_index()));
2945 if (instr->hydrogen()->RequiresHoleCheck()) {
2946 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2947 if (instr->hydrogen()->DeoptimizesOnHole()) {
2948 DeoptimizeIf(equal, instr->environment());
2949 } else {
2950 Label is_not_hole;
2951 __ j(not_equal, &is_not_hole, Label::kNear);
2952 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2953 __ bind(&is_not_hole);
2954 }
2955 }
2956 }
2959 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2960 Register context = ToRegister(instr->context());
2961 Register value = ToRegister(instr->value());
2963 Operand target = ContextOperand(context, instr->slot_index());
2965 Label skip_assignment;
2966 if (instr->hydrogen()->RequiresHoleCheck()) {
2967 __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
2968 if (instr->hydrogen()->DeoptimizesOnHole()) {
2969 DeoptimizeIf(equal, instr->environment());
2970 } else {
2971 __ j(not_equal, &skip_assignment);
2972 }
2974 __ movp(target, value);
2976 if (instr->hydrogen()->NeedsWriteBarrier()) {
2977 SmiCheck check_needed =
2978 instr->hydrogen()->value()->type().IsHeapObject()
2979 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2980 int offset = Context::SlotOffset(instr->slot_index());
2981 Register scratch = ToRegister(instr->temp());
2982 __ RecordWriteContextSlot(context,
2983 offset,
2984 value,
2985 scratch,
2986 kSaveFPRegs,
2987 EMIT_REMEMBERED_SET,
2988 check_needed);
2989 }
2991 __ bind(&skip_assignment);
2992 }
2995 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2996 HObjectAccess access = instr->hydrogen()->access();
2997 int offset = access.offset();
2999 if (access.IsExternalMemory()) {
3000 Register result = ToRegister(instr->result());
3001 if (instr->object()->IsConstantOperand()) {
3002 ASSERT(result.is(rax));
3003 __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
3004 } else {
3005 Register object = ToRegister(instr->object());
3006 __ Load(result, MemOperand(object, offset), access.representation());
3007 }
3008 return;
3009 }
3011 Register object = ToRegister(instr->object());
3012 if (instr->hydrogen()->representation().IsDouble()) {
3013 XMMRegister result = ToDoubleRegister(instr->result());
3014 __ movsd(result, FieldOperand(object, offset));
3015 return;
3016 }
3018 Register result = ToRegister(instr->result());
3019 if (!access.IsInobject()) {
3020 __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
3021 object = result;
3022 }
3024 Representation representation = access.representation();
3025 if (representation.IsSmi() && SmiValuesAre32Bits() &&
3026 instr->hydrogen()->representation().IsInteger32()) {
3027 if (FLAG_debug_code) {
3028 Register scratch = kScratchRegister;
3029 __ Load(scratch, FieldOperand(object, offset), representation);
3030 __ AssertSmi(scratch);
3031 }
3033 // Read int value directly from upper half of the smi.
3034 STATIC_ASSERT(kSmiTag == 0);
3035 ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3036 offset += kPointerSize / 2;
3037 representation = Representation::Integer32();
3038 }
3039 __ Load(result, FieldOperand(object, offset), representation);
3040 }
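// Why the half-word load above is valid (assuming 32-bit smis on x64,
// little endian): a smi keeps its payload in the upper 32 bits of the word,
// e.g. the smi 7 is stored as 0x0000000700000000. Advancing the field offset
// by kPointerSize / 2 addresses that payload directly, so a plain 32-bit
// load yields the untagged integer without any shift instruction.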
3043 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3044 ASSERT(ToRegister(instr->context()).is(rsi));
3045 ASSERT(ToRegister(instr->object()).is(rax));
3046 ASSERT(ToRegister(instr->result()).is(rax));
3048 __ Move(rcx, instr->name());
3049 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
3050 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3051 }
3054 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3055 Register function = ToRegister(instr->function());
3056 Register result = ToRegister(instr->result());
3058 // Check that the function really is a function.
3059 __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
3060 DeoptimizeIf(not_equal, instr->environment());
3062 // Check whether the function has an instance prototype.
3063 Label non_instance;
3064 __ testb(FieldOperand(result, Map::kBitFieldOffset),
3065 Immediate(1 << Map::kHasNonInstancePrototype));
3066 __ j(not_zero, &non_instance, Label::kNear);
3068 // Get the prototype or initial map from the function.
3069 __ movp(result,
3070 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3072 // Check that the function has a prototype or an initial map.
3073 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3074 DeoptimizeIf(equal, instr->environment());
3076 // If the function does not have an initial map, we're done.
3077 Label done;
3078 __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
3079 __ j(not_equal, &done, Label::kNear);
3081 // Get the prototype from the initial map.
3082 __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
3083 __ jmp(&done, Label::kNear);
3085 // Non-instance prototype: Fetch prototype from constructor field
3086 // in the function's map.
3087 __ bind(&non_instance);
3088 __ movp(result, FieldOperand(result, Map::kConstructorOffset));
3090 // All done.
3091 __ bind(&done);
3092 }
3095 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3096 Register result = ToRegister(instr->result());
3097 __ LoadRoot(result, instr->index());
3098 }
3101 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3102 Register arguments = ToRegister(instr->arguments());
3103 Register result = ToRegister(instr->result());
3105 if (instr->length()->IsConstantOperand() &&
3106 instr->index()->IsConstantOperand()) {
3107 int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3108 int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3109 if (const_index >= 0 && const_index < const_length) {
3110 StackArgumentsAccessor args(arguments, const_length,
3111 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3112 __ movp(result, args.GetArgumentOperand(const_index));
3113 } else if (FLAG_debug_code) {
3114 __ int3();
3115 }
3116 } else {
3117 Register length = ToRegister(instr->length());
3118 // There are two words between the frame pointer and the last argument.
3119 // Subtracting from length accounts for one of them; add one more.
3120 if (instr->index()->IsRegister()) {
3121 __ subl(length, ToRegister(instr->index()));
3122 } else {
3123 __ subl(length, ToOperand(instr->index()));
3124 }
3125 StackArgumentsAccessor args(arguments, length,
3126 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3127 __ movp(result, args.GetArgumentOperand(0));
3128 }
3129 }
3132 bool LCodeGen::HandleExternalArrayOpRequiresPreScale(
3133 LOperand* key,
3134 Representation key_representation,
3135 ElementsKind elements_kind) {
3136 Register key_reg = ToRegister(key);
3137 if (ExternalArrayOpRequiresPreScale(key_representation, elements_kind)) {
3138 int pre_shift_size = ElementsKindToShiftSize(elements_kind) -
3139 static_cast<int>(maximal_scale_factor);
3140 ASSERT(pre_shift_size > 0);
3141 __ shll(key_reg, Immediate(pre_shift_size));
3142 return true;
3143 }
3144 return false;
3145 }
3148 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3149 ElementsKind elements_kind = instr->elements_kind();
3150 LOperand* key = instr->key();
3151 if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
3152 Register key_reg = ToRegister(key);
3153 Representation key_representation =
3154 instr->hydrogen()->key()->representation();
3155 if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
3156 if (!HandleExternalArrayOpRequiresPreScale(
3157 key, key_representation, elements_kind))
3158 __ SmiToInteger64(key_reg, key_reg);
3159 } else if (instr->hydrogen()->IsDehoisted()) {
3160 // Sign extend key because it could be a 32 bit negative value
3161 // and the dehoisted address computation happens in 64 bits
3162 __ movsxlq(key_reg, key_reg);
3163 }
3164 } else if (kPointerSize == kInt64Size && !key->IsConstantOperand()) {
3165 Representation key_representation =
3166 instr->hydrogen()->key()->representation();
3167 if (ExternalArrayOpRequiresTemp(key_representation, elements_kind))
3168 HandleExternalArrayOpRequiresPreScale(
3169 key, key_representation, elements_kind);
3170 }
3172 Operand operand(BuildFastArrayOperand(
3173 instr->elements(),
3174 key,
3175 instr->hydrogen()->key()->representation(),
3176 elements_kind,
3177 instr->base_offset()));
3179 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3180 elements_kind == FLOAT32_ELEMENTS) {
3181 XMMRegister result(ToDoubleRegister(instr->result()));
3182 __ movss(result, operand);
3183 __ cvtss2sd(result, result);
3184 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3185 elements_kind == FLOAT64_ELEMENTS) {
3186 __ movsd(ToDoubleRegister(instr->result()), operand);
3187 } else if (IsSIMD128ElementsKind(elements_kind)) {
3188 __ movups(ToSIMD128Register(instr->result()), operand);
3190 Register result(ToRegister(instr->result()));
3191 switch (elements_kind) {
3192 case EXTERNAL_INT8_ELEMENTS:
3193 case INT8_ELEMENTS:
3194 __ movsxbl(result, operand);
3195 break;
3196 case EXTERNAL_UINT8_ELEMENTS:
3197 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3198 case UINT8_ELEMENTS:
3199 case UINT8_CLAMPED_ELEMENTS:
3200 __ movzxbl(result, operand);
3201 break;
3202 case EXTERNAL_INT16_ELEMENTS:
3203 case INT16_ELEMENTS:
3204 __ movsxwl(result, operand);
3205 break;
3206 case EXTERNAL_UINT16_ELEMENTS:
3207 case UINT16_ELEMENTS:
3208 __ movzxwl(result, operand);
3209 break;
3210 case EXTERNAL_INT32_ELEMENTS:
3211 case INT32_ELEMENTS:
3212 __ movl(result, operand);
3213 break;
3214 case EXTERNAL_UINT32_ELEMENTS:
3215 case UINT32_ELEMENTS:
3216 __ movl(result, operand);
3217 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3218 __ testl(result, result);
3219 DeoptimizeIf(negative, instr->environment());
3220 }
3221 break;
3222 case EXTERNAL_FLOAT32_ELEMENTS:
3223 case EXTERNAL_FLOAT64_ELEMENTS:
3224 case EXTERNAL_FLOAT32x4_ELEMENTS:
3225 case EXTERNAL_FLOAT64x2_ELEMENTS:
3226 case EXTERNAL_INT32x4_ELEMENTS:
3227 case FLOAT32_ELEMENTS:
3228 case FLOAT64_ELEMENTS:
3229 case FLOAT32x4_ELEMENTS:
3230 case FLOAT64x2_ELEMENTS:
3231 case INT32x4_ELEMENTS:
3232 case FAST_ELEMENTS:
3233 case FAST_SMI_ELEMENTS:
3234 case FAST_DOUBLE_ELEMENTS:
3235 case FAST_HOLEY_ELEMENTS:
3236 case FAST_HOLEY_SMI_ELEMENTS:
3237 case FAST_HOLEY_DOUBLE_ELEMENTS:
3238 case DICTIONARY_ELEMENTS:
3239 case SLOPPY_ARGUMENTS_ELEMENTS:
3240 UNREACHABLE();
3241 break;
3242 }
3243 }
3244 }
3247 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3248 XMMRegister result(ToDoubleRegister(instr->result()));
3249 LOperand* key = instr->key();
3250 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
3251 instr->hydrogen()->IsDehoisted()) {
3252 // Sign extend key because it could be a 32 bit negative value
3253 // and the dehoisted address computation happens in 64 bits
3254 __ movsxlq(ToRegister(key), ToRegister(key));
3255 }
3256 if (instr->hydrogen()->RequiresHoleCheck()) {
3257 Operand hole_check_operand = BuildFastArrayOperand(
3258 instr->elements(),
3259 key,
3260 instr->hydrogen()->key()->representation(),
3261 FAST_DOUBLE_ELEMENTS,
3262 instr->base_offset() + sizeof(kHoleNanLower32));
3263 __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
3264 DeoptimizeIf(equal, instr->environment());
3265 }
3267 Operand double_load_operand = BuildFastArrayOperand(
3268 instr->elements(),
3269 key,
3270 instr->hydrogen()->key()->representation(),
3271 FAST_DOUBLE_ELEMENTS,
3272 instr->base_offset());
3273 __ movsd(result, double_load_operand);
3274 }
3277 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3278 HLoadKeyed* hinstr = instr->hydrogen();
3279 Register result = ToRegister(instr->result());
3280 LOperand* key = instr->key();
3281 bool requires_hole_check = hinstr->RequiresHoleCheck();
3282 Representation representation = hinstr->representation();
3283 int offset = instr->base_offset();
3285 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
3286 instr->hydrogen()->IsDehoisted()) {
3287 // Sign extend key because it could be a 32 bit negative value
3288 // and the dehoisted address computation happens in 64 bits
3289 __ movsxlq(ToRegister(key), ToRegister(key));
3290 }
3291 if (representation.IsInteger32() && SmiValuesAre32Bits() &&
3292 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3293 ASSERT(!requires_hole_check);
3294 if (FLAG_debug_code) {
3295 Register scratch = kScratchRegister;
3296 __ Load(scratch,
3297 BuildFastArrayOperand(instr->elements(),
3298 key,
3299 instr->hydrogen()->key()->representation(),
3300 FAST_ELEMENTS,
3301 offset),
3302 Representation::Smi());
3303 __ AssertSmi(scratch);
3304 }
3305 // Read int value directly from upper half of the smi.
3306 STATIC_ASSERT(kSmiTag == 0);
3307 ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3308 offset += kPointerSize / 2;
3309 }
3311 __ Load(result,
3312 BuildFastArrayOperand(instr->elements(),
3313 key,
3314 instr->hydrogen()->key()->representation(),
3315 FAST_ELEMENTS,
3316 offset),
3317 representation);
3319 // Check for the hole value.
3320 if (requires_hole_check) {
3321 if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3322 Condition smi = __ CheckSmi(result);
3323 DeoptimizeIf(NegateCondition(smi), instr->environment());
3325 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3326 DeoptimizeIf(equal, instr->environment());
3327 }
3328 }
3329 }
3332 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3333 if (instr->is_typed_elements()) {
3334 DoLoadKeyedExternalArray(instr);
3335 } else if (instr->hydrogen()->representation().IsDouble()) {
3336 DoLoadKeyedFixedDoubleArray(instr);
3337 } else {
3338 DoLoadKeyedFixedArray(instr);
3339 }
3340 }
3343 Operand LCodeGen::BuildFastArrayOperand(
3344 LOperand* elements_pointer,
3345 LOperand* key,
3346 Representation key_representation,
3347 ElementsKind elements_kind,
3348 uint32_t offset) {
3349 Register elements_pointer_reg = ToRegister(elements_pointer);
3350 int shift_size = ElementsKindToShiftSize(elements_kind);
3351 if (key->IsConstantOperand()) {
3352 int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
3353 if (constant_value & 0xF0000000) {
3354 Abort(kArrayIndexConstantValueTooBig);
3355 }
3357 return Operand(elements_pointer_reg,
3358 (constant_value << shift_size) + offset);
3359 } else {
3360 // Take the tag bit into account while computing the shift size.
3361 if (key_representation.IsSmi() && (shift_size >= 1)) {
3362 ASSERT(SmiValuesAre31Bits());
3363 shift_size -= kSmiTagSize;
3364 }
3365 if (ExternalArrayOpRequiresPreScale(key_representation, elements_kind)) {
3366 // Make sure the key is pre-scaled against maximal_scale_factor.
3367 shift_size = static_cast<int>(maximal_scale_factor);
3368 }
3369 ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3370 return Operand(elements_pointer_reg,
3371 ToRegister(key),
3372 scale_factor,
3373 offset);
3374 }
3375 }
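// Illustrative operand arithmetic (example values, not from the source):
// for constant key 5 in a FAST_ELEMENTS array on x64 (shift_size == 3) the
// operand folds to [elements + (5 << 3) + offset]. For a register key that
// is a 31-bit smi, the register already holds value << 1, so one tag bit is
// subtracted from the shift before it is reinterpreted as a hardware scale
// factor; pre-scaled external-array keys are clamped to the maximal factor.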
3378 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3379 ASSERT(ToRegister(instr->context()).is(rsi));
3380 ASSERT(ToRegister(instr->object()).is(rdx));
3381 ASSERT(ToRegister(instr->key()).is(rax));
3383 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3384 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3385 }
3388 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3389 Register result = ToRegister(instr->result());
3391 if (instr->hydrogen()->from_inlined()) {
3392 __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
3393 } else {
3394 // Check for arguments adaptor frame.
3395 Label done, adapted;
3396 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3397 __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
3398 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3399 __ j(equal, &adapted, Label::kNear);
3401 // No arguments adaptor frame.
3402 __ movp(result, rbp);
3403 __ jmp(&done, Label::kNear);
3405 // Arguments adaptor frame present.
3406 __ bind(&adapted);
3407 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3409 // Result is the frame pointer for the frame if not adapted and for the real
3410 // frame below the adaptor frame if adapted.
3411 __ bind(&done);
3412 }
3413 }
3416 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3417 Register result = ToRegister(instr->result());
3419 Label done;
3421 // If there is no arguments adaptor frame, the number of arguments is fixed.
3422 if (instr->elements()->IsRegister()) {
3423 __ cmpp(rbp, ToRegister(instr->elements()));
3425 __ cmpp(rbp, ToOperand(instr->elements()));
3427 __ movl(result, Immediate(scope()->num_parameters()));
3428 __ j(equal, &done, Label::kNear);
3430 // Arguments adaptor frame present. Get argument length from there.
3431 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3432 __ SmiToInteger32(result,
3433 Operand(result,
3434 ArgumentsAdaptorFrameConstants::kLengthOffset));
3436 // Argument length is in result register.
3437 __ bind(&done);
3438 }
3441 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3442 Register receiver = ToRegister(instr->receiver());
3443 Register function = ToRegister(instr->function());
3445 // If the receiver is null or undefined, we have to pass the global
3446 // object as a receiver to normal functions. Values have to be
3447 // passed unchanged to builtins and strict-mode functions.
3448 Label global_object, receiver_ok;
3449 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3451 if (!instr->hydrogen()->known_function()) {
3452 // Do not transform the receiver to object for strict mode
3453 // functions.
3454 __ movp(kScratchRegister,
3455 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3456 __ testb(FieldOperand(kScratchRegister,
3457 SharedFunctionInfo::kStrictModeByteOffset),
3458 Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
3459 __ j(not_equal, &receiver_ok, dist);
3461 // Do not transform the receiver to object for builtins.
3462 __ testb(FieldOperand(kScratchRegister,
3463 SharedFunctionInfo::kNativeByteOffset),
3464 Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
3465 __ j(not_equal, &receiver_ok, dist);
3466 }
3468 // Normal function. Replace undefined or null with global receiver.
3469 __ CompareRoot(receiver, Heap::kNullValueRootIndex);
3470 __ j(equal, &global_object, Label::kNear);
3471 __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
3472 __ j(equal, &global_object, Label::kNear);
3474 // The receiver should be a JS object.
3475 Condition is_smi = __ CheckSmi(receiver);
3476 DeoptimizeIf(is_smi, instr->environment());
3477 __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
3478 DeoptimizeIf(below, instr->environment());
3480 __ jmp(&receiver_ok, Label::kNear);
3481 __ bind(&global_object);
3482 __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
3483 __ movp(receiver,
3484 Operand(receiver,
3485 Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
3486 __ movp(receiver,
3487 FieldOperand(receiver, GlobalObject::kGlobalReceiverOffset));
3489 __ bind(&receiver_ok);
3490 }
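// JavaScript-level behaviour implemented above, for reference:
//   function f() { return this; }
//   f();          // sloppy mode: undefined receiver -> global receiver
//   f.call(null); // sloppy mode: null receiver -> global receiver
// Strict-mode and native functions skip the transformation entirely, and a
// smi or other non-spec-object receiver deoptimizes so the runtime can box
// it instead.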
3493 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3494 Register receiver = ToRegister(instr->receiver());
3495 Register function = ToRegister(instr->function());
3496 Register length = ToRegister(instr->length());
3497 Register elements = ToRegister(instr->elements());
3498 ASSERT(receiver.is(rax)); // Used for parameter count.
3499 ASSERT(function.is(rdi)); // Required by InvokeFunction.
3500 ASSERT(ToRegister(instr->result()).is(rax));
3502 // Copy the arguments to this function possibly from the
3503 // adaptor frame below it.
3504 const uint32_t kArgumentsLimit = 1 * KB;
3505 __ cmpp(length, Immediate(kArgumentsLimit));
3506 DeoptimizeIf(above, instr->environment());
3508 __ Push(receiver);
3509 __ movp(receiver, length);
3511 // Loop through the arguments pushing them onto the execution
3512 // stack.
3513 Label invoke, loop;
3514 // length is a small non-negative integer, due to the test above.
3515 __ testl(length, length);
3516 __ j(zero, &invoke, Label::kNear);
3517 __ bind(&loop);
3518 StackArgumentsAccessor args(elements, length,
3519 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3520 __ Push(args.GetArgumentOperand(0));
3521 __ decl(length);
3522 __ j(not_zero, &loop);
3524 // Invoke the function.
3525 __ bind(&invoke);
3526 ASSERT(instr->HasPointerMap());
3527 LPointerMap* pointers = instr->pointer_map();
3528 SafepointGenerator safepoint_generator(
3529 this, pointers, Safepoint::kLazyDeopt);
3530 ParameterCount actual(rax);
3531 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3532 }
3535 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3536 LOperand* argument = instr->value();
3537 EmitPushTaggedOperand(argument);
3538 }
3541 void LCodeGen::DoDrop(LDrop* instr) {
3542 __ Drop(instr->count());
3543 }
3546 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3547 Register result = ToRegister(instr->result());
3548 __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
3549 }
3552 void LCodeGen::DoContext(LContext* instr) {
3553 Register result = ToRegister(instr->result());
3554 if (info()->IsOptimizing()) {
3555 __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
3556 } else {
3557 // If there is no frame, the context must be in rsi.
3558 ASSERT(result.is(rsi));
3559 }
3560 }
3563 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3564 ASSERT(ToRegister(instr->context()).is(rsi));
3565 __ Push(rsi); // The context is the first argument.
3566 __ Push(instr->hydrogen()->pairs());
3567 __ Push(Smi::FromInt(instr->hydrogen()->flags()));
3568 CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
3569 }
3572 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3573 int formal_parameter_count,
3574 int arity,
3575 LInstruction* instr,
3576 RDIState rdi_state) {
3577 bool dont_adapt_arguments =
3578 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3579 bool can_invoke_directly =
3580 dont_adapt_arguments || formal_parameter_count == arity;
3582 LPointerMap* pointers = instr->pointer_map();
3584 if (can_invoke_directly) {
3585 if (rdi_state == RDI_UNINITIALIZED) {
3586 __ Move(rdi, function);
3587 }
3589 // Change context.
3590 __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3592 // Set rax to arguments count if adaptation is not needed. Assumes that rax
3593 // is available to write to at this point.
3594 if (dont_adapt_arguments) {
3595 __ Set(rax, arity);
3596 }
3598 // Invoke function.
3599 if (function.is_identical_to(info()->closure())) {
3600 __ CallSelf();
3601 } else {
3602 __ Call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3603 }
3605 // Set up deoptimization.
3606 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
3607 } else {
3608 // We need to adapt arguments.
3609 SafepointGenerator generator(
3610 this, pointers, Safepoint::kLazyDeopt);
3611 ParameterCount count(arity);
3612 ParameterCount expected(formal_parameter_count);
3613 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3614 }
3615 }
3618 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3619 ASSERT(ToRegister(instr->result()).is(rax));
3621 LPointerMap* pointers = instr->pointer_map();
3622 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3624 if (instr->target()->IsConstantOperand()) {
3625 LConstantOperand* target = LConstantOperand::cast(instr->target());
3626 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3627 generator.BeforeCall(__ CallSize(code));
3628 __ call(code, RelocInfo::CODE_TARGET);
3629 } else {
3630 ASSERT(instr->target()->IsRegister());
3631 Register target = ToRegister(instr->target());
3632 generator.BeforeCall(__ CallSize(target));
3633 __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3634 __ call(target);
3635 }
3636 generator.AfterCall();
3637 }
3640 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3641 ASSERT(ToRegister(instr->function()).is(rdi));
3642 ASSERT(ToRegister(instr->result()).is(rax));
3644 if (instr->hydrogen()->pass_argument_count()) {
3645 __ Set(rax, instr->arity());
3646 }
3648 // Change context.
3649 __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3651 LPointerMap* pointers = instr->pointer_map();
3652 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3654 bool is_self_call = false;
3655 if (instr->hydrogen()->function()->IsConstant()) {
3656 Handle<JSFunction> jsfun = Handle<JSFunction>::null();
3657 HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3658 jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
3659 is_self_call = jsfun.is_identical_to(info()->closure());
3660 }
3662 if (is_self_call) {
3663 __ CallSelf();
3664 } else {
3665 Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
3666 generator.BeforeCall(__ CallSize(target));
3667 __ Call(target);
3668 }
3669 generator.AfterCall();
3670 }
3673 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3674 Register input_reg = ToRegister(instr->value());
3675 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3676 Heap::kHeapNumberMapRootIndex);
3677 DeoptimizeIf(not_equal, instr->environment());
3679 Label slow, allocated, done;
3680 Register tmp = input_reg.is(rax) ? rcx : rax;
3681 Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
3683 // Preserve the value of all registers.
3684 PushSafepointRegistersScope scope(this);
3686 __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3687 // Check the sign of the argument. If the argument is positive, just
3688 // return it. We do not need to patch the stack since |input| and
3689 // |result| are the same register and |input| will be restored
3690 // unchanged by popping safepoint registers.
3691 __ testl(tmp, Immediate(HeapNumber::kSignMask));
3692 __ j(zero, &done);
3694 __ AllocateHeapNumber(tmp, tmp2, &slow);
3695 __ jmp(&allocated, Label::kNear);
3697 // Slow case: Call the runtime system to do the number allocation.
3698 __ bind(&slow);
3699 CallRuntimeFromDeferred(
3700 Runtime::kHiddenAllocateHeapNumber, 0, instr, instr->context());
3701 // Set the pointer to the new heap number in tmp.
3702 if (!tmp.is(rax)) __ movp(tmp, rax);
3703 // Restore input_reg after call to runtime.
3704 __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3706 __ bind(&allocated);
3707 __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
3708 __ shlq(tmp2, Immediate(1));
3709 __ shrq(tmp2, Immediate(1));
3710 __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
3711 __ StoreToSafepointRegisterSlot(input_reg, tmp);
3713 __ bind(&done);
3714 }
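// The shlq/shrq pair above clears the sign bit of the raw double payload.
// Worked example (illustrative): -2.5 is 0xC004000000000000; one shift left
// and one logical shift right yield 0x4004000000000000, i.e. +2.5. Working
// on the integer payload avoids touching XMM state inside the deferred code.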
3717 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3718 Register input_reg = ToRegister(instr->value());
3719 __ testl(input_reg, input_reg);
3720 Label is_positive;
3721 __ j(not_sign, &is_positive, Label::kNear);
3722 __ negl(input_reg); // Sets flags.
3723 DeoptimizeIf(negative, instr->environment());
3724 __ bind(&is_positive);
3725 }
3728 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
3729 Register input_reg = ToRegister(instr->value());
3730 __ testp(input_reg, input_reg);
3731 Label is_positive;
3732 __ j(not_sign, &is_positive, Label::kNear);
3733 __ negp(input_reg); // Sets flags.
3734 DeoptimizeIf(negative, instr->environment());
3735 __ bind(&is_positive);
3736 }
3739 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3740 // Class for deferred case.
3741 class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
3743 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3744 : LDeferredCode(codegen), instr_(instr) { }
3745 virtual void Generate() V8_OVERRIDE {
3746 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3747 }
3748 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3749 private:
3750 LMathAbs* instr_;
3751 };
3753 ASSERT(instr->value()->Equals(instr->result()));
3754 Representation r = instr->hydrogen()->value()->representation();
3756 if (r.IsDouble()) {
3757 XMMRegister scratch = double_scratch0();
3758 XMMRegister input_reg = ToDoubleRegister(instr->value());
3759 __ xorps(scratch, scratch);
3760 __ subsd(scratch, input_reg);
3761 __ andps(input_reg, scratch);
3762 } else if (r.IsInteger32()) {
3763 EmitIntegerMathAbs(instr);
3764 } else if (r.IsSmi()) {
3765 EmitSmiMathAbs(instr);
3766 } else { // Tagged case.
3767 DeferredMathAbsTaggedHeapNumber* deferred =
3768 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3769 Register input_reg = ToRegister(instr->value());
3771 __ JumpIfNotSmi(input_reg, deferred->entry());
3772 EmitSmiMathAbs(instr);
3773 __ bind(deferred->exit());
3774 }
3775 }
3778 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3779 XMMRegister xmm_scratch = double_scratch0();
3780 Register output_reg = ToRegister(instr->result());
3781 XMMRegister input_reg = ToDoubleRegister(instr->value());
3783 if (CpuFeatures::IsSupported(SSE4_1)) {
3784 CpuFeatureScope scope(masm(), SSE4_1);
3785 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3786 // Deoptimize if minus zero.
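      // -0.0 is the only double whose bit pattern equals INT64_MIN
      // (0x8000000000000000), so the subtraction below sets the 64-bit
      // overflow flag exactly in that case.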
3787 __ movq(output_reg, input_reg);
3788 __ subq(output_reg, Immediate(1));
3789       DeoptimizeIf(overflow, instr->environment());
    }
3791     __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
3792 __ cvttsd2si(output_reg, xmm_scratch);
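    // cvttsd2si reports failure by producing INT32_MIN (0x80000000);
    // comparing against 1 sets the overflow flag exactly for that value.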
3793 __ cmpl(output_reg, Immediate(0x1));
3794 DeoptimizeIf(overflow, instr->environment());
  } else {
3796     Label negative_sign, done;
3797 // Deoptimize on unordered.
3798 __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3799 __ ucomisd(input_reg, xmm_scratch);
3800 DeoptimizeIf(parity_even, instr->environment());
3801 __ j(below, &negative_sign, Label::kNear);
3803 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3804 // Check for negative zero.
3805 Label positive_sign;
3806 __ j(above, &positive_sign, Label::kNear);
3807 __ movmskpd(output_reg, input_reg);
3808 __ testq(output_reg, Immediate(1));
3809 DeoptimizeIf(not_zero, instr->environment());
3810       __ Set(output_reg, 0);
      __ jmp(&done, Label::kNear);
3812       __ bind(&positive_sign);
    }
3815 // Use truncating instruction (OK because input is positive).
3816 __ cvttsd2si(output_reg, input_reg);
3817 // Overflow is signalled with minint.
3818 __ cmpl(output_reg, Immediate(0x1));
3819 DeoptimizeIf(overflow, instr->environment());
3820 __ jmp(&done, Label::kNear);
3822   // Only non-zero negative numbers reach this point.
3823 __ bind(&negative_sign);
3824 // Truncate, then compare and compensate.
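  // Truncation rounds negative inputs up (toward zero); if the round-tripped
  // value no longer equals the input, subtract one to get the floor.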
3825 __ cvttsd2si(output_reg, input_reg);
3826 __ Cvtlsi2sd(xmm_scratch, output_reg);
3827 __ ucomisd(input_reg, xmm_scratch);
3828 __ j(equal, &done, Label::kNear);
3829 __ subl(output_reg, Immediate(1));
3830     DeoptimizeIf(overflow, instr->environment());
    __ bind(&done);
  }
}
3837 void LCodeGen::DoMathRound(LMathRound* instr) {
3838 const XMMRegister xmm_scratch = double_scratch0();
3839 Register output_reg = ToRegister(instr->result());
3840 XMMRegister input_reg = ToDoubleRegister(instr->value());
3841 XMMRegister input_temp = ToDoubleRegister(instr->temp());
3842 static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
3843 static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
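  // These are the IEEE-754 bit patterns of +0.5 and -0.5; SSE has no double
  // immediates, so they are materialized through a GPR.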
3845 Label done, round_to_zero, below_one_half;
3846 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
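  // With --deopt-every-n-times, DeoptimizeIf emits extra inline code, so the
  // forward jumps below may need the far form to reach their targets.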
3847 __ movq(kScratchRegister, one_half);
3848 __ movq(xmm_scratch, kScratchRegister);
3849 __ ucomisd(xmm_scratch, input_reg);
3850 __ j(above, &below_one_half, Label::kNear);
3852   // CVTTSD2SI rounds towards zero; since 0.5 <= x, we use floor(0.5 + x).
3853 __ addsd(xmm_scratch, input_reg);
3854 __ cvttsd2si(output_reg, xmm_scratch);
3855 // Overflow is signalled with minint.
3856 __ cmpl(output_reg, Immediate(0x1));
3857 __ RecordComment("D2I conversion overflow");
3858 DeoptimizeIf(overflow, instr->environment());
3859 __ jmp(&done, dist);
3861 __ bind(&below_one_half);
3862 __ movq(kScratchRegister, minus_one_half);
3863 __ movq(xmm_scratch, kScratchRegister);
3864 __ ucomisd(xmm_scratch, input_reg);
3865 __ j(below_equal, &round_to_zero, Label::kNear);
3867   // CVTTSD2SI rounds towards zero, so we compute ceil(x - (-0.5)) and
3868   // then compare with the input and compensate if necessary.
3869 __ movq(input_temp, input_reg); // Do not alter input_reg.
3870 __ subsd(input_temp, xmm_scratch);
3871 __ cvttsd2si(output_reg, input_temp);
3872 // Catch minint due to overflow, and to prevent overflow when compensating.
3873 __ cmpl(output_reg, Immediate(0x1));
3874 __ RecordComment("D2I conversion overflow");
3875 DeoptimizeIf(overflow, instr->environment());
3877 __ Cvtlsi2sd(xmm_scratch, output_reg);
3878 __ ucomisd(xmm_scratch, input_temp);
3879 __ j(equal, &done, dist);
3880 __ subl(output_reg, Immediate(1));
3881 // No overflow because we already ruled out minint.
3882 __ jmp(&done, dist);
3884 __ bind(&round_to_zero);
3885 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
3886 // we can ignore the difference between a result of -0 and +0.
3887 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
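    // Moving the double's bits into a GPR puts its sign in bit 63, so testq
    // sets the sign flag for any negative input, including -0.0 and the
    // range (-0.5, -0.0) that would otherwise round to -0.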
3888 __ movq(output_reg, input_reg);
3889 __ testq(output_reg, output_reg);
3890 __ RecordComment("Minus zero");
3891 DeoptimizeIf(negative, instr->environment());
  }
3893   __ Set(output_reg, 0);
  __ bind(&done);
}
3898 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3899 XMMRegister output = ToDoubleRegister(instr->result());
3900 if (instr->value()->IsDoubleRegister()) {
3901 XMMRegister input = ToDoubleRegister(instr->value());
3902     __ sqrtsd(output, input);
  } else {
3904     Operand input = ToOperand(instr->value());
3905     __ sqrtsd(output, input);
  }
}
3910 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3911 XMMRegister xmm_scratch = double_scratch0();
3912 XMMRegister input_reg = ToDoubleRegister(instr->value());
3913   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
  Label sqrt, done;
3915 // Note that according to ECMA-262 15.8.2.13:
3916 // Math.pow(-Infinity, 0.5) == Infinity
3917 // Math.sqrt(-Infinity) == NaN
3919 // Check base for -Infinity. According to IEEE-754, double-precision
3920 // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
3921 __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
3922 __ movq(xmm_scratch, kScratchRegister);
3923 __ ucomisd(xmm_scratch, input_reg);
3924 // Comparing -Infinity with NaN results in "unordered", which sets the
3925 // zero flag as if both were equal. However, it also sets the carry flag.
3926 __ j(not_equal, &sqrt, Label::kNear);
3927 __ j(carry, &sqrt, Label::kNear);
3928 // If input is -Infinity, return Infinity.
3929 __ xorps(input_reg, input_reg);
3930 __ subsd(input_reg, xmm_scratch);
3931 __ jmp(&done, Label::kNear);
  __ bind(&sqrt);
3935   __ xorps(xmm_scratch, xmm_scratch);
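  // IEEE-754 sqrt(-0) is -0, but Math.pow(-0, 0.5) must be +0; adding +0.0
  // first canonicalizes -0 to +0 (in round-to-nearest, -0.0 + +0.0 == +0.0).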
3936 __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
3937   __ sqrtsd(input_reg, input_reg);
  __ bind(&done);
}
3942 void LCodeGen::DoNullarySIMDOperation(LNullarySIMDOperation* instr) {
3943 switch (instr->op()) {
3944 case kFloat32x4Zero: {
3945 XMMRegister result_reg = ToFloat32x4Register(instr->result());
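      // XOR-ing a register with itself is the standard dependency-breaking
      // idiom for zeroing an XMM register; no constant load is needed.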
3946       __ xorps(result_reg, result_reg);
      break;
    }
3949     case kFloat64x2Zero: {
3950       XMMRegister result_reg = ToFloat64x2Register(instr->result());
3951       __ xorpd(result_reg, result_reg);
      break;
    }
3954     case kInt32x4Zero: {
3955       XMMRegister result_reg = ToInt32x4Register(instr->result());
3956       __ xorps(result_reg, result_reg);
      break;
    }
    default:
      UNREACHABLE();
  }
}
3966 void LCodeGen::DoUnarySIMDOperation(LUnarySIMDOperation* instr) {
  uint8_t select = 0x0;
3968   switch (instr->op()) {
3969 case kSIMD128Change: {
3970 Comment(";;; deoptimize: can not perform representation change"
3971 "for float32x4 or int32x4");
3972 DeoptimizeIf(no_condition, instr->environment());
      break;
    }
    case kFloat32x4Abs:
    case kFloat32x4Neg:
3977     case kFloat32x4Reciprocal:
3978 case kFloat32x4ReciprocalSqrt:
3979 case kFloat32x4Sqrt: {
3980 ASSERT(instr->value()->Equals(instr->result()));
3981 ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
3982 XMMRegister input_reg = ToFloat32x4Register(instr->value());
3983       switch (instr->op()) {
        case kFloat32x4Abs:
3985           __ absps(input_reg);
          break;
        case kFloat32x4Neg:
3988           __ negateps(input_reg);
          break;
3990         case kFloat32x4Reciprocal:
3991           __ rcpps(input_reg, input_reg);
          break;
3993         case kFloat32x4ReciprocalSqrt:
3994           __ rsqrtps(input_reg, input_reg);
          break;
3996         case kFloat32x4Sqrt:
3997           __ sqrtps(input_reg, input_reg);
          break;
        default:
          UNREACHABLE();
      }
      break;
    }
    case kFloat64x2Abs:
    case kFloat64x2Neg:
4007     case kFloat64x2Sqrt: {
4008 ASSERT(instr->value()->Equals(instr->result()));
4009 ASSERT(instr->hydrogen()->value()->representation().IsFloat64x2());
4010 XMMRegister input_reg = ToFloat64x2Register(instr->value());
4011       switch (instr->op()) {
        case kFloat64x2Abs:
4013           __ abspd(input_reg);
          break;
        case kFloat64x2Neg:
4016           __ negatepd(input_reg);
          break;
4018         case kFloat64x2Sqrt:
4019           __ sqrtpd(input_reg, input_reg);
          break;
        default:
          UNREACHABLE();
      }
      break;
    }
    case kInt32x4Not:
    case kInt32x4Neg: {
4029       ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
4030 XMMRegister input_reg = ToInt32x4Register(instr->value());
4031       switch (instr->op()) {
        case kInt32x4Not:
4033           __ notps(input_reg);
          break;
        case kInt32x4Neg:
4036           __ pnegd(input_reg);
          break;
        default:
          UNREACHABLE();
      }
      break;
    }
4044 case kFloat32x4BitsToInt32x4:
4045 case kFloat32x4ToInt32x4: {
4046 ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
4047 XMMRegister input_reg = ToFloat32x4Register(instr->value());
4048 XMMRegister result_reg = ToInt32x4Register(instr->result());
4049 if (instr->op() == kFloat32x4BitsToInt32x4) {
4050 if (!result_reg.is(input_reg)) {
4051           __ movaps(result_reg, input_reg);
        }
      } else {
4054         ASSERT(instr->op() == kFloat32x4ToInt32x4);
4055         __ cvtps2dq(result_reg, input_reg);
      }
      break;
    }
4059 case kInt32x4BitsToFloat32x4:
4060 case kInt32x4ToFloat32x4: {
4061 ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
4062 XMMRegister input_reg = ToInt32x4Register(instr->value());
4063 XMMRegister result_reg = ToFloat32x4Register(instr->result());
4064 if (instr->op() == kInt32x4BitsToFloat32x4) {
4065 if (!result_reg.is(input_reg)) {
4066           __ movaps(result_reg, input_reg);
        }
      } else {
4069         ASSERT(instr->op() == kInt32x4ToFloat32x4);
4070         __ cvtdq2ps(result_reg, input_reg);
      }
      break;
    }
4074 case kFloat32x4Splat: {
4075 ASSERT(instr->hydrogen()->value()->representation().IsDouble());
4076 XMMRegister input_reg = ToDoubleRegister(instr->value());
4077 XMMRegister result_reg = ToFloat32x4Register(instr->result());
4078 XMMRegister xmm_scratch = xmm0;
4079 __ xorps(xmm_scratch, xmm_scratch);
4080 __ cvtsd2ss(xmm_scratch, input_reg);
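      // shufps with selector 0x00 broadcasts lane 0 to all four lanes; the
      // xorps above breaks the dependency on the old contents of the
      // scratch register before cvtsd2ss writes lane 0.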
4081 __ shufps(xmm_scratch, xmm_scratch, 0x0);
4082 __ movaps(result_reg, xmm_scratch);
4085 case kInt32x4Splat: {
4086 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4087 Register input_reg = ToRegister(instr->value());
4088 XMMRegister result_reg = ToInt32x4Register(instr->result());
4089 __ movd(result_reg, input_reg);
4090 __ shufps(result_reg, result_reg, 0x0);
4093 case kInt32x4GetSignMask: {
4094 ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
4095 XMMRegister input_reg = ToInt32x4Register(instr->value());
4096 Register result = ToRegister(instr->result());
4097 __ movmskps(result, input_reg);
4100 case kFloat32x4GetSignMask: {
4101 ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
4102 XMMRegister input_reg = ToFloat32x4Register(instr->value());
4103 Register result = ToRegister(instr->result());
4104 __ movmskps(result, input_reg);
4107     case kFloat32x4GetW:
      select++;
4109     case kFloat32x4GetZ:
      select++;
4111     case kFloat32x4GetY:
      select++;
4113     case kFloat32x4GetX: {
4114 ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
4115 XMMRegister input_reg = ToFloat32x4Register(instr->value());
4116 XMMRegister result = ToDoubleRegister(instr->result());
4117 XMMRegister xmm_scratch = result.is(input_reg) ? xmm0 : result;
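      // Lane 0 can be converted directly; the other lanes are first rotated
      // into lane 0 with pshufd, using the lane index accumulated above.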
4119 if (select == 0x0) {
4120 __ xorps(xmm_scratch, xmm_scratch);
4121 __ cvtss2sd(xmm_scratch, input_reg);
4122 if (!xmm_scratch.is(result)) {
4123 __ movaps(result, xmm_scratch);
        }
      } else {
4126         __ pshufd(xmm_scratch, input_reg, select);
4127 if (!xmm_scratch.is(result)) {
4128 __ xorps(result, result);
        }
4130         __ cvtss2sd(result, xmm_scratch);
      }
      break;
    }
4134 case kFloat64x2GetSignMask: {
4135 ASSERT(instr->hydrogen()->value()->representation().IsFloat64x2());
4136 XMMRegister input_reg = ToFloat64x2Register(instr->value());
4137 Register result = ToRegister(instr->result());
4138 __ movmskpd(result, input_reg);
4141 case kFloat64x2GetX: {
4142 ASSERT(instr->hydrogen()->value()->representation().IsFloat64x2());
4143 XMMRegister input_reg = ToFloat64x2Register(instr->value());
4144 XMMRegister result = ToDoubleRegister(instr->result());
4146 if (!input_reg.is(result)) {
4147         __ movaps(result, input_reg);
      }
      break;
    }
4151 case kFloat64x2GetY: {
4152 ASSERT(instr->hydrogen()->value()->representation().IsFloat64x2());
4153 XMMRegister input_reg = ToFloat64x2Register(instr->value());
4154 XMMRegister result = ToDoubleRegister(instr->result());
4156 if (!input_reg.is(result)) {
4157 __ movaps(result, input_reg);
      }
4159       __ shufpd(result, input_reg, 0x1);
      break;
    }
4166 case kInt32x4GetFlagX:
4167 case kInt32x4GetFlagY:
4168 case kInt32x4GetFlagZ:
4169 case kInt32x4GetFlagW: {
4170 ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
4172       switch (instr->op()) {
4173         case kInt32x4GetFlagX:
          select = 0x0;
          break;
4177         case kInt32x4GetFlagY:
          select = 0x1;
          break;
4182         case kInt32x4GetFlagZ:
          select = 0x2;
          break;
4187         case kInt32x4GetFlagW:
          select = 0x3;
          break;
        default:
          UNREACHABLE();
      }
4196 XMMRegister input_reg = ToInt32x4Register(instr->value());
4197 Register result = ToRegister(instr->result());
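      // Lane 0 is read with a plain movd; other lanes use SSE4.1 extractps
      // when available, otherwise pshufd rotates the lane into position 0
      // for movd.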
4198 if (select == 0x0) {
4199 __ movd(result, input_reg);
      } else {
4201         if (CpuFeatures::IsSupported(SSE4_1)) {
4202 CpuFeatureScope scope(masm(), SSE4_1);
4203 __ extractps(result, input_reg, select);
        } else {
4205           XMMRegister xmm_scratch = xmm0;
4206 __ pshufd(xmm_scratch, input_reg, select);
4207           __ movd(result, xmm_scratch);
        }
      }
4212 Label false_value, done;
4213 __ testl(result, result);
4214 __ j(zero, &false_value, Label::kNear);
4215 __ LoadRoot(result, Heap::kTrueValueRootIndex);
4216 __ jmp(&done, Label::kNear);
4217 __ bind(&false_value);
4218       __ LoadRoot(result, Heap::kFalseValueRootIndex);
      __ bind(&done);
      break;
    }
    default:
      UNREACHABLE();
  }
}
4230 void LCodeGen::DoBinarySIMDOperation(LBinarySIMDOperation* instr) {
4231   uint8_t imm8 = 0;  // Lane selector for the "with" operations below.
4232 switch (instr->op()) {
    case kFloat32x4Add:
    case kFloat32x4Sub:
    case kFloat32x4Mul:
    case kFloat32x4Div:
    case kFloat32x4Min:
4238     case kFloat32x4Max: {
4239 ASSERT(instr->left()->Equals(instr->result()));
4240 ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
4241 ASSERT(instr->hydrogen()->right()->representation().IsFloat32x4());
4242 XMMRegister left_reg = ToFloat32x4Register(instr->left());
4243 XMMRegister right_reg = ToFloat32x4Register(instr->right());
4244       switch (instr->op()) {
        case kFloat32x4Add:
4246           __ addps(left_reg, right_reg);
          break;
        case kFloat32x4Sub:
4249           __ subps(left_reg, right_reg);
          break;
        case kFloat32x4Mul:
4252           __ mulps(left_reg, right_reg);
          break;
        case kFloat32x4Div:
4255           __ divps(left_reg, right_reg);
          break;
        case kFloat32x4Min:
4258           __ minps(left_reg, right_reg);
          break;
        case kFloat32x4Max:
4261           __ maxps(left_reg, right_reg);
          break;
        default:
          UNREACHABLE();
      }
      break;
    }
4269 case kFloat32x4Scale: {
4270 ASSERT(instr->left()->Equals(instr->result()));
4271 ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
4272 ASSERT(instr->hydrogen()->right()->representation().IsDouble());
4273 XMMRegister left_reg = ToFloat32x4Register(instr->left());
4274 XMMRegister right_reg = ToDoubleRegister(instr->right());
4275 XMMRegister scratch_reg = xmm0;
4276 __ xorps(scratch_reg, scratch_reg);
4277 __ cvtsd2ss(scratch_reg, right_reg);
4278 __ shufps(scratch_reg, scratch_reg, 0x0);
4279 __ mulps(left_reg, scratch_reg);
      break;
    }
    case kFloat64x2Add:
    case kFloat64x2Sub:
    case kFloat64x2Mul:
    case kFloat64x2Div:
    case kFloat64x2Min:
4287     case kFloat64x2Max: {
4288 ASSERT(instr->left()->Equals(instr->result()));
4289 ASSERT(instr->hydrogen()->left()->representation().IsFloat64x2());
4290 ASSERT(instr->hydrogen()->right()->representation().IsFloat64x2());
4291 XMMRegister left_reg = ToFloat64x2Register(instr->left());
4292 XMMRegister right_reg = ToFloat64x2Register(instr->right());
4293       switch (instr->op()) {
        case kFloat64x2Add:
4295           __ addpd(left_reg, right_reg);
          break;
        case kFloat64x2Sub:
4298           __ subpd(left_reg, right_reg);
          break;
        case kFloat64x2Mul:
4301           __ mulpd(left_reg, right_reg);
          break;
        case kFloat64x2Div:
4304           __ divpd(left_reg, right_reg);
          break;
        case kFloat64x2Min:
4307           __ minpd(left_reg, right_reg);
          break;
        case kFloat64x2Max:
4310           __ maxpd(left_reg, right_reg);
          break;
        default:
          UNREACHABLE();
      }
      break;
    }
4318 case kFloat64x2Scale: {
4319 ASSERT(instr->left()->Equals(instr->result()));
4320 ASSERT(instr->hydrogen()->left()->representation().IsFloat64x2());
4321 ASSERT(instr->hydrogen()->right()->representation().IsDouble());
4322 XMMRegister left_reg = ToFloat64x2Register(instr->left());
4323 XMMRegister right_reg = ToDoubleRegister(instr->right());
4324 __ shufpd(right_reg, right_reg, 0x0);
4325 __ mulpd(left_reg, right_reg);
4328 case kFloat32x4Shuffle: {
4329 ASSERT(instr->left()->Equals(instr->result()));
4330 ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
4331 if (instr->hydrogen()->right()->IsConstant() &&
4332 HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
4333 int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
4334 uint8_t select = static_cast<uint8_t>(value & 0xFF);
4335 XMMRegister left_reg = ToFloat32x4Register(instr->left());
4336         __ shufps(left_reg, left_reg, select);
      } else {
4339         Comment(";;; deoptimize: non-constant selector for shuffle");
4340         DeoptimizeIf(no_condition, instr->environment());
      }
      break;
    }
4344 case kInt32x4Shuffle: {
4345 ASSERT(instr->left()->Equals(instr->result()));
4346 ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
4347 if (instr->hydrogen()->right()->IsConstant() &&
4348 HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
4349 int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
4350 uint8_t select = static_cast<uint8_t>(value & 0xFF);
4351 XMMRegister left_reg = ToInt32x4Register(instr->left());
4352         __ pshufd(left_reg, left_reg, select);
      } else {
4355         Comment(";;; deoptimize: non-constant selector for shuffle");
4356         DeoptimizeIf(no_condition, instr->environment());
      }
      break;
    }
4360 case kInt32x4ShiftLeft:
4361 case kInt32x4ShiftRight:
4362 case kInt32x4ShiftRightArithmetic: {
4363 ASSERT(instr->left()->Equals(instr->result()));
4364 ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
4365 if (instr->hydrogen()->right()->IsConstant() &&
4366 HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
4367 int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
4368 uint8_t shift = static_cast<uint8_t>(value & 0xFF);
4369 XMMRegister left_reg = ToInt32x4Register(instr->left());
4370 switch (instr->op()) {
4371 case kInt32x4ShiftLeft:
4372             __ pslld(left_reg, shift);
            break;
4374           case kInt32x4ShiftRight:
4375             __ psrld(left_reg, shift);
            break;
4377           case kInt32x4ShiftRightArithmetic:
4378             __ psrad(left_reg, shift);
            break;
          default:
            UNREACHABLE();
        }
      } else {
4385         XMMRegister left_reg = ToInt32x4Register(instr->left());
4386 Register shift = ToRegister(instr->right());
4387 XMMRegister xmm_scratch = double_scratch0();
4388 __ movd(xmm_scratch, shift);
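      // The variable-count forms of pslld/psrld/psrad take the shift count
      // from the low quadword of an XMM register, hence the movd from the
      // GPR holding the count.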
4389 switch (instr->op()) {
4390 case kInt32x4ShiftLeft:
4391             __ pslld(left_reg, xmm_scratch);
            break;
4393           case kInt32x4ShiftRight:
4394             __ psrld(left_reg, xmm_scratch);
            break;
4396           case kInt32x4ShiftRightArithmetic:
4397             __ psrad(left_reg, xmm_scratch);
            break;
          default:
            UNREACHABLE();
        }
      }
      break;
    }
4405 case kFloat32x4LessThan:
4406 case kFloat32x4LessThanOrEqual:
4407 case kFloat32x4Equal:
4408 case kFloat32x4NotEqual:
4409 case kFloat32x4GreaterThanOrEqual:
4410 case kFloat32x4GreaterThan: {
4411 ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
4412 ASSERT(instr->hydrogen()->right()->representation().IsFloat32x4());
4413 XMMRegister left_reg = ToFloat32x4Register(instr->left());
4414 XMMRegister right_reg = ToFloat32x4Register(instr->right());
4415 XMMRegister result_reg = ToInt32x4Register(instr->result());
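      // SSE compares overwrite their first operand and only provide the
      // lt/le/eq/neq predicates and their complements, so each case below
      // picks the predicate (or its operand-swapped complement) that lets
      // result_reg alias either input without an extra copy.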
4416 switch (instr->op()) {
4417 case kFloat32x4LessThan:
4418 if (result_reg.is(left_reg)) {
4419 __ cmpltps(result_reg, right_reg);
4420 } else if (result_reg.is(right_reg)) {
4421             __ cmpnleps(result_reg, left_reg);  // !(right <= left) == (left < right).
          } else {
4423             __ movaps(result_reg, left_reg);
4424             __ cmpltps(result_reg, right_reg);
          }
          break;
4427 case kFloat32x4LessThanOrEqual:
4428 if (result_reg.is(left_reg)) {
4429 __ cmpleps(result_reg, right_reg);
4430 } else if (result_reg.is(right_reg)) {
4431             __ cmpnltps(result_reg, left_reg);  // !(right < left) == (left <= right).
          } else {
4433             __ movaps(result_reg, left_reg);
4434             __ cmpleps(result_reg, right_reg);
          }
          break;
4437 case kFloat32x4Equal:
4438 if (result_reg.is(left_reg)) {
4439 __ cmpeqps(result_reg, right_reg);
4440 } else if (result_reg.is(right_reg)) {
4441 __ cmpeqps(result_reg, left_reg);
          } else {
4443             __ movaps(result_reg, left_reg);
4444             __ cmpeqps(result_reg, right_reg);
          }
          break;
4447 case kFloat32x4NotEqual:
4448 if (result_reg.is(left_reg)) {
4449 __ cmpneqps(result_reg, right_reg);
4450 } else if (result_reg.is(right_reg)) {
4451 __ cmpneqps(result_reg, left_reg);
          } else {
4453             __ movaps(result_reg, left_reg);
4454             __ cmpneqps(result_reg, right_reg);
          }
          break;
4457 case kFloat32x4GreaterThanOrEqual:
4458 if (result_reg.is(left_reg)) {
4459 __ cmpnltps(result_reg, right_reg);
4460 } else if (result_reg.is(right_reg)) {
4461             __ cmpleps(result_reg, left_reg);  // (right <= left) == (left >= right).
          } else {
4463             __ movaps(result_reg, left_reg);
4464             __ cmpnltps(result_reg, right_reg);
          }
          break;
4467 case kFloat32x4GreaterThan:
4468 if (result_reg.is(left_reg)) {
4469 __ cmpnleps(result_reg, right_reg);
4470 } else if (result_reg.is(right_reg)) {
4471             __ cmpltps(result_reg, left_reg);  // (right < left) == (left > right).
          } else {
4473             __ movaps(result_reg, left_reg);
4474             __ cmpnleps(result_reg, right_reg);
          }
          break;
        default:
          UNREACHABLE();
      }
      break;
    }
    case kInt32x4And:
    case kInt32x4Or:
    case kInt32x4Xor:
    case kInt32x4Add:
    case kInt32x4Sub:
    case kInt32x4Mul:
4489     case kInt32x4GreaterThan:
    case kInt32x4Equal:
4491     case kInt32x4LessThan: {
4492 ASSERT(instr->left()->Equals(instr->result()));
4493 ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
4494 ASSERT(instr->hydrogen()->right()->representation().IsInt32x4());
4495 XMMRegister left_reg = ToInt32x4Register(instr->left());
4496 XMMRegister right_reg = ToInt32x4Register(instr->right());
4497       switch (instr->op()) {
        case kInt32x4And:
4499           __ andps(left_reg, right_reg);
          break;
        case kInt32x4Or:
4502           __ orps(left_reg, right_reg);
          break;
        case kInt32x4Xor:
4505           __ xorps(left_reg, right_reg);
          break;
        case kInt32x4Add:
4508           __ paddd(left_reg, right_reg);
          break;
        case kInt32x4Sub:
4511           __ psubd(left_reg, right_reg);
          break;
        case kInt32x4Mul:
4514 if (CpuFeatures::IsSupported(SSE4_1)) {
4515 CpuFeatureScope scope(masm(), SSE4_1);
4516 __ pmulld(left_reg, right_reg);
          } else {
4518             // The algorithm is from http://stackoverflow.com/questions/10500766/sse-multiplication-of-4-32-bit-integers
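            // pmuludq multiplies the two even 32-bit lanes into 64-bit
            // results; the 4-byte shifts move the odd lanes into even
            // positions for a second pmuludq, and pshufd with selector 8
            // compacts the low dword of each 64-bit product before
            // punpackldq interleaves the two pairs back together.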
4519 XMMRegister xmm_scratch = xmm0;
4520 __ movaps(xmm_scratch, left_reg);
4521 __ pmuludq(left_reg, right_reg);
4522 __ psrldq(xmm_scratch, 4);
4523 __ psrldq(right_reg, 4);
4524 __ pmuludq(xmm_scratch, right_reg);
4525 __ pshufd(left_reg, left_reg, 8);
4526 __ pshufd(xmm_scratch, xmm_scratch, 8);
4527             __ punpackldq(left_reg, xmm_scratch);
          }
          break;
4530 case kInt32x4GreaterThan:
4531           __ pcmpgtd(left_reg, right_reg);
          break;
        case kInt32x4Equal:
4534           __ pcmpeqd(left_reg, right_reg);
          break;
4536         case kInt32x4LessThan: {
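          // SSE2 has no pcmpltd; compute right > left into a scratch
          // register with pcmpgtd and copy the resulting mask back.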
4537 XMMRegister xmm_scratch = xmm0;
4538 __ movaps(xmm_scratch, right_reg);
4539 __ pcmpgtd(xmm_scratch, left_reg);
4540           __ movaps(left_reg, xmm_scratch);
          break;
        }
        default:
          UNREACHABLE();
      }
      break;
    }
4549     case kFloat32x4WithW:
      imm8++;
4551     case kFloat32x4WithZ:
      imm8++;
4553     case kFloat32x4WithY:
      imm8++;
4555     case kFloat32x4WithX: {
4556 ASSERT(instr->left()->Equals(instr->result()));
4557 ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
4558 ASSERT(instr->hydrogen()->right()->representation().IsDouble());
4559 XMMRegister left_reg = ToFloat32x4Register(instr->left());
4560 XMMRegister right_reg = ToDoubleRegister(instr->right());
4561 XMMRegister xmm_scratch = xmm0;
4562 __ xorps(xmm_scratch, xmm_scratch);
4563 __ cvtsd2ss(xmm_scratch, right_reg);
4564 if (CpuFeatures::IsSupported(SSE4_1)) {
        imm8 = imm8 << 4;  // insertps encodes the destination lane in imm8 bits 5:4.
4566         CpuFeatureScope scope(masm(), SSE4_1);
4567 __ insertps(left_reg, xmm_scratch, imm8);
      } else {
        // Without SSE4.1, spill the vector, overwrite the lane in memory,
        // and reload.
4569         __ subq(rsp, Immediate(kFloat32x4Size));
4570 __ movups(Operand(rsp, 0), left_reg);
4571 __ movss(Operand(rsp, imm8 * kFloatSize), xmm_scratch);
4572 __ movups(left_reg, Operand(rsp, 0));
4573         __ addq(rsp, Immediate(kFloat32x4Size));
      }
      break;
    }
4577 case kFloat64x2WithX: {
4578 ASSERT(instr->left()->Equals(instr->result()));
4579 ASSERT(instr->hydrogen()->left()->representation().IsFloat64x2());
4580 ASSERT(instr->hydrogen()->right()->representation().IsDouble());
4581 XMMRegister left_reg = ToFloat64x2Register(instr->left());
4582 XMMRegister right_reg = ToDoubleRegister(instr->right());
4583 __ subq(rsp, Immediate(kFloat64x2Size));
4584 __ movups(Operand(rsp, 0), left_reg);
4585 __ movsd(Operand(rsp, 0 * kDoubleSize), right_reg);
4586 __ movups(left_reg, Operand(rsp, 0));
4587 __ addq(rsp, Immediate(kFloat64x2Size));
4590 case kFloat64x2WithY: {
4591 ASSERT(instr->left()->Equals(instr->result()));
4592 ASSERT(instr->hydrogen()->left()->representation().IsFloat64x2());
4593 ASSERT(instr->hydrogen()->right()->representation().IsDouble());
4594 XMMRegister left_reg = ToFloat64x2Register(instr->left());
4595 XMMRegister right_reg = ToDoubleRegister(instr->right());
4596 __ subq(rsp, Immediate(kFloat64x2Size));
4597 __ movups(Operand(rsp, 0), left_reg);
4598 __ movsd(Operand(rsp, 1 * kDoubleSize), right_reg);
4599 __ movups(left_reg, Operand(rsp, 0));
4600 __ addq(rsp, Immediate(kFloat64x2Size));
4603 case kFloat64x2Constructor: {
4604 ASSERT(instr->hydrogen()->left()->representation().IsDouble());
4605 ASSERT(instr->hydrogen()->right()->representation().IsDouble());
4606 XMMRegister left_reg = ToDoubleRegister(instr->left());
4607 XMMRegister right_reg = ToDoubleRegister(instr->right());
4608 XMMRegister result_reg = ToFloat64x2Register(instr->result());
4609 __ subq(rsp, Immediate(kFloat64x2Size));
4610 __ movsd(Operand(rsp, 0 * kDoubleSize), left_reg);
4611 __ movsd(Operand(rsp, 1 * kDoubleSize), right_reg);
4612 __ movups(result_reg, Operand(rsp, 0));
4613 __ addq(rsp, Immediate(kFloat64x2Size));
      break;
    }
    case kInt32x4WithW:
      imm8++;
    case kInt32x4WithZ:
      imm8++;
    case kInt32x4WithY:
      imm8++;
4622     case kInt32x4WithX: {
4623 ASSERT(instr->left()->Equals(instr->result()));
4624 ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
4625 ASSERT(instr->hydrogen()->right()->representation().IsInteger32());
4626 XMMRegister left_reg = ToInt32x4Register(instr->left());
4627 Register right_reg = ToRegister(instr->right());
4628 if (CpuFeatures::IsSupported(SSE4_1)) {
4629 CpuFeatureScope scope(masm(), SSE4_1);
4630 __ pinsrd(left_reg, right_reg, imm8);
      } else {
4632         __ subq(rsp, Immediate(kInt32x4Size));
4633 __ movdqu(Operand(rsp, 0), left_reg);
4634 __ movl(Operand(rsp, imm8 * kFloatSize), right_reg);
4635 __ movdqu(left_reg, Operand(rsp, 0));
4636         __ addq(rsp, Immediate(kInt32x4Size));
      }
      break;
    }
4640     case kInt32x4WithFlagW:
      imm8++;
4642     case kInt32x4WithFlagZ:
      imm8++;
4644     case kInt32x4WithFlagY:
      imm8++;
4646     case kInt32x4WithFlagX: {
4647 ASSERT(instr->left()->Equals(instr->result()));
4648 ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
4649 ASSERT(instr->hydrogen()->right()->representation().IsTagged());
4650 HType type = instr->hydrogen()->right()->type();
4651 XMMRegister left_reg = ToInt32x4Register(instr->left());
4652 Register right_reg = ToRegister(instr->right());
4653 Label load_false_value, done;
4654 if (type.IsBoolean()) {
4655 __ subq(rsp, Immediate(kInt32x4Size));
4656 __ movups(Operand(rsp, 0), left_reg);
4657 __ CompareRoot(right_reg, Heap::kTrueValueRootIndex);
4658 __ j(not_equal, &load_false_value, Label::kNear);
4660 Comment(";;; deoptimize: other types for int32x4.withFlagX/Y/Z/W.");
4661 DeoptimizeIf(no_condition, instr->environment());
      }
      // Write the true value (all bits set) into the selected lane.
4665       __ movl(Operand(rsp, imm8 * kFloatSize), Immediate(0xFFFFFFFF));
4666 __ jmp(&done, Label::kNear);
4667 __ bind(&load_false_value);
4668       __ movl(Operand(rsp, imm8 * kFloatSize), Immediate(0x0));
      __ bind(&done);
4670       __ movups(left_reg, Operand(rsp, 0));
4671 __ addq(rsp, Immediate(kInt32x4Size));
4681 void LCodeGen::DoTernarySIMDOperation(LTernarySIMDOperation* instr) {
4682 switch (instr->op()) {
4683 case kInt32x4Select: {
4684 ASSERT(instr->hydrogen()->first()->representation().IsInt32x4());
4685 ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
4686 ASSERT(instr->hydrogen()->third()->representation().IsFloat32x4());
4688 XMMRegister mask_reg = ToInt32x4Register(instr->first());
4689 XMMRegister left_reg = ToFloat32x4Register(instr->second());
4690 XMMRegister right_reg = ToFloat32x4Register(instr->third());
4691 XMMRegister result_reg = ToFloat32x4Register(instr->result());
4692 XMMRegister temp_reg = xmm0;
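      // Bitwise select: result = (mask & trueValue) | (~mask & falseValue).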
4695       __ movaps(temp_reg, mask_reg);
      // Invert the mask.
      __ notps(temp_reg);
4698       // temp_reg = ~mask & falseValue.
4699       __ andps(temp_reg, right_reg);
4701 if (!result_reg.is(mask_reg)) {
4702 if (result_reg.is(left_reg)) {
4703 // result_reg = result_reg & trueValue.
4704 __ andps(result_reg, mask_reg);
4705 // out = result_reg | temp_reg.
4706 __ orps(result_reg, temp_reg);
      } else {
4708           __ movaps(result_reg, mask_reg);
4709 // result_reg = result_reg & trueValue.
4710 __ andps(result_reg, left_reg);
4711 // out = result_reg | temp_reg.
4712 __ orps(result_reg, temp_reg);
4715 // result_reg = result_reg & trueValue.
4716 __ andps(result_reg, left_reg);
4717 // out = result_reg | temp_reg.
4718         __ orps(result_reg, temp_reg);
      }
      break;
    }
4722 case kFloat32x4ShuffleMix: {
4723 ASSERT(instr->first()->Equals(instr->result()));
4724 ASSERT(instr->hydrogen()->first()->representation().IsFloat32x4());
4725 ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
4726 ASSERT(instr->hydrogen()->third()->representation().IsInteger32());
4727 if (instr->hydrogen()->third()->IsConstant() &&
4728 HConstant::cast(instr->hydrogen()->third())->HasInteger32Value()) {
4729 int32_t value = ToInteger32(LConstantOperand::cast(instr->third()));
4730 uint8_t select = static_cast<uint8_t>(value & 0xFF);
4731 XMMRegister first_reg = ToFloat32x4Register(instr->first());
4732 XMMRegister second_reg = ToFloat32x4Register(instr->second());
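        // shufps mixes the two vectors: the low two result lanes come from
        // first_reg and the high two from second_reg, each chosen by two
        // bits of the selector.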
4733 __ shufps(first_reg, second_reg, select);
4736 Comment(";;; deoptimize: non-constant selector for shuffle");
4737 DeoptimizeIf(no_condition, instr->environment());
4741 case kFloat32x4Clamp: {
4742 ASSERT(instr->first()->Equals(instr->result()));
4743 ASSERT(instr->hydrogen()->first()->representation().IsFloat32x4());
4744 ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
4745 ASSERT(instr->hydrogen()->third()->representation().IsFloat32x4());
4747 XMMRegister value_reg = ToFloat32x4Register(instr->first());
4748 XMMRegister lower_reg = ToFloat32x4Register(instr->second());
4749 XMMRegister upper_reg = ToFloat32x4Register(instr->third());
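      // Clamp by applying minps with the upper bound, then maxps with the
      // lower bound; with NaN-free bounds this pins every lane into range.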
4750 __ minps(value_reg, upper_reg);
4751 __ maxps(value_reg, lower_reg);
4754 case kFloat64x2Clamp: {
4755 ASSERT(instr->first()->Equals(instr->result()));
4756 ASSERT(instr->hydrogen()->first()->representation().IsFloat64x2());
4757 ASSERT(instr->hydrogen()->second()->representation().IsFloat64x2());
4758 ASSERT(instr->hydrogen()->third()->representation().IsFloat64x2());
4760 XMMRegister value_reg = ToFloat64x2Register(instr->first());
4761 XMMRegister lower_reg = ToFloat64x2Register(instr->second());
4762 XMMRegister upper_reg = ToFloat64x2Register(instr->third());
4763 __ minpd(value_reg, upper_reg);
4764 __ maxpd(value_reg, lower_reg);
4774 void LCodeGen::DoQuarternarySIMDOperation(LQuarternarySIMDOperation* instr) {
4775 switch (instr->op()) {
4776 case kFloat32x4Constructor: {
4777 ASSERT(instr->hydrogen()->x()->representation().IsDouble());
4778 ASSERT(instr->hydrogen()->y()->representation().IsDouble());
4779 ASSERT(instr->hydrogen()->z()->representation().IsDouble());
4780 ASSERT(instr->hydrogen()->w()->representation().IsDouble());
4781 XMMRegister x_reg = ToDoubleRegister(instr->x());
4782 XMMRegister y_reg = ToDoubleRegister(instr->y());
4783 XMMRegister z_reg = ToDoubleRegister(instr->z());
4784 XMMRegister w_reg = ToDoubleRegister(instr->w());
4785 XMMRegister result_reg = ToFloat32x4Register(instr->result());
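      // Convert each double argument to float32 in a stack slot, then load
      // all four lanes at once with movups.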
4786 __ subq(rsp, Immediate(kFloat32x4Size));
4787 __ xorps(xmm0, xmm0);
4788 __ cvtsd2ss(xmm0, x_reg);
4789 __ movss(Operand(rsp, 0 * kFloatSize), xmm0);
4790 __ xorps(xmm0, xmm0);
4791 __ cvtsd2ss(xmm0, y_reg);
4792 __ movss(Operand(rsp, 1 * kFloatSize), xmm0);
4793 __ xorps(xmm0, xmm0);
4794 __ cvtsd2ss(xmm0, z_reg);
4795 __ movss(Operand(rsp, 2 * kFloatSize), xmm0);
4796 __ xorps(xmm0, xmm0);
4797 __ cvtsd2ss(xmm0, w_reg);
4798 __ movss(Operand(rsp, 3 * kFloatSize), xmm0);
4799 __ movups(result_reg, Operand(rsp, 0 * kFloatSize));
4800 __ addq(rsp, Immediate(kFloat32x4Size));
4803 case kInt32x4Constructor: {
4804 ASSERT(instr->hydrogen()->x()->representation().IsInteger32());
4805 ASSERT(instr->hydrogen()->y()->representation().IsInteger32());
4806 ASSERT(instr->hydrogen()->z()->representation().IsInteger32());
4807 ASSERT(instr->hydrogen()->w()->representation().IsInteger32());
4808 Register x_reg = ToRegister(instr->x());
4809 Register y_reg = ToRegister(instr->y());
4810 Register z_reg = ToRegister(instr->z());
4811 Register w_reg = ToRegister(instr->w());
4812 XMMRegister result_reg = ToInt32x4Register(instr->result());
4813 __ subq(rsp, Immediate(kInt32x4Size));
4814 __ movl(Operand(rsp, 0 * kInt32Size), x_reg);
4815 __ movl(Operand(rsp, 1 * kInt32Size), y_reg);
4816 __ movl(Operand(rsp, 2 * kInt32Size), z_reg);
4817 __ movl(Operand(rsp, 3 * kInt32Size), w_reg);
4818 __ movups(result_reg, Operand(rsp, 0 * kInt32Size));
4819 __ addq(rsp, Immediate(kInt32x4Size));
4822 case kInt32x4Bool: {
4823 ASSERT(instr->hydrogen()->x()->representation().IsTagged());
4824 ASSERT(instr->hydrogen()->y()->representation().IsTagged());
4825 ASSERT(instr->hydrogen()->z()->representation().IsTagged());
4826 ASSERT(instr->hydrogen()->w()->representation().IsTagged());
4827 HType x_type = instr->hydrogen()->x()->type();
4828 HType y_type = instr->hydrogen()->y()->type();
4829 HType z_type = instr->hydrogen()->z()->type();
4830 HType w_type = instr->hydrogen()->w()->type();
4831 if (!x_type.IsBoolean() || !y_type.IsBoolean() ||
4832 !z_type.IsBoolean() || !w_type.IsBoolean()) {
4833 Comment(";;; deoptimize: other types for int32x4.bool.");
4834         DeoptimizeIf(no_condition, instr->environment());
      }
4837 XMMRegister result_reg = ToInt32x4Register(instr->result());
4838 Register x_reg = ToRegister(instr->x());
4839 Register y_reg = ToRegister(instr->y());
4840 Register z_reg = ToRegister(instr->z());
4841 Register w_reg = ToRegister(instr->w());
4842 Label load_false_x, done_x, load_false_y, done_y,
4843 load_false_z, done_z, load_false_w, done_w;
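      // Each boolean becomes a full 32-bit lane mask: -1 (all bits set) for
      // true, 0 for false, assembled lane by lane in a stack slot.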
4844 __ subq(rsp, Immediate(kInt32x4Size));
4846 __ CompareRoot(x_reg, Heap::kTrueValueRootIndex);
4847 __ j(not_equal, &load_false_x, Label::kNear);
4848 __ movl(Operand(rsp, 0 * kInt32Size), Immediate(-1));
4849 __ jmp(&done_x, Label::kNear);
4850 __ bind(&load_false_x);
4851       __ movl(Operand(rsp, 0 * kInt32Size), Immediate(0x0));
      __ bind(&done_x);
4854 __ CompareRoot(y_reg, Heap::kTrueValueRootIndex);
4855 __ j(not_equal, &load_false_y, Label::kNear);
4856 __ movl(Operand(rsp, 1 * kInt32Size), Immediate(-1));
4857 __ jmp(&done_y, Label::kNear);
4858 __ bind(&load_false_y);
4859       __ movl(Operand(rsp, 1 * kInt32Size), Immediate(0x0));
      __ bind(&done_y);
4862 __ CompareRoot(z_reg, Heap::kTrueValueRootIndex);
4863 __ j(not_equal, &load_false_z, Label::kNear);
4864 __ movl(Operand(rsp, 2 * kInt32Size), Immediate(-1));
4865 __ jmp(&done_z, Label::kNear);
4866 __ bind(&load_false_z);
4867       __ movl(Operand(rsp, 2 * kInt32Size), Immediate(0x0));
      __ bind(&done_z);
4870 __ CompareRoot(w_reg, Heap::kTrueValueRootIndex);
4871 __ j(not_equal, &load_false_w, Label::kNear);
4872 __ movl(Operand(rsp, 3 * kInt32Size), Immediate(-1));
4873 __ jmp(&done_w, Label::kNear);
4874 __ bind(&load_false_w);
4875       __ movl(Operand(rsp, 3 * kInt32Size), Immediate(0x0));
      __ bind(&done_w);
4878 __ movups(result_reg, Operand(rsp, 0));
4879 __ addq(rsp, Immediate(kInt32x4Size));
4889 void LCodeGen::DoPower(LPower* instr) {
4890 Representation exponent_type = instr->hydrogen()->right()->representation();
4891 // Having marked this as a call, we can use any registers.
4892 // Just make sure that the input/output registers are the expected ones.
4894 Register exponent = rdx;
4895 ASSERT(!instr->right()->IsRegister() ||
4896 ToRegister(instr->right()).is(exponent));
4897 ASSERT(!instr->right()->IsDoubleRegister() ||
4898 ToDoubleRegister(instr->right()).is(xmm1));
4899 ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
4900 ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
4902   if (exponent_type.IsSmi()) {
4903     MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
4905   } else if (exponent_type.IsTagged()) {
    Label no_deopt;
4907     __ JumpIfSmi(exponent, &no_deopt, Label::kNear);
4908     __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
4909     DeoptimizeIf(not_equal, instr->environment());
    __ bind(&no_deopt);
4911     MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
4913   } else if (exponent_type.IsInteger32()) {
4914     MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
4917     ASSERT(exponent_type.IsDouble());
4918     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}
4924 void LCodeGen::DoMathExp(LMathExp* instr) {
4925 XMMRegister input = ToDoubleRegister(instr->value());
4926 XMMRegister result = ToDoubleRegister(instr->result());
4927 XMMRegister temp0 = double_scratch0();
4928 Register temp1 = ToRegister(instr->temp1());
4929 Register temp2 = ToRegister(instr->temp2());
4931 MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
4935 void LCodeGen::DoMathLog(LMathLog* instr) {
4936 ASSERT(instr->value()->Equals(instr->result()));
4937 XMMRegister input_reg = ToDoubleRegister(instr->value());
4938 XMMRegister xmm_scratch = double_scratch0();
4939 Label positive, done, zero;
4940 __ xorps(xmm_scratch, xmm_scratch);
4941 __ ucomisd(input_reg, xmm_scratch);
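  // After comparing against +0: "above" means input > 0 (take the real
  // log); carry clear means input == 0 (return -Infinity); anything else is
  // negative or NaN (return NaN).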
4942 __ j(above, &positive, Label::kNear);
4943 __ j(not_carry, &zero, Label::kNear);
4944 ExternalReference nan =
4945 ExternalReference::address_of_canonical_non_hole_nan();
4946 Operand nan_operand = masm()->ExternalOperand(nan);
4947 __ movsd(input_reg, nan_operand);
4948 __ jmp(&done, Label::kNear);
  __ bind(&zero);
4950   ExternalReference ninf =
4951 ExternalReference::address_of_negative_infinity();
4952 Operand ninf_operand = masm()->ExternalOperand(ninf);
4953 __ movsd(input_reg, ninf_operand);
4954 __ jmp(&done, Label::kNear);
  __ bind(&positive);
  __ fldln2();
4957   __ subp(rsp, Immediate(kDoubleSize));
4958 __ movsd(Operand(rsp, 0), input_reg);
4959   __ fld_d(Operand(rsp, 0));
  __ fyl2x();  // Computes ST(1) * log2(ST(0)) = ln(2) * log2(x) = ln(x).
4961   __ fstp_d(Operand(rsp, 0));
4962 __ movsd(input_reg, Operand(rsp, 0));
4963   __ addp(rsp, Immediate(kDoubleSize));
  __ bind(&done);
}
4968 void LCodeGen::DoMathClz32(LMathClz32* instr) {
4969 Register input = ToRegister(instr->value());
4970 Register result = ToRegister(instr->result());
4971 Label not_zero_input;
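  // bsrl returns the index of the highest set bit (undefined for zero, hence
  // the branch); XOR-ing the index with 31 converts it to the leading-zero
  // count, and seeding 63 makes the zero case come out as 63 ^ 31 == 32.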
4972 __ bsrl(result, input);
4974 __ j(not_zero, ¬_zero_input);
4975 __ Set(result, 63); // 63^31 == 32
4977 __ bind(¬_zero_input);
4978 __ xorl(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
4982 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
4983 ASSERT(ToRegister(instr->context()).is(rsi));
4984 ASSERT(ToRegister(instr->function()).is(rdi));
4985 ASSERT(instr->HasPointerMap());
4987 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
4988 if (known_function.is_null()) {
4989 LPointerMap* pointers = instr->pointer_map();
4990 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
4991 ParameterCount count(instr->arity());
4992 __ InvokeFunction(rdi, count, CALL_FUNCTION, generator);
  } else {
4994     CallKnownFunction(known_function,
4995                       instr->hydrogen()->formal_parameter_count(),
                      instr->arity(),
                      instr,
4998                       RDI_CONTAINS_TARGET);
  }
}
5003 void LCodeGen::DoCallFunction(LCallFunction* instr) {
5004 ASSERT(ToRegister(instr->context()).is(rsi));
5005 ASSERT(ToRegister(instr->function()).is(rdi));
5006 ASSERT(ToRegister(instr->result()).is(rax));
5008 int arity = instr->arity();
5009 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
5010 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5014 void LCodeGen::DoCallNew(LCallNew* instr) {
5015 ASSERT(ToRegister(instr->context()).is(rsi));
5016 ASSERT(ToRegister(instr->constructor()).is(rdi));
5017 ASSERT(ToRegister(instr->result()).is(rax));
5019 __ Set(rax, instr->arity());
5020   // No cell in rbx for construct type feedback in optimized code
5021 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
5022 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
5023 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
5027 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
5028 ASSERT(ToRegister(instr->context()).is(rsi));
5029 ASSERT(ToRegister(instr->constructor()).is(rdi));
5030 ASSERT(ToRegister(instr->result()).is(rax));
5032 __ Set(rax, instr->arity());
5033 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
5034 ElementsKind kind = instr->hydrogen()->elements_kind();
5035 AllocationSiteOverrideMode override_mode =
5036 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
5037           ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;
5040 if (instr->arity() == 0) {
5041 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
5042 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
5043   } else if (instr->arity() == 1) {
    Label done;
5045     if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
5047       // A non-zero length argument produces holes, so the array may need
5048       // to transition to a holey kind; inspect the single argument.
5049       __ movp(rcx, Operand(rsp, 0));
      __ testp(rcx, rcx);
5051       __ j(zero, &packed_case, Label::kNear);
5053 ElementsKind holey_kind = GetHoleyElementsKind(kind);
5054 ArraySingleArgumentConstructorStub stub(isolate(),
5057 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
5058 __ jmp(&done, Label::kNear);
5059 __ bind(&packed_case);
5062 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
5063 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
5066 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
5067 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
5072 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
5073 ASSERT(ToRegister(instr->context()).is(rsi));
5074 CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
5078 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
5079 Register function = ToRegister(instr->function());
5080 Register code_object = ToRegister(instr->code_object());
5081 __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
5082 __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
5086 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
5087 Register result = ToRegister(instr->result());
5088 Register base = ToRegister(instr->base_object());
5089 if (instr->offset()->IsConstantOperand()) {
5090 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
5091 __ leap(result, Operand(base, ToInteger32(offset)));
  } else {
5093     Register offset = ToRegister(instr->offset());
5094     __ leap(result, Operand(base, offset, times_1, 0));
  }
}
5099 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
5100 HStoreNamedField* hinstr = instr->hydrogen();
5101 Representation representation = instr->representation();
5103 HObjectAccess access = hinstr->access();
5104 int offset = access.offset();
5106 if (access.IsExternalMemory()) {
5107 ASSERT(!hinstr->NeedsWriteBarrier());
5108 Register value = ToRegister(instr->value());
5109 if (instr->object()->IsConstantOperand()) {
5110 ASSERT(value.is(rax));
5111 LConstantOperand* object = LConstantOperand::cast(instr->object());
5112 __ store_rax(ToExternalReference(object));
    } else {
5114       Register object = ToRegister(instr->object());
5115       __ Store(MemOperand(object, offset), value, representation);
    }
    return;
  }
5120 Register object = ToRegister(instr->object());
5121 __ AssertNotSmi(object);
5123 ASSERT(!representation.IsSmi() ||
5124 !instr->value()->IsConstantOperand() ||
5125 IsInteger32Constant(LConstantOperand::cast(instr->value())));
5126 if (representation.IsDouble()) {
5127 ASSERT(access.IsInobject());
5128 ASSERT(!hinstr->has_transition());
5129 ASSERT(!hinstr->NeedsWriteBarrier());
5130 XMMRegister value = ToDoubleRegister(instr->value());
5131     __ movsd(FieldOperand(object, offset), value);
    return;
  }
5135 if (hinstr->has_transition()) {
5136 Handle<Map> transition = hinstr->transition_map();
5137 AddDeprecationDependency(transition);
5138 if (!hinstr->NeedsWriteBarrierForMap()) {
5139 __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
    } else {
5141       Register temp = ToRegister(instr->temp());
5142 __ Move(kScratchRegister, transition);
5143 __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
5144 // Update the write barrier for the map field.
5145 __ RecordWriteForMap(object,
5153 Register write_register = object;
5154 if (!access.IsInobject()) {
5155 write_register = ToRegister(instr->temp());
5156 __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
5159 if (representation.IsSmi() && SmiValuesAre32Bits() &&
5160 hinstr->value()->representation().IsInteger32()) {
5161 ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
5162 if (FLAG_debug_code) {
5163 Register scratch = kScratchRegister;
5164 __ Load(scratch, FieldOperand(write_register, offset), representation);
5165 __ AssertSmi(scratch);
    }
5167   // Store int value directly to upper half of the smi.
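  // (With 32-bit smis the payload occupies the upper half of the 64-bit
  // word, so a plain 32-bit store at offset + kPointerSize / 2 writes a
  // correctly tagged smi without any shifting.)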
5168 STATIC_ASSERT(kSmiTag == 0);
5169 ASSERT(kSmiTagSize + kSmiShiftSize == 32);
5170 offset += kPointerSize / 2;
5171 representation = Representation::Integer32();
5174 Operand operand = FieldOperand(write_register, offset);
5176 if (instr->value()->IsRegister()) {
5177 Register value = ToRegister(instr->value());
5178 __ Store(operand, value, representation);
  } else {
5180     LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
5181 if (IsInteger32Constant(operand_value)) {
5182 ASSERT(!hinstr->NeedsWriteBarrier());
5183 int32_t value = ToInteger32(operand_value);
5184 if (representation.IsSmi()) {
5185 __ Move(operand, Smi::FromInt(value));
      } else {
5188         __ movl(operand, Immediate(value));
      }
    } else {
5192       Handle<Object> handle_value = ToHandle(operand_value);
5193 ASSERT(!hinstr->NeedsWriteBarrier());
5194 __ Move(operand, handle_value);
5198 if (hinstr->NeedsWriteBarrier()) {
5199 Register value = ToRegister(instr->value());
5200 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
5201 // Update the write barrier for the object for in-object properties.
5202 __ RecordWriteField(write_register,
5207 EMIT_REMEMBERED_SET,
5208 hinstr->SmiCheckForWriteBarrier(),
5209 hinstr->PointersToHereCheckForValue());
5214 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
5215 ASSERT(ToRegister(instr->context()).is(rsi));
5216 ASSERT(ToRegister(instr->object()).is(rdx));
5217 ASSERT(ToRegister(instr->value()).is(rax));
5219 __ Move(rcx, instr->hydrogen()->name());
5220 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
5221 CallCode(ic, RelocInfo::CODE_TARGET, instr);
5225 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
5226 Representation representation = instr->hydrogen()->length()->representation();
5227 ASSERT(representation.Equals(instr->hydrogen()->index()->representation()));
5228 ASSERT(representation.IsSmiOrInteger32());
5230 Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal;
5231 if (instr->length()->IsConstantOperand()) {
5232 int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
5233 Register index = ToRegister(instr->index());
5234 if (representation.IsSmi()) {
5235 __ Cmp(index, Smi::FromInt(length));
    } else {
5237       __ cmpl(index, Immediate(length));
    }
    // The comparisons above have index on the left and length on the right,
    // the opposite of the register cases below, so commute the condition.
5239     cc = CommuteCondition(cc);
5240 } else if (instr->index()->IsConstantOperand()) {
5241 int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
5242 if (instr->length()->IsRegister()) {
5243 Register length = ToRegister(instr->length());
5244 if (representation.IsSmi()) {
5245 __ Cmp(length, Smi::FromInt(index));
      } else {
5247         __ cmpl(length, Immediate(index));
      }
    } else {
5250       Operand length = ToOperand(instr->length());
5251 if (representation.IsSmi()) {
5252 __ Cmp(length, Smi::FromInt(index));
      } else {
5254         __ cmpl(length, Immediate(index));
      }
    }
  } else {
5258     Register index = ToRegister(instr->index());
5259 if (instr->length()->IsRegister()) {
5260 Register length = ToRegister(instr->length());
5261 if (representation.IsSmi()) {
5262 __ cmpp(length, index);
      } else {
5264         __ cmpl(length, index);
      }
    } else {
5267       Operand length = ToOperand(instr->length());
5268 if (representation.IsSmi()) {
5269 __ cmpp(length, index);
      } else {
5271         __ cmpl(length, index);
      }
    }
  }
5275   if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
5277 __ j(NegateCondition(cc), &done, Label::kNear);
5281 DeoptimizeIf(cc, instr->environment());
5286 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
5287 ElementsKind elements_kind = instr->elements_kind();
5288 LOperand* key = instr->key();
5289 if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
5290 Register key_reg = ToRegister(key);
5291 Representation key_representation =
5292 instr->hydrogen()->key()->representation();
5293 if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
5294 if (!HandleExternalArrayOpRequiresPreScale(
5295 key, key_representation, elements_kind))
5296 __ SmiToInteger64(key_reg, key_reg);
5297 } else if (instr->hydrogen()->IsDehoisted()) {
5298 // Sign extend key because it could be a 32 bit negative value
5299 // and the dehoisted address computation happens in 64 bits
5300 __ movsxlq(key_reg, key_reg);
5302 } else if (kPointerSize == kInt64Size && !key->IsConstantOperand()) {
5303 Representation key_representation =
5304 instr->hydrogen()->key()->representation();
5305 if (ExternalArrayOpRequiresTemp(key_representation, elements_kind))
5306 HandleExternalArrayOpRequiresPreScale(
5307 key, key_representation, elements_kind);
5310 Operand operand(BuildFastArrayOperand(
5313 instr->hydrogen()->key()->representation(),
5315 instr->base_offset()));
5317 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
5318 elements_kind == FLOAT32_ELEMENTS) {
5319 XMMRegister value(ToDoubleRegister(instr->value()));
5320 __ cvtsd2ss(value, value);
5321 __ movss(operand, value);
5322 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
5323 elements_kind == FLOAT64_ELEMENTS) {
5324 __ movsd(operand, ToDoubleRegister(instr->value()));
5325 } else if (IsSIMD128ElementsKind(elements_kind)) {
5326 __ movups(operand, ToSIMD128Register(instr->value()));
  } else {
5328     Register value(ToRegister(instr->value()));
5329 switch (elements_kind) {
5330 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
5331 case EXTERNAL_INT8_ELEMENTS:
5332 case EXTERNAL_UINT8_ELEMENTS:
5334 case UINT8_ELEMENTS:
5335 case UINT8_CLAMPED_ELEMENTS:
5336         __ movb(operand, value);
        break;
5338 case EXTERNAL_INT16_ELEMENTS:
5339 case EXTERNAL_UINT16_ELEMENTS:
5340 case INT16_ELEMENTS:
5341 case UINT16_ELEMENTS:
5342         __ movw(operand, value);
        break;
5344 case EXTERNAL_INT32_ELEMENTS:
5345 case EXTERNAL_UINT32_ELEMENTS:
5346 case INT32_ELEMENTS:
5347 case UINT32_ELEMENTS:
5348         __ movl(operand, value);
        break;
5350 case EXTERNAL_FLOAT32_ELEMENTS:
5351 case EXTERNAL_FLOAT32x4_ELEMENTS:
5352 case EXTERNAL_FLOAT64x2_ELEMENTS:
5353 case EXTERNAL_INT32x4_ELEMENTS:
5354 case EXTERNAL_FLOAT64_ELEMENTS:
5355 case FLOAT32_ELEMENTS:
5356 case FLOAT64_ELEMENTS:
5357 case FLOAT32x4_ELEMENTS:
5358 case FLOAT64x2_ELEMENTS:
5359 case INT32x4_ELEMENTS:
5361 case FAST_SMI_ELEMENTS:
5362 case FAST_DOUBLE_ELEMENTS:
5363 case FAST_HOLEY_ELEMENTS:
5364 case FAST_HOLEY_SMI_ELEMENTS:
5365 case FAST_HOLEY_DOUBLE_ELEMENTS:
5366 case DICTIONARY_ELEMENTS:
5367       case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}
5375 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
5376 XMMRegister value = ToDoubleRegister(instr->value());
5377 LOperand* key = instr->key();
5378 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
5379 instr->hydrogen()->IsDehoisted()) {
5380 // Sign extend key because it could be a 32 bit negative value
5381 // and the dehoisted address computation happens in 64 bits
5382 __ movsxlq(ToRegister(key), ToRegister(key));
  }
5384   if (instr->NeedsCanonicalization()) {
    // The hole is encoded as one specific NaN bit pattern, so any other NaN
    // must be replaced with the canonical NaN before being stored.
    Label have_value;
5387     __ ucomisd(value, value);
5388     __ j(parity_odd, &have_value, Label::kNear);  // Jump if value is not NaN.
5390 __ Set(kScratchRegister, BitCast<uint64_t>(
5391 FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
5392 __ movq(value, kScratchRegister);
5394 __ bind(&have_value);
5397 Operand double_store_operand = BuildFastArrayOperand(
5400 instr->hydrogen()->key()->representation(),
5401 FAST_DOUBLE_ELEMENTS,
5402 instr->base_offset());
5404 __ movsd(double_store_operand, value);
5408 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
5409 HStoreKeyed* hinstr = instr->hydrogen();
5410 LOperand* key = instr->key();
5411 int offset = instr->base_offset();
5412 Representation representation = hinstr->value()->representation();
5414 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
5415 instr->hydrogen()->IsDehoisted()) {
5416 // Sign extend key because it could be a 32 bit negative value
5417 // and the dehoisted address computation happens in 64 bits
5418 __ movsxlq(ToRegister(key), ToRegister(key));
5420 if (representation.IsInteger32() && SmiValuesAre32Bits()) {
5421 ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
5422 ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
5423 if (FLAG_debug_code) {
5424 Register scratch = kScratchRegister;
5426 BuildFastArrayOperand(instr->elements(),
5428 instr->hydrogen()->key()->representation(),
5431 Representation::Smi());
5432 __ AssertSmi(scratch);
5434 // Store int value directly to upper half of the smi.
5435 STATIC_ASSERT(kSmiTag == 0);
5436 ASSERT(kSmiTagSize + kSmiShiftSize == 32);
5437 offset += kPointerSize / 2;
5441 BuildFastArrayOperand(instr->elements(),
5443 instr->hydrogen()->key()->representation(),
5446 if (instr->value()->IsRegister()) {
5447 __ Store(operand, ToRegister(instr->value()), representation);
5449 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
5450 if (IsInteger32Constant(operand_value)) {
5451 int32_t value = ToInteger32(operand_value);
5452 if (representation.IsSmi()) {
5453 __ Move(operand, Smi::FromInt(value));
      } else {
5456         __ movl(operand, Immediate(value));
      }
    } else {
5459       Handle<Object> handle_value = ToHandle(operand_value);
5460       __ Move(operand, handle_value);
    }
  }
5464 if (hinstr->NeedsWriteBarrier()) {
5465 Register elements = ToRegister(instr->elements());
5466 ASSERT(instr->value()->IsRegister());
5467 Register value = ToRegister(instr->value());
5468 ASSERT(!key->IsConstantOperand());
5469 SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
5470 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
5471 // Compute address of modified element and store it into key register.
5472 Register key_reg(ToRegister(key));
5473 __ leap(key_reg, operand);
5474 __ RecordWrite(elements,
5478 EMIT_REMEMBERED_SET,
5480 hinstr->PointersToHereCheckForValue());
5485 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
5486 if (instr->is_typed_elements()) {
5487 DoStoreKeyedExternalArray(instr);
5488 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
5489 DoStoreKeyedFixedDoubleArray(instr);
  } else {
5491     DoStoreKeyedFixedArray(instr);
  }
}
5496 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
5497 ASSERT(ToRegister(instr->context()).is(rsi));
5498 ASSERT(ToRegister(instr->object()).is(rdx));
5499 ASSERT(ToRegister(instr->key()).is(rcx));
5500 ASSERT(ToRegister(instr->value()).is(rax));
5502 Handle<Code> ic = instr->strict_mode() == STRICT
5503 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
5504 : isolate()->builtins()->KeyedStoreIC_Initialize();
5505 CallCode(ic, RelocInfo::CODE_TARGET, instr);
5509 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
5510 Register object_reg = ToRegister(instr->object());
5512 Handle<Map> from_map = instr->original_map();
5513 Handle<Map> to_map = instr->transitioned_map();
5514 ElementsKind from_kind = instr->from_kind();
5515 ElementsKind to_kind = instr->to_kind();
5517 Label not_applicable;
5518 __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
5519 __ j(not_equal, ¬_applicable);
5520 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
5521 Register new_map_reg = ToRegister(instr->new_map_temp());
5522 __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
5523 __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
5525     __ RecordWriteForMap(object_reg, new_map_reg, ToRegister(instr->temp()),
                         kDontSaveFPRegs);
  } else {
5528 ASSERT(object_reg.is(rax));
5529 ASSERT(ToRegister(instr->context()).is(rsi));
5530 PushSafepointRegistersScope scope(this);
5531 __ Move(rbx, to_map);
5532 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
5533     TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
5535     RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
5537 __ bind(¬_applicable);
5541 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
5542 Register object = ToRegister(instr->object());
5543 Register temp = ToRegister(instr->temp());
5544 Label no_memento_found;
5545 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
5546 DeoptimizeIf(equal, instr->environment());
5547 __ bind(&no_memento_found);
5551 void LCodeGen::DoStringAdd(LStringAdd* instr) {
5552 ASSERT(ToRegister(instr->context()).is(rsi));
5553 ASSERT(ToRegister(instr->left()).is(rdx));
5554 ASSERT(ToRegister(instr->right()).is(rax));
5555 StringAddStub stub(isolate(),
5556 instr->hydrogen()->flags(),
5557 instr->hydrogen()->pretenure_flag());
5558 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5562 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
5563 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
5565 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
5566 : LDeferredCode(codegen), instr_(instr) { }
5567 virtual void Generate() V8_OVERRIDE {
5568 codegen()->DoDeferredStringCharCodeAt(instr_);
5570 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5572 LStringCharCodeAt* instr_;
5575 DeferredStringCharCodeAt* deferred =
5576 new(zone()) DeferredStringCharCodeAt(this, instr);
5578 StringCharLoadGenerator::Generate(masm(),
5579 ToRegister(instr->string()),
5580 ToRegister(instr->index()),
5581 ToRegister(instr->result()),
5583 __ bind(deferred->exit());
5587 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
5588 Register string = ToRegister(instr->string());
5589 Register result = ToRegister(instr->result());
5591 // TODO(3095996): Get rid of this. For now, we need to make the
5592 // result register contain a valid pointer because it is already
5593 // contained in the register pointer map.
  __ Set(result, 0);
5596   PushSafepointRegistersScope scope(this);
5598 // Push the index as a smi. This is safe because of the checks in
5599 // DoStringCharCodeAt above.
5600 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
5601 if (instr->index()->IsConstantOperand()) {
5602 int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
5603 __ Push(Smi::FromInt(const_index));
  } else {
5605     Register index = ToRegister(instr->index());
5606     __ Integer32ToSmi(index, index);
    __ Push(index);
  }
5609 CallRuntimeFromDeferred(
5610 Runtime::kHiddenStringCharCodeAt, 2, instr, instr->context());
5612 __ SmiToInteger32(rax, rax);
5613 __ StoreToSafepointRegisterSlot(result, rax);
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  ASSERT(!char_code.is(result));

  __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
  __ j(above, deferred->entry());
  __ movsxlq(char_code, char_code);
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ movp(result, FieldOperand(result,
                               char_code, times_pointer_size,
                               FixedArray::kHeaderSize));
  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
  __ j(equal, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Integer32ToSmi(char_code, char_code);
  __ Push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  if (input->IsRegister()) {
    __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
  } else {
    __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();

  __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
}


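// NumberTagI/NumberTagU box an int32/uint32 as a tagged value: anything in
// smi range is tagged in place, everything else gets a freshly allocated
// HeapNumber via DoDeferredNumberTagIU. With 32-bit smis every int32 fits,
// so DoNumberTagI only needs a deferred path for 31-bit smis.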
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                       instr_->temp2(), SIGNED_INT32);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  if (SmiValuesAre32Bits()) {
    __ Integer32ToSmi(reg, reg);
  } else {
    ASSERT(SmiValuesAre31Bits());
    DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
    __ Integer32ToSmi(reg, reg);
    __ j(overflow, deferred->entry());
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                       instr_->temp2(), UNSIGNED_INT32);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ cmpl(reg, Immediate(Smi::kMaxValue));
  __ j(above, deferred->entry());
  __ Integer32ToSmi(reg, reg);
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp1,
                                     LOperand* temp2,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register reg = ToRegister(value);
  Register tmp = ToRegister(temp1);
  XMMRegister temp_xmm = ToDoubleRegister(temp2);

  // Load value into temp_xmm which will be preserved across potential call to
  // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
  // XMM registers on x64).
  if (signedness == SIGNED_INT32) {
    ASSERT(SmiValuesAre31Bits());
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
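    // Concretely: tagging shifted the value left by one and wrapped; shifting
    // back leaves bit 31 holding the complement of the original sign, so
    // flipping it recovers the value (e.g. 0x40000000 tags to 0x80000000,
    // shifts back to 0xC0000000, and the xor below yields 0x40000000 again).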
    __ SmiToInteger32(reg, reg);
    __ xorl(reg, Immediate(0x80000000));
    __ cvtlsi2sd(temp_xmm, reg);
  } else {
    ASSERT(signedness == UNSIGNED_INT32);
    __ LoadUint32(temp_xmm, reg);
  }

  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, &slow);
    __ jmp(&done, Label::kNear);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // Put a valid pointer value in the stack slot where the result
    // register is stored, as this register is in the pointer map, but contains
    // an integer value.
    __ Set(reg, 0);

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagIU uses the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kHiddenAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(reg, rax);
  }

  // Done. Put the value in temp_xmm into the value of the allocated heap
  // number.
  __ bind(&done);
  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagD(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  XMMRegister input_reg = ToDoubleRegister(instr->value());
  Register reg = ToRegister(instr->result());
  Register tmp = ToRegister(instr->temp());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Move(reg, Smi::FromInt(0));

  {
    PushSafepointRegistersScope scope(this);
    // NumberTagD uses the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kHiddenAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ movp(kScratchRegister, rax);
  }
  __ movp(reg, kScratchRegister);
}


void LCodeGen::DoDeferredSIMD128ToTagged(LSIMD128ToTagged* instr,
                                         Runtime::FunctionId id) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Move(reg, Smi::FromInt(0));

  {
    PushSafepointRegistersScope scope(this);
    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(id);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ movp(kScratchRegister, rax);
  }
  __ movp(reg, kScratchRegister);
}


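// Note: the SIMD128 paths in this file are not stock V8 at this revision;
// they appear to come from the SIMD.js prototyping work carried in this tree.
// A tagged Float32x4/Float64x2/Int32x4 is a heap object wrapping a
// FixedTypedArray, so boxing allocates the wrapper (inline when possible) and
// then stores the 128-bit payload with a single movups.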
template<class T>
void LCodeGen::HandleSIMD128ToTagged(LSIMD128ToTagged* instr) {
  class DeferredSIMD128ToTagged V8_FINAL : public LDeferredCode {
   public:
    DeferredSIMD128ToTagged(LCodeGen* codegen,
                            LSIMD128ToTagged* instr,
                            Runtime::FunctionId id)
        : LDeferredCode(codegen), instr_(instr), id_(id) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LSIMD128ToTagged* instr_;
    Runtime::FunctionId id_;
  };

  XMMRegister input_reg = ToSIMD128Register(instr->value());
  Register reg = ToRegister(instr->result());
  Register tmp = ToRegister(instr->temp());
  Register tmp2 = ToRegister(instr->temp2());
  Register tmp3 = ToRegister(instr->temp3());

  DeferredSIMD128ToTagged* deferred =
      new(zone()) DeferredSIMD128ToTagged(this, instr,
          static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()));
  if (FLAG_inline_new) {
    if (T::kInstanceType == FLOAT32x4_TYPE) {
      __ AllocateFloat32x4(reg, tmp, tmp2, tmp3, deferred->entry());
    } else if (T::kInstanceType == INT32x4_TYPE) {
      __ AllocateInt32x4(reg, tmp, tmp2, tmp3, deferred->entry());
    } else if (T::kInstanceType == FLOAT64x2_TYPE) {
      __ AllocateFloat64x2(reg, tmp, tmp2, tmp3, deferred->entry());
    }
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());

  // Load the inner FixedTypedArray object.
  __ movp(tmp, FieldOperand(reg, T::kValueOffset));

  __ movups(FieldOperand(tmp, FixedTypedArrayBase::kDataOffset), input_reg);
}


void LCodeGen::DoSIMD128ToTagged(LSIMD128ToTagged* instr) {
  if (instr->value()->IsFloat32x4Register()) {
    HandleSIMD128ToTagged<Float32x4>(instr);
  } else if (instr->value()->IsFloat64x2Register()) {
    HandleSIMD128ToTagged<Float64x2>(instr);
  } else {
    ASSERT(instr->value()->IsInt32x4Register());
    HandleSIMD128ToTagged<Int32x4>(instr);
  }
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
    DeoptimizeIf(NegateCondition(is_smi), instr->environment());
  }
  __ Integer32ToSmi(output, input);
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


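// Smi tagging on x64 keeps the integer in the upper half of the word when
// 32-bit smis are enabled (bits 32..63, so no int32 can overflow); with
// 31-bit smis the value is shifted left by one in the low 32 bits, e.g. 5 is
// represented as 0xa, which is why tagging can overflow above.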
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  ASSERT(instr->value()->Equals(instr->result()));
  Register input = ToRegister(instr->value());
  if (instr->needs_check()) {
    Condition is_smi = __ CheckSmi(input);
    DeoptimizeIf(NegateCondition(is_smi), instr->environment());
  } else {
    __ AssertSmi(input);
  }
  __ SmiToInteger32(input, input);
}


void LCodeGen::EmitNumberUntagD(Register input_reg,
                                XMMRegister result_reg,
                                bool can_convert_undefined_to_nan,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env,
                                NumberUntagDMode mode) {
  Label convert, load_smi, done;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);

    // Heap number map check.
    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);

    // On x64 it is safe to load at heap number offset before evaluating the map
    // check, since all heap objects are at least two words long.
    __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));

    if (can_convert_undefined_to_nan) {
      __ j(not_equal, &convert, Label::kNear);
    } else {
      DeoptimizeIf(not_equal, env);
    }

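    // ucomisd reports -0.0 equal to +0.0, so comparing against zero cannot
    // reject minus zero by itself; when the value compares equal to zero the
    // sign bit is inspected explicitly (movmskpd puts the sign of the low
    // double into bit 0).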
    if (deoptimize_on_minus_zero) {
      XMMRegister xmm_scratch = double_scratch0();
      __ xorps(xmm_scratch, xmm_scratch);
      __ ucomisd(xmm_scratch, result_reg);
      __ j(not_equal, &done, Label::kNear);
      __ movmskpd(kScratchRegister, result_reg);
      __ testq(kScratchRegister, Immediate(1));
      DeoptimizeIf(not_zero, env);
    }
    __ jmp(&done, Label::kNear);

    if (can_convert_undefined_to_nan) {
      __ bind(&convert);

      // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
      __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
      DeoptimizeIf(not_equal, env);

      __ xorps(result_reg, result_reg);
      __ divsd(result_reg, result_reg);
      __ jmp(&done, Label::kNear);
    }
  } else {
    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
  }

  // Smi to XMM conversion
  __ bind(&load_smi);
  __ SmiToInteger32(kScratchRegister, input_reg);
  __ Cvtlsi2sd(result_reg, kScratchRegister);
  __ bind(&done);
}


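// Deferred ToInt32 for non-smi inputs. For truncating conversions JavaScript
// maps undefined and false to 0 and true to 1 (ToNumber followed by ToInt32),
// so those oddballs are converted inline; any other non-number cannot be
// truncated and deoptimizes.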
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
  Register input_reg = ToRegister(instr->value());

  if (instr->truncating()) {
    Label no_heap_number, check_bools, check_false;

    // Heap number map check.
    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &no_heap_number, Label::kNear);
    __ TruncateHeapNumberToI(input_reg, input_reg);
    __ jmp(done);

    __ bind(&no_heap_number);
    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
    __ j(not_equal, &check_bools, Label::kNear);
    __ Set(input_reg, 0);
    __ jmp(done);

    __ bind(&check_bools);
    __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
    __ j(not_equal, &check_false, Label::kNear);
    __ Set(input_reg, 1);
    __ jmp(done);

    __ bind(&check_false);
    __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
    __ RecordComment("Deferred TaggedToI: cannot truncate");
    DeoptimizeIf(not_equal, instr->environment());
    __ Set(input_reg, 0);
    __ jmp(done);
  } else {
    Label bailout;
    XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
    __ TaggedToI(input_reg, input_reg, xmm_temp,
        instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
    __ jmp(done);
    __ bind(&bailout);
    DeoptimizeIf(no_condition, instr->environment());
  }
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredTaggedToI(instr_, done());
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));
  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiToInteger32(input_reg, input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
    __ JumpIfNotSmi(input_reg, deferred->entry());
    __ SmiToInteger32(input_reg, input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  XMMRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->can_convert_undefined_to_nan(),
                   instr->hydrogen()->deoptimize_on_minus_zero(),
                   instr->environment(),
                   mode);
}


template<class T>
void LCodeGen::HandleTaggedToSIMD128(LTaggedToSIMD128* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsSIMD128Register());
  LOperand* temp = instr->temp();
  ASSERT(temp->IsRegister());

  Register input_reg = ToRegister(input);
  XMMRegister result_reg = ToSIMD128Register(result);
  Register temp_reg = ToRegister(temp);

  __ testp(input_reg, Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr->environment());
  __ CmpObjectType(input_reg, T::kInstanceType, kScratchRegister);
  DeoptimizeIf(not_equal, instr->environment());

  // Load the inner FixedTypedArray object.
  __ movp(temp_reg, FieldOperand(input_reg, T::kValueOffset));

  __ movups(
      result_reg, FieldOperand(temp_reg, FixedTypedArrayBase::kDataOffset));
}


void LCodeGen::DoTaggedToSIMD128(LTaggedToSIMD128* instr) {
  if (instr->representation().IsFloat32x4()) {
    HandleTaggedToSIMD128<Float32x4>(instr);
  } else if (instr->representation().IsFloat64x2()) {
    HandleTaggedToSIMD128<Float64x2>(instr);
  } else {
    ASSERT(instr->representation().IsInt32x4());
    HandleTaggedToSIMD128<Int32x4>(instr);
  }
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, input_reg);
  } else {
    Label bailout, done;
    XMMRegister xmm_scratch = double_scratch0();
    __ DoubleToI(result_reg, input_reg, xmm_scratch,
        instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
    __ jmp(&done, Label::kNear);
    __ bind(&bailout);
    DeoptimizeIf(no_condition, instr->environment());
    __ bind(&done);
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  Label bailout, done;
  XMMRegister xmm_scratch = double_scratch0();
  __ DoubleToI(result_reg, input_reg, xmm_scratch,
      instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
  __ jmp(&done, Label::kNear);
  __ bind(&bailout);
  DeoptimizeIf(no_condition, instr->environment());
  __ bind(&done);

  __ Integer32ToSmi(result_reg, result_reg);
  DeoptimizeIf(overflow, instr->environment());
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  Condition cc = masm()->CheckSmi(ToRegister(input));
  DeoptimizeIf(NegateCondition(cc), instr->environment());
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    Condition cc = masm()->CheckSmi(ToRegister(input));
    DeoptimizeIf(cc, instr->environment());
  }
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());

  __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
            Immediate(static_cast<int8_t>(first)));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr->environment());
    } else {
      DeoptimizeIf(below, instr->environment());
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
                Immediate(static_cast<int8_t>(last)));
        DeoptimizeIf(above, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

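    // When the mask is a single bit and the tag is either zero or that same
    // bit, a single testb decides the check; otherwise the instance type is
    // masked and compared against the tag explicitly.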
    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
               Immediate(mask));
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
    } else {
      __ movzxbl(kScratchRegister,
                 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
      __ andb(kScratchRegister, Immediate(mask));
      __ cmpb(kScratchRegister, Immediate(tag));
      DeoptimizeIf(not_equal, instr->environment());
    }
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  __ Cmp(reg, instr->hydrogen()->object().handle());
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ Push(object);
    __ Set(rsi, 0);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);

    __ testp(rax, Immediate(kSmiTagMask));
  }
  DeoptimizeIf(zero, instr->environment());
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(reg, map);
    __ j(equal, &success, Label::kNear);
  }

  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(reg, map);
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ j(not_equal, deferred->entry());
  } else {
    DeoptimizeIf(not_equal, instr->environment());
  }

  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
  XMMRegister xmm_scratch = double_scratch0();
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register input_reg = ToRegister(instr->unclamped());
  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
  XMMRegister xmm_scratch = double_scratch0();
  Label is_smi, done, heap_number;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  __ JumpIfSmi(input_reg, &is_smi, dist);

  // Check for heap number
  __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ Cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr->environment());
  __ xorl(input_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // Heap number
  __ bind(&heap_number);
  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // smi
  __ bind(&is_smi);
  __ SmiToInteger32(input_reg, input_reg);
  __ ClampUint8(input_reg);

  __ bind(&done);
}


void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ movq(result_reg, value_reg);
    __ shrq(result_reg, Immediate(32));
  } else {
    __ movd(result_reg, value_reg);
  }
}


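// Reassembles a double from its two raw 32-bit halves: the high word is
// shifted into the top of the XMM register with psllq and the low word is
// OR-ed in; this is the inverse of the HIGH/LOW extraction in DoDoubleBits
// above.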
void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  XMMRegister result_reg = ToDoubleRegister(instr->result());
  XMMRegister xmm_scratch = double_scratch0();
  __ movd(result_reg, hi_reg);
  __ psllq(result_reg, 32);
  __ movd(xmm_scratch, lo_reg);
  __ orps(result_reg, xmm_scratch);
}


void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate V8_FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredAllocate(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
    } else {
      __ jmp(deferred->entry());
    }
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

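  // If requested, pre-fill the new object with one-pointer filler maps so the
  // heap stays iterable before the real field values are written; temp counts
  // words down from size-in-words minus one, leaving slot 0 (the map word)
  // untouched.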
  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ movl(temp, Immediate((size / kPointerSize) - 1));
    } else {
      temp = ToRegister(instr->size());
      __ sarp(temp, Immediate(kPointerSizeLog2));
      __ decl(temp);
    }
    Label loop;
    __ bind(&loop);
    __ Move(FieldOperand(result, temp, times_pointer_size, 0),
            isolate()->factory()->one_pointer_filler_map());
    __ decl(temp);
    __ j(not_zero, &loop);
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    __ Integer32ToSmi(size, size);
    __ Push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Push(Smi::FromInt(size));
  }

  int flags = 0;
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(rax));
  __ Push(rax);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  Label materialized;
  // Registers will be used as follows:
  // rcx = literals array.
  // rbx = regexp literal.
  // rax = regexp literal clone.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ Move(rcx, instr->hydrogen()->literals());
  __ movp(rbx, FieldOperand(rcx, literal_offset));
  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &materialized, Label::kNear);

  // Create regexp literal using runtime function
  // Result will be in rax.
  __ Push(rcx);
  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
  __ Push(instr->hydrogen()->pattern());
  __ Push(instr->hydrogen()->flags());
  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
  __ movp(rbx, rax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated, Label::kNear);

  __ bind(&runtime_allocate);
  __ Push(rbx);
  __ Push(Smi::FromInt(size));
  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
  __ Pop(rbx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ movp(rdx, FieldOperand(rbx, i));
    __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
    __ movp(FieldOperand(rax, i), rdx);
    __ movp(FieldOperand(rax, i + kPointerSize), rcx);
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
    __ movp(FieldOperand(rax, size - kPointerSize), rdx);
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(),
                            instr->hydrogen()->strict_mode(),
                            instr->hydrogen()->is_generator());
    __ Move(rbx, instr->hydrogen()->shared_info());
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ Push(rsi);
    __ Push(instr->hydrogen()->shared_info());
    __ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
                            Heap::kFalseValueRootIndex);
    CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  LOperand* input = instr->value();
  EmitPushTaggedOperand(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  ASSERT(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    __ Push(ToHandle(LConstantOperand::cast(operand)));
  } else if (operand->IsRegister()) {
    __ Push(ToRegister(operand));
  } else {
    __ Push(ToOperand(operand));
  }
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Condition final_branch_condition = EmitTypeofIs(instr, input);
  if (final_branch_condition != no_condition) {
    EmitBranch(instr, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Handle<String> type_name = instr->type_literal();
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);
  int next_block = GetNextEmittedBlock();

  Label::Distance true_distance = left_block == next_block ? Label::kNear
                                                           : Label::kFar;
  Label::Distance false_distance = right_block == next_block ? Label::kNear
                                                             : Label::kFar;
  Condition final_branch_condition = no_condition;
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label, true_distance);
    __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);

    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label, false_distance);
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = equal;

  } else if (FLAG_harmony_typeof &&
             String::Equals(type_name, factory->null_string())) {
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for undetectable objects => true.
    __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = not_zero;

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label, true_distance);
    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    if (!FLAG_harmony_typeof) {
      __ CompareRoot(input, Heap::kNullValueRootIndex);
      __ j(equal, true_label, true_distance);
    }
    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
    __ j(below, false_label, false_distance);
    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ j(above, false_label, false_distance);
    // Check for undetectable objects => false.
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else {
    __ jmp(false_label, false_distance);
  }

  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->temp());

  EmitIsConstructCall(temp);
  EmitBranch(instr, equal);
}


void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ movp(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(not_equal, &check_frame_marker, Label::kNear);
  __ movp(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Smi::FromInt(StackFrame::CONSTRUCT));
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(no_condition, instr->environment(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
  RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(above_equal, &done, Label::kNear);

    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(rsi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(equal, instr->environment());

  Register null_value = rdi;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmpp(rax, null_value);
  DeoptimizeIf(equal, instr->environment());

  Condition cc = masm()->CheckSmi(rax);
  DeoptimizeIf(cc, instr->environment());

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
  DeoptimizeIf(below_equal, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);

  __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ Push(rax);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kMetaMapRootIndex);
  DeoptimizeIf(not_equal, instr->environment());
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Cmp(result, Smi::FromInt(0));
  __ j(not_equal, &load_cache, Label::kNear);
  __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ movp(result,
          FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ movp(result,
          FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  Condition cc = masm()->CheckSmi(result);
  DeoptimizeIf(cc, instr->environment());
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmpp(ToRegister(instr->map()),
          FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object);
  __ Push(index);
  __ xorp(rsi, rsi);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(object, rax);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          object_(object),
          index_(index) {
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LLoadFieldByIndex* instr_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);

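  // The index smi carries two extra pieces of information: bit 0 of the
  // untagged value marks a mutable heap number that must be boxed in deferred
  // code, and once that bit is shifted out a negative index selects the
  // out-of-object property array while a non-negative index selects an
  // in-object field.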
  Label out_of_object, done;
  __ Move(kScratchRegister, Smi::FromInt(1));
  __ testp(index, kScratchRegister);
  __ j(not_zero, deferred->entry());

  __ sarp(index, Immediate(1));

  __ SmiToInteger32(index, index);
  __ cmpl(index, Immediate(0));
  __ j(less, &out_of_object, Label::kNear);
  __ movp(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ negl(index);
  // Index is now equal to out of object property index plus 1.
  __ movp(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), context);
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ Push(ToRegister(instr->function()));
  CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64