1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
9 #include "src/base/bits.h"
10 #include "src/code-factory.h"
11 #include "src/code-stubs.h"
12 #include "src/cpu-profiler.h"
13 #include "src/hydrogen-osr.h"
14 #include "src/ic/ic.h"
15 #include "src/ic/stub-cache.h"
16 #include "src/x64/lithium-codegen-x64.h"
22 // When invoking builtins, we need to record the safepoint in the middle of
23 // the invoke instruction sequence generated by the macro assembler.
24 class SafepointGenerator final : public CallWrapper {
26 SafepointGenerator(LCodeGen* codegen,
27 LPointerMap* pointers,
28 Safepoint::DeoptMode mode)
32 virtual ~SafepointGenerator() {}
34 void BeforeCall(int call_size) const override {}
36 void AfterCall() const override {
37 codegen_->RecordSafepoint(pointers_, deopt_mode_);
42 LPointerMap* pointers_;
43 Safepoint::DeoptMode deopt_mode_;
49 bool LCodeGen::GenerateCode() {
50 LPhase phase("Z_Code generation", chunk());
54 // Open a frame scope to indicate that there is a frame on the stack. The
55 // MANUAL indicates that the scope shouldn't actually generate code to set up
56 // the frame (that is done in GeneratePrologue).
57 FrameScope frame_scope(masm_, StackFrame::MANUAL);
59 return GeneratePrologue() &&
61 GenerateDeferredCode() &&
62 GenerateJumpTable() &&
63 GenerateSafepointTable();
67 void LCodeGen::FinishCode(Handle<Code> code) {
69 code->set_stack_slots(GetStackSlotCount());
70 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
71 PopulateDeoptimizationData(code);
76 void LCodeGen::MakeSureStackPagesMapped(int offset) {
77 const int kPageSize = 4 * KB;
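// Editorial note (hedged): the loop below stores rax into one word per 4 KB
// page of the newly reserved stack area, walking from the top down. This is
// presumably done so the OS has mapped every stack page before generated code
// writes into the area, since some platforms fault when the stack is touched
// far below the lowest page accessed so far.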
78 for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
79 __ movp(Operand(rsp, offset), rax);
85 void LCodeGen::SaveCallerDoubles() {
86 DCHECK(info()->saves_caller_doubles());
87 DCHECK(NeedsEagerFrame());
88 Comment(";;; Save clobbered callee double registers");
90 BitVector* doubles = chunk()->allocated_double_registers();
91 BitVector::Iterator save_iterator(doubles);
92 while (!save_iterator.Done()) {
93 __ movsd(MemOperand(rsp, count * kDoubleSize),
94 XMMRegister::FromAllocationIndex(save_iterator.Current()));
95 save_iterator.Advance();
101 void LCodeGen::RestoreCallerDoubles() {
102 DCHECK(info()->saves_caller_doubles());
103 DCHECK(NeedsEagerFrame());
104 Comment(";;; Restore clobbered callee double registers");
105 BitVector* doubles = chunk()->allocated_double_registers();
106 BitVector::Iterator save_iterator(doubles);
108 while (!save_iterator.Done()) {
109 __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
110 MemOperand(rsp, count * kDoubleSize));
111 save_iterator.Advance();
117 bool LCodeGen::GeneratePrologue() {
118 DCHECK(is_generating());
120 if (info()->IsOptimizing()) {
121 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
124 if (strlen(FLAG_stop_at) > 0 &&
125 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
130 // Sloppy mode functions need to replace the receiver with the global proxy
131 // when called as functions (without an explicit receiver object).
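// Illustrative example (not from the original source): in sloppy mode,
//   function f() { return this; }
//   f();  // called without a receiver
// must observe the global proxy as |this|, so the check below patches the
// receiver slot whenever it holds undefined.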
132 if (is_sloppy(info()->language_mode()) && info()->MayUseThis() &&
133 !info()->is_native() && info()->scope()->has_this_declaration()) {
135 StackArgumentsAccessor args(rsp, scope()->num_parameters());
136 __ movp(rcx, args.GetReceiverOperand());
138 __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
139 __ j(not_equal, &ok, Label::kNear);
141 __ movp(rcx, GlobalObjectOperand());
142 __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
144 __ movp(args.GetReceiverOperand(), rcx);
150 info()->set_prologue_offset(masm_->pc_offset());
151 if (NeedsEagerFrame()) {
152 DCHECK(!frame_is_built_);
153 frame_is_built_ = true;
154 if (info()->IsStub()) {
157 __ Prologue(info()->IsCodePreAgingActive());
159 info()->AddNoFrameRange(0, masm_->pc_offset());
162 // Reserve space for the stack slots needed by the code.
163 int slots = GetStackSlotCount();
165 if (FLAG_debug_code) {
166 __ subp(rsp, Immediate(slots * kPointerSize));
168 MakeSureStackPagesMapped(slots * kPointerSize);
172 __ Set(kScratchRegister, kSlotsZapValue);
175 __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
178 __ j(not_zero, &loop);
181 __ subp(rsp, Immediate(slots * kPointerSize));
183 MakeSureStackPagesMapped(slots * kPointerSize);
187 if (info()->saves_caller_doubles()) {
192 // Possibly allocate a local context.
193 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
194 if (heap_slots > 0) {
195 Comment(";;; Allocate local context");
196 bool need_write_barrier = true;
197 // Argument to NewContext is the function, which is still in rdi.
198 DCHECK(!info()->scope()->is_script_scope());
199 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
200 FastNewContextStub stub(isolate(), heap_slots);
202 // Result of FastNewContextStub is always in new space.
203 need_write_barrier = false;
206 __ CallRuntime(Runtime::kNewFunctionContext, 1);
208 RecordSafepoint(Safepoint::kNoLazyDeopt);
209 // Context is returned in rax. It replaces the context passed to us.
210 // It's saved in the stack and kept live in rsi.
212 __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);
214 // Copy any necessary parameters into the context.
215 int num_parameters = scope()->num_parameters();
216 int first_parameter = scope()->has_this_declaration() ? -1 : 0;
217 for (int i = first_parameter; i < num_parameters; i++) {
218 Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
219 if (var->IsContextSlot()) {
220 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
221 (num_parameters - 1 - i) * kPointerSize;
222 // Load parameter from stack.
223 __ movp(rax, Operand(rbp, parameter_offset));
224 // Store it in the context.
225 int context_offset = Context::SlotOffset(var->index());
226 __ movp(Operand(rsi, context_offset), rax);
227 // Update the write barrier. This clobbers rax and rbx.
228 if (need_write_barrier) {
229 __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
230 } else if (FLAG_debug_code) {
232 __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear);
233 __ Abort(kExpectedNewSpaceObject);
238 Comment(";;; End allocate local context");
242 if (FLAG_trace && info()->IsOptimizing()) {
243 __ CallRuntime(Runtime::kTraceEnter, 0);
245 return !is_aborted();
249 void LCodeGen::GenerateOsrPrologue() {
250 // Generate the OSR entry prologue at the first unknown OSR value, or if there
251 // are none, at the OSR entrypoint instruction.
252 if (osr_pc_offset_ >= 0) return;
254 osr_pc_offset_ = masm()->pc_offset();
256 // Adjust the frame size, subsuming the unoptimized frame into the
257 // optimized frame.
258 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
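// Worked example (editorial): if the optimized code needs 10 spill slots and
// the unoptimized frame being subsumed already accounts for 6 of them, only
// 4 * kPointerSize bytes are subtracted from rsp here.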
260 __ subp(rsp, Immediate(slots * kPointerSize));
264 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
265 if (instr->IsCall()) {
266 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
268 if (!instr->IsLazyBailout() && !instr->IsGap()) {
269 safepoints_.BumpLastLazySafepointIndex();
274 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
275 if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() &&
276 instr->hydrogen_value()->representation().IsInteger32() &&
277 instr->result()->IsRegister()) {
278 __ AssertZeroExtended(ToRegister(instr->result()));
281 if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
282 // We sign extend the dehoisted key at the definition point when the pointer
283 // size is 64-bit. For x32 port, we sign extend the dehoisted key at the use
284 // points and MustSignExtendResult is always false. We can't use
285 // STATIC_ASSERT here as the pointer size is 32-bit for x32.
286 DCHECK(kPointerSize == kInt64Size);
287 if (instr->result()->IsRegister()) {
288 Register result_reg = ToRegister(instr->result());
289 __ movsxlq(result_reg, result_reg);
291 // Sign extend the 32-bit result in the stack slot.
292 DCHECK(instr->result()->IsStackSlot());
293 Operand src = ToOperand(instr->result());
294 __ movsxlq(kScratchRegister, src);
295 __ movq(src, kScratchRegister);
301 bool LCodeGen::GenerateJumpTable() {
302 if (jump_table_.length() == 0) return !is_aborted();
305 Comment(";;; -------------------- Jump table --------------------");
306 for (int i = 0; i < jump_table_.length(); i++) {
307 Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
308 __ bind(&table_entry->label);
309 Address entry = table_entry->address;
310 DeoptComment(table_entry->deopt_info);
311 if (table_entry->needs_frame) {
312 DCHECK(!info()->saves_caller_doubles());
313 __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
314 __ call(&needs_frame);
316 if (info()->saves_caller_doubles()) {
317 DCHECK(info()->IsStub());
318 RestoreCallerDoubles();
320 __ call(entry, RelocInfo::RUNTIME_ENTRY);
322 info()->LogDeoptCallPosition(masm()->pc_offset(),
323 table_entry->deopt_info.inlining_id);
326 if (needs_frame.is_linked()) {
327 __ bind(&needs_frame);
329 //   4: return address  <-- rsp
335 // Reserve space for context and stub marker.
336 __ subp(rsp, Immediate(2 * kPointerSize));
337 __ Push(MemOperand(rsp, 2 * kPointerSize)); // Copy return address.
338 __ Push(kScratchRegister); // Save entry address for ret(0)
345 //   0: entry address  <-- rsp
348 // Remember context pointer.
349 __ movp(kScratchRegister,
350 MemOperand(rbp, StandardFrameConstants::kContextOffset));
351 // Save context pointer into the stack frame.
352 __ movp(MemOperand(rsp, 3 * kPointerSize), kScratchRegister);
354 // Create a stack frame.
355 __ movp(MemOperand(rsp, 4 * kPointerSize), rbp);
356 __ leap(rbp, MemOperand(rsp, 4 * kPointerSize));
358 // This variant of deopt can only be used with stubs. Since we don't
359 // have a function pointer to install in the stack frame that we're
360 // building, install a special marker there instead.
361 DCHECK(info()->IsStub());
362 __ Move(MemOperand(rsp, 2 * kPointerSize), Smi::FromInt(StackFrame::STUB));
369 //   0: entry address  <-- rsp
374 return !is_aborted();
378 bool LCodeGen::GenerateDeferredCode() {
379 DCHECK(is_generating());
380 if (deferred_.length() > 0) {
381 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
382 LDeferredCode* code = deferred_[i];
385 instructions_->at(code->instruction_index())->hydrogen_value();
386 RecordAndWritePosition(
387 chunk()->graph()->SourcePositionToScriptPosition(value->position()));
389 Comment(";;; <@%d,#%d> "
390 "-------------------- Deferred %s --------------------",
391 code->instruction_index(),
392 code->instr()->hydrogen_value()->id(),
393 code->instr()->Mnemonic());
394 __ bind(code->entry());
395 if (NeedsDeferredFrame()) {
396 Comment(";;; Build frame");
397 DCHECK(!frame_is_built_);
398 DCHECK(info()->IsStub());
399 frame_is_built_ = true;
400 // Build the frame in such a way that rsi isn't trashed.
401 __ pushq(rbp); // Caller's frame pointer.
402 __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
403 __ Push(Smi::FromInt(StackFrame::STUB));
404 __ leap(rbp, Operand(rsp, 2 * kPointerSize));
405 Comment(";;; Deferred code");
408 if (NeedsDeferredFrame()) {
409 __ bind(code->done());
410 Comment(";;; Destroy frame");
411 DCHECK(frame_is_built_);
412 frame_is_built_ = false;
416 __ jmp(code->exit());
420 // Deferred code is the last part of the instruction sequence. Mark
421 // the generated code as done unless we bailed out.
422 if (!is_aborted()) status_ = DONE;
423 return !is_aborted();
427 bool LCodeGen::GenerateSafepointTable() {
429 safepoints_.Emit(masm(), GetStackSlotCount());
430 return !is_aborted();
434 Register LCodeGen::ToRegister(int index) const {
435 return Register::FromAllocationIndex(index);
439 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
440 return XMMRegister::FromAllocationIndex(index);
444 Register LCodeGen::ToRegister(LOperand* op) const {
445 DCHECK(op->IsRegister());
446 return ToRegister(op->index());
450 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
451 DCHECK(op->IsDoubleRegister());
452 return ToDoubleRegister(op->index());
456 bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
457 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
461 bool LCodeGen::IsExternalConstant(LConstantOperand* op) const {
462 return chunk_->LookupLiteralRepresentation(op).IsExternal();
466 bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
467 return op->IsConstantOperand() &&
468 chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
472 bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
473 return chunk_->LookupLiteralRepresentation(op).IsSmi();
477 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
478 return ToRepresentation(op, Representation::Integer32());
482 int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
483 const Representation& r) const {
484 HConstant* constant = chunk_->LookupConstant(op);
485 int32_t value = constant->Integer32Value();
486 if (r.IsInteger32()) return value;
487 DCHECK(SmiValuesAre31Bits() && r.IsSmiOrTagged());
488 return static_cast<int32_t>(reinterpret_cast<intptr_t>(Smi::FromInt(value)));
492 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
493 HConstant* constant = chunk_->LookupConstant(op);
494 return Smi::FromInt(constant->Integer32Value());
498 double LCodeGen::ToDouble(LConstantOperand* op) const {
499 HConstant* constant = chunk_->LookupConstant(op);
500 DCHECK(constant->HasDoubleValue());
501 return constant->DoubleValue();
505 ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
506 HConstant* constant = chunk_->LookupConstant(op);
507 DCHECK(constant->HasExternalReferenceValue());
508 return constant->ExternalReferenceValue();
512 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
513 HConstant* constant = chunk_->LookupConstant(op);
514 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
515 return constant->handle(isolate());
519 static int ArgumentsOffsetWithoutFrame(int index) {
521 return -(index + 1) * kPointerSize + kPCOnStackSize;
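// Worked example (editorial, assuming kPointerSize == kPCOnStackSize == 8 on
// x64): a parameter slot at index -1 maps to -(-1 + 1) * 8 + 8 == 8, i.e. the
// word just above the return address at [rsp]; index -2 maps to [rsp + 16].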
525 Operand LCodeGen::ToOperand(LOperand* op) const {
526 // Does not handle registers. In X64 assembler, plain registers are not
527 // representable as an Operand.
528 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
529 if (NeedsEagerFrame()) {
530 return Operand(rbp, StackSlotOffset(op->index()));
532 // Without an eager frame, retrieve the parameter relative to the
533 // stack pointer.
534 return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
539 void LCodeGen::WriteTranslation(LEnvironment* environment,
540 Translation* translation) {
541 if (environment == NULL) return;
543 // The translation includes one command per value in the environment.
544 int translation_size = environment->translation_size();
546 WriteTranslation(environment->outer(), translation);
547 WriteTranslationFrame(environment, translation);
549 int object_index = 0;
550 int dematerialized_index = 0;
551 for (int i = 0; i < translation_size; ++i) {
552 LOperand* value = environment->values()->at(i);
554 environment, translation, value, environment->HasTaggedValueAt(i),
555 environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
560 void LCodeGen::AddToTranslation(LEnvironment* environment,
561 Translation* translation,
565 int* object_index_pointer,
566 int* dematerialized_index_pointer) {
567 if (op == LEnvironment::materialization_marker()) {
568 int object_index = (*object_index_pointer)++;
569 if (environment->ObjectIsDuplicateAt(object_index)) {
570 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
571 translation->DuplicateObject(dupe_of);
574 int object_length = environment->ObjectLengthAt(object_index);
575 if (environment->ObjectIsArgumentsAt(object_index)) {
576 translation->BeginArgumentsObject(object_length);
578 translation->BeginCapturedObject(object_length);
580 int dematerialized_index = *dematerialized_index_pointer;
581 int env_offset = environment->translation_size() + dematerialized_index;
582 *dematerialized_index_pointer += object_length;
583 for (int i = 0; i < object_length; ++i) {
584 LOperand* value = environment->values()->at(env_offset + i);
585 AddToTranslation(environment,
588 environment->HasTaggedValueAt(env_offset + i),
589 environment->HasUint32ValueAt(env_offset + i),
590 object_index_pointer,
591 dematerialized_index_pointer);
596 if (op->IsStackSlot()) {
598 translation->StoreStackSlot(op->index());
599 } else if (is_uint32) {
600 translation->StoreUint32StackSlot(op->index());
602 translation->StoreInt32StackSlot(op->index());
604 } else if (op->IsDoubleStackSlot()) {
605 translation->StoreDoubleStackSlot(op->index());
606 } else if (op->IsRegister()) {
607 Register reg = ToRegister(op);
609 translation->StoreRegister(reg);
610 } else if (is_uint32) {
611 translation->StoreUint32Register(reg);
613 translation->StoreInt32Register(reg);
615 } else if (op->IsDoubleRegister()) {
616 XMMRegister reg = ToDoubleRegister(op);
617 translation->StoreDoubleRegister(reg);
618 } else if (op->IsConstantOperand()) {
619 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
620 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
621 translation->StoreLiteral(src_index);
628 void LCodeGen::CallCodeGeneric(Handle<Code> code,
629 RelocInfo::Mode mode,
631 SafepointMode safepoint_mode,
633 DCHECK(instr != NULL);
635 RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
637 // Signal that we don't inline smi code before these stubs in the
638 // optimizing code generator.
639 if (code->kind() == Code::BINARY_OP_IC ||
640 code->kind() == Code::COMPARE_IC) {
646 void LCodeGen::CallCode(Handle<Code> code,
647 RelocInfo::Mode mode,
648 LInstruction* instr) {
649 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
653 void LCodeGen::CallRuntime(const Runtime::Function* function,
656 SaveFPRegsMode save_doubles) {
657 DCHECK(instr != NULL);
658 DCHECK(instr->HasPointerMap());
660 __ CallRuntime(function, num_arguments, save_doubles);
662 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
666 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
667 if (context->IsRegister()) {
668 if (!ToRegister(context).is(rsi)) {
669 __ movp(rsi, ToRegister(context));
671 } else if (context->IsStackSlot()) {
672 __ movp(rsi, ToOperand(context));
673 } else if (context->IsConstantOperand()) {
674 HConstant* constant =
675 chunk_->LookupConstant(LConstantOperand::cast(context));
676 __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
684 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
688 LoadContextFromDeferred(context);
690 __ CallRuntimeSaveDoubles(id);
691 RecordSafepointWithRegisters(
692 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
696 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
697 Safepoint::DeoptMode mode) {
698 environment->set_has_been_used();
699 if (!environment->HasBeenRegistered()) {
700 // Physical stack frame layout:
701 // -x ............. -4 0 ..................................... y
702 // [incoming arguments] [spill slots] [pushed outgoing arguments]
704 // Layout of the environment:
705 // 0 ..................................................... size-1
706 // [parameters] [locals] [expression stack including arguments]
708 // Layout of the translation:
709 // 0 ........................................................ size - 1 + 4
710 // [expression stack including arguments] [locals] [4 words] [parameters]
711 // |>------------ translation_size ------------<|
714 int jsframe_count = 0;
715 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
717 if (e->frame_type() == JS_FUNCTION) {
721 Translation translation(&translations_, frame_count, jsframe_count, zone());
722 WriteTranslation(environment, &translation);
723 int deoptimization_index = deoptimizations_.length();
724 int pc_offset = masm()->pc_offset();
725 environment->Register(deoptimization_index,
727 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
728 deoptimizations_.Add(environment, environment->zone());
733 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
734 Deoptimizer::DeoptReason deopt_reason,
735 Deoptimizer::BailoutType bailout_type) {
736 LEnvironment* environment = instr->environment();
737 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
738 DCHECK(environment->HasBeenRegistered());
739 int id = environment->deoptimization_index();
740 DCHECK(info()->IsOptimizing() || info()->IsStub());
742 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
744 Abort(kBailoutWasNotPrepared);
748 if (DeoptEveryNTimes()) {
749 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
753 Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
754 __ movl(rax, count_operand);
755 __ subl(rax, Immediate(1));
756 __ j(not_zero, &no_deopt, Label::kNear);
757 if (FLAG_trap_on_deopt) __ int3();
758 __ movl(rax, Immediate(FLAG_deopt_every_n_times));
759 __ movl(count_operand, rax);
762 DCHECK(frame_is_built_);
763 __ call(entry, RelocInfo::RUNTIME_ENTRY);
765 __ movl(count_operand, rax);
770 if (info()->ShouldTrapOnDeopt()) {
772 if (cc != no_condition) {
773 __ j(NegateCondition(cc), &done, Label::kNear);
779 Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
781 DCHECK(info()->IsStub() || frame_is_built_);
782 // Go through jump table if we need to handle condition, build frame, or
783 // restore caller doubles.
784 if (cc == no_condition && frame_is_built_ &&
785 !info()->saves_caller_doubles()) {
786 DeoptComment(deopt_info);
787 __ call(entry, RelocInfo::RUNTIME_ENTRY);
788 info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
790 Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
792 // We often have several deopts to the same entry; reuse the last
793 // jump entry if this is the case.
794 if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
795 jump_table_.is_empty() ||
796 !table_entry.IsEquivalentTo(jump_table_.last())) {
797 jump_table_.Add(table_entry, zone());
799 if (cc == no_condition) {
800 __ jmp(&jump_table_.last().label);
802 __ j(cc, &jump_table_.last().label);
808 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
809 Deoptimizer::DeoptReason deopt_reason) {
810 Deoptimizer::BailoutType bailout_type = info()->IsStub()
812 : Deoptimizer::EAGER;
813 DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
817 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
818 int length = deoptimizations_.length();
819 if (length == 0) return;
820 Handle<DeoptimizationInputData> data =
821 DeoptimizationInputData::New(isolate(), length, TENURED);
823 Handle<ByteArray> translations =
824 translations_.CreateByteArray(isolate()->factory());
825 data->SetTranslationByteArray(*translations);
826 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
827 data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
828 if (info_->IsOptimizing()) {
829 // Reference to shared function info does not change between phases.
830 AllowDeferredHandleDereference allow_handle_dereference;
831 data->SetSharedFunctionInfo(*info_->shared_info());
833 data->SetSharedFunctionInfo(Smi::FromInt(0));
835 data->SetWeakCellCache(Smi::FromInt(0));
837 Handle<FixedArray> literals =
838 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
839 { AllowDeferredHandleDereference copy_handles;
840 for (int i = 0; i < deoptimization_literals_.length(); i++) {
841 literals->set(i, *deoptimization_literals_[i]);
843 data->SetLiteralArray(*literals);
846 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
847 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
849 // Populate the deoptimization entries.
850 for (int i = 0; i < length; i++) {
851 LEnvironment* env = deoptimizations_[i];
852 data->SetAstId(i, env->ast_id());
853 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
854 data->SetArgumentsStackHeight(i,
855 Smi::FromInt(env->arguments_stack_height()));
856 data->SetPc(i, Smi::FromInt(env->pc_offset()));
858 code->set_deoptimization_data(*data);
862 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
863 DCHECK_EQ(0, deoptimization_literals_.length());
864 for (auto function : chunk()->inlined_functions()) {
865 DefineDeoptimizationLiteral(function);
867 inlined_function_count_ = deoptimization_literals_.length();
871 void LCodeGen::RecordSafepointWithLazyDeopt(
872 LInstruction* instr, SafepointMode safepoint_mode, int argc) {
873 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
874 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
876 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
877 RecordSafepointWithRegisters(
878 instr->pointer_map(), argc, Safepoint::kLazyDeopt);
883 void LCodeGen::RecordSafepoint(
884 LPointerMap* pointers,
885 Safepoint::Kind kind,
887 Safepoint::DeoptMode deopt_mode) {
888 DCHECK(kind == expected_safepoint_kind_);
890 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
892 Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
893 kind, arguments, deopt_mode);
894 for (int i = 0; i < operands->length(); i++) {
895 LOperand* pointer = operands->at(i);
896 if (pointer->IsStackSlot()) {
897 safepoint.DefinePointerSlot(pointer->index(), zone());
898 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
899 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
905 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
906 Safepoint::DeoptMode deopt_mode) {
907 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
911 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
912 LPointerMap empty_pointers(zone());
913 RecordSafepoint(&empty_pointers, deopt_mode);
917 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
919 Safepoint::DeoptMode deopt_mode) {
920 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
924 void LCodeGen::RecordAndWritePosition(int position) {
925 if (position == RelocInfo::kNoPosition) return;
926 masm()->positions_recorder()->RecordPosition(position);
927 masm()->positions_recorder()->WriteRecordedPositions();
931 static const char* LabelType(LLabel* label) {
932 if (label->is_loop_header()) return " (loop header)";
933 if (label->is_osr_entry()) return " (OSR entry)";
938 void LCodeGen::DoLabel(LLabel* label) {
939 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
940 current_instruction_,
941 label->hydrogen_value()->id(),
944 __ bind(label->label());
945 current_block_ = label->block_id();
950 void LCodeGen::DoParallelMove(LParallelMove* move) {
951 resolver_.Resolve(move);
955 void LCodeGen::DoGap(LGap* gap) {
956 for (int i = LGap::FIRST_INNER_POSITION;
957 i <= LGap::LAST_INNER_POSITION;
959 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
960 LParallelMove* move = gap->GetParallelMove(inner_pos);
961 if (move != NULL) DoParallelMove(move);
966 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
971 void LCodeGen::DoParameter(LParameter* instr) {
976 void LCodeGen::DoCallStub(LCallStub* instr) {
977 DCHECK(ToRegister(instr->context()).is(rsi));
978 DCHECK(ToRegister(instr->result()).is(rax));
979 switch (instr->hydrogen()->major_key()) {
980 case CodeStub::RegExpExec: {
981 RegExpExecStub stub(isolate());
982 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
985 case CodeStub::SubString: {
986 SubStringStub stub(isolate());
987 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
990 case CodeStub::StringCompare: {
991 StringCompareStub stub(isolate());
992 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1001 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1002 GenerateOsrPrologue();
1006 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
1007 Register dividend = ToRegister(instr->dividend());
1008 int32_t divisor = instr->divisor();
1009 DCHECK(dividend.is(ToRegister(instr->result())));
1011 // Theoretically, a variation of the branch-free code for integer division by
1012 // a power of 2 (calculating the remainder via an additional multiplication
1013 // (which gets simplified to an 'and') and subtraction) should be faster, and
1014 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1015 // indicate that positive dividends are heavily favored, so the branching
1016 // version performs better.
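// Worked example (editorial): for divisor == +/-8 the mask is 7. A negative
// dividend such as -13 is negated to 13, masked to 13 & 7 == 5, and negated
// back to -5, matching the JavaScript result -13 % 8 == -5 (the surrounding
// negl instructions are elided in this excerpt).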
1017 HMod* hmod = instr->hydrogen();
1018 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1019 Label dividend_is_not_negative, done;
1020 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1021 __ testl(dividend, dividend);
1022 __ j(not_sign, &dividend_is_not_negative, Label::kNear);
1023 // Note that this is correct even for kMinInt operands.
1025 __ andl(dividend, Immediate(mask));
1027 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1028 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1030 __ jmp(&done, Label::kNear);
1033 __ bind(&dividend_is_not_negative);
1034 __ andl(dividend, Immediate(mask));
1039 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1040 Register dividend = ToRegister(instr->dividend());
1041 int32_t divisor = instr->divisor();
1042 DCHECK(ToRegister(instr->result()).is(rax));
1045 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
1049 __ TruncatingDiv(dividend, Abs(divisor));
1050 __ imull(rdx, rdx, Immediate(Abs(divisor)));
1051 __ movl(rax, dividend);
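// Worked example (editorial): for dividend == -7 and |divisor| == 3,
// TruncatingDiv leaves the truncated quotient -2 in rdx, the imull above
// turns it into -6, and the final subtraction (elided here) yields
// -7 - (-6) == -1, the JavaScript value of -7 % 3.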
1054 // Check for negative zero.
1055 HMod* hmod = instr->hydrogen();
1056 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1057 Label remainder_not_zero;
1058 __ j(not_zero, &remainder_not_zero, Label::kNear);
1059 __ cmpl(dividend, Immediate(0));
1060 DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
1061 __ bind(&remainder_not_zero);
1066 void LCodeGen::DoModI(LModI* instr) {
1067 HMod* hmod = instr->hydrogen();
1069 Register left_reg = ToRegister(instr->left());
1070 DCHECK(left_reg.is(rax));
1071 Register right_reg = ToRegister(instr->right());
1072 DCHECK(!right_reg.is(rax));
1073 DCHECK(!right_reg.is(rdx));
1074 Register result_reg = ToRegister(instr->result());
1075 DCHECK(result_reg.is(rdx));
1078 // Check for x % 0, idiv would signal a divide error. We have to
1079 // deopt in this case because we can't return a NaN.
1080 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1081 __ testl(right_reg, right_reg);
1082 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
1085 // Check for kMinInt % -1, idiv would signal a divide error. We
1086 // have to deopt if we care about -0, because we can't return that.
1087 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1088 Label no_overflow_possible;
1089 __ cmpl(left_reg, Immediate(kMinInt));
1090 __ j(not_zero, &no_overflow_possible, Label::kNear);
1091 __ cmpl(right_reg, Immediate(-1));
1092 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1093 DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
1095 __ j(not_equal, &no_overflow_possible, Label::kNear);
1096 __ Set(result_reg, 0);
1097 __ jmp(&done, Label::kNear);
1099 __ bind(&no_overflow_possible);
1102 // Sign extend dividend in eax into edx:eax, since we are using only the low
1103 // 32 bits of the values.
1106 // If we care about -0, test if the dividend is <0 and the result is 0.
1107 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1108 Label positive_left;
1109 __ testl(left_reg, left_reg);
1110 __ j(not_sign, &positive_left, Label::kNear);
1111 __ idivl(right_reg);
1112 __ testl(result_reg, result_reg);
1113 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1114 __ jmp(&done, Label::kNear);
1115 __ bind(&positive_left);
1117 __ idivl(right_reg);
1122 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1123 Register dividend = ToRegister(instr->dividend());
1124 int32_t divisor = instr->divisor();
1125 DCHECK(dividend.is(ToRegister(instr->result())));
1127 // If the divisor is positive, things are easy: There can be no deopts and we
1128 // can simply do an arithmetic right shift.
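// Worked example (editorial): an arithmetic right shift already floors,
// e.g. -7 >> 1 == -4 == floor(-7 / 2), so no fix-up is needed when the
// divisor is a positive power of two.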
1129 if (divisor == 1) return;
1130 int32_t shift = WhichPowerOf2Abs(divisor);
1132 __ sarl(dividend, Immediate(shift));
1136 // If the divisor is negative, we have to negate and handle edge cases.
1138 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1139 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1142 // Dividing by -1 is basically negation, unless we overflow.
1143 if (divisor == -1) {
1144 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1145 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1150 // If the negation could not overflow, simply shifting is OK.
1151 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1152 __ sarl(dividend, Immediate(shift));
1156 Label not_kmin_int, done;
1157 __ j(no_overflow, &not_kmin_int, Label::kNear);
1158 __ movl(dividend, Immediate(kMinInt / divisor));
1159 __ jmp(&done, Label::kNear);
1160 __ bind(&not_kmin_int);
1161 __ sarl(dividend, Immediate(shift));
1166 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1167 Register dividend = ToRegister(instr->dividend());
1168 int32_t divisor = instr->divisor();
1169 DCHECK(ToRegister(instr->result()).is(rdx));
1172 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
1176 // Check for (0 / -x) that will produce negative zero.
1177 HMathFloorOfDiv* hdiv = instr->hydrogen();
1178 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1179 __ testl(dividend, dividend);
1180 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1183 // Easy case: We need no dynamic check for the dividend and the flooring
1184 // division is the same as the truncating division.
1185 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1186 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1187 __ TruncatingDiv(dividend, Abs(divisor));
1188 if (divisor < 0) __ negl(rdx);
1192 // In the general case we may need to adjust before and after the truncating
1193 // division to get a flooring division.
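// Worked example (editorial): floor(-7 / 2) goes through the adjustment path
// below as trunc((-7 + 1) / 2) - 1 == -3 - 1 == -4; the final decrement of
// rdx is elided from this excerpt.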
1194 Register temp = ToRegister(instr->temp3());
1195 DCHECK(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
1196 Label needs_adjustment, done;
1197 __ cmpl(dividend, Immediate(0));
1198 __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
1199 __ TruncatingDiv(dividend, Abs(divisor));
1200 if (divisor < 0) __ negl(rdx);
1201 __ jmp(&done, Label::kNear);
1202 __ bind(&needs_adjustment);
1203 __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
1204 __ TruncatingDiv(temp, Abs(divisor));
1205 if (divisor < 0) __ negl(rdx);
1211 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1212 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1213 HBinaryOperation* hdiv = instr->hydrogen();
1214 Register dividend = ToRegister(instr->dividend());
1215 Register divisor = ToRegister(instr->divisor());
1216 Register remainder = ToRegister(instr->temp());
1217 Register result = ToRegister(instr->result());
1218 DCHECK(dividend.is(rax));
1219 DCHECK(remainder.is(rdx));
1220 DCHECK(result.is(rax));
1221 DCHECK(!divisor.is(rax));
1222 DCHECK(!divisor.is(rdx));
1225 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1226 __ testl(divisor, divisor);
1227 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
1230 // Check for (0 / -x) that will produce negative zero.
1231 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1232 Label dividend_not_zero;
1233 __ testl(dividend, dividend);
1234 __ j(not_zero, &dividend_not_zero, Label::kNear);
1235 __ testl(divisor, divisor);
1236 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
1237 __ bind(&dividend_not_zero);
1240 // Check for (kMinInt / -1).
1241 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1242 Label dividend_not_min_int;
1243 __ cmpl(dividend, Immediate(kMinInt));
1244 __ j(not_zero, &dividend_not_min_int, Label::kNear);
1245 __ cmpl(divisor, Immediate(-1));
1246 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
1247 __ bind(&dividend_not_min_int);
1250 // Sign extend to rdx (= remainder).
1255 __ testl(remainder, remainder);
1256 __ j(zero, &done, Label::kNear);
1257 __ xorl(remainder, divisor);
1258 __ sarl(remainder, Immediate(31));
1259 __ addl(result, remainder);
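// Worked example (editorial): for -7 / 2, idivl leaves quotient -3 in rax and
// remainder -1 in rdx. remainder ^ divisor is negative, the arithmetic shift
// by 31 turns it into -1, and the quotient becomes -3 + (-1) == -4 ==
// floor(-7 / 2). A zero remainder skips the adjustment entirely.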
1264 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1265 Register dividend = ToRegister(instr->dividend());
1266 int32_t divisor = instr->divisor();
1267 Register result = ToRegister(instr->result());
1268 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1269 DCHECK(!result.is(dividend));
1271 // Check for (0 / -x) that will produce negative zero.
1272 HDiv* hdiv = instr->hydrogen();
1273 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1274 __ testl(dividend, dividend);
1275 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1277 // Check for (kMinInt / -1).
1278 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1279 __ cmpl(dividend, Immediate(kMinInt));
1280 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
1282 // Deoptimize if remainder will not be 0.
1283 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1284 divisor != 1 && divisor != -1) {
1285 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1286 __ testl(dividend, Immediate(mask));
1287 DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
1289 __ Move(result, dividend);
1290 int32_t shift = WhichPowerOf2Abs(divisor);
1292 // The arithmetic shift is always OK, the 'if' is an optimization only.
1293 if (shift > 1) __ sarl(result, Immediate(31));
1294 __ shrl(result, Immediate(32 - shift));
1295 __ addl(result, dividend);
1296 __ sarl(result, Immediate(shift));
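// Worked example (editorial): for dividend == -13 and divisor == 4 (shift 2),
// the sarl by 31 produces the sign mask -1, the shrl by 30 turns it into the
// bias 3, -13 + 3 == -10, and -10 >> 2 == -3, the truncated quotient. For a
// non-negative dividend the bias is 0 and the final shift alone gives the
// quotient.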
1298 if (divisor < 0) __ negl(result);
1302 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1303 Register dividend = ToRegister(instr->dividend());
1304 int32_t divisor = instr->divisor();
1305 DCHECK(ToRegister(instr->result()).is(rdx));
1308 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
1312 // Check for (0 / -x) that will produce negative zero.
1313 HDiv* hdiv = instr->hydrogen();
1314 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1315 __ testl(dividend, dividend);
1316 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1319 __ TruncatingDiv(dividend, Abs(divisor));
1320 if (divisor < 0) __ negl(rdx);
1322 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1324 __ imull(rax, rax, Immediate(divisor));
1325 __ subl(rax, dividend);
1326 DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
1331 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1332 void LCodeGen::DoDivI(LDivI* instr) {
1333 HBinaryOperation* hdiv = instr->hydrogen();
1334 Register dividend = ToRegister(instr->dividend());
1335 Register divisor = ToRegister(instr->divisor());
1336 Register remainder = ToRegister(instr->temp());
1337 DCHECK(dividend.is(rax));
1338 DCHECK(remainder.is(rdx));
1339 DCHECK(ToRegister(instr->result()).is(rax));
1340 DCHECK(!divisor.is(rax));
1341 DCHECK(!divisor.is(rdx));
1344 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1345 __ testl(divisor, divisor);
1346 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
1349 // Check for (0 / -x) that will produce negative zero.
1350 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1351 Label dividend_not_zero;
1352 __ testl(dividend, dividend);
1353 __ j(not_zero, &dividend_not_zero, Label::kNear);
1354 __ testl(divisor, divisor);
1355 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
1356 __ bind(&dividend_not_zero);
1359 // Check for (kMinInt / -1).
1360 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1361 Label dividend_not_min_int;
1362 __ cmpl(dividend, Immediate(kMinInt));
1363 __ j(not_zero, &dividend_not_min_int, Label::kNear);
1364 __ cmpl(divisor, Immediate(-1));
1365 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
1366 __ bind(&dividend_not_min_int);
1369 // Sign extend to rdx (= remainder).
1373 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1374 // Deoptimize if remainder is not 0.
1375 __ testl(remainder, remainder);
1376 DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
1381 void LCodeGen::DoMulI(LMulI* instr) {
1382 Register left = ToRegister(instr->left());
1383 LOperand* right = instr->right();
1385 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1386 if (instr->hydrogen_value()->representation().IsSmi()) {
1387 __ movp(kScratchRegister, left);
1389 __ movl(kScratchRegister, left);
1394 instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1395 if (right->IsConstantOperand()) {
1396 int32_t right_value = ToInteger32(LConstantOperand::cast(right));
1397 if (right_value == -1) {
1399 } else if (right_value == 0) {
1400 __ xorl(left, left);
1401 } else if (right_value == 2) {
1402 __ addl(left, left);
1403 } else if (!can_overflow) {
1404 // If the multiplication is known to not overflow, we
1405 // can use operations that don't set the overflow flag
1406 // correctly.
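// Illustrative example (editorial): x * 5 is emitted as
// leal(left, Operand(left, left, times_4, 0)) and x * 8 as
// shll(left, Immediate(3)); these do not report multiplication overflow,
// which is acceptable because this path is only taken when overflow is
// impossible.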
1407 switch (right_value) {
1412 __ leal(left, Operand(left, left, times_2, 0));
1415 __ shll(left, Immediate(2));
1418 __ leal(left, Operand(left, left, times_4, 0));
1421 __ shll(left, Immediate(3));
1424 __ leal(left, Operand(left, left, times_8, 0));
1427 __ shll(left, Immediate(4));
1430 __ imull(left, left, Immediate(right_value));
1434 __ imull(left, left, Immediate(right_value));
1436 } else if (right->IsStackSlot()) {
1437 if (instr->hydrogen_value()->representation().IsSmi()) {
1438 __ SmiToInteger64(left, left);
1439 __ imulp(left, ToOperand(right));
1441 __ imull(left, ToOperand(right));
1444 if (instr->hydrogen_value()->representation().IsSmi()) {
1445 __ SmiToInteger64(left, left);
1446 __ imulp(left, ToRegister(right));
1448 __ imull(left, ToRegister(right));
1453 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1456 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1457 // Bail out if the result is supposed to be negative zero.
1459 if (instr->hydrogen_value()->representation().IsSmi()) {
1460 __ testp(left, left);
1462 __ testl(left, left);
1464 __ j(not_zero, &done, Label::kNear);
1465 if (right->IsConstantOperand()) {
1466 // Constant can't be represented as 32-bit Smi due to immediate size
1467 // limit.
1468 DCHECK(SmiValuesAre32Bits()
1469 ? !instr->hydrogen_value()->representation().IsSmi()
1470 : SmiValuesAre31Bits());
1471 if (ToInteger32(LConstantOperand::cast(right)) < 0) {
1472 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
1473 } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
1474 __ cmpl(kScratchRegister, Immediate(0));
1475 DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
1477 } else if (right->IsStackSlot()) {
1478 if (instr->hydrogen_value()->representation().IsSmi()) {
1479 __ orp(kScratchRegister, ToOperand(right));
1481 __ orl(kScratchRegister, ToOperand(right));
1483 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
1485 // Test the non-zero operand for negative sign.
1486 if (instr->hydrogen_value()->representation().IsSmi()) {
1487 __ orp(kScratchRegister, ToRegister(right));
1489 __ orl(kScratchRegister, ToRegister(right));
1491 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
1498 void LCodeGen::DoBitI(LBitI* instr) {
1499 LOperand* left = instr->left();
1500 LOperand* right = instr->right();
1501 DCHECK(left->Equals(instr->result()));
1502 DCHECK(left->IsRegister());
1504 if (right->IsConstantOperand()) {
1505 int32_t right_operand =
1506 ToRepresentation(LConstantOperand::cast(right),
1507 instr->hydrogen()->right()->representation());
1508 switch (instr->op()) {
1509 case Token::BIT_AND:
1510 __ andl(ToRegister(left), Immediate(right_operand));
1513 __ orl(ToRegister(left), Immediate(right_operand));
1515 case Token::BIT_XOR:
1516 if (right_operand == int32_t(~0)) {
1517 __ notl(ToRegister(left));
1519 __ xorl(ToRegister(left), Immediate(right_operand));
1526 } else if (right->IsStackSlot()) {
1527 switch (instr->op()) {
1528 case Token::BIT_AND:
1529 if (instr->IsInteger32()) {
1530 __ andl(ToRegister(left), ToOperand(right));
1532 __ andp(ToRegister(left), ToOperand(right));
1536 if (instr->IsInteger32()) {
1537 __ orl(ToRegister(left), ToOperand(right));
1539 __ orp(ToRegister(left), ToOperand(right));
1542 case Token::BIT_XOR:
1543 if (instr->IsInteger32()) {
1544 __ xorl(ToRegister(left), ToOperand(right));
1546 __ xorp(ToRegister(left), ToOperand(right));
1554 DCHECK(right->IsRegister());
1555 switch (instr->op()) {
1556 case Token::BIT_AND:
1557 if (instr->IsInteger32()) {
1558 __ andl(ToRegister(left), ToRegister(right));
1560 __ andp(ToRegister(left), ToRegister(right));
1564 if (instr->IsInteger32()) {
1565 __ orl(ToRegister(left), ToRegister(right));
1567 __ orp(ToRegister(left), ToRegister(right));
1570 case Token::BIT_XOR:
1571 if (instr->IsInteger32()) {
1572 __ xorl(ToRegister(left), ToRegister(right));
1574 __ xorp(ToRegister(left), ToRegister(right));
1585 void LCodeGen::DoShiftI(LShiftI* instr) {
1586 LOperand* left = instr->left();
1587 LOperand* right = instr->right();
1588 DCHECK(left->Equals(instr->result()));
1589 DCHECK(left->IsRegister());
1590 if (right->IsRegister()) {
1591 DCHECK(ToRegister(right).is(rcx));
1593 switch (instr->op()) {
1595 __ rorl_cl(ToRegister(left));
1598 __ sarl_cl(ToRegister(left));
1601 __ shrl_cl(ToRegister(left));
1602 if (instr->can_deopt()) {
1603 __ testl(ToRegister(left), ToRegister(left));
1604 DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
1608 __ shll_cl(ToRegister(left));
1615 int32_t value = ToInteger32(LConstantOperand::cast(right));
1616 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1617 switch (instr->op()) {
1619 if (shift_count != 0) {
1620 __ rorl(ToRegister(left), Immediate(shift_count));
1624 if (shift_count != 0) {
1625 __ sarl(ToRegister(left), Immediate(shift_count));
1629 if (shift_count != 0) {
1630 __ shrl(ToRegister(left), Immediate(shift_count));
1631 } else if (instr->can_deopt()) {
1632 __ testl(ToRegister(left), ToRegister(left));
1633 DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
1637 if (shift_count != 0) {
1638 if (instr->hydrogen_value()->representation().IsSmi()) {
1639 if (SmiValuesAre32Bits()) {
1640 __ shlp(ToRegister(left), Immediate(shift_count));
1642 DCHECK(SmiValuesAre31Bits());
1643 if (instr->can_deopt()) {
1644 if (shift_count != 1) {
1645 __ shll(ToRegister(left), Immediate(shift_count - 1));
1647 __ Integer32ToSmi(ToRegister(left), ToRegister(left));
1648 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1650 __ shll(ToRegister(left), Immediate(shift_count));
1654 __ shll(ToRegister(left), Immediate(shift_count));
1666 void LCodeGen::DoSubI(LSubI* instr) {
1667 LOperand* left = instr->left();
1668 LOperand* right = instr->right();
1669 DCHECK(left->Equals(instr->result()));
1671 if (right->IsConstantOperand()) {
1672 int32_t right_operand =
1673 ToRepresentation(LConstantOperand::cast(right),
1674 instr->hydrogen()->right()->representation());
1675 __ subl(ToRegister(left), Immediate(right_operand));
1676 } else if (right->IsRegister()) {
1677 if (instr->hydrogen_value()->representation().IsSmi()) {
1678 __ subp(ToRegister(left), ToRegister(right));
1680 __ subl(ToRegister(left), ToRegister(right));
1683 if (instr->hydrogen_value()->representation().IsSmi()) {
1684 __ subp(ToRegister(left), ToOperand(right));
1686 __ subl(ToRegister(left), ToOperand(right));
1690 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1691 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1696 void LCodeGen::DoConstantI(LConstantI* instr) {
1697 Register dst = ToRegister(instr->result());
1698 if (instr->value() == 0) {
1701 __ movl(dst, Immediate(instr->value()));
1706 void LCodeGen::DoConstantS(LConstantS* instr) {
1707 __ Move(ToRegister(instr->result()), instr->value());
1711 void LCodeGen::DoConstantD(LConstantD* instr) {
1712 __ Move(ToDoubleRegister(instr->result()), instr->bits());
1716 void LCodeGen::DoConstantE(LConstantE* instr) {
1717 __ LoadAddress(ToRegister(instr->result()), instr->value());
1721 void LCodeGen::DoConstantT(LConstantT* instr) {
1722 Handle<Object> object = instr->value(isolate());
1723 AllowDeferredHandleDereference smi_check;
1724 __ Move(ToRegister(instr->result()), object);
1728 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1729 Register result = ToRegister(instr->result());
1730 Register map = ToRegister(instr->value());
1731 __ EnumLength(result, map);
1735 void LCodeGen::DoDateField(LDateField* instr) {
1736 Register object = ToRegister(instr->date());
1737 Register result = ToRegister(instr->result());
1738 Smi* index = instr->index();
1739 DCHECK(object.is(result));
1740 DCHECK(object.is(rax));
1742 if (FLAG_debug_code) {
1743 __ AssertNotSmi(object);
1744 __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
1745 __ Check(equal, kOperandIsNotADate);
1748 if (index->value() == 0) {
1749 __ movp(result, FieldOperand(object, JSDate::kValueOffset));
1751 Label runtime, done;
1752 if (index->value() < JSDate::kFirstUncachedField) {
1753 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1754 Operand stamp_operand = __ ExternalOperand(stamp);
1755 __ movp(kScratchRegister, stamp_operand);
1756 __ cmpp(kScratchRegister, FieldOperand(object,
1757 JSDate::kCacheStampOffset));
1758 __ j(not_equal, &runtime, Label::kNear);
1759 __ movp(result, FieldOperand(object, JSDate::kValueOffset +
1760 kPointerSize * index->value()));
1761 __ jmp(&done, Label::kNear);
1764 __ PrepareCallCFunction(2);
1765 __ movp(arg_reg_1, object);
1766 __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
1767 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1773 Operand LCodeGen::BuildSeqStringOperand(Register string,
1775 String::Encoding encoding) {
1776 if (index->IsConstantOperand()) {
1777 int offset = ToInteger32(LConstantOperand::cast(index));
1778 if (encoding == String::TWO_BYTE_ENCODING) {
1779 offset *= kUC16Size;
1781 STATIC_ASSERT(kCharSize == 1);
1782 return FieldOperand(string, SeqString::kHeaderSize + offset);
1784 return FieldOperand(
1785 string, ToRegister(index),
1786 encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
1787 SeqString::kHeaderSize);
1791 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1792 String::Encoding encoding = instr->hydrogen()->encoding();
1793 Register result = ToRegister(instr->result());
1794 Register string = ToRegister(instr->string());
1796 if (FLAG_debug_code) {
1798 __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
1799 __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));
1801 __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
1802 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1803 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1804 __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
1805 ? one_byte_seq_type : two_byte_seq_type));
1806 __ Check(equal, kUnexpectedStringType);
1810 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1811 if (encoding == String::ONE_BYTE_ENCODING) {
1812 __ movzxbl(result, operand);
1814 __ movzxwl(result, operand);
1819 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1820 String::Encoding encoding = instr->hydrogen()->encoding();
1821 Register string = ToRegister(instr->string());
1823 if (FLAG_debug_code) {
1824 Register value = ToRegister(instr->value());
1825 Register index = ToRegister(instr->index());
1826 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1827 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1829 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1830 ? one_byte_seq_type : two_byte_seq_type;
1831 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1834 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1835 if (instr->value()->IsConstantOperand()) {
1836 int value = ToInteger32(LConstantOperand::cast(instr->value()));
1837 DCHECK_LE(0, value);
1838 if (encoding == String::ONE_BYTE_ENCODING) {
1839 DCHECK_LE(value, String::kMaxOneByteCharCode);
1840 __ movb(operand, Immediate(value));
1842 DCHECK_LE(value, String::kMaxUtf16CodeUnit);
1843 __ movw(operand, Immediate(value));
1846 Register value = ToRegister(instr->value());
1847 if (encoding == String::ONE_BYTE_ENCODING) {
1848 __ movb(operand, value);
1850 __ movw(operand, value);
1856 void LCodeGen::DoAddI(LAddI* instr) {
1857 LOperand* left = instr->left();
1858 LOperand* right = instr->right();
1860 Representation target_rep = instr->hydrogen()->representation();
1861 bool is_p = target_rep.IsSmi() || target_rep.IsExternal();
1863 if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
1864 if (right->IsConstantOperand()) {
1865 // No support for smi-immediates for 32-bit SMI.
1866 DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
1868 ToRepresentation(LConstantOperand::cast(right),
1869 instr->hydrogen()->right()->representation());
1871 __ leap(ToRegister(instr->result()),
1872 MemOperand(ToRegister(left), offset));
1874 __ leal(ToRegister(instr->result()),
1875 MemOperand(ToRegister(left), offset));
1878 Operand address(ToRegister(left), ToRegister(right), times_1, 0);
1880 __ leap(ToRegister(instr->result()), address);
1882 __ leal(ToRegister(instr->result()), address);
1886 if (right->IsConstantOperand()) {
1887 // No support for smi-immediates for 32-bit SMI.
1888 DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
1889 int32_t right_operand =
1890 ToRepresentation(LConstantOperand::cast(right),
1891 instr->hydrogen()->right()->representation());
1893 __ addp(ToRegister(left), Immediate(right_operand));
1895 __ addl(ToRegister(left), Immediate(right_operand));
1897 } else if (right->IsRegister()) {
1899 __ addp(ToRegister(left), ToRegister(right));
1901 __ addl(ToRegister(left), ToRegister(right));
1905 __ addp(ToRegister(left), ToOperand(right));
1907 __ addl(ToRegister(left), ToOperand(right));
1910 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1911 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1917 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1918 LOperand* left = instr->left();
1919 LOperand* right = instr->right();
1920 DCHECK(left->Equals(instr->result()));
1921 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1922 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1924 Condition condition = (operation == HMathMinMax::kMathMin)
1927 Register left_reg = ToRegister(left);
1928 if (right->IsConstantOperand()) {
1929 Immediate right_imm = Immediate(
1930 ToRepresentation(LConstantOperand::cast(right),
1931 instr->hydrogen()->right()->representation()));
1932 DCHECK(SmiValuesAre32Bits()
1933 ? !instr->hydrogen()->representation().IsSmi()
1934 : SmiValuesAre31Bits());
1935 __ cmpl(left_reg, right_imm);
1936 __ j(condition, &return_left, Label::kNear);
1937 __ movp(left_reg, right_imm);
1938 } else if (right->IsRegister()) {
1939 Register right_reg = ToRegister(right);
1940 if (instr->hydrogen_value()->representation().IsSmi()) {
1941 __ cmpp(left_reg, right_reg);
1943 __ cmpl(left_reg, right_reg);
1945 __ j(condition, &return_left, Label::kNear);
1946 __ movp(left_reg, right_reg);
1948 Operand right_op = ToOperand(right);
1949 if (instr->hydrogen_value()->representation().IsSmi()) {
1950 __ cmpp(left_reg, right_op);
1952 __ cmpl(left_reg, right_op);
1954 __ j(condition, &return_left, Label::kNear);
1955 __ movp(left_reg, right_op);
1957 __ bind(&return_left);
1959 DCHECK(instr->hydrogen()->representation().IsDouble());
1960 Label check_nan_left, check_zero, return_left, return_right;
1961 Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
1962 XMMRegister left_reg = ToDoubleRegister(left);
1963 XMMRegister right_reg = ToDoubleRegister(right);
1964 __ ucomisd(left_reg, right_reg);
1965 __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
1966 __ j(equal, &check_zero, Label::kNear); // left == right.
1967 __ j(condition, &return_left, Label::kNear);
1968 __ jmp(&return_right, Label::kNear);
1970 __ bind(&check_zero);
1971 XMMRegister xmm_scratch = double_scratch0();
1972 __ xorps(xmm_scratch, xmm_scratch);
1973 __ ucomisd(left_reg, xmm_scratch);
1974 __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
1975 // At this point, both left and right are either 0 or -0.
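// Illustrative note (editorial): Math.min(+0, -0) must be -0, which orps
// produces by OR-ing the sign bits, while Math.max(+0, -0) must be +0, and in
// IEEE arithmetic (+0) + (-0) == +0, so addsd gives the right result.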
1976 if (operation == HMathMinMax::kMathMin) {
1977 __ orps(left_reg, right_reg);
1979 // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
1980 __ addsd(left_reg, right_reg);
1982 __ jmp(&return_left, Label::kNear);
1984 __ bind(&check_nan_left);
1985 __ ucomisd(left_reg, left_reg); // NaN check.
1986 __ j(parity_even, &return_left, Label::kNear);
1987 __ bind(&return_right);
1988 __ movaps(left_reg, right_reg);
1990 __ bind(&return_left);
1995 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1996 XMMRegister left = ToDoubleRegister(instr->left());
1997 XMMRegister right = ToDoubleRegister(instr->right());
1998 XMMRegister result = ToDoubleRegister(instr->result());
1999 switch (instr->op()) {
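// With AVX, the non-destructive three-operand forms are used; otherwise the
// register allocator has constrained the result to alias the left operand.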
2001 if (CpuFeatures::IsSupported(AVX)) {
2002 CpuFeatureScope scope(masm(), AVX);
2003 __ vaddsd(result, left, right);
2005 DCHECK(result.is(left));
2006 __ addsd(left, right);
2010 if (CpuFeatures::IsSupported(AVX)) {
2011 CpuFeatureScope scope(masm(), AVX);
2012 __ vsubsd(result, left, right);
2014 DCHECK(result.is(left));
2015 __ subsd(left, right);
2019 if (CpuFeatures::IsSupported(AVX)) {
2020 CpuFeatureScope scope(masm(), AVX);
2021 __ vmulsd(result, left, right);
2023 DCHECK(result.is(left));
2024 __ mulsd(left, right);
2028 if (CpuFeatures::IsSupported(AVX)) {
2029 CpuFeatureScope scope(masm(), AVX);
2030 __ vdivsd(result, left, right);
2032 DCHECK(result.is(left));
2033 __ divsd(left, right);
2035 // Don't delete this mov. It may improve performance on some CPUs,
2036 // when there is a (v)mulsd depending on the result.
2037 __ movaps(result, result);
2040 XMMRegister xmm_scratch = double_scratch0();
2041 __ PrepareCallCFunction(2);
2042 __ movaps(xmm_scratch, left);
2043 DCHECK(right.is(xmm1));
2045 ExternalReference::mod_two_doubles_operation(isolate()), 2);
2046 __ movaps(result, xmm_scratch);
2056 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2057 DCHECK(ToRegister(instr->context()).is(rsi));
2058 DCHECK(ToRegister(instr->left()).is(rdx));
2059 DCHECK(ToRegister(instr->right()).is(rax));
2060 DCHECK(ToRegister(instr->result()).is(rax));
2063 CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
2064 CallCode(code, RelocInfo::CODE_TARGET, instr);
2068 template<class InstrType>
2069 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2070 int left_block = instr->TrueDestination(chunk_);
2071 int right_block = instr->FalseDestination(chunk_);
2073 int next_block = GetNextEmittedBlock();
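// Emit as few jumps as possible: fall through when a destination is the block
// emitted next, negating the condition when it is the true block that falls through.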
2075 if (right_block == left_block || cc == no_condition) {
2076 EmitGoto(left_block);
2077 } else if (left_block == next_block) {
2078 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
2079 } else if (right_block == next_block) {
2080 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2082 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2084 __ jmp(chunk_->GetAssemblyLabel(right_block));
2090 template<class InstrType>
2091 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2092 int false_block = instr->FalseDestination(chunk_);
2093 __ j(cc, chunk_->GetAssemblyLabel(false_block));
2097 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2102 void LCodeGen::DoBranch(LBranch* instr) {
2103 Representation r = instr->hydrogen()->value()->representation();
2104 if (r.IsInteger32()) {
2105 DCHECK(!info()->IsStub());
2106 Register reg = ToRegister(instr->value());
2108 EmitBranch(instr, not_zero);
2109 } else if (r.IsSmi()) {
2110 DCHECK(!info()->IsStub());
2111 Register reg = ToRegister(instr->value());
2113 EmitBranch(instr, not_zero);
2114 } else if (r.IsDouble()) {
2115 DCHECK(!info()->IsStub());
2116 XMMRegister reg = ToDoubleRegister(instr->value());
2117 XMMRegister xmm_scratch = double_scratch0();
2118 __ xorps(xmm_scratch, xmm_scratch);
2119 __ ucomisd(reg, xmm_scratch);
2120 EmitBranch(instr, not_equal);
2122 DCHECK(r.IsTagged());
2123 Register reg = ToRegister(instr->value());
2124 HType type = instr->hydrogen()->value()->type();
2125 if (type.IsBoolean()) {
2126 DCHECK(!info()->IsStub());
2127 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2128 EmitBranch(instr, equal);
2129 } else if (type.IsSmi()) {
2130 DCHECK(!info()->IsStub());
2131 __ SmiCompare(reg, Smi::FromInt(0));
2132 EmitBranch(instr, not_equal);
2133 } else if (type.IsJSArray()) {
2134 DCHECK(!info()->IsStub());
2135 EmitBranch(instr, no_condition);
2136 } else if (type.IsHeapNumber()) {
2137 DCHECK(!info()->IsStub());
2138 XMMRegister xmm_scratch = double_scratch0();
2139 __ xorps(xmm_scratch, xmm_scratch);
2140 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2141 EmitBranch(instr, not_equal);
2142 } else if (type.IsString()) {
2143 DCHECK(!info()->IsStub());
2144 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2145 EmitBranch(instr, not_equal);
2147 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2148 // Avoid deopts in the case where we've never executed this path before.
2149 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2151 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2152 // undefined -> false.
2153 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2154 __ j(equal, instr->FalseLabel(chunk_));
2156 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2158 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2159 __ j(equal, instr->TrueLabel(chunk_));
2161 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2162 __ j(equal, instr->FalseLabel(chunk_));
2164 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2166 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2167 __ j(equal, instr->FalseLabel(chunk_));
2170 if (expected.Contains(ToBooleanStub::SMI)) {
2171 // Smis: 0 -> false, all others -> true.
2172 __ Cmp(reg, Smi::FromInt(0));
2173 __ j(equal, instr->FalseLabel(chunk_));
2174 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2175 } else if (expected.NeedsMap()) {
2176 // If we need a map later and have a Smi -> deopt.
2177 __ testb(reg, Immediate(kSmiTagMask));
2178 DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
2181 const Register map = kScratchRegister;
2182 if (expected.NeedsMap()) {
2183 __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
2185 if (expected.CanBeUndetectable()) {
2186 // Undetectable -> false.
2187 __ testb(FieldOperand(map, Map::kBitFieldOffset),
2188 Immediate(1 << Map::kIsUndetectable));
2189 __ j(not_zero, instr->FalseLabel(chunk_));
2193 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2194 // spec object -> true.
2195 __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2196 __ j(above_equal, instr->TrueLabel(chunk_));
2199 if (expected.Contains(ToBooleanStub::STRING)) {
2200 // String value -> false iff empty.
2202 __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2203 __ j(above_equal, &not_string, Label::kNear);
2204 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2205 __ j(not_zero, instr->TrueLabel(chunk_));
2206 __ jmp(instr->FalseLabel(chunk_));
2207 __ bind(&not_string);
2210 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2211 // Symbol value -> true.
2212 __ CmpInstanceType(map, SYMBOL_TYPE);
2213 __ j(equal, instr->TrueLabel(chunk_));
2216 if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
2217 // SIMD value -> true.
2218 __ CmpInstanceType(map, FLOAT32X4_TYPE);
2219 __ j(equal, instr->TrueLabel(chunk_));
2222 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2223 // heap number -> false iff +0, -0, or NaN.
2224 Label not_heap_number;
2225 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2226 __ j(not_equal, &not_heap_number, Label::kNear);
2227 XMMRegister xmm_scratch = double_scratch0();
2228 __ xorps(xmm_scratch, xmm_scratch);
2229 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2230 __ j(zero, instr->FalseLabel(chunk_));
2231 __ jmp(instr->TrueLabel(chunk_));
2232 __ bind(&not_heap_number);
2235 if (!expected.IsGeneric()) {
2236 // We've seen something for the first time -> deopt.
2237 // This can only happen if we are not generic already.
2238 DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
2245 void LCodeGen::EmitGoto(int block) {
2246 if (!IsNextEmittedBlock(block)) {
2247 __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
2252 void LCodeGen::DoGoto(LGoto* instr) {
2253 EmitGoto(instr->block_id());
2257 inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2258 Condition cond = no_condition;
2261 case Token::EQ_STRICT:
2265 case Token::NE_STRICT:
2269 cond = is_unsigned ? below : less;
2272 cond = is_unsigned ? above : greater;
2275 cond = is_unsigned ? below_equal : less_equal;
2278 cond = is_unsigned ? above_equal : greater_equal;
2281 case Token::INSTANCEOF:
2289 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2290 LOperand* left = instr->left();
2291 LOperand* right = instr->right();
2293 instr->is_double() ||
2294 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2295 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2296 Condition cc = TokenToCondition(instr->op(), is_unsigned);
2298 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2299 // We can statically evaluate the comparison.
2300 double left_val = ToDouble(LConstantOperand::cast(left));
2301 double right_val = ToDouble(LConstantOperand::cast(right));
2302 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2303 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2304 EmitGoto(next_block);
2306 if (instr->is_double()) {
2307 // Don't base result on EFLAGS when a NaN is involved. Instead
2308 // jump to the false block.
2309 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
2310 __ j(parity_even, instr->FalseLabel(chunk_));
2313 if (right->IsConstantOperand()) {
2314 value = ToInteger32(LConstantOperand::cast(right));
2315 if (instr->hydrogen_value()->representation().IsSmi()) {
2316 __ Cmp(ToRegister(left), Smi::FromInt(value));
2318 __ cmpl(ToRegister(left), Immediate(value));
2320 } else if (left->IsConstantOperand()) {
2321 value = ToInteger32(LConstantOperand::cast(left));
2322 if (instr->hydrogen_value()->representation().IsSmi()) {
2323 if (right->IsRegister()) {
2324 __ Cmp(ToRegister(right), Smi::FromInt(value));
2326 __ Cmp(ToOperand(right), Smi::FromInt(value));
2328 } else if (right->IsRegister()) {
2329 __ cmpl(ToRegister(right), Immediate(value));
2331 __ cmpl(ToOperand(right), Immediate(value));
2333 // We commuted the operands, so commute the condition.
2334 cc = CommuteCondition(cc);
2335 } else if (instr->hydrogen_value()->representation().IsSmi()) {
2336 if (right->IsRegister()) {
2337 __ cmpp(ToRegister(left), ToRegister(right));
2339 __ cmpp(ToRegister(left), ToOperand(right));
2342 if (right->IsRegister()) {
2343 __ cmpl(ToRegister(left), ToRegister(right));
2345 __ cmpl(ToRegister(left), ToOperand(right));
2349 EmitBranch(instr, cc);
2354 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2355 Register left = ToRegister(instr->left());
2357 if (instr->right()->IsConstantOperand()) {
2358 Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2359 __ Cmp(left, right);
2361 Register right = ToRegister(instr->right());
2362 __ cmpp(left, right);
2364 EmitBranch(instr, equal);
2368 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2369 if (instr->hydrogen()->representation().IsTagged()) {
2370 Register input_reg = ToRegister(instr->object());
2371 __ Cmp(input_reg, factory()->the_hole_value());
2372 EmitBranch(instr, equal);
2376 XMMRegister input_reg = ToDoubleRegister(instr->object());
2377 __ ucomisd(input_reg, input_reg);
2378 EmitFalseBranch(instr, parity_odd);
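// Spill the double just below the stack pointer and compare its upper 32 bits
// against the hole NaN pattern.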
2380 __ subp(rsp, Immediate(kDoubleSize));
2381 __ movsd(MemOperand(rsp, 0), input_reg);
2382 __ addp(rsp, Immediate(kDoubleSize));
2384 int offset = sizeof(kHoleNanUpper32);
2385 __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
2386 EmitBranch(instr, equal);
2390 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2391 Representation rep = instr->hydrogen()->value()->representation();
2392 DCHECK(!rep.IsInteger32());
2394 if (rep.IsDouble()) {
2395 XMMRegister value = ToDoubleRegister(instr->value());
2396 XMMRegister xmm_scratch = double_scratch0();
2397 __ xorps(xmm_scratch, xmm_scratch);
2398 __ ucomisd(xmm_scratch, value);
2399 EmitFalseBranch(instr, not_equal);
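// The value compares equal to +0.0; check the sign bit to distinguish -0 from +0.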
2400 __ movmskpd(kScratchRegister, value);
2401 __ testl(kScratchRegister, Immediate(1));
2402 EmitBranch(instr, not_zero);
2404 Register value = ToRegister(instr->value());
2405 Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2406 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
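// A heap number equal to -0.0 has exponent word 0x80000000 and mantissa word 0;
// the compare below uses the overflow flag so that only that exponent pattern passes.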
2407 __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
2409 EmitFalseBranch(instr, no_overflow);
2410 __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
2411 Immediate(0x00000000));
2412 EmitBranch(instr, equal);
2417 Condition LCodeGen::EmitIsObject(Register input,
2418 Label* is_not_object,
2420 DCHECK(!input.is(kScratchRegister));
2422 __ JumpIfSmi(input, is_not_object);
2424 __ CompareRoot(input, Heap::kNullValueRootIndex);
2425 __ j(equal, is_object);
2427 __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
2428 // Undetectable objects behave like undefined.
2429 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
2430 Immediate(1 << Map::kIsUndetectable));
2431 __ j(not_zero, is_not_object);
2433 __ movzxbl(kScratchRegister,
2434 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
2435 __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2436 __ j(below, is_not_object);
2437 __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2442 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2443 Register reg = ToRegister(instr->value());
2445 Condition true_cond = EmitIsObject(
2446 reg, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2448 EmitBranch(instr, true_cond);
2452 Condition LCodeGen::EmitIsString(Register input,
2454 Label* is_not_string,
2455 SmiCheck check_needed = INLINE_SMI_CHECK) {
2456 if (check_needed == INLINE_SMI_CHECK) {
2457 __ JumpIfSmi(input, is_not_string);
2460 Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2466 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2467 Register reg = ToRegister(instr->value());
2468 Register temp = ToRegister(instr->temp());
2470 SmiCheck check_needed =
2471 instr->hydrogen()->value()->type().IsHeapObject()
2472 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2474 Condition true_cond = EmitIsString(
2475 reg, temp, instr->FalseLabel(chunk_), check_needed);
2477 EmitBranch(instr, true_cond);
2481 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2483 if (instr->value()->IsRegister()) {
2484 Register input = ToRegister(instr->value());
2485 is_smi = masm()->CheckSmi(input);
2487 Operand input = ToOperand(instr->value());
2488 is_smi = masm()->CheckSmi(input);
2490 EmitBranch(instr, is_smi);
2494 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2495 Register input = ToRegister(instr->value());
2496 Register temp = ToRegister(instr->temp());
2498 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2499 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2501 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2502 __ testb(FieldOperand(temp, Map::kBitFieldOffset),
2503 Immediate(1 << Map::kIsUndetectable));
2504 EmitBranch(instr, not_zero);
2508 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2509 DCHECK(ToRegister(instr->context()).is(rsi));
2510 Token::Value op = instr->op();
2513 CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
2514 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2516 Condition condition = TokenToCondition(op, false);
2519 EmitBranch(instr, condition);
2523 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2524 InstanceType from = instr->from();
2525 InstanceType to = instr->to();
2526 if (from == FIRST_TYPE) return to;
2527 DCHECK(from == to || to == LAST_TYPE);
2532 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2533 InstanceType from = instr->from();
2534 InstanceType to = instr->to();
2535 if (from == to) return equal;
2536 if (to == LAST_TYPE) return above_equal;
2537 if (from == FIRST_TYPE) return below_equal;
2543 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2544 Register input = ToRegister(instr->value());
2546 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2547 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2550 __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
2551 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2555 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2556 Register input = ToRegister(instr->value());
2557 Register result = ToRegister(instr->result());
2559 __ AssertString(input);
2561 __ movl(result, FieldOperand(input, String::kHashFieldOffset));
2562 DCHECK(String::kHashShift >= kSmiTagSize);
2563 __ IndexFromHash(result, result);
2567 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2568 LHasCachedArrayIndexAndBranch* instr) {
2569 Register input = ToRegister(instr->value());
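// The hash field holds a cached array index iff none of the mask bits are set.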
2571 __ testl(FieldOperand(input, String::kHashFieldOffset),
2572 Immediate(String::kContainsCachedArrayIndexMask));
2573 EmitBranch(instr, equal);
2577 // Branches to a label or falls through with the answer in the z flag.
2578 // Trashes the temp registers.
2579 void LCodeGen::EmitClassOfTest(Label* is_true,
2581 Handle<String> class_name,
2585 DCHECK(!input.is(temp));
2586 DCHECK(!input.is(temp2));
2587 DCHECK(!temp.is(temp2));
2589 __ JumpIfSmi(input, is_false);
2591 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2592 // Assuming the following assertions, we can use the same compares to test
2593 // for both being a function type and being in the object type range.
2594 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2595 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2596 FIRST_SPEC_OBJECT_TYPE + 1);
2597 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2598 LAST_SPEC_OBJECT_TYPE - 1);
2599 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2600 __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2601 __ j(below, is_false);
2602 __ j(equal, is_true);
2603 __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2604 __ j(equal, is_true);
2606 // Faster code path to avoid two compares: subtract lower bound from the
2607 // actual type and do a signed compare with the width of the type range.
2608 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2609 __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2610 __ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2611 __ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2612 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2613 __ j(above, is_false);
2616 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2617 // Check if the constructor in the map is a function.
2618 __ GetMapConstructor(temp, temp, kScratchRegister);
2620 // Objects with a non-function constructor have class 'Object'.
2621 __ CmpInstanceType(kScratchRegister, JS_FUNCTION_TYPE);
2622 if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2623 __ j(not_equal, is_true);
2625 __ j(not_equal, is_false);
2628 // temp now contains the constructor function. Grab the
2629 // instance class name from there.
2630 __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2631 __ movp(temp, FieldOperand(temp,
2632 SharedFunctionInfo::kInstanceClassNameOffset));
2633 // The class name we are testing against is internalized since it's a literal.
2634 // The name in the constructor is internalized because of the way the context
2635 // is booted. This routine isn't expected to work for random API-created
2636 // classes and it doesn't have to because you can't access it with natives
2637 // syntax. Since both sides are internalized it is sufficient to use an
2638 // identity comparison.
2639 DCHECK(class_name->IsInternalizedString());
2640 __ Cmp(temp, class_name);
2641 // End with the answer in the z flag.
2645 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2646 Register input = ToRegister(instr->value());
2647 Register temp = ToRegister(instr->temp());
2648 Register temp2 = ToRegister(instr->temp2());
2649 Handle<String> class_name = instr->hydrogen()->class_name();
2651 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2652 class_name, input, temp, temp2);
2654 EmitBranch(instr, equal);
2658 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2659 Register reg = ToRegister(instr->value());
2661 __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2662 EmitBranch(instr, equal);
2666 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2667 DCHECK(ToRegister(instr->context()).is(rsi));
2668 InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
2669 __ Push(ToRegister(instr->left()));
2670 __ Push(ToRegister(instr->right()));
2671 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2672 Label true_value, done;
2674 __ j(zero, &true_value, Label::kNear);
2675 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2676 __ jmp(&done, Label::kNear);
2677 __ bind(&true_value);
2678 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2683 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2684 class DeferredInstanceOfKnownGlobal final : public LDeferredCode {
2686 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2687 LInstanceOfKnownGlobal* instr)
2688 : LDeferredCode(codegen), instr_(instr) { }
2689 void Generate() override {
2690 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2692 LInstruction* instr() override { return instr_; }
2693 Label* map_check() { return &map_check_; }
2695 LInstanceOfKnownGlobal* instr_;
2699 DCHECK(ToRegister(instr->context()).is(rsi));
2700 DeferredInstanceOfKnownGlobal* deferred;
2701 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2703 Label done, false_result;
2704 Register object = ToRegister(instr->value());
2706 // A Smi is not an instance of anything.
2707 __ JumpIfSmi(object, &false_result, Label::kNear);
2709 // This is the inlined call site instanceof cache. The two occurrences of the
2710 // hole value will be patched to the last map/result pair generated by the instanceof stub.
2713 // Use a temp register to avoid memory operands with variable lengths.
2714 Register map = ToRegister(instr->temp());
2715 __ movp(map, FieldOperand(object, HeapObject::kMapOffset));
2716 __ bind(deferred->map_check()); // Label for calculating code patching.
2717 Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
2718 __ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
2719 __ cmpp(map, Operand(kScratchRegister, 0));
2720 __ j(not_equal, &cache_miss, Label::kNear);
2721 // Patched to load either true or false.
2722 __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
2724 // Check that the code size between patch label and patch sites is invariant.
2725 Label end_of_patched_code;
2726 __ bind(&end_of_patched_code);
2729 __ jmp(&done, Label::kNear);
2731 // The inlined call site cache did not match. Check for null and string
2732 // before calling the deferred code.
2733 __ bind(&cache_miss); // Null is not an instance of anything.
2734 __ CompareRoot(object, Heap::kNullValueRootIndex);
2735 __ j(equal, &false_result, Label::kNear);
2737 // String values are not instances of anything.
2738 __ JumpIfNotString(object, kScratchRegister, deferred->entry());
2740 __ bind(&false_result);
2741 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2743 __ bind(deferred->exit());
2748 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2751 PushSafepointRegistersScope scope(this);
2752 InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
2753 InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
2754 InstanceofStub stub(isolate(), flags);
2756 __ Push(ToRegister(instr->value()));
2757 __ Push(instr->function());
2759 static const int kAdditionalDelta = kPointerSize == kInt64Size ? 10 : 16;
2761 masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
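// The delta lets the stub find the inlined map-check site so it can patch the
// cached map and the boolean result after the call.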
2763 __ PushImm32(delta);
2765 // We are pushing three values on the stack but recording a
2766 // safepoint with two arguments because the stub is going to
2767 // remove the third argument from the stack before jumping
2768 // to the instanceof builtin on the slow path.
2769 CallCodeGeneric(stub.GetCode(),
2770 RelocInfo::CODE_TARGET,
2772 RECORD_SAFEPOINT_WITH_REGISTERS,
2774 DCHECK(delta == masm_->SizeOfCodeGeneratedSince(map_check));
2775 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2776 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2777 // Move the result to a register that survives the end of the
2778 // PushSafepointRegistersScope.
2779 __ movp(kScratchRegister, rax);
2781 __ testp(kScratchRegister, kScratchRegister);
2784 __ j(not_zero, &load_false, Label::kNear);
2785 __ LoadRoot(rax, Heap::kTrueValueRootIndex);
2786 __ jmp(&done, Label::kNear);
2787 __ bind(&load_false);
2788 __ LoadRoot(rax, Heap::kFalseValueRootIndex);
2793 void LCodeGen::DoCmpT(LCmpT* instr) {
2794 DCHECK(ToRegister(instr->context()).is(rsi));
2795 Token::Value op = instr->op();
2798 CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
2799 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2801 Condition condition = TokenToCondition(op, false);
2802 Label true_value, done;
2804 __ j(condition, &true_value, Label::kNear);
2805 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2806 __ jmp(&done, Label::kNear);
2807 __ bind(&true_value);
2808 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2813 void LCodeGen::DoReturn(LReturn* instr) {
2814 if (FLAG_trace && info()->IsOptimizing()) {
2815 // Preserve the return value on the stack and rely on the runtime call
2816 // to return the value in the same register. We're leaving the code
2817 // managed by the register allocator and tearing down the frame, so it's
2818 // safe to write to the context register.
2820 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
2821 __ CallRuntime(Runtime::kTraceExit, 1);
2823 if (info()->saves_caller_doubles()) {
2824 RestoreCallerDoubles();
2826 int no_frame_start = -1;
2827 if (NeedsEagerFrame()) {
2830 no_frame_start = masm_->pc_offset();
2832 if (instr->has_constant_parameter_count()) {
2833 __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
2836 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2837 Register reg = ToRegister(instr->parameter_count());
2838 // The argument count parameter is a smi.
2839 __ SmiToInteger32(reg, reg);
2840 Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
2841 __ PopReturnAddressTo(return_addr_reg);
2842 __ shlp(reg, Immediate(kPointerSizeLog2));
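// Drop the arguments from the stack and return by jumping through the saved return address.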
2844 __ jmp(return_addr_reg);
2846 if (no_frame_start != -1) {
2847 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2853 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2854 Register vector_register = ToRegister(instr->temp_vector());
2855 Register slot_register = LoadWithVectorDescriptor::SlotRegister();
2856 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
2857 DCHECK(slot_register.is(rax));
2859 AllowDeferredHandleDereference vector_structure_check;
2860 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2861 __ Move(vector_register, vector);
2862 // No need to allocate this register.
2863 FeedbackVectorICSlot slot = instr->hydrogen()->slot();
2864 int index = vector->GetIndex(slot);
2865 __ Move(slot_register, Smi::FromInt(index));
2870 void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
2871 Register vector_register = ToRegister(instr->temp_vector());
2872 Register slot_register = ToRegister(instr->temp_slot());
2874 AllowDeferredHandleDereference vector_structure_check;
2875 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2876 __ Move(vector_register, vector);
2877 FeedbackVectorICSlot slot = instr->hydrogen()->slot();
2878 int index = vector->GetIndex(slot);
2879 __ Move(slot_register, Smi::FromInt(index));
2883 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2884 DCHECK(ToRegister(instr->context()).is(rsi));
2885 DCHECK(ToRegister(instr->global_object())
2886 .is(LoadDescriptor::ReceiverRegister()));
2887 DCHECK(ToRegister(instr->result()).is(rax));
2889 __ Move(LoadDescriptor::NameRegister(), instr->name());
2890 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2892 CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
2893 SLOPPY, PREMONOMORPHIC).code();
2894 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2898 void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
2899 DCHECK(ToRegister(instr->context()).is(rsi));
2900 DCHECK(ToRegister(instr->result()).is(rax));
2901 int const slot = instr->slot_index();
2902 int const depth = instr->depth();
2903 if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
2904 __ Set(LoadGlobalViaContextDescriptor::SlotRegister(), slot);
2906 CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
2907 CallCode(stub, RelocInfo::CODE_TARGET, instr);
2909 __ Push(Smi::FromInt(slot));
2910 __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
2915 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2916 Register context = ToRegister(instr->context());
2917 Register result = ToRegister(instr->result());
2918 __ movp(result, ContextOperand(context, instr->slot_index()));
2919 if (instr->hydrogen()->RequiresHoleCheck()) {
2920 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2921 if (instr->hydrogen()->DeoptimizesOnHole()) {
2922 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2925 __ j(not_equal, &is_not_hole, Label::kNear);
2926 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2927 __ bind(&is_not_hole);
2933 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2934 Register context = ToRegister(instr->context());
2935 Register value = ToRegister(instr->value());
2937 Operand target = ContextOperand(context, instr->slot_index());
2939 Label skip_assignment;
2940 if (instr->hydrogen()->RequiresHoleCheck()) {
2941 __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
2942 if (instr->hydrogen()->DeoptimizesOnHole()) {
2943 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2945 __ j(not_equal, &skip_assignment);
2948 __ movp(target, value);
2950 if (instr->hydrogen()->NeedsWriteBarrier()) {
2951 SmiCheck check_needed =
2952 instr->hydrogen()->value()->type().IsHeapObject()
2953 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2954 int offset = Context::SlotOffset(instr->slot_index());
2955 Register scratch = ToRegister(instr->temp());
2956 __ RecordWriteContextSlot(context,
2961 EMIT_REMEMBERED_SET,
2965 __ bind(&skip_assignment);
2969 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2970 HObjectAccess access = instr->hydrogen()->access();
2971 int offset = access.offset();
2973 if (access.IsExternalMemory()) {
2974 Register result = ToRegister(instr->result());
2975 if (instr->object()->IsConstantOperand()) {
2976 DCHECK(result.is(rax));
2977 __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
2979 Register object = ToRegister(instr->object());
2980 __ Load(result, MemOperand(object, offset), access.representation());
2985 Register object = ToRegister(instr->object());
2986 if (instr->hydrogen()->representation().IsDouble()) {
2987 DCHECK(access.IsInobject());
2988 XMMRegister result = ToDoubleRegister(instr->result());
2989 __ movsd(result, FieldOperand(object, offset));
2993 Register result = ToRegister(instr->result());
2994 if (!access.IsInobject()) {
2995 __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
2999 Representation representation = access.representation();
3000 if (representation.IsSmi() && SmiValuesAre32Bits() &&
3001 instr->hydrogen()->representation().IsInteger32()) {
3002 if (FLAG_debug_code) {
3003 Register scratch = kScratchRegister;
3004 __ Load(scratch, FieldOperand(object, offset), representation);
3005 __ AssertSmi(scratch);
3008 // Read int value directly from upper half of the smi.
3009 STATIC_ASSERT(kSmiTag == 0);
3010 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
3011 offset += kPointerSize / 2;
3012 representation = Representation::Integer32();
3014 __ Load(result, FieldOperand(object, offset), representation);
3018 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3019 DCHECK(ToRegister(instr->context()).is(rsi));
3020 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3021 DCHECK(ToRegister(instr->result()).is(rax));
3023 __ Move(LoadDescriptor::NameRegister(), instr->name());
3024 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3026 CodeFactory::LoadICInOptimizedCode(
3027 isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
3028 instr->hydrogen()->initialization_state()).code();
3029 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3033 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3034 Register function = ToRegister(instr->function());
3035 Register result = ToRegister(instr->result());
3037 // Get the prototype or initial map from the function.
3039 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3041 // Check that the function has a prototype or an initial map.
3042 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3043 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
3045 // If the function does not have an initial map, we're done.
3047 __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
3048 __ j(not_equal, &done, Label::kNear);
3050 // Get the prototype from the initial map.
3051 __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
3058 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3059 Register result = ToRegister(instr->result());
3060 __ LoadRoot(result, instr->index());
3064 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3065 Register arguments = ToRegister(instr->arguments());
3066 Register result = ToRegister(instr->result());
3068 if (instr->length()->IsConstantOperand() &&
3069 instr->index()->IsConstantOperand()) {
3070 int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3071 int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3072 if (const_index >= 0 && const_index < const_length) {
3073 StackArgumentsAccessor args(arguments, const_length,
3074 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3075 __ movp(result, args.GetArgumentOperand(const_index));
3076 } else if (FLAG_debug_code) {
3080 Register length = ToRegister(instr->length());
3081 // There are two words between the frame pointer and the last argument.
3082 // Subtracting from length accounts for one of them; add one more.
3083 if (instr->index()->IsRegister()) {
3084 __ subl(length, ToRegister(instr->index()));
3086 __ subl(length, ToOperand(instr->index()));
3088 StackArgumentsAccessor args(arguments, length,
3089 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3090 __ movp(result, args.GetArgumentOperand(0));
3095 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3096 ElementsKind elements_kind = instr->elements_kind();
3097 LOperand* key = instr->key();
3098 if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
3099 Register key_reg = ToRegister(key);
3100 Representation key_representation =
3101 instr->hydrogen()->key()->representation();
3102 if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
3103 __ SmiToInteger64(key_reg, key_reg);
3104 } else if (instr->hydrogen()->IsDehoisted()) {
3105 // Sign-extend the key because it could be a 32-bit negative value
3106 // and the dehoisted address computation happens in 64 bits.
3107 __ movsxlq(key_reg, key_reg);
3110 Operand operand(BuildFastArrayOperand(
3113 instr->hydrogen()->key()->representation(),
3115 instr->base_offset()));
3117 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3118 elements_kind == FLOAT32_ELEMENTS) {
3119 XMMRegister result(ToDoubleRegister(instr->result()));
3120 __ movss(result, operand);
3121 __ cvtss2sd(result, result);
3122 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3123 elements_kind == FLOAT64_ELEMENTS) {
3124 __ movsd(ToDoubleRegister(instr->result()), operand);
3126 Register result(ToRegister(instr->result()));
3127 switch (elements_kind) {
3128 case EXTERNAL_INT8_ELEMENTS:
3130 __ movsxbl(result, operand);
3132 case EXTERNAL_UINT8_ELEMENTS:
3133 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3134 case UINT8_ELEMENTS:
3135 case UINT8_CLAMPED_ELEMENTS:
3136 __ movzxbl(result, operand);
3138 case EXTERNAL_INT16_ELEMENTS:
3139 case INT16_ELEMENTS:
3140 __ movsxwl(result, operand);
3142 case EXTERNAL_UINT16_ELEMENTS:
3143 case UINT16_ELEMENTS:
3144 __ movzxwl(result, operand);
3146 case EXTERNAL_INT32_ELEMENTS:
3147 case INT32_ELEMENTS:
3148 __ movl(result, operand);
3150 case EXTERNAL_UINT32_ELEMENTS:
3151 case UINT32_ELEMENTS:
3152 __ movl(result, operand);
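// A uint32 value with the sign bit set cannot be represented as an int32;
// unless the consumer treats the result as uint32, deoptimize.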
3153 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3154 __ testl(result, result);
3155 DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
3158 case EXTERNAL_FLOAT32_ELEMENTS:
3159 case EXTERNAL_FLOAT64_ELEMENTS:
3160 case FLOAT32_ELEMENTS:
3161 case FLOAT64_ELEMENTS:
3163 case FAST_SMI_ELEMENTS:
3164 case FAST_DOUBLE_ELEMENTS:
3165 case FAST_HOLEY_ELEMENTS:
3166 case FAST_HOLEY_SMI_ELEMENTS:
3167 case FAST_HOLEY_DOUBLE_ELEMENTS:
3168 case DICTIONARY_ELEMENTS:
3169 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
3170 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
3178 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3179 XMMRegister result(ToDoubleRegister(instr->result()));
3180 LOperand* key = instr->key();
3181 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
3182 instr->hydrogen()->IsDehoisted()) {
3183 // Sign-extend the key because it could be a 32-bit negative value
3184 // and the dehoisted address computation happens in 64 bits.
3185 __ movsxlq(ToRegister(key), ToRegister(key));
3187 if (instr->hydrogen()->RequiresHoleCheck()) {
3188 Operand hole_check_operand = BuildFastArrayOperand(
3191 instr->hydrogen()->key()->representation(),
3192 FAST_DOUBLE_ELEMENTS,
3193 instr->base_offset() + sizeof(kHoleNanLower32));
3194 __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
3195 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
3198 Operand double_load_operand = BuildFastArrayOperand(
3201 instr->hydrogen()->key()->representation(),
3202 FAST_DOUBLE_ELEMENTS,
3203 instr->base_offset());
3204 __ movsd(result, double_load_operand);
3208 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3209 HLoadKeyed* hinstr = instr->hydrogen();
3210 Register result = ToRegister(instr->result());
3211 LOperand* key = instr->key();
3212 bool requires_hole_check = hinstr->RequiresHoleCheck();
3213 Representation representation = hinstr->representation();
3214 int offset = instr->base_offset();
3216 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
3217 instr->hydrogen()->IsDehoisted()) {
3218 // Sign-extend the key because it could be a 32-bit negative value
3219 // and the dehoisted address computation happens in 64 bits.
3220 __ movsxlq(ToRegister(key), ToRegister(key));
3222 if (representation.IsInteger32() && SmiValuesAre32Bits() &&
3223 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3224 DCHECK(!requires_hole_check);
3225 if (FLAG_debug_code) {
3226 Register scratch = kScratchRegister;
3228 BuildFastArrayOperand(instr->elements(),
3230 instr->hydrogen()->key()->representation(),
3233 Representation::Smi());
3234 __ AssertSmi(scratch);
3236 // Read int value directly from upper half of the smi.
3237 STATIC_ASSERT(kSmiTag == 0);
3238 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
3239 offset += kPointerSize / 2;
3243 BuildFastArrayOperand(instr->elements(), key,
3244 instr->hydrogen()->key()->representation(),
3245 FAST_ELEMENTS, offset),
3248 // Check for the hole value.
3249 if (requires_hole_check) {
3250 if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3251 Condition smi = __ CheckSmi(result);
3252 DeoptimizeIf(NegateCondition(smi), instr, Deoptimizer::kNotASmi);
3254 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3255 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
3257 } else if (hinstr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
3258 DCHECK(hinstr->elements_kind() == FAST_HOLEY_ELEMENTS);
3260 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3261 __ j(not_equal, &done);
3262 if (info()->IsStub()) {
3263 // A stub can safely convert the hole to undefined only if the array
3264 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
3265 // it needs to bail out.
3266 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
3267 __ Cmp(FieldOperand(result, Cell::kValueOffset),
3268 Smi::FromInt(Isolate::kArrayProtectorValid));
3269 DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
3271 __ Move(result, isolate()->factory()->undefined_value());
3277 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3278 if (instr->is_typed_elements()) {
3279 DoLoadKeyedExternalArray(instr);
3280 } else if (instr->hydrogen()->representation().IsDouble()) {
3281 DoLoadKeyedFixedDoubleArray(instr);
3283 DoLoadKeyedFixedArray(instr);
3288 Operand LCodeGen::BuildFastArrayOperand(
3289 LOperand* elements_pointer,
3291 Representation key_representation,
3292 ElementsKind elements_kind,
3294 Register elements_pointer_reg = ToRegister(elements_pointer);
3295 int shift_size = ElementsKindToShiftSize(elements_kind);
3296 if (key->IsConstantOperand()) {
3297 int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
3298 if (constant_value & 0xF0000000) {
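// An index with any of the top four bits set would overflow the 32-bit
// displacement once shifted by the element size.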
3299 Abort(kArrayIndexConstantValueTooBig);
3301 return Operand(elements_pointer_reg,
3302 (constant_value << shift_size) + offset);
3304 // Guaranteed by ArrayInstructionInterface::KeyedAccessIndexRequirement().
3305 DCHECK(key_representation.IsInteger32());
3307 ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3308 return Operand(elements_pointer_reg,
3316 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3317 DCHECK(ToRegister(instr->context()).is(rsi));
3318 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3319 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3321 if (instr->hydrogen()->HasVectorAndSlot()) {
3322 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3325 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
3326 isolate(), instr->hydrogen()->language_mode(),
3327 instr->hydrogen()->initialization_state()).code();
3328 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3332 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3333 Register result = ToRegister(instr->result());
3335 if (instr->hydrogen()->from_inlined()) {
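// Arguments from an inlined call sit directly above rsp, with no saved fp or
// return address below them; fake a frame pointer so the standard argument
// offsets still line up.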
3336 __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
3338 // Check for arguments adaptor frame.
3339 Label done, adapted;
3340 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3341 __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
3342 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3343 __ j(equal, &adapted, Label::kNear);
3345 // No arguments adaptor frame.
3346 __ movp(result, rbp);
3347 __ jmp(&done, Label::kNear);
3349 // Arguments adaptor frame present.
3351 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3353 // Result is the frame pointer for the frame if not adapted and for the real
3354 // frame below the adaptor frame if adapted.
3360 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3361 Register result = ToRegister(instr->result());
3365 // If there is no arguments adaptor frame, the number of arguments is fixed.
3366 if (instr->elements()->IsRegister()) {
3367 __ cmpp(rbp, ToRegister(instr->elements()));
3369 __ cmpp(rbp, ToOperand(instr->elements()));
3371 __ movl(result, Immediate(scope()->num_parameters()));
3372 __ j(equal, &done, Label::kNear);
3374 // Arguments adaptor frame present. Get argument length from there.
3375 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3376 __ SmiToInteger32(result,
3378 ArgumentsAdaptorFrameConstants::kLengthOffset));
3380 // Argument length is in result register.
3385 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3386 Register receiver = ToRegister(instr->receiver());
3387 Register function = ToRegister(instr->function());
3389 // If the receiver is null or undefined, we have to pass the global
3390 // object as a receiver to normal functions. Values have to be
3391 // passed unchanged to builtins and strict-mode functions.
3392 Label global_object, receiver_ok;
3393 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3395 if (!instr->hydrogen()->known_function()) {
3396 // Do not transform the receiver to object for strict mode functions.
3398 __ movp(kScratchRegister,
3399 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3400 __ testb(FieldOperand(kScratchRegister,
3401 SharedFunctionInfo::kStrictModeByteOffset),
3402 Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
3403 __ j(not_equal, &receiver_ok, dist);
3405 // Do not transform the receiver to object for builtins.
3406 __ testb(FieldOperand(kScratchRegister,
3407 SharedFunctionInfo::kNativeByteOffset),
3408 Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
3409 __ j(not_equal, &receiver_ok, dist);
3412 // Normal function. Replace undefined or null with global receiver.
3413 __ CompareRoot(receiver, Heap::kNullValueRootIndex);
3414 __ j(equal, &global_object, Label::kNear);
3415 __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
3416 __ j(equal, &global_object, Label::kNear);
3418 // The receiver should be a JS object.
3419 Condition is_smi = __ CheckSmi(receiver);
3420 DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
3421 __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
3422 DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
3424 __ jmp(&receiver_ok, Label::kNear);
3425 __ bind(&global_object);
3426 __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
3429 Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
3430 __ movp(receiver, FieldOperand(receiver, GlobalObject::kGlobalProxyOffset));
3432 __ bind(&receiver_ok);
3436 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3437 Register receiver = ToRegister(instr->receiver());
3438 Register function = ToRegister(instr->function());
3439 Register length = ToRegister(instr->length());
3440 Register elements = ToRegister(instr->elements());
3441 DCHECK(receiver.is(rax)); // Used for parameter count.
3442 DCHECK(function.is(rdi)); // Required by InvokeFunction.
3443 DCHECK(ToRegister(instr->result()).is(rax));
3445 // Copy the arguments to this function possibly from the
3446 // adaptor frame below it.
3447 const uint32_t kArgumentsLimit = 1 * KB;
3448 __ cmpp(length, Immediate(kArgumentsLimit));
3449 DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
3452 __ movp(receiver, length);
3454 // Loop through the arguments, pushing them onto the execution stack.
3457 // length is a small non-negative integer, due to the test above.
3458 __ testl(length, length);
3459 __ j(zero, &invoke, Label::kNear);
3461 StackArgumentsAccessor args(elements, length,
3462 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3463 __ Push(args.GetArgumentOperand(0));
3465 __ j(not_zero, &loop);
3467 // Invoke the function.
3469 DCHECK(instr->HasPointerMap());
3470 LPointerMap* pointers = instr->pointer_map();
3471 SafepointGenerator safepoint_generator(
3472 this, pointers, Safepoint::kLazyDeopt);
3473 ParameterCount actual(rax);
3474 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3478 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3479 LOperand* argument = instr->value();
3480 EmitPushTaggedOperand(argument);
3484 void LCodeGen::DoDrop(LDrop* instr) {
3485 __ Drop(instr->count());
3489 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3490 Register result = ToRegister(instr->result());
3491 __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
3495 void LCodeGen::DoContext(LContext* instr) {
3496 Register result = ToRegister(instr->result());
3497 if (info()->IsOptimizing()) {
3498 __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
3500 // If there is no frame, the context must be in rsi.
3501 DCHECK(result.is(rsi));
3506 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3507 DCHECK(ToRegister(instr->context()).is(rsi));
3508 __ Push(rsi); // The context is the first argument.
3509 __ Push(instr->hydrogen()->pairs());
3510 __ Push(Smi::FromInt(instr->hydrogen()->flags()));
3511 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3515 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3516 int formal_parameter_count, int arity,
3517 LInstruction* instr) {
3518 bool dont_adapt_arguments =
3519 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3520 bool can_invoke_directly =
3521 dont_adapt_arguments || formal_parameter_count == arity;
3523 Register function_reg = rdi;
3524 LPointerMap* pointers = instr->pointer_map();
3526 if (can_invoke_directly) {
3528 __ movp(rsi, FieldOperand(function_reg, JSFunction::kContextOffset));
3530 // Set rax to the arguments count if adaptation is not needed. Assumes that rax
3531 // is available to write to at this point.
3532 if (dont_adapt_arguments) {
3537 if (function.is_identical_to(info()->closure())) {
3540 __ Call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
3543 // Set up deoptimization.
3544 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
3546 // We need to adapt arguments.
3547 SafepointGenerator generator(
3548 this, pointers, Safepoint::kLazyDeopt);
3549 ParameterCount count(arity);
3550 ParameterCount expected(formal_parameter_count);
3551 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
3556 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3557 DCHECK(ToRegister(instr->result()).is(rax));
3559 if (instr->hydrogen()->IsTailCall()) {
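// Tail call: tear down our frame, if we built one, and jump straight to the target.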
3560 if (NeedsEagerFrame()) __ leave();
3562 if (instr->target()->IsConstantOperand()) {
3563 LConstantOperand* target = LConstantOperand::cast(instr->target());
3564 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3565 __ jmp(code, RelocInfo::CODE_TARGET);
3567 DCHECK(instr->target()->IsRegister());
3568 Register target = ToRegister(instr->target());
3569 __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3573 LPointerMap* pointers = instr->pointer_map();
3574 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3576 if (instr->target()->IsConstantOperand()) {
3577 LConstantOperand* target = LConstantOperand::cast(instr->target());
3578 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3579 generator.BeforeCall(__ CallSize(code));
3580 __ call(code, RelocInfo::CODE_TARGET);
3582 DCHECK(instr->target()->IsRegister());
3583 Register target = ToRegister(instr->target());
3584 generator.BeforeCall(__ CallSize(target));
3585 __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3588 generator.AfterCall();
3593 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3594 DCHECK(ToRegister(instr->function()).is(rdi));
3595 DCHECK(ToRegister(instr->result()).is(rax));
3597 if (instr->hydrogen()->pass_argument_count()) {
3598 __ Set(rax, instr->arity());
3602 __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3604 LPointerMap* pointers = instr->pointer_map();
3605 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3607 bool is_self_call = false;
3608 if (instr->hydrogen()->function()->IsConstant()) {
3609 Handle<JSFunction> jsfun = Handle<JSFunction>::null();
3610 HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3611 jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
3612 is_self_call = jsfun.is_identical_to(info()->closure());
3618 Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
3619 generator.BeforeCall(__ CallSize(target));
3622 generator.AfterCall();
3626 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3627 Register input_reg = ToRegister(instr->value());
3628 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3629 Heap::kHeapNumberMapRootIndex);
3630 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
3632 Label slow, allocated, done;
3633 Register tmp = input_reg.is(rax) ? rcx : rax;
3634 Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
3636 // Preserve the value of all registers.
3637 PushSafepointRegistersScope scope(this);
3639 __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3640 // Check the sign of the argument. If the argument is positive, just
3641 // return it. We do not need to patch the stack since |input| and
3642 // |result| are the same register and |input| will be restored
3643 // unchanged by popping safepoint registers.
3644 __ testl(tmp, Immediate(HeapNumber::kSignMask));
3647 __ AllocateHeapNumber(tmp, tmp2, &slow);
3648 __ jmp(&allocated, Label::kNear);
3650 // Slow case: Call the runtime system to do the number allocation.
3652 CallRuntimeFromDeferred(
3653 Runtime::kAllocateHeapNumber, 0, instr, instr->context());
3654 // Set the pointer to the new heap number in tmp.
3655 if (!tmp.is(rax)) __ movp(tmp, rax);
3656 // Restore input_reg after call to runtime.
3657 __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3659 __ bind(&allocated);
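// Copy the double bits and clear the sign bit by shifting it out and back in.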
3660 __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
3661 __ shlq(tmp2, Immediate(1));
3662 __ shrq(tmp2, Immediate(1));
3663 __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
3664 __ StoreToSafepointRegisterSlot(input_reg, tmp);
3670 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3671 Register input_reg = ToRegister(instr->value());
3672 __ testl(input_reg, input_reg);
3674 __ j(not_sign, &is_positive, Label::kNear);
3675 __ negl(input_reg); // Sets flags.
3676 DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
3677 __ bind(&is_positive);
3681 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
3682 Register input_reg = ToRegister(instr->value());
3683 __ testp(input_reg, input_reg);
3685 __ j(not_sign, &is_positive, Label::kNear);
3686 __ negp(input_reg); // Sets flags.
3687 DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
3688 __ bind(&is_positive);
3692 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3693 // Class for deferred case.
3694 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3696 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3697 : LDeferredCode(codegen), instr_(instr) { }
3698 void Generate() override {
3699 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3701 LInstruction* instr() override { return instr_; }
3707 DCHECK(instr->value()->Equals(instr->result()));
3708 Representation r = instr->hydrogen()->value()->representation();
3711 XMMRegister scratch = double_scratch0();
3712 XMMRegister input_reg = ToDoubleRegister(instr->value());
3713 __ xorps(scratch, scratch);
3714 __ subsd(scratch, input_reg);
3715 __ andps(input_reg, scratch);
3716 } else if (r.IsInteger32()) {
3717 EmitIntegerMathAbs(instr);
3718 } else if (r.IsSmi()) {
3719 EmitSmiMathAbs(instr);
3720 } else { // Tagged case.
3721 DeferredMathAbsTaggedHeapNumber* deferred =
3722 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3723 Register input_reg = ToRegister(instr->value());
3725 __ JumpIfNotSmi(input_reg, deferred->entry());
3726 EmitSmiMathAbs(instr);
3727 __ bind(deferred->exit());
3732 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3733 XMMRegister xmm_scratch = double_scratch0();
3734 Register output_reg = ToRegister(instr->result());
3735 XMMRegister input_reg = ToDoubleRegister(instr->value());
3737 if (CpuFeatures::IsSupported(SSE4_1)) {
3738 CpuFeatureScope scope(masm(), SSE4_1);
3739 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3740 // Deoptimize if minus zero.
3741 __ movq(output_reg, input_reg);
3742 __ subq(output_reg, Immediate(1));
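// Only the bit pattern of -0.0 (0x8000000000000000, i.e. INT64_MIN) overflows here.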
3743 DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero);
3745 __ roundsd(xmm_scratch, input_reg, kRoundDown);
3746 __ cvttsd2si(output_reg, xmm_scratch);
3747 __ cmpl(output_reg, Immediate(0x1));
3748 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3750 Label negative_sign, done;
3751 // Deoptimize on unordered.
3752 __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3753 __ ucomisd(input_reg, xmm_scratch);
3754 DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
3755 __ j(below, &negative_sign, Label::kNear);
3757 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3758 // Check for negative zero.
3759 Label positive_sign;
3760 __ j(above, &positive_sign, Label::kNear);
3761 __ movmskpd(output_reg, input_reg);
3762 __ testq(output_reg, Immediate(1));
3763 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
      __ Set(output_reg, 0);
      __ jmp(&done, Label::kNear);
      __ bind(&positive_sign);
    }
3769 // Use truncating instruction (OK because input is positive).
3770 __ cvttsd2si(output_reg, input_reg);
3771 // Overflow is signalled with minint.
3772 __ cmpl(output_reg, Immediate(0x1));
3773 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3774 __ jmp(&done, Label::kNear);
3776 // Non-zero negative reaches here.
3777 __ bind(&negative_sign);
3778 // Truncate, then compare and compensate.
3779 __ cvttsd2si(output_reg, input_reg);
3780 __ Cvtlsi2sd(xmm_scratch, output_reg);
3781 __ ucomisd(input_reg, xmm_scratch);
3782 __ j(equal, &done, Label::kNear);
3783 __ subl(output_reg, Immediate(1));
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);

    __ bind(&done);
  }
}
3791 void LCodeGen::DoMathRound(LMathRound* instr) {
3792 const XMMRegister xmm_scratch = double_scratch0();
3793 Register output_reg = ToRegister(instr->result());
3794 XMMRegister input_reg = ToDoubleRegister(instr->value());
3795 XMMRegister input_temp = ToDoubleRegister(instr->temp());
3796 static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
3797 static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
3799 Label done, round_to_zero, below_one_half;
3800 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3801 __ movq(kScratchRegister, one_half);
3802 __ movq(xmm_scratch, kScratchRegister);
3803 __ ucomisd(xmm_scratch, input_reg);
3804 __ j(above, &below_one_half, Label::kNear);
3806 // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
3807 __ addsd(xmm_scratch, input_reg);
3808 __ cvttsd2si(output_reg, xmm_scratch);
3809 // Overflow is signalled with minint.
3810 __ cmpl(output_reg, Immediate(0x1));
3811 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3812 __ jmp(&done, dist);
3814 __ bind(&below_one_half);
3815 __ movq(kScratchRegister, minus_one_half);
3816 __ movq(xmm_scratch, kScratchRegister);
3817 __ ucomisd(xmm_scratch, input_reg);
3818 __ j(below_equal, &round_to_zero, Label::kNear);
3820 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
3821 // compare and compensate.
3822 __ movq(input_temp, input_reg); // Do not alter input_reg.
3823 __ subsd(input_temp, xmm_scratch);
3824 __ cvttsd2si(output_reg, input_temp);
3825 // Catch minint due to overflow, and to prevent overflow when compensating.
3826 __ cmpl(output_reg, Immediate(0x1));
3827 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3829 __ Cvtlsi2sd(xmm_scratch, output_reg);
3830 __ ucomisd(xmm_scratch, input_temp);
3831 __ j(equal, &done, dist);
3832 __ subl(output_reg, Immediate(1));
3833 // No overflow because we already ruled out minint.
3834 __ jmp(&done, dist);
3836 __ bind(&round_to_zero);
3837 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
3838 // we can ignore the difference between a result of -0 and +0.
3839 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3840 __ movq(output_reg, input_reg);
3841 __ testq(output_reg, output_reg);
    DeoptimizeIf(negative, instr, Deoptimizer::kMinusZero);
  }
  __ Set(output_reg, 0);
  __ bind(&done);
}
3849 void LCodeGen::DoMathFround(LMathFround* instr) {
3850 XMMRegister input_reg = ToDoubleRegister(instr->value());
3851 XMMRegister output_reg = ToDoubleRegister(instr->result());
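  // Round to float32 precision by narrowing to single precision and widening
  // back to double.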
3852 __ cvtsd2ss(output_reg, input_reg);
3853 __ cvtss2sd(output_reg, output_reg);
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  XMMRegister output = ToDoubleRegister(instr->result());
  if (instr->value()->IsDoubleRegister()) {
    XMMRegister input = ToDoubleRegister(instr->value());
    __ sqrtsd(output, input);
  } else {
    Operand input = ToOperand(instr->value());
    __ sqrtsd(output, input);
  }
}
3869 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3870 XMMRegister xmm_scratch = double_scratch0();
3871 XMMRegister input_reg = ToDoubleRegister(instr->value());
3872 DCHECK(ToDoubleRegister(instr->result()).is(input_reg));
3874 // Note that according to ECMA-262 15.8.2.13:
3875 // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done, sqrt;

  // Check base for -Infinity. According to IEEE-754, double-precision
3879 // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
3880 __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
3881 __ movq(xmm_scratch, kScratchRegister);
3882 __ ucomisd(xmm_scratch, input_reg);
3883 // Comparing -Infinity with NaN results in "unordered", which sets the
3884 // zero flag as if both were equal. However, it also sets the carry flag.
3885 __ j(not_equal, &sqrt, Label::kNear);
3886 __ j(carry, &sqrt, Label::kNear);
3887 // If input is -Infinity, return Infinity.
3888 __ xorps(input_reg, input_reg);
3889 __ subsd(input_reg, xmm_scratch);
  __ jmp(&done, Label::kNear);

  __ bind(&sqrt);
  __ xorps(xmm_scratch, xmm_scratch);
3895 __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
  __ sqrtsd(input_reg, input_reg);
  __ bind(&done);
}
3901 void LCodeGen::DoPower(LPower* instr) {
3902 Representation exponent_type = instr->hydrogen()->right()->representation();
3903 // Having marked this as a call, we can use any registers.
3904 // Just make sure that the input/output registers are the expected ones.
3906 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3907 DCHECK(!instr->right()->IsRegister() ||
3908 ToRegister(instr->right()).is(tagged_exponent));
3909 DCHECK(!instr->right()->IsDoubleRegister() ||
3910 ToDoubleRegister(instr->right()).is(xmm1));
3911 DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
3912 DCHECK(ToDoubleRegister(instr->result()).is(xmm3));
  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
    __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
    DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}
3936 void LCodeGen::DoMathExp(LMathExp* instr) {
3937 XMMRegister input = ToDoubleRegister(instr->value());
3938 XMMRegister result = ToDoubleRegister(instr->result());
3939 XMMRegister temp0 = double_scratch0();
3940 Register temp1 = ToRegister(instr->temp1());
3941 Register temp2 = ToRegister(instr->temp2());
3943 MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
3947 void LCodeGen::DoMathLog(LMathLog* instr) {
3948 DCHECK(instr->value()->Equals(instr->result()));
3949 XMMRegister input_reg = ToDoubleRegister(instr->value());
3950 XMMRegister xmm_scratch = double_scratch0();
3951 Label positive, done, zero;
3952 __ xorps(xmm_scratch, xmm_scratch);
3953 __ ucomisd(input_reg, xmm_scratch);
3954 __ j(above, &positive, Label::kNear);
3955 __ j(not_carry, &zero, Label::kNear);
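  // Negative input (or NaN): the result is NaN. pcmpeqd sets all bits, which
  // is a quiet NaN bit pattern.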
3956 __ pcmpeqd(input_reg, input_reg);
3957 __ jmp(&done, Label::kNear);
  __ bind(&zero);
  ExternalReference ninf =
      ExternalReference::address_of_negative_infinity();
3961 Operand ninf_operand = masm()->ExternalOperand(ninf);
3962 __ movsd(input_reg, ninf_operand);
3963 __ jmp(&done, Label::kNear);
  __ bind(&positive);
  __ fldln2();
  __ subp(rsp, Immediate(kDoubleSize));
  __ movsd(Operand(rsp, 0), input_reg);
  __ fld_d(Operand(rsp, 0));
  __ fyl2x();
  __ fstp_d(Operand(rsp, 0));
  __ movsd(input_reg, Operand(rsp, 0));
  __ addp(rsp, Immediate(kDoubleSize));
  __ bind(&done);
}
3977 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3978 Register input = ToRegister(instr->value());
3979 Register result = ToRegister(instr->result());
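  // Count leading zero bits; the macro assembler is expected to fall back to
  // a BSR-based sequence when the LZCNT instruction is unavailable.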
3981 __ Lzcntl(result, input);
3985 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3986 DCHECK(ToRegister(instr->context()).is(rsi));
3987 DCHECK(ToRegister(instr->function()).is(rdi));
3988 DCHECK(instr->HasPointerMap());
3990 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3991 if (known_function.is_null()) {
3992 LPointerMap* pointers = instr->pointer_map();
3993 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3994 ParameterCount count(instr->arity());
    __ InvokeFunction(rdi, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(), instr);
  }
}
4004 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4005 DCHECK(ToRegister(instr->context()).is(rsi));
4006 DCHECK(ToRegister(instr->function()).is(rdi));
4007 DCHECK(ToRegister(instr->result()).is(rax));
4009 int arity = instr->arity();
4010 CallFunctionFlags flags = instr->hydrogen()->function_flags();
4011 if (instr->hydrogen()->HasVectorAndSlot()) {
4012 Register slot_register = ToRegister(instr->temp_slot());
4013 Register vector_register = ToRegister(instr->temp_vector());
4014 DCHECK(slot_register.is(rdx));
4015 DCHECK(vector_register.is(rbx));
4017 AllowDeferredHandleDereference vector_structure_check;
4018 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
4019 int index = vector->GetIndex(instr->hydrogen()->slot());
4021 __ Move(vector_register, vector);
4022 __ Move(slot_register, Smi::FromInt(index));
4024 CallICState::CallType call_type =
4025 (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
    Handle<Code> ic =
        CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    CallFunctionStub stub(isolate(), arity, flags);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}
4037 void LCodeGen::DoCallNew(LCallNew* instr) {
4038 DCHECK(ToRegister(instr->context()).is(rsi));
4039 DCHECK(ToRegister(instr->constructor()).is(rdi));
4040 DCHECK(ToRegister(instr->result()).is(rax));
4042 __ Set(rax, instr->arity());
4043 // No cell in ebx for construct type feedback in optimized code
4044 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
4045 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
4046 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4050 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4051 DCHECK(ToRegister(instr->context()).is(rsi));
4052 DCHECK(ToRegister(instr->constructor()).is(rdi));
4053 DCHECK(ToRegister(instr->result()).is(rax));
4055 __ Set(rax, instr->arity());
4056 if (instr->arity() == 1) {
4057 // We only need the allocation site for the case we have a length argument.
4058 // The case may bail out to the runtime, which will determine the correct
4059 // elements kind with the site.
    __ Move(rbx, instr->hydrogen()->site());
  } else {
    __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
  }
4065 ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
4072 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
4073 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need a change here
      // look at the first argument
      __ movp(rcx, Operand(rsp, 0));
      __ testp(rcx, rcx);
      __ j(zero, &packed_case, Label::kNear);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
      __ jmp(&done, Label::kNear);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  }
}
4103 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4104 DCHECK(ToRegister(instr->context()).is(rsi));
4105 CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
4109 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4110 Register function = ToRegister(instr->function());
4111 Register code_object = ToRegister(instr->code_object());
4112 __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
4113 __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
4117 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4118 Register result = ToRegister(instr->result());
4119 Register base = ToRegister(instr->base_object());
4120 if (instr->offset()->IsConstantOperand()) {
4121 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ leap(result, Operand(base, ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ leap(result, Operand(base, offset, times_1, 0));
  }
}
4130 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4131 HStoreNamedField* hinstr = instr->hydrogen();
4132 Representation representation = instr->representation();
4134 HObjectAccess access = hinstr->access();
4135 int offset = access.offset();
4137 if (access.IsExternalMemory()) {
4138 DCHECK(!hinstr->NeedsWriteBarrier());
4139 Register value = ToRegister(instr->value());
4140 if (instr->object()->IsConstantOperand()) {
4141 DCHECK(value.is(rax));
4142 LConstantOperand* object = LConstantOperand::cast(instr->object());
      __ store_rax(ToExternalReference(object));
    } else {
      Register object = ToRegister(instr->object());
      __ Store(MemOperand(object, offset), value, representation);
    }
    return;
  }

  Register object = ToRegister(instr->object());
4152 __ AssertNotSmi(object);
4154 DCHECK(!representation.IsSmi() ||
4155 !instr->value()->IsConstantOperand() ||
4156 IsInteger32Constant(LConstantOperand::cast(instr->value())));
4157 if (!FLAG_unbox_double_fields && representation.IsDouble()) {
4158 DCHECK(access.IsInobject());
4159 DCHECK(!hinstr->has_transition());
4160 DCHECK(!hinstr->NeedsWriteBarrier());
4161 XMMRegister value = ToDoubleRegister(instr->value());
    __ movsd(FieldOperand(object, offset), value);
    return;
  }

  if (hinstr->has_transition()) {
4167 Handle<Map> transition = hinstr->transition_map();
4168 AddDeprecationDependency(transition);
4169 if (!hinstr->NeedsWriteBarrierForMap()) {
4170 __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
4172 Register temp = ToRegister(instr->temp());
      __ Move(kScratchRegister, transition);
      __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object,
                           kScratchRegister,
                           temp,
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register write_register = object;
4185 if (!access.IsInobject()) {
4186 write_register = ToRegister(instr->temp());
4187 __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
4190 if (representation.IsSmi() && SmiValuesAre32Bits() &&
4191 hinstr->value()->representation().IsInteger32()) {
4192 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4193 if (FLAG_debug_code) {
4194 Register scratch = kScratchRegister;
4195 __ Load(scratch, FieldOperand(write_register, offset), representation);
4196 __ AssertSmi(scratch);
4198 // Store int value directly to upper half of the smi.
4199 STATIC_ASSERT(kSmiTag == 0);
4200 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
4201 offset += kPointerSize / 2;
4202 representation = Representation::Integer32();
4205 Operand operand = FieldOperand(write_register, offset);
4207 if (FLAG_unbox_double_fields && representation.IsDouble()) {
4208 DCHECK(access.IsInobject());
4209 XMMRegister value = ToDoubleRegister(instr->value());
4210 __ movsd(operand, value);
4212 } else if (instr->value()->IsRegister()) {
4213 Register value = ToRegister(instr->value());
4214 __ Store(operand, value, representation);
4216 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4217 if (IsInteger32Constant(operand_value)) {
4218 DCHECK(!hinstr->NeedsWriteBarrier());
4219 int32_t value = ToInteger32(operand_value);
4220 if (representation.IsSmi()) {
4221 __ Move(operand, Smi::FromInt(value));
4224 __ movl(operand, Immediate(value));
4227 } else if (IsExternalConstant(operand_value)) {
4228 DCHECK(!hinstr->NeedsWriteBarrier());
4229 ExternalReference ptr = ToExternalReference(operand_value);
4230 __ Move(kScratchRegister, ptr);
      __ movp(operand, kScratchRegister);
    } else {
      Handle<Object> handle_value = ToHandle(operand_value);
      DCHECK(!hinstr->NeedsWriteBarrier());
      __ Move(operand, handle_value);
    }
  }
4239 if (hinstr->NeedsWriteBarrier()) {
4240 Register value = ToRegister(instr->value());
    Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
    // Update the write barrier for the object for in-object properties.
    __ RecordWriteField(write_register,
                        offset,
                        value,
                        temp,
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        hinstr->SmiCheckForWriteBarrier(),
                        hinstr->PointersToHereCheckForValue());
  }
}
4255 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4256 DCHECK(ToRegister(instr->context()).is(rsi));
4257 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4258 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4260 if (instr->hydrogen()->HasVectorAndSlot()) {
4261 EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
4264 __ Move(StoreDescriptor::NameRegister(), instr->hydrogen()->name());
4265 Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
4266 isolate(), instr->language_mode(),
4267 instr->hydrogen()->initialization_state()).code();
4268 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4272 void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
4273 DCHECK(ToRegister(instr->context()).is(rsi));
4274 DCHECK(ToRegister(instr->value())
4275 .is(StoreGlobalViaContextDescriptor::ValueRegister()));
4276 int const slot = instr->slot_index();
4277 int const depth = instr->depth();
4278 if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
4279 __ Set(StoreGlobalViaContextDescriptor::SlotRegister(), slot);
4280 Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
                            isolate(), depth, instr->language_mode())
                            .code();
    CallCode(stub, RelocInfo::CODE_TARGET, instr);
  } else {
    __ Push(Smi::FromInt(slot));
4286 __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
4287 __ CallRuntime(is_strict(instr->language_mode())
4288 ? Runtime::kStoreGlobalViaContext_Strict
                       : Runtime::kStoreGlobalViaContext_Sloppy,
                   2);
  }
}
4295 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4296 Representation representation = instr->hydrogen()->length()->representation();
4297 DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
4298 DCHECK(representation.IsSmiOrInteger32());
4300 Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal;
4301 if (instr->length()->IsConstantOperand()) {
4302 int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
4303 Register index = ToRegister(instr->index());
4304 if (representation.IsSmi()) {
4305 __ Cmp(index, Smi::FromInt(length));
4307 __ cmpl(index, Immediate(length));
4309 cc = CommuteCondition(cc);
4310 } else if (instr->index()->IsConstantOperand()) {
4311 int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
4312 if (instr->length()->IsRegister()) {
4313 Register length = ToRegister(instr->length());
4314 if (representation.IsSmi()) {
4315 __ Cmp(length, Smi::FromInt(index));
4317 __ cmpl(length, Immediate(index));
4320 Operand length = ToOperand(instr->length());
4321 if (representation.IsSmi()) {
4322 __ Cmp(length, Smi::FromInt(index));
4324 __ cmpl(length, Immediate(index));
4328 Register index = ToRegister(instr->index());
4329 if (instr->length()->IsRegister()) {
4330 Register length = ToRegister(instr->length());
4331 if (representation.IsSmi()) {
4332 __ cmpp(length, index);
4334 __ cmpl(length, index);
4337 Operand length = ToOperand(instr->length());
4338 if (representation.IsSmi()) {
4339 __ cmpp(length, index);
4341 __ cmpl(length, index);
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
  }
}
4356 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4357 ElementsKind elements_kind = instr->elements_kind();
4358 LOperand* key = instr->key();
4359 if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
4360 Register key_reg = ToRegister(key);
4361 Representation key_representation =
4362 instr->hydrogen()->key()->representation();
4363 if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
4364 __ SmiToInteger64(key_reg, key_reg);
4365 } else if (instr->hydrogen()->IsDehoisted()) {
4366 // Sign extend key because it could be a 32 bit negative value
4367 // and the dehoisted address computation happens in 64 bits
4368 __ movsxlq(key_reg, key_reg);
  }

  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      instr->base_offset()));
4378 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4379 elements_kind == FLOAT32_ELEMENTS) {
4380 XMMRegister value(ToDoubleRegister(instr->value()));
4381 __ cvtsd2ss(value, value);
4382 __ movss(operand, value);
4383 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4384 elements_kind == FLOAT64_ELEMENTS) {
    __ movsd(operand, ToDoubleRegister(instr->value()));
  } else {
    Register value(ToRegister(instr->value()));
    switch (elements_kind) {
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_INT8_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case INT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ movb(operand, value);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case EXTERNAL_UINT16_ELEMENTS:
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ movw(operand, value);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case EXTERNAL_UINT32_ELEMENTS:
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ movl(operand, value);
        break;
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}
4429 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4430 XMMRegister value = ToDoubleRegister(instr->value());
4431 LOperand* key = instr->key();
4432 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
4433 instr->hydrogen()->IsDehoisted()) {
4434 // Sign extend key because it could be a 32 bit negative value
4435 // and the dehoisted address computation happens in 64 bits
4436 __ movsxlq(ToRegister(key), ToRegister(key));
4438 if (instr->NeedsCanonicalization()) {
4439 XMMRegister xmm_scratch = double_scratch0();
4440 // Turn potential sNaN value into qNaN.
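    // Subtracting zero passes the value through an arithmetic operation,
    // which quiets a signaling NaN without changing any other value.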
4441 __ xorps(xmm_scratch, xmm_scratch);
4442 __ subsd(value, xmm_scratch);
  }

  Operand double_store_operand = BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      instr->base_offset());
4452 __ movsd(double_store_operand, value);
4456 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4457 HStoreKeyed* hinstr = instr->hydrogen();
4458 LOperand* key = instr->key();
4459 int offset = instr->base_offset();
4460 Representation representation = hinstr->value()->representation();
4462 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
4463 instr->hydrogen()->IsDehoisted()) {
4464 // Sign extend key because it could be a 32 bit negative value
4465 // and the dehoisted address computation happens in 64 bits
4466 __ movsxlq(ToRegister(key), ToRegister(key));
4468 if (representation.IsInteger32() && SmiValuesAre32Bits()) {
4469 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4470 DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
4471 if (FLAG_debug_code) {
4472 Register scratch = kScratchRegister;
4474 BuildFastArrayOperand(instr->elements(),
4476 instr->hydrogen()->key()->representation(),
4479 Representation::Smi());
4480 __ AssertSmi(scratch);
    // Store int value directly to upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    DCHECK(kSmiTagSize + kSmiShiftSize == 32);
    offset += kPointerSize / 2;
  }

  Operand operand =
      BuildFastArrayOperand(instr->elements(),
                            key,
                            instr->hydrogen()->key()->representation(),
                            FAST_ELEMENTS,
                            offset);
  if (instr->value()->IsRegister()) {
4495 __ Store(operand, ToRegister(instr->value()), representation);
4497 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4498 if (IsInteger32Constant(operand_value)) {
4499 int32_t value = ToInteger32(operand_value);
4500 if (representation.IsSmi()) {
4501 __ Move(operand, Smi::FromInt(value));
4504 __ movl(operand, Immediate(value));
4507 Handle<Object> handle_value = ToHandle(operand_value);
4508 __ Move(operand, handle_value);
4512 if (hinstr->NeedsWriteBarrier()) {
4513 Register elements = ToRegister(instr->elements());
4514 DCHECK(instr->value()->IsRegister());
4515 Register value = ToRegister(instr->value());
4516 DCHECK(!key->IsConstantOperand());
4517 SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
4518 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    Register key_reg(ToRegister(key));
    __ leap(key_reg, operand);
    __ RecordWrite(elements,
                   key_reg,
                   value,
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed,
                   hinstr->PointersToHereCheckForValue());
  }
}
4533 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4534 if (instr->is_typed_elements()) {
4535 DoStoreKeyedExternalArray(instr);
4536 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}
4544 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4545 DCHECK(ToRegister(instr->context()).is(rsi));
4546 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4547 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4548 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4550 if (instr->hydrogen()->HasVectorAndSlot()) {
4551 EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
4554 Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
4555 isolate(), instr->language_mode(),
4556 instr->hydrogen()->initialization_state()).code();
4557 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4561 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
4562 class DeferredMaybeGrowElements final : public LDeferredCode {
4564 DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
4565 : LDeferredCode(codegen), instr_(instr) {}
4566 void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
4567 LInstruction* instr() override { return instr_; }
4570 LMaybeGrowElements* instr_;
4573 Register result = rax;
4574 DeferredMaybeGrowElements* deferred =
4575 new (zone()) DeferredMaybeGrowElements(this, instr);
4576 LOperand* key = instr->key();
4577 LOperand* current_capacity = instr->current_capacity();
4579 DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
4580 DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
4581 DCHECK(key->IsConstantOperand() || key->IsRegister());
4582 DCHECK(current_capacity->IsConstantOperand() ||
4583 current_capacity->IsRegister());
4585 if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
4586 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4587 int32_t constant_capacity =
4588 ToInteger32(LConstantOperand::cast(current_capacity));
4589 if (constant_key >= constant_capacity) {
4591 __ jmp(deferred->entry());
4593 } else if (key->IsConstantOperand()) {
4594 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4595 __ cmpl(ToRegister(current_capacity), Immediate(constant_key));
4596 __ j(less_equal, deferred->entry());
4597 } else if (current_capacity->IsConstantOperand()) {
4598 int32_t constant_capacity =
4599 ToInteger32(LConstantOperand::cast(current_capacity));
4600 __ cmpl(ToRegister(key), Immediate(constant_capacity));
4601 __ j(greater_equal, deferred->entry());
4603 __ cmpl(ToRegister(key), ToRegister(current_capacity));
4604 __ j(greater_equal, deferred->entry());
4607 if (instr->elements()->IsRegister()) {
4608 __ movp(result, ToRegister(instr->elements()));
4610 __ movp(result, ToOperand(instr->elements()));
4613 __ bind(deferred->exit());
4617 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
4618 // TODO(3095996): Get rid of this. For now, we need to make the
4619 // result register contain a valid pointer because it is already
4620 // contained in the register pointer map.
4621 Register result = rax;
4622 __ Move(result, Smi::FromInt(0));
4624 // We have to call a stub.
4626 PushSafepointRegistersScope scope(this);
4627 if (instr->object()->IsConstantOperand()) {
4628 LConstantOperand* constant_object =
4629 LConstantOperand::cast(instr->object());
4630 if (IsSmiConstant(constant_object)) {
4631 Smi* immediate = ToSmi(constant_object);
4632 __ Move(result, immediate);
4634 Handle<Object> handle_value = ToHandle(constant_object);
4635 __ Move(result, handle_value);
4637 } else if (instr->object()->IsRegister()) {
4638 __ Move(result, ToRegister(instr->object()));
4640 __ movp(result, ToOperand(instr->object()));
4643 LOperand* key = instr->key();
4644 if (key->IsConstantOperand()) {
      __ Move(rbx, ToSmi(LConstantOperand::cast(key)));
    } else {
      __ Move(rbx, ToRegister(key));
      __ Integer32ToSmi(rbx, rbx);
    }

    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
                               instr->hydrogen()->kind());
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
4655 __ StoreToSafepointRegisterSlot(result, result);
4658 // Deopt on smi, which means the elements array changed to dictionary mode.
4659 Condition is_smi = __ CheckSmi(result);
4660 DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
4664 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4665 Register object_reg = ToRegister(instr->object());
4667 Handle<Map> from_map = instr->original_map();
4668 Handle<Map> to_map = instr->transitioned_map();
4669 ElementsKind from_kind = instr->from_kind();
4670 ElementsKind to_kind = instr->to_kind();
4672 Label not_applicable;
4673 __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
4674 __ j(not_equal, ¬_applicable);
4675 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4676 Register new_map_reg = ToRegister(instr->new_map_temp());
4677 __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
4678 __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
    __ RecordWriteForMap(object_reg, new_map_reg, ToRegister(instr->temp()),
                         kDontSaveFPRegs);
  } else {
    DCHECK(object_reg.is(rax));
4684 DCHECK(ToRegister(instr->context()).is(rsi));
4685 PushSafepointRegistersScope scope(this);
4686 __ Move(rbx, to_map);
4687 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
  }
  __ bind(&not_applicable);
}
4696 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4697 Register object = ToRegister(instr->object());
4698 Register temp = ToRegister(instr->temp());
4699 Label no_memento_found;
4700 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4701 DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
4702 __ bind(&no_memento_found);
4706 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4707 DCHECK(ToRegister(instr->context()).is(rsi));
4708 DCHECK(ToRegister(instr->left()).is(rdx));
4709 DCHECK(ToRegister(instr->right()).is(rax));
4710 StringAddStub stub(isolate(),
4711 instr->hydrogen()->flags(),
4712 instr->hydrogen()->pretenure_flag());
4713 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4717 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4718 class DeferredStringCharCodeAt final : public LDeferredCode {
4720 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4721 : LDeferredCode(codegen), instr_(instr) { }
4722 void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
4723 LInstruction* instr() override { return instr_; }
4726 LStringCharCodeAt* instr_;
4729 DeferredStringCharCodeAt* deferred =
4730 new(zone()) DeferredStringCharCodeAt(this, instr);
4732 StringCharLoadGenerator::Generate(masm(),
4733 ToRegister(instr->string()),
4734 ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}
4741 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4742 Register string = ToRegister(instr->string());
4743 Register result = ToRegister(instr->result());
4745 // TODO(3095996): Get rid of this. For now, we need to make the
4746 // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
4752 // Push the index as a smi. This is safe because of the checks in
4753 // DoStringCharCodeAt above.
4754 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
4755 if (instr->index()->IsConstantOperand()) {
4756 int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4757 __ Push(Smi::FromInt(const_index));
  } else {
    Register index = ToRegister(instr->index());
    __ Integer32ToSmi(index, index);
    __ Push(index);
  }
  CallRuntimeFromDeferred(
4764 Runtime::kStringCharCodeAtRT, 2, instr, instr->context());
4766 __ SmiToInteger32(rax, rax);
4767 __ StoreToSafepointRegisterSlot(result, rax);
4771 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4772 class DeferredStringCharFromCode final : public LDeferredCode {
4774 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4775 : LDeferredCode(codegen), instr_(instr) { }
4776 void Generate() override {
4777 codegen()->DoDeferredStringCharFromCode(instr_);
4779 LInstruction* instr() override { return instr_; }
4782 LStringCharFromCode* instr_;
4785 DeferredStringCharFromCode* deferred =
4786 new(zone()) DeferredStringCharFromCode(this, instr);
4788 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4789 Register char_code = ToRegister(instr->char_code());
4790 Register result = ToRegister(instr->result());
4791 DCHECK(!char_code.is(result));
4793 __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
4794 __ j(above, deferred->entry());
4795 __ movsxlq(char_code, char_code);
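  // Fast path: look the code up in the single-character string cache and fall
  // back to the deferred runtime call if the cache entry is undefined.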
4796 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4797 __ movp(result, FieldOperand(result,
4798 char_code, times_pointer_size,
4799 FixedArray::kHeaderSize));
4800 __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
4801 __ j(equal, deferred->entry());
4802 __ bind(deferred->exit());
4806 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4807 Register char_code = ToRegister(instr->char_code());
4808 Register result = ToRegister(instr->result());
4810 // TODO(3095996): Get rid of this. For now, we need to make the
4811 // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  __ Integer32ToSmi(char_code, char_code);
  __ Push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4819 __ StoreToSafepointRegisterSlot(result, rax);
4823 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4824 LOperand* input = instr->value();
4825 DCHECK(input->IsRegister() || input->IsStackSlot());
4826 LOperand* output = instr->result();
4827 DCHECK(output->IsDoubleRegister());
4828 if (input->IsRegister()) {
4829 __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
4831 __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
4836 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4837 LOperand* input = instr->value();
4838 LOperand* output = instr->result();
4840 __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
4844 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4845 class DeferredNumberTagI final : public LDeferredCode {
4847 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4848 : LDeferredCode(codegen), instr_(instr) { }
4849 void Generate() override {
4850 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4851 instr_->temp2(), SIGNED_INT32);
4853 LInstruction* instr() override { return instr_; }
4856 LNumberTagI* instr_;
4859 LOperand* input = instr->value();
4860 DCHECK(input->IsRegister() && input->Equals(instr->result()));
4861 Register reg = ToRegister(input);
4863 if (SmiValuesAre32Bits()) {
4864 __ Integer32ToSmi(reg, reg);
4866 DCHECK(SmiValuesAre31Bits());
4867 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4868 __ Integer32ToSmi(reg, reg);
4869 __ j(overflow, deferred->entry());
4870 __ bind(deferred->exit());
4875 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4876 class DeferredNumberTagU final : public LDeferredCode {
4878 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4879 : LDeferredCode(codegen), instr_(instr) { }
4880 void Generate() override {
4881 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4882 instr_->temp2(), UNSIGNED_INT32);
4884 LInstruction* instr() override { return instr_; }
4887 LNumberTagU* instr_;
4890 LOperand* input = instr->value();
4891 DCHECK(input->IsRegister() && input->Equals(instr->result()));
4892 Register reg = ToRegister(input);
4894 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4895 __ cmpl(reg, Immediate(Smi::kMaxValue));
4896 __ j(above, deferred->entry());
4897 __ Integer32ToSmi(reg, reg);
4898 __ bind(deferred->exit());
void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp1,
                                     LOperand* temp2,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register reg = ToRegister(value);
  Register tmp = ToRegister(temp1);
4910 XMMRegister temp_xmm = ToDoubleRegister(temp2);
4912 // Load value into temp_xmm which will be preserved across potential call to
4913 // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
4914 // XMM registers on x64).
4915 if (signedness == SIGNED_INT32) {
4916 DCHECK(SmiValuesAre31Bits());
4917 // There was overflow, so bits 30 and 31 of the original integer
4918 // disagree. Try to allocate a heap number in new space and store
4919 // the value in there. If that fails, call the runtime system.
4920 __ SmiToInteger32(reg, reg);
4921 __ xorl(reg, Immediate(0x80000000));
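    // The shift out and back in during tagging flipped bit 31, so restoring
    // it recovers the original int32 value.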
4922 __ cvtlsi2sd(temp_xmm, reg);
4924 DCHECK(signedness == UNSIGNED_INT32);
4925 __ LoadUint32(temp_xmm, reg);
4928 if (FLAG_inline_new) {
4929 __ AllocateHeapNumber(reg, tmp, &slow);
4930 __ jmp(&done, kPointerSize == kInt64Size ? Label::kNear : Label::kFar);
  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // Put a valid pointer value in the stack slot where the result
4937 // register is stored, as this register is in the pointer map, but contains
    // an integer value.
    __ Set(reg, 0);

    // Preserve the value of all registers.
4942 PushSafepointRegistersScope scope(this);
4944 // NumberTagIU uses the context from the frame, rather than
4945 // the environment's HContext or HInlinedContext value.
4946 // They only call Runtime::kAllocateHeapNumber.
4947 // The corresponding HChange instructions are added in a phase that does
4948 // not have easy access to the local context.
4949 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
4950 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4951 RecordSafepointWithRegisters(
4952 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4953 __ StoreToSafepointRegisterSlot(reg, rax);
  }

  // Done. Put the value in temp_xmm into the value of the allocated heap
  // number.
  __ bind(&done);
  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
}
4963 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4964 class DeferredNumberTagD final : public LDeferredCode {
4966 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4967 : LDeferredCode(codegen), instr_(instr) { }
4968 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
4969 LInstruction* instr() override { return instr_; }
4972 LNumberTagD* instr_;
4975 XMMRegister input_reg = ToDoubleRegister(instr->value());
4976 Register reg = ToRegister(instr->result());
4977 Register tmp = ToRegister(instr->temp());
4979 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4980 if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
4986 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
4990 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4991 // TODO(3095996): Get rid of this. For now, we need to make the
4992 // result register contain a valid pointer because it is already
4993 // contained in the register pointer map.
4994 Register reg = ToRegister(instr->result());
4995 __ Move(reg, Smi::FromInt(0));
4998 PushSafepointRegistersScope scope(this);
4999 // NumberTagD uses the context from the frame, rather than
5000 // the environment's HContext or HInlinedContext value.
5001 // They only call Runtime::kAllocateHeapNumber.
5002 // The corresponding HChange instructions are added in a phase that does
5003 // not have easy access to the local context.
5004 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
5005 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
5006 RecordSafepointWithRegisters(
5007 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5008 __ movp(kScratchRegister, rax);
5010 __ movp(reg, kScratchRegister);
5014 void LCodeGen::DoSmiTag(LSmiTag* instr) {
5015 HChange* hchange = instr->hydrogen();
5016 Register input = ToRegister(instr->value());
5017 Register output = ToRegister(instr->result());
5018 if (hchange->CheckFlag(HValue::kCanOverflow) &&
5019 hchange->value()->CheckFlag(HValue::kUint32)) {
5020 Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
5021 DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kOverflow);
5023 __ Integer32ToSmi(output, input);
5024 if (hchange->CheckFlag(HValue::kCanOverflow) &&
5025 !hchange->value()->CheckFlag(HValue::kUint32)) {
5026 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
5031 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
5032 DCHECK(instr->value()->Equals(instr->result()));
5033 Register input = ToRegister(instr->value());
5034 if (instr->needs_check()) {
5035 Condition is_smi = __ CheckSmi(input);
5036 DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kNotASmi);
5038 __ AssertSmi(input);
5040 __ SmiToInteger32(input, input);
5044 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
5045 XMMRegister result_reg, NumberUntagDMode mode) {
5046 bool can_convert_undefined_to_nan =
5047 instr->hydrogen()->can_convert_undefined_to_nan();
5048 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
5050 Label convert, load_smi, done;
5052 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
5054 __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
5056 // Heap number map check.
5057 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
5058 Heap::kHeapNumberMapRootIndex);
5060 // On x64 it is safe to load at heap number offset before evaluating the map
5061 // check, since all heap objects are at least two words long.
5062 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
5064 if (can_convert_undefined_to_nan) {
5065 __ j(not_equal, &convert, Label::kNear);
5067 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
5070 if (deoptimize_on_minus_zero) {
5071 XMMRegister xmm_scratch = double_scratch0();
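    // -0.0 compares equal to +0.0, so inspect the sign bit via movmskpd to
    // tell them apart.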
5072 __ xorps(xmm_scratch, xmm_scratch);
5073 __ ucomisd(xmm_scratch, result_reg);
5074 __ j(not_equal, &done, Label::kNear);
5075 __ movmskpd(kScratchRegister, result_reg);
5076 __ testq(kScratchRegister, Immediate(1));
5077 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
5079 __ jmp(&done, Label::kNear);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);

      // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
5085 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
5086 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
      __ pcmpeqd(result_reg, result_reg);
      __ jmp(&done, Label::kNear);
    }
  } else {
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to XMM conversion
  __ bind(&load_smi);
  __ SmiToInteger32(kScratchRegister, input_reg);
  __ Cvtlsi2sd(result_reg, kScratchRegister);
  __ bind(&done);
}
5103 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
5104 Register input_reg = ToRegister(instr->value());
5106 if (instr->truncating()) {
5107 Label no_heap_number, check_bools, check_false;
5109 // Heap number map check.
5110 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
5111 Heap::kHeapNumberMapRootIndex);
5112 __ j(not_equal, &no_heap_number, Label::kNear);
    __ TruncateHeapNumberToI(input_reg, input_reg);
    __ jmp(done);

    __ bind(&no_heap_number);
5117 // Check for Oddballs. Undefined/False is converted to zero and True to one
5118 // for truncating conversions.
5119 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
5120 __ j(not_equal, &check_bools, Label::kNear);
    __ Set(input_reg, 0);
    __ jmp(done);

    __ bind(&check_bools);
    __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
    __ j(not_equal, &check_false, Label::kNear);
    __ Set(input_reg, 1);
    __ jmp(done);

    __ bind(&check_false);
5131 __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
5132 DeoptimizeIf(not_equal, instr,
5133 Deoptimizer::kNotAHeapNumberUndefinedBoolean);
    __ Set(input_reg, 0);
  } else {
    XMMRegister scratch = ToDoubleRegister(instr->temp());
5137 DCHECK(!scratch.is(xmm0));
5138 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
5139 Heap::kHeapNumberMapRootIndex);
5140 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
5141 __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
5142 __ cvttsd2si(input_reg, xmm0);
5143 __ Cvtlsi2sd(scratch, input_reg);
5144 __ ucomisd(xmm0, scratch);
5145 DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
5146 DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
5147 if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
5148 __ testl(input_reg, input_reg);
5149 __ j(not_zero, done);
5150 __ movmskpd(input_reg, xmm0);
5151 __ andl(input_reg, Immediate(1));
5152 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
5158 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5159 class DeferredTaggedToI final : public LDeferredCode {
5161 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
5162 : LDeferredCode(codegen), instr_(instr) { }
5163 void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
5164 LInstruction* instr() override { return instr_; }
5170 LOperand* input = instr->value();
5171 DCHECK(input->IsRegister());
5172 DCHECK(input->Equals(instr->result()));
5173 Register input_reg = ToRegister(input);
5175 if (instr->hydrogen()->value()->representation().IsSmi()) {
5176 __ SmiToInteger32(input_reg, input_reg);
5178 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
5179 __ JumpIfNotSmi(input_reg, deferred->entry());
5180 __ SmiToInteger32(input_reg, input_reg);
5181 __ bind(deferred->exit());
5186 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5187 LOperand* input = instr->value();
5188 DCHECK(input->IsRegister());
5189 LOperand* result = instr->result();
5190 DCHECK(result->IsDoubleRegister());
5192 Register input_reg = ToRegister(input);
5193 XMMRegister result_reg = ToDoubleRegister(result);
5195 HValue* value = instr->hydrogen()->value();
5196 NumberUntagDMode mode = value->representation().IsSmi()
5197 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5199 EmitNumberUntagD(instr, input_reg, result_reg, mode);
5203 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5204 LOperand* input = instr->value();
5205 DCHECK(input->IsDoubleRegister());
5206 LOperand* result = instr->result();
5207 DCHECK(result->IsRegister());
5209 XMMRegister input_reg = ToDoubleRegister(input);
5210 Register result_reg = ToRegister(result);
5212 if (instr->truncating()) {
5213 __ TruncateDoubleToI(result_reg, input_reg);
5215 Label lost_precision, is_nan, minus_zero, done;
5216 XMMRegister xmm_scratch = double_scratch0();
5217 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5218 __ DoubleToI(result_reg, input_reg, xmm_scratch,
5219 instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
5220 &is_nan, &minus_zero, dist);
5221 __ jmp(&done, dist);
5222 __ bind(&lost_precision);
    DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
    __ bind(&is_nan);
    DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
5226 __ bind(&minus_zero);
    DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
    __ bind(&done);
  }
}
5233 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5234 LOperand* input = instr->value();
5235 DCHECK(input->IsDoubleRegister());
5236 LOperand* result = instr->result();
5237 DCHECK(result->IsRegister());
5239 XMMRegister input_reg = ToDoubleRegister(input);
5240 Register result_reg = ToRegister(result);
5242 Label lost_precision, is_nan, minus_zero, done;
5243 XMMRegister xmm_scratch = double_scratch0();
5244 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5245 __ DoubleToI(result_reg, input_reg, xmm_scratch,
               instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
               &minus_zero, dist);
  __ jmp(&done, dist);
5249 __ bind(&lost_precision);
  DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
  __ bind(&is_nan);
  DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
5253 __ bind(&minus_zero);
  DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
  __ bind(&done);
  __ Integer32ToSmi(result_reg, result_reg);
5257 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
5261 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5262 LOperand* input = instr->value();
5263 Condition cc = masm()->CheckSmi(ToRegister(input));
5264 DeoptimizeIf(NegateCondition(cc), instr, Deoptimizer::kNotASmi);
5268 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5269 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5270 LOperand* input = instr->value();
5271 Condition cc = masm()->CheckSmi(ToRegister(input));
5272 DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
5277 void LCodeGen::DoCheckArrayBufferNotNeutered(
5278 LCheckArrayBufferNotNeutered* instr) {
5279 Register view = ToRegister(instr->view());
5281 __ movp(kScratchRegister,
5282 FieldOperand(view, JSArrayBufferView::kBufferOffset));
5283 __ testb(FieldOperand(kScratchRegister, JSArrayBuffer::kBitFieldOffset),
5284 Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
5285 DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
5289 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5290 Register input = ToRegister(instr->value());
5292 __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);
5299 __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
5300 Immediate(static_cast<int8_t>(first)));
5302 // If there is only one type in the interval check for equality.
5303 if (first == last) {
      DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
    } else {
      DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
                Immediate(static_cast<int8_t>(last)));
        DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
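    // A power-of-two mask can be checked with a single bit test; otherwise
    // the instance type is masked and compared against the full tag.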
5319 if (base::bits::IsPowerOfTwo32(mask)) {
5320 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
               Immediate(mask));
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
                   Deoptimizer::kWrongInstanceType);
    } else {
      __ movzxbl(kScratchRegister,
5327 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
5328 __ andb(kScratchRegister, Immediate(mask));
5329 __ cmpb(kScratchRegister, Immediate(tag));
5330 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
5336 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5337 Register reg = ToRegister(instr->value());
5338 __ Cmp(reg, instr->hydrogen()->object().handle());
5339 DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
5343 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  PushSafepointRegistersScope scope(this);
  __ Push(object);

  __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5349 RecordSafepointWithRegisters(
5350 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5352 __ testp(rax, Immediate(kSmiTagMask));
5354 DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
5358 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5359 class DeferredCheckMaps final : public LDeferredCode {
5361 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5362 : LDeferredCode(codegen), instr_(instr), object_(object) {
5363 SetExit(check_maps());
5365 void Generate() override {
5366 codegen()->DoDeferredInstanceMigration(instr_, object_);
5368 Label* check_maps() { return &check_maps_; }
5369 LInstruction* instr() override { return instr_; }
  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  LOperand* input = instr->value();
5386 DCHECK(input->IsRegister());
5387 Register reg = ToRegister(input);
5389 DeferredCheckMaps* deferred = NULL;
5390 if (instr->hydrogen()->HasMigrationTarget()) {
5391 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5392 __ bind(deferred->check_maps());
  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
5398 Handle<Map> map = maps->at(i).handle();
5399 __ CompareMap(reg, map);
5400 __ j(equal, &success, Label::kNear);
  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(reg, map);
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ j(not_equal, deferred->entry());
  } else {
    DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
  }

  __ bind(&success);
}
5415 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5416 XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
5417 XMMRegister xmm_scratch = double_scratch0();
5418 Register result_reg = ToRegister(instr->result());
5419 __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
5423 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5424 DCHECK(instr->unclamped()->Equals(instr->result()));
5425 Register value_reg = ToRegister(instr->result());
5426 __ ClampUint8(value_reg);
5430 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5431 DCHECK(instr->unclamped()->Equals(instr->result()));
5432 Register input_reg = ToRegister(instr->unclamped());
5433 XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
5434 XMMRegister xmm_scratch = double_scratch0();
5435 Label is_smi, done, heap_number;
5436 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5437 __ JumpIfSmi(input_reg, &is_smi, dist);
5439 // Check for heap number
5440 __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5441 factory()->heap_number_map());
5442 __ j(equal, &heap_number, Label::kNear);
  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
5446 __ Cmp(input_reg, factory()->undefined_value());
5447 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
5448 __ xorl(input_reg, input_reg);
5449 __ jmp(&done, Label::kNear);
  __ bind(&heap_number);
  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // smi
  __ bind(&is_smi);
  __ SmiToInteger32(input_reg, input_reg);
  __ ClampUint8(input_reg);

  __ bind(&done);
}
5466 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5467 XMMRegister value_reg = ToDoubleRegister(instr->value());
5468 Register result_reg = ToRegister(instr->result());
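  // Extract the requested 32-bit half of the double's bit pattern.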
5469 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5470 __ movq(result_reg, value_reg);
    __ shrq(result_reg, Immediate(32));
  } else {
    __ movd(result_reg, value_reg);
  }
}
5478 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5479 Register hi_reg = ToRegister(instr->hi());
5480 Register lo_reg = ToRegister(instr->lo());
5481 XMMRegister result_reg = ToDoubleRegister(instr->result());
5482 XMMRegister xmm_scratch = double_scratch0();
5483 __ movd(result_reg, hi_reg);
5484 __ psllq(result_reg, 32);
5485 __ movd(xmm_scratch, lo_reg);
5486 __ orps(result_reg, xmm_scratch);
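// Illustrative note, not part of the original source: DoDoubleBits and
// DoConstructDouble above only shuffle raw bits between general-purpose and
// XMM registers. A minimal C++ sketch of the same bit manipulation:
//
//   uint64_t bits = (uint64_t{hi} << 32) | uint64_t{lo};   // combine the halves
//   double value;
//   std::memcpy(&value, &bits, sizeof value);              // reinterpret as double
//   uint32_t high = static_cast<uint32_t>(bits >> 32);     // DoDoubleBits(HIGH)
//   uint32_t low  = static_cast<uint32_t>(bits);           // DoDoubleBits(LOW)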
5490 void LCodeGen::DoAllocate(LAllocate* instr) {
5491 class DeferredAllocate final : public LDeferredCode {
5493 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5494 : LDeferredCode(codegen), instr_(instr) { }
5495 void Generate() override { codegen()->DoDeferredAllocate(instr_); }
5496 LInstruction* instr() override { return instr_; }
5502 DeferredAllocate* deferred =
5503 new(zone()) DeferredAllocate(this, instr);
5505 Register result = ToRegister(instr->result());
5506 Register temp = ToRegister(instr->temp());
5508 // Allocate memory for the object.
5509 AllocationFlags flags = TAG_OBJECT;
5510 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5511 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5513 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5514 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5515 flags = static_cast<AllocationFlags>(flags | PRETENURE);
5518 if (instr->size()->IsConstantOperand()) {
5519 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5520 if (size <= Page::kMaxRegularHeapObjectSize) {
5521 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5523 __ jmp(deferred->entry());
5526 Register size = ToRegister(instr->size());
5527 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5530 __ bind(deferred->exit());
5532 if (instr->hydrogen()->MustPrefillWithFiller()) {
5533 if (instr->size()->IsConstantOperand()) {
5534 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5535 __ movl(temp, Immediate((size / kPointerSize) - 1));
5537 temp = ToRegister(instr->size());
5538 __ sarp(temp, Immediate(kPointerSizeLog2));
5543 __ Move(FieldOperand(result, temp, times_pointer_size, 0),
5544 isolate()->factory()->one_pointer_filler_map());
5546 __ j(not_zero, &loop);
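// Illustrative note, not part of the original source: DoAllocate above first
// tries an inline bump-pointer allocation and falls back to the deferred
// runtime path when that fails, or immediately when a constant size exceeds
// Page::kMaxRegularHeapObjectSize. Sketch (try_inline_allocate and
// runtime_allocate are placeholders for the generated fast path and
// DoDeferredAllocate):
//
//   if (size_is_constant && size > Page::kMaxRegularHeapObjectSize) {
//     result = runtime_allocate(size, flags);        // deferred path only
//   } else if (!try_inline_allocate(size, flags, &result)) {
//     result = runtime_allocate(size, flags);        // deferred fallback
//   }
//   if (must_prefill) fill_with_one_pointer_filler(result, size);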
5551 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5552 Register result = ToRegister(instr->result());
5554 // TODO(3095996): Get rid of this. For now, we need to make the
5555 // result register contain a valid pointer because it is already
5556 // contained in the register pointer map.
5557 __ Move(result, Smi::FromInt(0));
5559 PushSafepointRegistersScope scope(this);
5560 if (instr->size()->IsRegister()) {
5561 Register size = ToRegister(instr->size());
5562 DCHECK(!size.is(result));
5563     __ Integer32ToSmi(size, size);
5564     __ Push(size);
5565   } else {
5566 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5567 __ Push(Smi::FromInt(size));
5571 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5572 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5573 flags = AllocateTargetSpace::update(flags, OLD_SPACE);
5575 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5577 __ Push(Smi::FromInt(flags));
5579 CallRuntimeFromDeferred(
5580 Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5581 __ StoreToSafepointRegisterSlot(result, rax);
5585 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5586 DCHECK(ToRegister(instr->value()).is(rax));
5587   __ Push(rax);
5588   CallRuntime(Runtime::kToFastProperties, 1, instr);
5592 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5593 DCHECK(ToRegister(instr->context()).is(rsi));
5595 // Registers will be used as follows:
5596 // rcx = literals array.
5597 // rbx = regexp literal.
5598 // rax = regexp literal clone.
5599 int literal_offset =
5600 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5601 __ Move(rcx, instr->hydrogen()->literals());
5602 __ movp(rbx, FieldOperand(rcx, literal_offset));
5603 __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
5604 __ j(not_equal, &materialized, Label::kNear);
5606   // Create the regexp literal using the runtime function.
5607 // Result will be in rax.
5608   __ Push(rcx);
5609   __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
5610 __ Push(instr->hydrogen()->pattern());
5611 __ Push(instr->hydrogen()->flags());
5612 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5615 __ bind(&materialized);
5616 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5617 Label allocated, runtime_allocate;
5618 __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
5619 __ jmp(&allocated, Label::kNear);
5621 __ bind(&runtime_allocate);
5623 __ Push(Smi::FromInt(size));
5624 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5627 __ bind(&allocated);
5628 // Copy the content into the newly allocated memory.
5629 // (Unroll copy loop once for better throughput).
5630 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5631 __ movp(rdx, FieldOperand(rbx, i));
5632 __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
5633 __ movp(FieldOperand(rax, i), rdx);
5634 __ movp(FieldOperand(rax, i + kPointerSize), rcx);
5636 if ((size % (2 * kPointerSize)) != 0) {
5637 __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
5638 __ movp(FieldOperand(rax, size - kPointerSize), rdx);
5643 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5644 DCHECK(ToRegister(instr->context()).is(rsi));
5645 // Use the fast case closure allocation code that allocates in new
5646 // space for nested functions that don't need literals cloning.
5647 bool pretenure = instr->hydrogen()->pretenure();
5648 if (!pretenure && instr->hydrogen()->has_no_literals()) {
5649 FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
5650 instr->hydrogen()->kind());
5651 __ Move(rbx, instr->hydrogen()->shared_info());
5652 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5653   } else {
5654     __ Push(rsi);
5655     __ Push(instr->hydrogen()->shared_info());
5656 __ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
5657 Heap::kFalseValueRootIndex);
5658 CallRuntime(Runtime::kNewClosure, 3, instr);
5663 void LCodeGen::DoTypeof(LTypeof* instr) {
5664 DCHECK(ToRegister(instr->context()).is(rsi));
5665 DCHECK(ToRegister(instr->value()).is(rbx));
5667 Register value_register = ToRegister(instr->value());
5668 __ JumpIfNotSmi(value_register, &do_call);
5669 __ Move(rax, isolate()->factory()->number_string());
5672 TypeofStub stub(isolate());
5673 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5678 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
5679 DCHECK(!operand->IsDoubleRegister());
5680 if (operand->IsConstantOperand()) {
5681 __ Push(ToHandle(LConstantOperand::cast(operand)));
5682 } else if (operand->IsRegister()) {
5683 __ Push(ToRegister(operand));
5685 __ Push(ToOperand(operand));
5690 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5691 Register input = ToRegister(instr->value());
5692 Condition final_branch_condition = EmitTypeofIs(instr, input);
5693 if (final_branch_condition != no_condition) {
5694 EmitBranch(instr, final_branch_condition);
5699 Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
5700 Label* true_label = instr->TrueLabel(chunk_);
5701 Label* false_label = instr->FalseLabel(chunk_);
5702 Handle<String> type_name = instr->type_literal();
5703 int left_block = instr->TrueDestination(chunk_);
5704 int right_block = instr->FalseDestination(chunk_);
5705 int next_block = GetNextEmittedBlock();
5707   Label::Distance true_distance = left_block == next_block ? Label::kNear
5708                                                             : Label::kFar;
5709   Label::Distance false_distance = right_block == next_block ? Label::kNear
5710                                                               : Label::kFar;
5711 Condition final_branch_condition = no_condition;
5712 Factory* factory = isolate()->factory();
5713 if (String::Equals(type_name, factory->number_string())) {
5714 __ JumpIfSmi(input, true_label, true_distance);
5715 __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
5716 Heap::kHeapNumberMapRootIndex);
5718 final_branch_condition = equal;
5720 } else if (String::Equals(type_name, factory->string_string())) {
5721 __ JumpIfSmi(input, false_label, false_distance);
5722 __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
5723 __ j(above_equal, false_label, false_distance);
5724 __ testb(FieldOperand(input, Map::kBitFieldOffset),
5725 Immediate(1 << Map::kIsUndetectable));
5726 final_branch_condition = zero;
5728 } else if (String::Equals(type_name, factory->symbol_string())) {
5729 __ JumpIfSmi(input, false_label, false_distance);
5730 __ CmpObjectType(input, SYMBOL_TYPE, input);
5731 final_branch_condition = equal;
5733 } else if (String::Equals(type_name, factory->boolean_string())) {
5734 __ CompareRoot(input, Heap::kTrueValueRootIndex);
5735 __ j(equal, true_label, true_distance);
5736 __ CompareRoot(input, Heap::kFalseValueRootIndex);
5737 final_branch_condition = equal;
5739 } else if (String::Equals(type_name, factory->undefined_string())) {
5740 __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
5741 __ j(equal, true_label, true_distance);
5742 __ JumpIfSmi(input, false_label, false_distance);
5743 // Check for undetectable objects => true.
5744 __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
5745 __ testb(FieldOperand(input, Map::kBitFieldOffset),
5746 Immediate(1 << Map::kIsUndetectable));
5747 final_branch_condition = not_zero;
5749 } else if (String::Equals(type_name, factory->function_string())) {
5750 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5751 __ JumpIfSmi(input, false_label, false_distance);
5752 __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
5753 __ j(equal, true_label, true_distance);
5754 __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
5755 final_branch_condition = equal;
5757 } else if (String::Equals(type_name, factory->object_string())) {
5758 __ JumpIfSmi(input, false_label, false_distance);
5759 __ CompareRoot(input, Heap::kNullValueRootIndex);
5760 __ j(equal, true_label, true_distance);
5761 __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
5762 __ j(below, false_label, false_distance);
5763 __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
5764 __ j(above, false_label, false_distance);
5765 // Check for undetectable objects => false.
5766 __ testb(FieldOperand(input, Map::kBitFieldOffset),
5767 Immediate(1 << Map::kIsUndetectable));
5768 final_branch_condition = zero;
5770 } else if (String::Equals(type_name, factory->float32x4_string())) {
5771 __ JumpIfSmi(input, false_label, false_distance);
5772 __ CmpObjectType(input, FLOAT32X4_TYPE, input);
5773 final_branch_condition = equal;
5776 __ jmp(false_label, false_distance);
5779 return final_branch_condition;
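// Illustrative note, not part of the original source: the cases open-coded by
// EmitTypeofIs above correspond to the following typeof results:
//
//   "number"    -> smi, or object with the heap-number map
//   "string"    -> instance type below FIRST_NONSTRING_TYPE and not undetectable
//   "symbol"    -> SYMBOL_TYPE
//   "boolean"   -> the true or false root value
//   "undefined" -> the undefined root value, or an undetectable object
//   "function"  -> JS_FUNCTION_TYPE or JS_FUNCTION_PROXY_TYPE
//   "float32x4" -> FLOAT32X4_TYPE
//   "object"    -> null, or a non-callable spec object that is not undetectable
//   anything else falls through to an unconditional jump to the false label.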
5783 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5784 Register temp = ToRegister(instr->temp());
5786 EmitIsConstructCall(temp);
5787 EmitBranch(instr, equal);
5791 void LCodeGen::EmitIsConstructCall(Register temp) {
5792 // Get the frame pointer for the calling frame.
5793 __ movp(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
5795 // Skip the arguments adaptor frame if it exists.
5796 Label check_frame_marker;
5797 __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
5798 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
5799 __ j(not_equal, &check_frame_marker, Label::kNear);
5800 __ movp(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
5802 // Check the marker in the calling frame.
5803 __ bind(&check_frame_marker);
5804 __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
5805 Smi::FromInt(StackFrame::CONSTRUCT));
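// Illustrative note, not part of the original source: EmitIsConstructCall
// inspects the caller's frame, skipping over an arguments-adaptor frame if one
// is present. Sketch, with caller_fp/context_slot/marker_slot as hypothetical
// helpers that read the corresponding frame slots:
//
//   Address fp = caller_fp(current_frame);                       // calling frame
//   if (context_slot(fp) == Smi(StackFrame::ARGUMENTS_ADAPTOR))
//     fp = caller_fp(fp);                                        // skip adaptor frame
//   bool is_construct = marker_slot(fp) == Smi(StackFrame::CONSTRUCT);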
5809 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5810 if (!info()->IsStub()) {
5811 // Ensure that we have enough space after the previous lazy-bailout
5812 // instruction for patching the code here.
5813 int current_pc = masm()->pc_offset();
5814 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5815 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5816 __ Nop(padding_size);
5819 last_lazy_deopt_pc_ = masm()->pc_offset();
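// Illustrative note, not part of the original source: the padding above keeps
// consecutive lazy-deopt sites far enough apart that the patched-in call fits.
// A worked example with hypothetical numbers, assuming a patch size of 13
// bytes:
//
//   last_lazy_deopt_pc_ = 40, space_needed = 13, current_pc = 45
//   padding_size = 40 + 13 - 45 = 8    // emit 8 bytes of nops
//   // last_lazy_deopt_pc_ is then reset to the new pc_offset()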
5823 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5824 last_lazy_deopt_pc_ = masm()->pc_offset();
5825 DCHECK(instr->HasEnvironment());
5826 LEnvironment* env = instr->environment();
5827 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5828 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5832 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5833 Deoptimizer::BailoutType type = instr->hydrogen()->type();
5834 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5835 // needed return address), even though the implementation of LAZY and EAGER is
5836 // now identical. When LAZY is eventually completely folded into EAGER, remove
5837 // the special case below.
5838 if (info()->IsStub() && type == Deoptimizer::EAGER) {
5839 type = Deoptimizer::LAZY;
5841 DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
5845 void LCodeGen::DoDummy(LDummy* instr) {
5846 // Nothing to see here, move on!
5850 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5851 // Nothing to see here, move on!
5855 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5856 PushSafepointRegistersScope scope(this);
5857 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
5858 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5859 RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
5860 DCHECK(instr->HasEnvironment());
5861 LEnvironment* env = instr->environment();
5862 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5866 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5867 class DeferredStackCheck final : public LDeferredCode {
5869 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5870 : LDeferredCode(codegen), instr_(instr) { }
5871 void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
5872 LInstruction* instr() override { return instr_; }
5875 LStackCheck* instr_;
5878 DCHECK(instr->HasEnvironment());
5879 LEnvironment* env = instr->environment();
5880 // There is no LLazyBailout instruction for stack-checks. We have to
5881 // prepare for lazy deoptimization explicitly here.
5882 if (instr->hydrogen()->is_function_entry()) {
5883 // Perform stack overflow check.
5885 __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
5886 __ j(above_equal, &done, Label::kNear);
5888 DCHECK(instr->context()->IsRegister());
5889 DCHECK(ToRegister(instr->context()).is(rsi));
5890 CallCode(isolate()->builtins()->StackCheck(),
5891              RelocInfo::CODE_TARGET,
5892              instr);
5895 DCHECK(instr->hydrogen()->is_backwards_branch());
5896 // Perform stack overflow check if this goto needs it before jumping.
5897 DeferredStackCheck* deferred_stack_check =
5898 new(zone()) DeferredStackCheck(this, instr);
5899 __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
5900 __ j(below, deferred_stack_check->entry());
5901 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5902 __ bind(instr->done_label());
5903 deferred_stack_check->SetExit(instr->done_label());
5904 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5905 // Don't record a deoptimization index for the safepoint here.
5906   // This will be done explicitly when emitting the call and the safepoint in
5907 // the deferred code.
5912 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5913 // This is a pseudo-instruction that ensures that the environment here is
5914   // properly registered for deoptimization and records the assembler's PC
5915   // offset.
5916 LEnvironment* environment = instr->environment();
5918 // If the environment were already registered, we would have no way of
5919 // backpatching it with the spill slot operands.
5920 DCHECK(!environment->HasBeenRegistered());
5921 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5923 GenerateOsrPrologue();
5927 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5928 DCHECK(ToRegister(instr->context()).is(rsi));
5930 Condition cc = masm()->CheckSmi(rax);
5931 DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
5933 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5934 __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
5935 DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
5937 Label use_cache, call_runtime;
5938 Register null_value = rdi;
5939 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5940 __ CheckEnumCache(null_value, &call_runtime);
5942 __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
5943 __ jmp(&use_cache, Label::kNear);
5945 // Get the set of properties to enumerate.
5946 __ bind(&call_runtime);
5947   __ Push(rax);
5948   CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5950 __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
5951 Heap::kMetaMapRootIndex);
5952 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
5953 __ bind(&use_cache);
5957 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5958 Register map = ToRegister(instr->map());
5959 Register result = ToRegister(instr->result());
5960 Label load_cache, done;
5961 __ EnumLength(result, map);
5962 __ Cmp(result, Smi::FromInt(0));
5963 __ j(not_equal, &load_cache, Label::kNear);
5964 __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
5965 __ jmp(&done, Label::kNear);
5966 __ bind(&load_cache);
5967 __ LoadInstanceDescriptors(map, result);
5968   __ movp(result,
5969           FieldOperand(result, DescriptorArray::kEnumCacheOffset));
5970   __ movp(result,
5971           FieldOperand(result, FixedArray::SizeFor(instr->idx())));
5973 Condition cc = masm()->CheckSmi(result);
5974 DeoptimizeIf(cc, instr, Deoptimizer::kNoCache);
5978 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5979 Register object = ToRegister(instr->value());
5980 __ cmpp(ToRegister(instr->map()),
5981 FieldOperand(object, HeapObject::kMapOffset));
5982 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
5986 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5987                                            Register object,
5988                                            Register index) {
5989 PushSafepointRegistersScope scope(this);
5990   __ Push(object);
5991   __ Push(index);
5992   __ xorp(rsi, rsi);
5993   __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5994 RecordSafepointWithRegisters(
5995 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5996 __ StoreToSafepointRegisterSlot(object, rax);
6000 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
6001 class DeferredLoadMutableDouble final : public LDeferredCode {
6003     DeferredLoadMutableDouble(LCodeGen* codegen,
6004                               LLoadFieldByIndex* instr,
6005                               Register object,
6006                               Register index)
6007         : LDeferredCode(codegen),
6008           instr_(instr),
6009           object_(object),
6010           index_(index) {
6011     }
6012 void Generate() override {
6013 codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
6015 LInstruction* instr() override { return instr_; }
6018 LLoadFieldByIndex* instr_;
6023 Register object = ToRegister(instr->object());
6024 Register index = ToRegister(instr->index());
6026 DeferredLoadMutableDouble* deferred;
6027 deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);
6029 Label out_of_object, done;
6030 __ Move(kScratchRegister, Smi::FromInt(1));
6031 __ testp(index, kScratchRegister);
6032 __ j(not_zero, deferred->entry());
6034 __ sarp(index, Immediate(1));
6036 __ SmiToInteger32(index, index);
6037 __ cmpl(index, Immediate(0));
6038 __ j(less, &out_of_object, Label::kNear);
6039   __ movp(object, FieldOperand(object,
6040                                index,
6041                                times_pointer_size,
6042                                JSObject::kHeaderSize));
6043 __ jmp(&done, Label::kNear);
6045 __ bind(&out_of_object);
6046 __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
6048   // Index is now equal to the out-of-object property index plus 1.
6049   __ movp(object, FieldOperand(object,
6050                                index,
6051                                times_pointer_size,
6052                                FixedArray::kHeaderSize - kPointerSize));
6053 __ bind(deferred->exit());
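// Illustrative note, not part of the original source: the index operand above
// is a smi with extra information packed into its value. A sketch of the
// decoding, using a plain integer for clarity:
//
//   bool is_mutable_double = (index & 1) != 0;  // bit 0: take the deferred path
//   int field_index = index >> 1;               // remaining bits: field index
//   // field_index >= 0: load directly from the object (in-object property)
//   // field_index <  0: load from the properties backing store (out-of-object),
//   //                   offset by one slot, hence the "- kPointerSize" above.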
6058 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
6059 Register context = ToRegister(instr->context());
6060 __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), context);
6064 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
6065 Handle<ScopeInfo> scope_info = instr->scope_info();
6066 __ Push(scope_info);
6067 __ Push(ToRegister(instr->function()));
6068 CallRuntime(Runtime::kPushBlockContext, 2, instr);
6069 RecordSafepoint(Safepoint::kNoLazyDeopt);
6075 }  // namespace internal
6076 }  // namespace v8
6078 #endif // V8_TARGET_ARCH_X64