1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
9 #include "src/base/bits.h"
10 #include "src/code-factory.h"
11 #include "src/code-stubs.h"
12 #include "src/cpu-profiler.h"
13 #include "src/hydrogen-osr.h"
14 #include "src/ic/ic.h"
15 #include "src/ic/stub-cache.h"
16 #include "src/x64/lithium-codegen-x64.h"
22 // When invoking builtins, we need to record the safepoint in the middle of
23 // the invoke instruction sequence generated by the macro assembler.
24 class SafepointGenerator final : public CallWrapper {
26 SafepointGenerator(LCodeGen* codegen,
27 LPointerMap* pointers,
28 Safepoint::DeoptMode mode)
32 virtual ~SafepointGenerator() {}
34 void BeforeCall(int call_size) const override {}
36 void AfterCall() const override {
37 codegen_->RecordSafepoint(pointers_, deopt_mode_);
42 LPointerMap* pointers_;
43 Safepoint::DeoptMode deopt_mode_;
49 bool LCodeGen::GenerateCode() {
50 LPhase phase("Z_Code generation", chunk());
54 // Open a frame scope to indicate that there is a frame on the stack. The
55 // MANUAL indicates that the scope shouldn't actually generate code to set up
56 // the frame (that is done in GeneratePrologue).
57 FrameScope frame_scope(masm_, StackFrame::MANUAL);
59 return GeneratePrologue() &&
61 GenerateDeferredCode() &&
62 GenerateJumpTable() &&
63 GenerateSafepointTable();
67 void LCodeGen::FinishCode(Handle<Code> code) {
69 code->set_stack_slots(GetStackSlotCount());
70 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
71 PopulateDeoptimizationData(code);
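// Note on the helper below: it stores one word into each 4 KB page of the
// freshly reserved stack area, walking down from just below the old stack
// pointer. Touching every page in order is presumably what lets the OS
// grow/commit the stack via its guard-page mechanism before the code starts
// writing to slots that may lie several pages below; the value stored (rax)
// does not matter, only the access does.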
76 void LCodeGen::MakeSureStackPagesMapped(int offset) {
77 const int kPageSize = 4 * KB;
78 for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
79 __ movp(Operand(rsp, offset), rax);
85 void LCodeGen::SaveCallerDoubles() {
86 DCHECK(info()->saves_caller_doubles());
87 DCHECK(NeedsEagerFrame());
88 Comment(";;; Save clobbered caller double registers");
90 BitVector* doubles = chunk()->allocated_double_registers();
91 BitVector::Iterator save_iterator(doubles);
92 while (!save_iterator.Done()) {
93 __ movsd(MemOperand(rsp, count * kDoubleSize),
94 XMMRegister::FromAllocationIndex(save_iterator.Current()));
95 save_iterator.Advance();
101 void LCodeGen::RestoreCallerDoubles() {
102 DCHECK(info()->saves_caller_doubles());
103 DCHECK(NeedsEagerFrame());
104 Comment(";;; Restore clobbered caller double registers");
105 BitVector* doubles = chunk()->allocated_double_registers();
106 BitVector::Iterator save_iterator(doubles);
108 while (!save_iterator.Done()) {
109 __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
110 MemOperand(rsp, count * kDoubleSize));
111 save_iterator.Advance();
117 bool LCodeGen::GeneratePrologue() {
118 DCHECK(is_generating());
120 if (info()->IsOptimizing()) {
121 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
124 if (strlen(FLAG_stop_at) > 0 &&
125 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
130 // Sloppy mode functions need to replace the receiver with the global proxy
131 // when called as functions (without an explicit receiver object).
132 if (is_sloppy(info()->language_mode()) && info()->MayUseThis() &&
133 !info()->is_native() && info()->scope()->has_this_declaration()) {
135 StackArgumentsAccessor args(rsp, scope()->num_parameters());
136 __ movp(rcx, args.GetReceiverOperand());
138 __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
139 __ j(not_equal, &ok, Label::kNear);
141 __ movp(rcx, GlobalObjectOperand());
142 __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
144 __ movp(args.GetReceiverOperand(), rcx);
150 info()->set_prologue_offset(masm_->pc_offset());
151 if (NeedsEagerFrame()) {
152 DCHECK(!frame_is_built_);
153 frame_is_built_ = true;
154 if (info()->IsStub()) {
157 __ Prologue(info()->IsCodePreAgingActive());
159 info()->AddNoFrameRange(0, masm_->pc_offset());
162 // Reserve space for the stack slots needed by the code.
163 int slots = GetStackSlotCount();
165 if (FLAG_debug_code) {
166 __ subp(rsp, Immediate(slots * kPointerSize));
168 MakeSureStackPagesMapped(slots * kPointerSize);
172 __ Set(kScratchRegister, kSlotsZapValue);
175 __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
178 __ j(not_zero, &loop);
181 __ subp(rsp, Immediate(slots * kPointerSize));
183 MakeSureStackPagesMapped(slots * kPointerSize);
187 if (info()->saves_caller_doubles()) {
192 // Possibly allocate a local context.
193 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
194 if (heap_slots > 0) {
195 Comment(";;; Allocate local context");
196 bool need_write_barrier = true;
197 // Argument to NewContext is the function, which is still in rdi.
198 DCHECK(!info()->scope()->is_script_scope());
199 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
200 FastNewContextStub stub(isolate(), heap_slots);
202 // Result of FastNewContextStub is always in new space.
203 need_write_barrier = false;
206 __ CallRuntime(Runtime::kNewFunctionContext, 1);
208 RecordSafepoint(Safepoint::kNoLazyDeopt);
209 // Context is returned in rax. It replaces the context passed to us.
210 // It's saved in the stack and kept live in rsi.
212 __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);
214 // Copy any necessary parameters into the context.
215 int num_parameters = scope()->num_parameters();
216 int first_parameter = scope()->has_this_declaration() ? -1 : 0;
217 for (int i = first_parameter; i < num_parameters; i++) {
218 Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
219 if (var->IsContextSlot()) {
220 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
221 (num_parameters - 1 - i) * kPointerSize;
222 // Load parameter from stack.
223 __ movp(rax, Operand(rbp, parameter_offset));
224 // Store it in the context.
225 int context_offset = Context::SlotOffset(var->index());
226 __ movp(Operand(rsi, context_offset), rax);
227 // Update the write barrier. This clobbers rax and rbx.
228 if (need_write_barrier) {
229 __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
230 } else if (FLAG_debug_code) {
232 __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear);
233 __ Abort(kExpectedNewSpaceObject);
238 Comment(";;; End allocate local context");
242 if (FLAG_trace && info()->IsOptimizing()) {
243 __ CallRuntime(Runtime::kTraceEnter, 0);
245 return !is_aborted();
249 void LCodeGen::GenerateOsrPrologue() {
250 // Generate the OSR entry prologue at the first unknown OSR value, or if there
251 // are none, at the OSR entrypoint instruction.
252 if (osr_pc_offset_ >= 0) return;
254 osr_pc_offset_ = masm()->pc_offset();
256 // Adjust the frame size, subsuming the unoptimized frame into the optimized frame.
258 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
260 __ subp(rsp, Immediate(slots * kPointerSize));
264 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
265 if (instr->IsCall()) {
266 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
268 if (!instr->IsLazyBailout() && !instr->IsGap()) {
269 safepoints_.BumpLastLazySafepointIndex();
274 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
275 if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() &&
276 instr->hydrogen_value()->representation().IsInteger32() &&
277 instr->result()->IsRegister()) {
278 __ AssertZeroExtended(ToRegister(instr->result()));
281 if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
282 // We sign extend the dehoisted key at the definition point when the pointer
283 // size is 64-bit. For x32 port, we sign extend the dehoisted key at the use
284 // points and MustSignExtendResult is always false. We can't use
285 // STATIC_ASSERT here as the pointer size is 32-bit for x32.
286 DCHECK(kPointerSize == kInt64Size);
287 if (instr->result()->IsRegister()) {
288 Register result_reg = ToRegister(instr->result());
289 __ movsxlq(result_reg, result_reg);
291 // Sign extend the 32bit result in the stack slots.
292 DCHECK(instr->result()->IsStackSlot());
293 Operand src = ToOperand(instr->result());
294 __ movsxlq(kScratchRegister, src);
295 __ movq(src, kScratchRegister);
301 bool LCodeGen::GenerateJumpTable() {
302 if (jump_table_.length() == 0) return !is_aborted();
305 Comment(";;; -------------------- Jump table --------------------");
306 for (int i = 0; i < jump_table_.length(); i++) {
307 Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
308 __ bind(&table_entry->label);
309 Address entry = table_entry->address;
310 DeoptComment(table_entry->deopt_info);
311 if (table_entry->needs_frame) {
312 DCHECK(!info()->saves_caller_doubles());
313 __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
314 __ call(&needs_frame);
316 if (info()->saves_caller_doubles()) {
317 DCHECK(info()->IsStub());
318 RestoreCallerDoubles();
320 __ call(entry, RelocInfo::RUNTIME_ENTRY);
322 info()->LogDeoptCallPosition(masm()->pc_offset(),
323 table_entry->deopt_info.inlining_id);
326 if (needs_frame.is_linked()) {
327 __ bind(&needs_frame);
329 // 4: return address <-- rsp
335 // Reserve space for context and stub marker.
336 __ subp(rsp, Immediate(2 * kPointerSize));
337 __ Push(MemOperand(rsp, 2 * kPointerSize)); // Copy return address.
338 __ Push(kScratchRegister); // Save entry address for ret(0)
345 // 0: entry address <-- rsp
348 // Remember context pointer.
349 __ movp(kScratchRegister,
350 MemOperand(rbp, StandardFrameConstants::kContextOffset));
351 // Save context pointer into the stack frame.
352 __ movp(MemOperand(rsp, 3 * kPointerSize), kScratchRegister);
354 // Create a stack frame.
355 __ movp(MemOperand(rsp, 4 * kPointerSize), rbp);
356 __ leap(rbp, MemOperand(rsp, 4 * kPointerSize));
358 // This variant of deopt can only be used with stubs. Since we don't
359 // have a function pointer to install in the stack frame that we're
360 // building, install a special marker there instead.
361 DCHECK(info()->IsStub());
362 __ Move(MemOperand(rsp, 2 * kPointerSize), Smi::FromInt(StackFrame::STUB));
369 // 0: entry address <-- rsp
374 return !is_aborted();
378 bool LCodeGen::GenerateDeferredCode() {
379 DCHECK(is_generating());
380 if (deferred_.length() > 0) {
381 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
382 LDeferredCode* code = deferred_[i];
385 instructions_->at(code->instruction_index())->hydrogen_value();
386 RecordAndWritePosition(
387 chunk()->graph()->SourcePositionToScriptPosition(value->position()));
389 Comment(";;; <@%d,#%d> "
390 "-------------------- Deferred %s --------------------",
391 code->instruction_index(),
392 code->instr()->hydrogen_value()->id(),
393 code->instr()->Mnemonic());
394 __ bind(code->entry());
395 if (NeedsDeferredFrame()) {
396 Comment(";;; Build frame");
397 DCHECK(!frame_is_built_);
398 DCHECK(info()->IsStub());
399 frame_is_built_ = true;
400 // Build the frame in such a way that rsi isn't trashed.
401 __ pushq(rbp); // Caller's frame pointer.
402 __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
403 __ Push(Smi::FromInt(StackFrame::STUB));
404 __ leap(rbp, Operand(rsp, 2 * kPointerSize));
405 Comment(";;; Deferred code");
408 if (NeedsDeferredFrame()) {
409 __ bind(code->done());
410 Comment(";;; Destroy frame");
411 DCHECK(frame_is_built_);
412 frame_is_built_ = false;
416 __ jmp(code->exit());
420 // Deferred code is the last part of the instruction sequence. Mark
421 // the generated code as done unless we bailed out.
422 if (!is_aborted()) status_ = DONE;
423 return !is_aborted();
427 bool LCodeGen::GenerateSafepointTable() {
429 safepoints_.Emit(masm(), GetStackSlotCount());
430 return !is_aborted();
434 Register LCodeGen::ToRegister(int index) const {
435 return Register::FromAllocationIndex(index);
439 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
440 return XMMRegister::FromAllocationIndex(index);
444 Register LCodeGen::ToRegister(LOperand* op) const {
445 DCHECK(op->IsRegister());
446 return ToRegister(op->index());
450 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
451 DCHECK(op->IsDoubleRegister());
452 return ToDoubleRegister(op->index());
456 bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
457 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
461 bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
462 return op->IsConstantOperand() &&
463 chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
467 bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
468 return chunk_->LookupLiteralRepresentation(op).IsSmi();
472 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
473 return ToRepresentation(op, Representation::Integer32());
477 int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
478 const Representation& r) const {
479 HConstant* constant = chunk_->LookupConstant(op);
480 int32_t value = constant->Integer32Value();
481 if (r.IsInteger32()) return value;
482 DCHECK(SmiValuesAre31Bits() && r.IsSmiOrTagged());
483 return static_cast<int32_t>(reinterpret_cast<intptr_t>(Smi::FromInt(value)));
487 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
488 HConstant* constant = chunk_->LookupConstant(op);
489 return Smi::FromInt(constant->Integer32Value());
493 double LCodeGen::ToDouble(LConstantOperand* op) const {
494 HConstant* constant = chunk_->LookupConstant(op);
495 DCHECK(constant->HasDoubleValue());
496 return constant->DoubleValue();
500 ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
501 HConstant* constant = chunk_->LookupConstant(op);
502 DCHECK(constant->HasExternalReferenceValue());
503 return constant->ExternalReferenceValue();
507 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
508 HConstant* constant = chunk_->LookupConstant(op);
509 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
510 return constant->handle(isolate());
514 static int ArgumentsOffsetWithoutFrame(int index) {
516 return -(index + 1) * kPointerSize + kPCOnStackSize;
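// For illustration (assuming negative operand indices denote incoming
// argument slots when no eager frame has been built): with rsp pointing at
// the return address, index -1 maps to offset kPCOnStackSize, i.e. the slot
// immediately above the return address, index -2 to one pointer higher, and
// so on.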
520 Operand LCodeGen::ToOperand(LOperand* op) const {
521 // Does not handle registers. In X64 assembler, plain registers are not
522 // representable as an Operand.
523 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
524 if (NeedsEagerFrame()) {
525 return Operand(rbp, StackSlotOffset(op->index()));
527 // Retrieve parameter without eager stack-frame, relative to the stack pointer.
529 return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
534 void LCodeGen::WriteTranslation(LEnvironment* environment,
535 Translation* translation) {
536 if (environment == NULL) return;
538 // The translation includes one command per value in the environment.
539 int translation_size = environment->translation_size();
541 WriteTranslation(environment->outer(), translation);
542 WriteTranslationFrame(environment, translation);
544 int object_index = 0;
545 int dematerialized_index = 0;
546 for (int i = 0; i < translation_size; ++i) {
547 LOperand* value = environment->values()->at(i);
549 environment, translation, value, environment->HasTaggedValueAt(i),
550 environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
555 void LCodeGen::AddToTranslation(LEnvironment* environment,
556 Translation* translation,
560 int* object_index_pointer,
561 int* dematerialized_index_pointer) {
562 if (op == LEnvironment::materialization_marker()) {
563 int object_index = (*object_index_pointer)++;
564 if (environment->ObjectIsDuplicateAt(object_index)) {
565 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
566 translation->DuplicateObject(dupe_of);
569 int object_length = environment->ObjectLengthAt(object_index);
570 if (environment->ObjectIsArgumentsAt(object_index)) {
571 translation->BeginArgumentsObject(object_length);
573 translation->BeginCapturedObject(object_length);
575 int dematerialized_index = *dematerialized_index_pointer;
576 int env_offset = environment->translation_size() + dematerialized_index;
577 *dematerialized_index_pointer += object_length;
578 for (int i = 0; i < object_length; ++i) {
579 LOperand* value = environment->values()->at(env_offset + i);
580 AddToTranslation(environment,
583 environment->HasTaggedValueAt(env_offset + i),
584 environment->HasUint32ValueAt(env_offset + i),
585 object_index_pointer,
586 dematerialized_index_pointer);
591 if (op->IsStackSlot()) {
593 translation->StoreStackSlot(op->index());
594 } else if (is_uint32) {
595 translation->StoreUint32StackSlot(op->index());
597 translation->StoreInt32StackSlot(op->index());
599 } else if (op->IsDoubleStackSlot()) {
600 translation->StoreDoubleStackSlot(op->index());
601 } else if (op->IsRegister()) {
602 Register reg = ToRegister(op);
604 translation->StoreRegister(reg);
605 } else if (is_uint32) {
606 translation->StoreUint32Register(reg);
608 translation->StoreInt32Register(reg);
610 } else if (op->IsDoubleRegister()) {
611 XMMRegister reg = ToDoubleRegister(op);
612 translation->StoreDoubleRegister(reg);
613 } else if (op->IsConstantOperand()) {
614 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
615 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
616 translation->StoreLiteral(src_index);
623 void LCodeGen::CallCodeGeneric(Handle<Code> code,
624 RelocInfo::Mode mode,
626 SafepointMode safepoint_mode,
628 DCHECK(instr != NULL);
630 RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
632 // Signal that we don't inline smi code before these stubs in the
633 // optimizing code generator.
634 if (code->kind() == Code::BINARY_OP_IC ||
635 code->kind() == Code::COMPARE_IC) {
641 void LCodeGen::CallCode(Handle<Code> code,
642 RelocInfo::Mode mode,
643 LInstruction* instr) {
644 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
648 void LCodeGen::CallRuntime(const Runtime::Function* function,
651 SaveFPRegsMode save_doubles) {
652 DCHECK(instr != NULL);
653 DCHECK(instr->HasPointerMap());
655 __ CallRuntime(function, num_arguments, save_doubles);
657 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
661 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
662 if (context->IsRegister()) {
663 if (!ToRegister(context).is(rsi)) {
664 __ movp(rsi, ToRegister(context));
666 } else if (context->IsStackSlot()) {
667 __ movp(rsi, ToOperand(context));
668 } else if (context->IsConstantOperand()) {
669 HConstant* constant =
670 chunk_->LookupConstant(LConstantOperand::cast(context));
671 __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
679 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
683 LoadContextFromDeferred(context);
685 __ CallRuntimeSaveDoubles(id);
686 RecordSafepointWithRegisters(
687 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
691 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
692 Safepoint::DeoptMode mode) {
693 environment->set_has_been_used();
694 if (!environment->HasBeenRegistered()) {
695 // Physical stack frame layout:
696 // -x ............. -4 0 ..................................... y
697 // [incoming arguments] [spill slots] [pushed outgoing arguments]
699 // Layout of the environment:
700 // 0 ..................................................... size-1
701 // [parameters] [locals] [expression stack including arguments]
703 // Layout of the translation:
704 // 0 ........................................................ size - 1 + 4
705 // [expression stack including arguments] [locals] [4 words] [parameters]
706 // |>------------ translation_size ------------<|
709 int jsframe_count = 0;
710 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
712 if (e->frame_type() == JS_FUNCTION) {
716 Translation translation(&translations_, frame_count, jsframe_count, zone());
717 WriteTranslation(environment, &translation);
718 int deoptimization_index = deoptimizations_.length();
719 int pc_offset = masm()->pc_offset();
720 environment->Register(deoptimization_index,
722 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
723 deoptimizations_.Add(environment, environment->zone());
728 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
729 Deoptimizer::DeoptReason deopt_reason,
730 Deoptimizer::BailoutType bailout_type) {
731 LEnvironment* environment = instr->environment();
732 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
733 DCHECK(environment->HasBeenRegistered());
734 int id = environment->deoptimization_index();
735 DCHECK(info()->IsOptimizing() || info()->IsStub());
737 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
739 Abort(kBailoutWasNotPrepared);
743 if (DeoptEveryNTimes()) {
744 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
748 Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
749 __ movl(rax, count_operand);
750 __ subl(rax, Immediate(1));
751 __ j(not_zero, &no_deopt, Label::kNear);
752 if (FLAG_trap_on_deopt) __ int3();
753 __ movl(rax, Immediate(FLAG_deopt_every_n_times));
754 __ movl(count_operand, rax);
757 DCHECK(frame_is_built_);
758 __ call(entry, RelocInfo::RUNTIME_ENTRY);
760 __ movl(count_operand, rax);
765 if (info()->ShouldTrapOnDeopt()) {
767 if (cc != no_condition) {
768 __ j(NegateCondition(cc), &done, Label::kNear);
774 Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
776 DCHECK(info()->IsStub() || frame_is_built_);
777 // Go through jump table if we need to handle condition, build frame, or
778 // restore caller doubles.
779 if (cc == no_condition && frame_is_built_ &&
780 !info()->saves_caller_doubles()) {
781 DeoptComment(deopt_info);
782 __ call(entry, RelocInfo::RUNTIME_ENTRY);
783 info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
785 Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
787 // We often have several deopts to the same entry, reuse the last
788 // jump entry if this is the case.
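// (Presumably reuse is disabled while tracing deopts or profiling so that
// every deopt site keeps its own table entry, and thus its own comment and
// logged call position, instead of being folded into an earlier equivalent
// entry.)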
789 if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
790 jump_table_.is_empty() ||
791 !table_entry.IsEquivalentTo(jump_table_.last())) {
792 jump_table_.Add(table_entry, zone());
794 if (cc == no_condition) {
795 __ jmp(&jump_table_.last().label);
797 __ j(cc, &jump_table_.last().label);
803 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
804 Deoptimizer::DeoptReason deopt_reason) {
805 Deoptimizer::BailoutType bailout_type = info()->IsStub()
807 : Deoptimizer::EAGER;
808 DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
812 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
813 int length = deoptimizations_.length();
814 if (length == 0) return;
815 Handle<DeoptimizationInputData> data =
816 DeoptimizationInputData::New(isolate(), length, TENURED);
818 Handle<ByteArray> translations =
819 translations_.CreateByteArray(isolate()->factory());
820 data->SetTranslationByteArray(*translations);
821 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
822 data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
823 if (info_->IsOptimizing()) {
824 // Reference to shared function info does not change between phases.
825 AllowDeferredHandleDereference allow_handle_dereference;
826 data->SetSharedFunctionInfo(*info_->shared_info());
828 data->SetSharedFunctionInfo(Smi::FromInt(0));
830 data->SetWeakCellCache(Smi::FromInt(0));
832 Handle<FixedArray> literals =
833 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
834 { AllowDeferredHandleDereference copy_handles;
835 for (int i = 0; i < deoptimization_literals_.length(); i++) {
836 literals->set(i, *deoptimization_literals_[i]);
838 data->SetLiteralArray(*literals);
841 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
842 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
844 // Populate the deoptimization entries.
845 for (int i = 0; i < length; i++) {
846 LEnvironment* env = deoptimizations_[i];
847 data->SetAstId(i, env->ast_id());
848 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
849 data->SetArgumentsStackHeight(i,
850 Smi::FromInt(env->arguments_stack_height()));
851 data->SetPc(i, Smi::FromInt(env->pc_offset()));
853 code->set_deoptimization_data(*data);
857 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
858 DCHECK_EQ(0, deoptimization_literals_.length());
859 for (auto function : chunk()->inlined_functions()) {
860 DefineDeoptimizationLiteral(function);
862 inlined_function_count_ = deoptimization_literals_.length();
866 void LCodeGen::RecordSafepointWithLazyDeopt(
867 LInstruction* instr, SafepointMode safepoint_mode, int argc) {
868 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
869 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
871 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
872 RecordSafepointWithRegisters(
873 instr->pointer_map(), argc, Safepoint::kLazyDeopt);
878 void LCodeGen::RecordSafepoint(
879 LPointerMap* pointers,
880 Safepoint::Kind kind,
882 Safepoint::DeoptMode deopt_mode) {
883 DCHECK(kind == expected_safepoint_kind_);
885 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
887 Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
888 kind, arguments, deopt_mode);
889 for (int i = 0; i < operands->length(); i++) {
890 LOperand* pointer = operands->at(i);
891 if (pointer->IsStackSlot()) {
892 safepoint.DefinePointerSlot(pointer->index(), zone());
893 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
894 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
900 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
901 Safepoint::DeoptMode deopt_mode) {
902 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
906 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
907 LPointerMap empty_pointers(zone());
908 RecordSafepoint(&empty_pointers, deopt_mode);
912 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
914 Safepoint::DeoptMode deopt_mode) {
915 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
919 void LCodeGen::RecordAndWritePosition(int position) {
920 if (position == RelocInfo::kNoPosition) return;
921 masm()->positions_recorder()->RecordPosition(position);
922 masm()->positions_recorder()->WriteRecordedPositions();
926 static const char* LabelType(LLabel* label) {
927 if (label->is_loop_header()) return " (loop header)";
928 if (label->is_osr_entry()) return " (OSR entry)";
933 void LCodeGen::DoLabel(LLabel* label) {
934 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
935 current_instruction_,
936 label->hydrogen_value()->id(),
939 __ bind(label->label());
940 current_block_ = label->block_id();
945 void LCodeGen::DoParallelMove(LParallelMove* move) {
946 resolver_.Resolve(move);
950 void LCodeGen::DoGap(LGap* gap) {
951 for (int i = LGap::FIRST_INNER_POSITION;
952 i <= LGap::LAST_INNER_POSITION;
954 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
955 LParallelMove* move = gap->GetParallelMove(inner_pos);
956 if (move != NULL) DoParallelMove(move);
961 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
966 void LCodeGen::DoParameter(LParameter* instr) {
971 void LCodeGen::DoCallStub(LCallStub* instr) {
972 DCHECK(ToRegister(instr->context()).is(rsi));
973 DCHECK(ToRegister(instr->result()).is(rax));
974 switch (instr->hydrogen()->major_key()) {
975 case CodeStub::RegExpExec: {
976 RegExpExecStub stub(isolate());
977 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
980 case CodeStub::SubString: {
981 SubStringStub stub(isolate());
982 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
985 case CodeStub::StringCompare: {
986 StringCompareStub stub(isolate());
987 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
996 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
997 GenerateOsrPrologue();
1001 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
1002 Register dividend = ToRegister(instr->dividend());
1003 int32_t divisor = instr->divisor();
1004 DCHECK(dividend.is(ToRegister(instr->result())));
1006 // Theoretically, a variation of the branch-free code for integer division by
1007 // a power of 2 (calculating the remainder via an additional multiplication
1008 // (which gets simplified to an 'and') and subtraction) should be faster, and
1009 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1010 // indicate that positive dividends are heavily favored, so the branching
1011 // version performs better.
1012 HMod* hmod = instr->hydrogen();
1013 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1014 Label dividend_is_not_negative, done;
1015 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1016 __ testl(dividend, dividend);
1017 __ j(not_sign, &dividend_is_not_negative, Label::kNear);
1018 // Note that this is correct even for kMinInt operands.
1020 __ andl(dividend, Immediate(mask));
1022 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1023 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1025 __ jmp(&done, Label::kNear);
1028 __ bind(&dividend_is_not_negative);
1029 __ andl(dividend, Immediate(mask));
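// Worked example of the mask trick: for divisor == +/-8 the mask is 7, so a
// non-negative dividend such as 21 gives 21 & 7 == 5 == 21 % 8. For a
// negative dividend the JS remainder keeps the dividend's sign,
// n % d == -((-n) & mask), which is why the negative path above cannot use a
// plain 'and' and why a zero result on that path deopts when -0 matters.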
1034 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1035 Register dividend = ToRegister(instr->dividend());
1036 int32_t divisor = instr->divisor();
1037 DCHECK(ToRegister(instr->result()).is(rax));
1040 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
1044 __ TruncatingDiv(dividend, Abs(divisor));
1045 __ imull(rdx, rdx, Immediate(Abs(divisor)));
1046 __ movl(rax, dividend);
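// TruncatingDiv leaves trunc(dividend / |divisor|) in rdx; the remainder is
// then dividend - quotient * |divisor|, which is why the quotient is
// multiplied back by |divisor| and rax is loaded with the dividend here.
// Worked example: -7 % 3 in JS is -1, and indeed
// -7 - trunc(-7 / 3) * 3 == -7 - (-2 * 3) == -1
// (the JS remainder takes the sign of the dividend).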
1049 // Check for negative zero.
1050 HMod* hmod = instr->hydrogen();
1051 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1052 Label remainder_not_zero;
1053 __ j(not_zero, &remainder_not_zero, Label::kNear);
1054 __ cmpl(dividend, Immediate(0));
1055 DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
1056 __ bind(&remainder_not_zero);
1061 void LCodeGen::DoModI(LModI* instr) {
1062 HMod* hmod = instr->hydrogen();
1064 Register left_reg = ToRegister(instr->left());
1065 DCHECK(left_reg.is(rax));
1066 Register right_reg = ToRegister(instr->right());
1067 DCHECK(!right_reg.is(rax));
1068 DCHECK(!right_reg.is(rdx));
1069 Register result_reg = ToRegister(instr->result());
1070 DCHECK(result_reg.is(rdx));
1073 // Check for x % 0, idiv would signal a divide error. We have to
1074 // deopt in this case because we can't return a NaN.
1075 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1076 __ testl(right_reg, right_reg);
1077 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
1080 // Check for kMinInt % -1, idiv would signal a divide error. We
1081 // have to deopt if we care about -0, because we can't return that.
1082 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1083 Label no_overflow_possible;
1084 __ cmpl(left_reg, Immediate(kMinInt));
1085 __ j(not_zero, &no_overflow_possible, Label::kNear);
1086 __ cmpl(right_reg, Immediate(-1));
1087 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1088 DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
1090 __ j(not_equal, &no_overflow_possible, Label::kNear);
1091 __ Set(result_reg, 0);
1092 __ jmp(&done, Label::kNear);
1094 __ bind(&no_overflow_possible);
1097 // Sign extend dividend in eax into edx:eax, since we are using only the low
1098 // 32 bits of the values.
1101 // If we care about -0, test if the dividend is <0 and the result is 0.
1102 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1103 Label positive_left;
1104 __ testl(left_reg, left_reg);
1105 __ j(not_sign, &positive_left, Label::kNear);
1106 __ idivl(right_reg);
1107 __ testl(result_reg, result_reg);
1108 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1109 __ jmp(&done, Label::kNear);
1110 __ bind(&positive_left);
1112 __ idivl(right_reg);
1117 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1118 Register dividend = ToRegister(instr->dividend());
1119 int32_t divisor = instr->divisor();
1120 DCHECK(dividend.is(ToRegister(instr->result())));
1122 // If the divisor is positive, things are easy: There can be no deopts and we
1123 // can simply do an arithmetic right shift.
1124 if (divisor == 1) return;
1125 int32_t shift = WhichPowerOf2Abs(divisor);
1127 __ sarl(dividend, Immediate(shift));
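// For a positive power-of-two divisor an arithmetic right shift by
// log2(divisor) is exactly the flooring division, even for negative
// dividends: e.g. -5 >> 1 == -3 == floor(-5 / 2), where truncating division
// would give -2. Hence no deopt checks are needed on this path.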
1131 // If the divisor is negative, we have to negate and handle edge cases.
1133 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1134 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1137 // Dividing by -1 is basically negation, unless we overflow.
1138 if (divisor == -1) {
1139 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1140 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1145 // If the negation could not overflow, simply shifting is OK.
1146 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1147 __ sarl(dividend, Immediate(shift));
1151 Label not_kmin_int, done;
1152 __ j(no_overflow, &not_kmin_int, Label::kNear);
1153 __ movl(dividend, Immediate(kMinInt / divisor));
1154 __ jmp(&done, Label::kNear);
1155 __ bind(&not_kmin_int);
1156 __ sarl(dividend, Immediate(shift));
1161 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1162 Register dividend = ToRegister(instr->dividend());
1163 int32_t divisor = instr->divisor();
1164 DCHECK(ToRegister(instr->result()).is(rdx));
1167 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
1171 // Check for (0 / -x) that will produce negative zero.
1172 HMathFloorOfDiv* hdiv = instr->hydrogen();
1173 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1174 __ testl(dividend, dividend);
1175 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1178 // Easy case: We need no dynamic check for the dividend and the flooring
1179 // division is the same as the truncating division.
1180 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1181 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1182 __ TruncatingDiv(dividend, Abs(divisor));
1183 if (divisor < 0) __ negl(rdx);
1187 // In the general case we may need to adjust before and after the truncating
1188 // division to get a flooring division.
1189 Register temp = ToRegister(instr->temp3());
1190 DCHECK(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
1191 Label needs_adjustment, done;
1192 __ cmpl(dividend, Immediate(0));
1193 __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
1194 __ TruncatingDiv(dividend, Abs(divisor));
1195 if (divisor < 0) __ negl(rdx);
1196 __ jmp(&done, Label::kNear);
1197 __ bind(&needs_adjustment);
1198 __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
1199 __ TruncatingDiv(temp, Abs(divisor));
1200 if (divisor < 0) __ negl(rdx);
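// The adjustment uses the identity floor(n / d) == trunc((n + 1) / d) - 1
// for n < 0 < d (and symmetrically trunc((n - 1) / d) - 1 for n > 0 > d).
// E.g. floor(-7 / 3) == -3 while trunc(-7 / 3) == -2, and
// trunc((-7 + 1) / 3) - 1 == -2 - 1 == -3. The branch above skips the
// adjustment whenever dividend and divisor have the same sign (or the
// dividend is zero), where truncating and flooring division agree.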
1206 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1207 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1208 HBinaryOperation* hdiv = instr->hydrogen();
1209 Register dividend = ToRegister(instr->dividend());
1210 Register divisor = ToRegister(instr->divisor());
1211 Register remainder = ToRegister(instr->temp());
1212 Register result = ToRegister(instr->result());
1213 DCHECK(dividend.is(rax));
1214 DCHECK(remainder.is(rdx));
1215 DCHECK(result.is(rax));
1216 DCHECK(!divisor.is(rax));
1217 DCHECK(!divisor.is(rdx));
1220 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1221 __ testl(divisor, divisor);
1222 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
1225 // Check for (0 / -x) that will produce negative zero.
1226 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1227 Label dividend_not_zero;
1228 __ testl(dividend, dividend);
1229 __ j(not_zero, &dividend_not_zero, Label::kNear);
1230 __ testl(divisor, divisor);
1231 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
1232 __ bind(&dividend_not_zero);
1235 // Check for (kMinInt / -1).
1236 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1237 Label dividend_not_min_int;
1238 __ cmpl(dividend, Immediate(kMinInt));
1239 __ j(not_zero, &dividend_not_min_int, Label::kNear);
1240 __ cmpl(divisor, Immediate(-1));
1241 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
1242 __ bind(&dividend_not_min_int);
1245 // Sign extend to rdx (= remainder).
1250 __ testl(remainder, remainder);
1251 __ j(zero, &done, Label::kNear);
1252 __ xorl(remainder, divisor);
1253 __ sarl(remainder, Immediate(31));
1254 __ addl(result, remainder);
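// Fix-up from truncating to flooring division: after the 32-bit division the
// truncated quotient is in rax and the remainder (same sign as the dividend)
// in rdx. If the remainder is non-zero and dividend and divisor have opposite
// signs, the floor is one less than the truncated quotient. The 'xorl' puts
// the sign difference into the sign bit, 'sarl 31' turns it into 0 or -1,
// and the 'addl' applies it. E.g. -7 / 2: quotient -3, remainder -1; the
// signs differ, so the result becomes -4 == floor(-3.5).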
1259 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1260 Register dividend = ToRegister(instr->dividend());
1261 int32_t divisor = instr->divisor();
1262 Register result = ToRegister(instr->result());
1263 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1264 DCHECK(!result.is(dividend));
1266 // Check for (0 / -x) that will produce negative zero.
1267 HDiv* hdiv = instr->hydrogen();
1268 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1269 __ testl(dividend, dividend);
1270 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1272 // Check for (kMinInt / -1).
1273 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1274 __ cmpl(dividend, Immediate(kMinInt));
1275 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
1277 // Deoptimize if remainder will not be 0.
1278 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1279 divisor != 1 && divisor != -1) {
1280 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1281 __ testl(dividend, Immediate(mask));
1282 DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
1284 __ Move(result, dividend);
1285 int32_t shift = WhichPowerOf2Abs(divisor);
1287 // The arithmetic shift is always OK, the 'if' is an optimization only.
1288 if (shift > 1) __ sarl(result, Immediate(31));
1289 __ shrl(result, Immediate(32 - shift));
1290 __ addl(result, dividend);
1291 __ sarl(result, Immediate(shift));
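// This is the usual bias trick for truncating (round-toward-zero) signed
// division by 2^shift: 'sarl 31' broadcasts the sign bit (0 or -1),
// 'shrl (32 - shift)' turns that into the bias 2^shift - 1 for negative
// dividends (0 otherwise), and adding the bias before the final arithmetic
// shift converts flooring into truncation. E.g. dividend -5, divisor 4:
// bias 3, (-5 + 3) >> 2 == -1 == trunc(-5 / 4), whereas a bare shift would
// give -2.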
1293 if (divisor < 0) __ negl(result);
1297 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1298 Register dividend = ToRegister(instr->dividend());
1299 int32_t divisor = instr->divisor();
1300 DCHECK(ToRegister(instr->result()).is(rdx));
1303 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
1307 // Check for (0 / -x) that will produce negative zero.
1308 HDiv* hdiv = instr->hydrogen();
1309 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1310 __ testl(dividend, dividend);
1311 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1314 __ TruncatingDiv(dividend, Abs(divisor));
1315 if (divisor < 0) __ negl(rdx);
1317 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1319 __ imull(rax, rax, Immediate(divisor));
1320 __ subl(rax, dividend);
1321 DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
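// The truncated quotient is multiplied back by the divisor and compared
// against the original dividend; a non-zero difference means the division
// left a remainder, i.e. the exact result is not an int32, so the code
// deoptimizes with kLostPrecision instead of silently truncating.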
1326 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1327 void LCodeGen::DoDivI(LDivI* instr) {
1328 HBinaryOperation* hdiv = instr->hydrogen();
1329 Register dividend = ToRegister(instr->dividend());
1330 Register divisor = ToRegister(instr->divisor());
1331 Register remainder = ToRegister(instr->temp());
1332 DCHECK(dividend.is(rax));
1333 DCHECK(remainder.is(rdx));
1334 DCHECK(ToRegister(instr->result()).is(rax));
1335 DCHECK(!divisor.is(rax));
1336 DCHECK(!divisor.is(rdx));
1339 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1340 __ testl(divisor, divisor);
1341 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
1344 // Check for (0 / -x) that will produce negative zero.
1345 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1346 Label dividend_not_zero;
1347 __ testl(dividend, dividend);
1348 __ j(not_zero, &dividend_not_zero, Label::kNear);
1349 __ testl(divisor, divisor);
1350 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
1351 __ bind(&dividend_not_zero);
1354 // Check for (kMinInt / -1).
1355 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1356 Label dividend_not_min_int;
1357 __ cmpl(dividend, Immediate(kMinInt));
1358 __ j(not_zero, &dividend_not_min_int, Label::kNear);
1359 __ cmpl(divisor, Immediate(-1));
1360 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
1361 __ bind(&dividend_not_min_int);
1364 // Sign extend to rdx (= remainder).
1368 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1369 // Deoptimize if remainder is not 0.
1370 __ testl(remainder, remainder);
1371 DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
1376 void LCodeGen::DoMulI(LMulI* instr) {
1377 Register left = ToRegister(instr->left());
1378 LOperand* right = instr->right();
1380 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1381 if (instr->hydrogen_value()->representation().IsSmi()) {
1382 __ movp(kScratchRegister, left);
1384 __ movl(kScratchRegister, left);
1389 instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1390 if (right->IsConstantOperand()) {
1391 int32_t right_value = ToInteger32(LConstantOperand::cast(right));
1392 if (right_value == -1) {
1394 } else if (right_value == 0) {
1395 __ xorl(left, left);
1396 } else if (right_value == 2) {
1397 __ addl(left, left);
1398 } else if (!can_overflow) {
1399 // If the multiplication is known to not overflow, we
1400 // can use operations that don't set the overflow flag
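// (For example, 'leal left, [left + left*2]' computes left * 3 and
// 'shll left, 2' computes left * 4; unlike imull they give no usable
// overflow indication, which is why this path is only taken when overflow
// has been ruled out.)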
1402 switch (right_value) {
1407 __ leal(left, Operand(left, left, times_2, 0));
1410 __ shll(left, Immediate(2));
1413 __ leal(left, Operand(left, left, times_4, 0));
1416 __ shll(left, Immediate(3));
1419 __ leal(left, Operand(left, left, times_8, 0));
1422 __ shll(left, Immediate(4));
1425 __ imull(left, left, Immediate(right_value));
1429 __ imull(left, left, Immediate(right_value));
1431 } else if (right->IsStackSlot()) {
1432 if (instr->hydrogen_value()->representation().IsSmi()) {
1433 __ SmiToInteger64(left, left);
1434 __ imulp(left, ToOperand(right));
1436 __ imull(left, ToOperand(right));
1439 if (instr->hydrogen_value()->representation().IsSmi()) {
1440 __ SmiToInteger64(left, left);
1441 __ imulp(left, ToRegister(right));
1443 __ imull(left, ToRegister(right));
1448 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1451 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1452 // Bail out if the result is supposed to be negative zero.
1454 if (instr->hydrogen_value()->representation().IsSmi()) {
1455 __ testp(left, left);
1457 __ testl(left, left);
1459 __ j(not_zero, &done, Label::kNear);
1460 if (right->IsConstantOperand()) {
1461 // Constant can't be represented as 32-bit Smi due to immediate size limit.
1463 DCHECK(SmiValuesAre32Bits()
1464 ? !instr->hydrogen_value()->representation().IsSmi()
1465 : SmiValuesAre31Bits());
1466 if (ToInteger32(LConstantOperand::cast(right)) < 0) {
1467 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
1468 } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
1469 __ cmpl(kScratchRegister, Immediate(0));
1470 DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
1472 } else if (right->IsStackSlot()) {
1473 if (instr->hydrogen_value()->representation().IsSmi()) {
1474 __ orp(kScratchRegister, ToOperand(right));
1476 __ orl(kScratchRegister, ToOperand(right));
1478 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
1480 // Test the non-zero operand for negative sign.
1481 if (instr->hydrogen_value()->representation().IsSmi()) {
1482 __ orp(kScratchRegister, ToRegister(right));
1484 __ orl(kScratchRegister, ToRegister(right));
1486 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
1493 void LCodeGen::DoBitI(LBitI* instr) {
1494 LOperand* left = instr->left();
1495 LOperand* right = instr->right();
1496 DCHECK(left->Equals(instr->result()));
1497 DCHECK(left->IsRegister());
1499 if (right->IsConstantOperand()) {
1500 int32_t right_operand =
1501 ToRepresentation(LConstantOperand::cast(right),
1502 instr->hydrogen()->right()->representation());
1503 switch (instr->op()) {
1504 case Token::BIT_AND:
1505 __ andl(ToRegister(left), Immediate(right_operand));
1508 __ orl(ToRegister(left), Immediate(right_operand));
1510 case Token::BIT_XOR:
1511 if (right_operand == int32_t(~0)) {
1512 __ notl(ToRegister(left));
1514 __ xorl(ToRegister(left), Immediate(right_operand));
1521 } else if (right->IsStackSlot()) {
1522 switch (instr->op()) {
1523 case Token::BIT_AND:
1524 if (instr->IsInteger32()) {
1525 __ andl(ToRegister(left), ToOperand(right));
1527 __ andp(ToRegister(left), ToOperand(right));
1531 if (instr->IsInteger32()) {
1532 __ orl(ToRegister(left), ToOperand(right));
1534 __ orp(ToRegister(left), ToOperand(right));
1537 case Token::BIT_XOR:
1538 if (instr->IsInteger32()) {
1539 __ xorl(ToRegister(left), ToOperand(right));
1541 __ xorp(ToRegister(left), ToOperand(right));
1549 DCHECK(right->IsRegister());
1550 switch (instr->op()) {
1551 case Token::BIT_AND:
1552 if (instr->IsInteger32()) {
1553 __ andl(ToRegister(left), ToRegister(right));
1555 __ andp(ToRegister(left), ToRegister(right));
1559 if (instr->IsInteger32()) {
1560 __ orl(ToRegister(left), ToRegister(right));
1562 __ orp(ToRegister(left), ToRegister(right));
1565 case Token::BIT_XOR:
1566 if (instr->IsInteger32()) {
1567 __ xorl(ToRegister(left), ToRegister(right));
1569 __ xorp(ToRegister(left), ToRegister(right));
1580 void LCodeGen::DoShiftI(LShiftI* instr) {
1581 LOperand* left = instr->left();
1582 LOperand* right = instr->right();
1583 DCHECK(left->Equals(instr->result()));
1584 DCHECK(left->IsRegister());
1585 if (right->IsRegister()) {
1586 DCHECK(ToRegister(right).is(rcx));
1588 switch (instr->op()) {
1590 __ rorl_cl(ToRegister(left));
1593 __ sarl_cl(ToRegister(left));
1596 __ shrl_cl(ToRegister(left));
1597 if (instr->can_deopt()) {
1598 __ testl(ToRegister(left), ToRegister(left));
1599 DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
1603 __ shll_cl(ToRegister(left));
1610 int32_t value = ToInteger32(LConstantOperand::cast(right));
1611 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1612 switch (instr->op()) {
1614 if (shift_count != 0) {
1615 __ rorl(ToRegister(left), Immediate(shift_count));
1619 if (shift_count != 0) {
1620 __ sarl(ToRegister(left), Immediate(shift_count));
1624 if (shift_count != 0) {
1625 __ shrl(ToRegister(left), Immediate(shift_count));
1626 } else if (instr->can_deopt()) {
1627 __ testl(ToRegister(left), ToRegister(left));
1628 DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
1632 if (shift_count != 0) {
1633 if (instr->hydrogen_value()->representation().IsSmi()) {
1634 if (SmiValuesAre32Bits()) {
1635 __ shlp(ToRegister(left), Immediate(shift_count));
1637 DCHECK(SmiValuesAre31Bits());
1638 if (instr->can_deopt()) {
1639 if (shift_count != 1) {
1640 __ shll(ToRegister(left), Immediate(shift_count - 1));
1642 __ Integer32ToSmi(ToRegister(left), ToRegister(left));
1643 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1645 __ shll(ToRegister(left), Immediate(shift_count));
1649 __ shll(ToRegister(left), Immediate(shift_count));
1661 void LCodeGen::DoSubI(LSubI* instr) {
1662 LOperand* left = instr->left();
1663 LOperand* right = instr->right();
1664 DCHECK(left->Equals(instr->result()));
1666 if (right->IsConstantOperand()) {
1667 int32_t right_operand =
1668 ToRepresentation(LConstantOperand::cast(right),
1669 instr->hydrogen()->right()->representation());
1670 __ subl(ToRegister(left), Immediate(right_operand));
1671 } else if (right->IsRegister()) {
1672 if (instr->hydrogen_value()->representation().IsSmi()) {
1673 __ subp(ToRegister(left), ToRegister(right));
1675 __ subl(ToRegister(left), ToRegister(right));
1678 if (instr->hydrogen_value()->representation().IsSmi()) {
1679 __ subp(ToRegister(left), ToOperand(right));
1681 __ subl(ToRegister(left), ToOperand(right));
1685 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1686 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1691 void LCodeGen::DoConstantI(LConstantI* instr) {
1692 Register dst = ToRegister(instr->result());
1693 if (instr->value() == 0) {
1696 __ movl(dst, Immediate(instr->value()));
1701 void LCodeGen::DoConstantS(LConstantS* instr) {
1702 __ Move(ToRegister(instr->result()), instr->value());
1706 void LCodeGen::DoConstantD(LConstantD* instr) {
1707 __ Move(ToDoubleRegister(instr->result()), instr->bits());
1711 void LCodeGen::DoConstantE(LConstantE* instr) {
1712 __ LoadAddress(ToRegister(instr->result()), instr->value());
1716 void LCodeGen::DoConstantT(LConstantT* instr) {
1717 Handle<Object> object = instr->value(isolate());
1718 AllowDeferredHandleDereference smi_check;
1719 __ Move(ToRegister(instr->result()), object);
1723 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1724 Register result = ToRegister(instr->result());
1725 Register map = ToRegister(instr->value());
1726 __ EnumLength(result, map);
1730 void LCodeGen::DoDateField(LDateField* instr) {
1731 Register object = ToRegister(instr->date());
1732 Register result = ToRegister(instr->result());
1733 Smi* index = instr->index();
1734 DCHECK(object.is(result));
1735 DCHECK(object.is(rax));
1737 if (FLAG_debug_code) {
1738 __ AssertNotSmi(object);
1739 __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
1740 __ Check(equal, kOperandIsNotADate);
1743 if (index->value() == 0) {
1744 __ movp(result, FieldOperand(object, JSDate::kValueOffset));
1746 Label runtime, done;
1747 if (index->value() < JSDate::kFirstUncachedField) {
1748 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1749 Operand stamp_operand = __ ExternalOperand(stamp);
1750 __ movp(kScratchRegister, stamp_operand);
1751 __ cmpp(kScratchRegister, FieldOperand(object,
1752 JSDate::kCacheStampOffset));
1753 __ j(not_equal, &runtime, Label::kNear);
1754 __ movp(result, FieldOperand(object, JSDate::kValueOffset +
1755 kPointerSize * index->value()));
1756 __ jmp(&done, Label::kNear);
1759 __ PrepareCallCFunction(2);
1760 __ movp(arg_reg_1, object);
1761 __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
1762 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1768 Operand LCodeGen::BuildSeqStringOperand(Register string,
1770 String::Encoding encoding) {
1771 if (index->IsConstantOperand()) {
1772 int offset = ToInteger32(LConstantOperand::cast(index));
1773 if (encoding == String::TWO_BYTE_ENCODING) {
1774 offset *= kUC16Size;
1776 STATIC_ASSERT(kCharSize == 1);
1777 return FieldOperand(string, SeqString::kHeaderSize + offset);
1779 return FieldOperand(
1780 string, ToRegister(index),
1781 encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
1782 SeqString::kHeaderSize);
1786 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1787 String::Encoding encoding = instr->hydrogen()->encoding();
1788 Register result = ToRegister(instr->result());
1789 Register string = ToRegister(instr->string());
1791 if (FLAG_debug_code) {
1793 __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
1794 __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));
1796 __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
1797 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1798 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1799 __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
1800 ? one_byte_seq_type : two_byte_seq_type));
1801 __ Check(equal, kUnexpectedStringType);
1805 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1806 if (encoding == String::ONE_BYTE_ENCODING) {
1807 __ movzxbl(result, operand);
1809 __ movzxwl(result, operand);
1814 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1815 String::Encoding encoding = instr->hydrogen()->encoding();
1816 Register string = ToRegister(instr->string());
1818 if (FLAG_debug_code) {
1819 Register value = ToRegister(instr->value());
1820 Register index = ToRegister(instr->index());
1821 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1822 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1824 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1825 ? one_byte_seq_type : two_byte_seq_type;
1826 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1829 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1830 if (instr->value()->IsConstantOperand()) {
1831 int value = ToInteger32(LConstantOperand::cast(instr->value()));
1832 DCHECK_LE(0, value);
1833 if (encoding == String::ONE_BYTE_ENCODING) {
1834 DCHECK_LE(value, String::kMaxOneByteCharCode);
1835 __ movb(operand, Immediate(value));
1837 DCHECK_LE(value, String::kMaxUtf16CodeUnit);
1838 __ movw(operand, Immediate(value));
1841 Register value = ToRegister(instr->value());
1842 if (encoding == String::ONE_BYTE_ENCODING) {
1843 __ movb(operand, value);
1845 __ movw(operand, value);
1851 void LCodeGen::DoAddI(LAddI* instr) {
1852 LOperand* left = instr->left();
1853 LOperand* right = instr->right();
1855 Representation target_rep = instr->hydrogen()->representation();
1856 bool is_p = target_rep.IsSmi() || target_rep.IsExternal();
1858 if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
1859 if (right->IsConstantOperand()) {
1860 // No support for smi-immediates for 32-bit SMI.
1861 DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
1863 ToRepresentation(LConstantOperand::cast(right),
1864 instr->hydrogen()->right()->representation());
1866 __ leap(ToRegister(instr->result()),
1867 MemOperand(ToRegister(left), offset));
1869 __ leal(ToRegister(instr->result()),
1870 MemOperand(ToRegister(left), offset));
1873 Operand address(ToRegister(left), ToRegister(right), times_1, 0);
1875 __ leap(ToRegister(instr->result()), address);
1877 __ leal(ToRegister(instr->result()), address);
1881 if (right->IsConstantOperand()) {
1882 // No support for smi-immediates for 32-bit SMI.
1883 DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
1884 int32_t right_operand =
1885 ToRepresentation(LConstantOperand::cast(right),
1886 instr->hydrogen()->right()->representation());
1888 __ addp(ToRegister(left), Immediate(right_operand));
1890 __ addl(ToRegister(left), Immediate(right_operand));
1892 } else if (right->IsRegister()) {
1894 __ addp(ToRegister(left), ToRegister(right));
1896 __ addl(ToRegister(left), ToRegister(right));
1900 __ addp(ToRegister(left), ToOperand(right));
1902 __ addl(ToRegister(left), ToOperand(right));
1905 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1906 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1912 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1913 LOperand* left = instr->left();
1914 LOperand* right = instr->right();
1915 DCHECK(left->Equals(instr->result()));
1916 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1917 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1919 Condition condition = (operation == HMathMinMax::kMathMin)
1922 Register left_reg = ToRegister(left);
1923 if (right->IsConstantOperand()) {
1924 Immediate right_imm = Immediate(
1925 ToRepresentation(LConstantOperand::cast(right),
1926 instr->hydrogen()->right()->representation()));
1927 DCHECK(SmiValuesAre32Bits()
1928 ? !instr->hydrogen()->representation().IsSmi()
1929 : SmiValuesAre31Bits());
1930 __ cmpl(left_reg, right_imm);
1931 __ j(condition, &return_left, Label::kNear);
1932 __ movp(left_reg, right_imm);
1933 } else if (right->IsRegister()) {
1934 Register right_reg = ToRegister(right);
1935 if (instr->hydrogen_value()->representation().IsSmi()) {
1936 __ cmpp(left_reg, right_reg);
1938 __ cmpl(left_reg, right_reg);
1940 __ j(condition, &return_left, Label::kNear);
1941 __ movp(left_reg, right_reg);
1943 Operand right_op = ToOperand(right);
1944 if (instr->hydrogen_value()->representation().IsSmi()) {
1945 __ cmpp(left_reg, right_op);
1947 __ cmpl(left_reg, right_op);
1949 __ j(condition, &return_left, Label::kNear);
1950 __ movp(left_reg, right_op);
1952 __ bind(&return_left);
1954 DCHECK(instr->hydrogen()->representation().IsDouble());
1955 Label check_nan_left, check_zero, return_left, return_right;
1956 Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
1957 XMMRegister left_reg = ToDoubleRegister(left);
1958 XMMRegister right_reg = ToDoubleRegister(right);
1959 __ ucomisd(left_reg, right_reg);
1960 __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
1961 __ j(equal, &check_zero, Label::kNear); // left == right.
1962 __ j(condition, &return_left, Label::kNear);
1963 __ jmp(&return_right, Label::kNear);
1965 __ bind(&check_zero);
1966 XMMRegister xmm_scratch = double_scratch0();
1967 __ xorps(xmm_scratch, xmm_scratch);
1968 __ ucomisd(left_reg, xmm_scratch);
1969 __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
1970 // At this point, both left and right are either 0 or -0.
1971 if (operation == HMathMinMax::kMathMin) {
1972 __ orps(left_reg, right_reg);
1974 // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
1975 __ addsd(left_reg, right_reg);
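// Bit-level reasoning: +0 and -0 differ only in the sign bit and compare
// equal, so the sign must be resolved explicitly. 'orps' keeps a set sign
// bit, giving min(+0, -0) == -0; 'addsd' clears it unless both inputs are
// -0, giving max(+0, -0) == +0 and max(-0, -0) == -0, matching the ES
// semantics of Math.min/Math.max for signed zeros.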
1977 __ jmp(&return_left, Label::kNear);
1979 __ bind(&check_nan_left);
1980 __ ucomisd(left_reg, left_reg); // NaN check.
1981 __ j(parity_even, &return_left, Label::kNear);
1982 __ bind(&return_right);
1983 __ movaps(left_reg, right_reg);
1985 __ bind(&return_left);
1990 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1991 XMMRegister left = ToDoubleRegister(instr->left());
1992 XMMRegister right = ToDoubleRegister(instr->right());
1993 XMMRegister result = ToDoubleRegister(instr->result());
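  // The AVX forms below are three-operand and may target any result register;
  // the SSE2 fallbacks are destructive, hence the DCHECK(result.is(left)) in
  // each case.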
1994 switch (instr->op()) {
1996 if (CpuFeatures::IsSupported(AVX)) {
1997 CpuFeatureScope scope(masm(), AVX);
1998 __ vaddsd(result, left, right);
2000 DCHECK(result.is(left));
2001 __ addsd(left, right);
2005 if (CpuFeatures::IsSupported(AVX)) {
2006 CpuFeatureScope scope(masm(), AVX);
2007 __ vsubsd(result, left, right);
2009 DCHECK(result.is(left));
2010 __ subsd(left, right);
2014 if (CpuFeatures::IsSupported(AVX)) {
2015 CpuFeatureScope scope(masm(), AVX);
2016 __ vmulsd(result, left, right);
2018 DCHECK(result.is(left));
2019 __ mulsd(left, right);
2023 if (CpuFeatures::IsSupported(AVX)) {
2024 CpuFeatureScope scope(masm(), AVX);
2025 __ vdivsd(result, left, right);
2027 DCHECK(result.is(left));
2028 __ divsd(left, right);
2030       // Don't delete this mov. It may improve performance on some CPUs
2031       // when there is a (v)mulsd depending on the result.
2032 __ movaps(result, result);
2035 XMMRegister xmm_scratch = double_scratch0();
2036 __ PrepareCallCFunction(2);
2037 __ movaps(xmm_scratch, left);
2038 DCHECK(right.is(xmm1));
2040 ExternalReference::mod_two_doubles_operation(isolate()), 2);
2041 __ movaps(result, xmm_scratch);
2051 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2052 DCHECK(ToRegister(instr->context()).is(rsi));
2053 DCHECK(ToRegister(instr->left()).is(rdx));
2054 DCHECK(ToRegister(instr->right()).is(rax));
2055 DCHECK(ToRegister(instr->result()).is(rax));
2058 CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
2059 CallCode(code, RelocInfo::CODE_TARGET, instr);
2063 template<class InstrType>
2064 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2065 int left_block = instr->TrueDestination(chunk_);
2066 int right_block = instr->FalseDestination(chunk_);
2068 int next_block = GetNextEmittedBlock();
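  // Emit as few jumps as possible: fall through to whichever target block is
  // emitted next and branch (possibly on the negated condition) to the other.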
2070 if (right_block == left_block || cc == no_condition) {
2071 EmitGoto(left_block);
2072 } else if (left_block == next_block) {
2073 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
2074 } else if (right_block == next_block) {
2075 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2077 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2079 __ jmp(chunk_->GetAssemblyLabel(right_block));
2085 template<class InstrType>
2086 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2087 int false_block = instr->FalseDestination(chunk_);
2088 __ j(cc, chunk_->GetAssemblyLabel(false_block));
2092 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2097 void LCodeGen::DoBranch(LBranch* instr) {
2098 Representation r = instr->hydrogen()->value()->representation();
2099 if (r.IsInteger32()) {
2100 DCHECK(!info()->IsStub());
2101 Register reg = ToRegister(instr->value());
2103 EmitBranch(instr, not_zero);
2104 } else if (r.IsSmi()) {
2105 DCHECK(!info()->IsStub());
2106 Register reg = ToRegister(instr->value());
2108 EmitBranch(instr, not_zero);
2109 } else if (r.IsDouble()) {
2110 DCHECK(!info()->IsStub());
2111 XMMRegister reg = ToDoubleRegister(instr->value());
2112 XMMRegister xmm_scratch = double_scratch0();
2113 __ xorps(xmm_scratch, xmm_scratch);
2114 __ ucomisd(reg, xmm_scratch);
2115 EmitBranch(instr, not_equal);
2117 DCHECK(r.IsTagged());
2118 Register reg = ToRegister(instr->value());
2119 HType type = instr->hydrogen()->value()->type();
2120 if (type.IsBoolean()) {
2121 DCHECK(!info()->IsStub());
2122 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2123 EmitBranch(instr, equal);
2124 } else if (type.IsSmi()) {
2125 DCHECK(!info()->IsStub());
2126 __ SmiCompare(reg, Smi::FromInt(0));
2127 EmitBranch(instr, not_equal);
2128 } else if (type.IsJSArray()) {
2129 DCHECK(!info()->IsStub());
2130 EmitBranch(instr, no_condition);
2131 } else if (type.IsHeapNumber()) {
2132 DCHECK(!info()->IsStub());
2133 XMMRegister xmm_scratch = double_scratch0();
2134 __ xorps(xmm_scratch, xmm_scratch);
2135 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2136 EmitBranch(instr, not_equal);
2137 } else if (type.IsString()) {
2138 DCHECK(!info()->IsStub());
2139 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2140 EmitBranch(instr, not_equal);
2142 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2143 // Avoid deopts in the case where we've never executed this path before.
2144 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2146 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2147 // undefined -> false.
2148 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2149 __ j(equal, instr->FalseLabel(chunk_));
2151 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2153 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2154 __ j(equal, instr->TrueLabel(chunk_));
2156 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2157 __ j(equal, instr->FalseLabel(chunk_));
2159 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2161 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2162 __ j(equal, instr->FalseLabel(chunk_));
2165 if (expected.Contains(ToBooleanStub::SMI)) {
2166       // Smis: 0 -> false, all others -> true.
2167 __ Cmp(reg, Smi::FromInt(0));
2168 __ j(equal, instr->FalseLabel(chunk_));
2169 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2170 } else if (expected.NeedsMap()) {
2171 // If we need a map later and have a Smi -> deopt.
2172 __ testb(reg, Immediate(kSmiTagMask));
2173 DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
2176 const Register map = kScratchRegister;
2177 if (expected.NeedsMap()) {
2178 __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
2180 if (expected.CanBeUndetectable()) {
2181 // Undetectable -> false.
2182 __ testb(FieldOperand(map, Map::kBitFieldOffset),
2183 Immediate(1 << Map::kIsUndetectable));
2184 __ j(not_zero, instr->FalseLabel(chunk_));
2188 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2189 // spec object -> true.
2190 __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2191 __ j(above_equal, instr->TrueLabel(chunk_));
2194 if (expected.Contains(ToBooleanStub::STRING)) {
2195 // String value -> false iff empty.
2197 __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2198         __ j(above_equal, &not_string, Label::kNear);
2199 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2200 __ j(not_zero, instr->TrueLabel(chunk_));
2201 __ jmp(instr->FalseLabel(chunk_));
2202         __ bind(&not_string);
2205 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2206 // Symbol value -> true.
2207 __ CmpInstanceType(map, SYMBOL_TYPE);
2208 __ j(equal, instr->TrueLabel(chunk_));
2211 if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
2212 // SIMD value -> true.
2213 __ CmpInstanceType(map, FLOAT32X4_TYPE);
2214 __ j(equal, instr->TrueLabel(chunk_));
2217 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2218 // heap number -> false iff +0, -0, or NaN.
2219 Label not_heap_number;
2220 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2221         __ j(not_equal, &not_heap_number, Label::kNear);
2222 XMMRegister xmm_scratch = double_scratch0();
2223 __ xorps(xmm_scratch, xmm_scratch);
2224 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2225 __ j(zero, instr->FalseLabel(chunk_));
2226 __ jmp(instr->TrueLabel(chunk_));
2227         __ bind(&not_heap_number);
2230 if (!expected.IsGeneric()) {
2231 // We've seen something for the first time -> deopt.
2232 // This can only happen if we are not generic already.
2233 DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
2240 void LCodeGen::EmitGoto(int block) {
2241 if (!IsNextEmittedBlock(block)) {
2242 __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
2247 void LCodeGen::DoGoto(LGoto* instr) {
2248 EmitGoto(instr->block_id());
2252 inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2253 Condition cond = no_condition;
2256 case Token::EQ_STRICT:
2260 case Token::NE_STRICT:
2264 cond = is_unsigned ? below : less;
2267 cond = is_unsigned ? above : greater;
2270 cond = is_unsigned ? below_equal : less_equal;
2273 cond = is_unsigned ? above_equal : greater_equal;
2276 case Token::INSTANCEOF:
2284 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2285 LOperand* left = instr->left();
2286 LOperand* right = instr->right();
2288 instr->is_double() ||
2289 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2290 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2291 Condition cc = TokenToCondition(instr->op(), is_unsigned);
2293 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2294 // We can statically evaluate the comparison.
2295 double left_val = ToDouble(LConstantOperand::cast(left));
2296 double right_val = ToDouble(LConstantOperand::cast(right));
2297 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2298 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2299 EmitGoto(next_block);
2301 if (instr->is_double()) {
2302 // Don't base result on EFLAGS when a NaN is involved. Instead
2303 // jump to the false block.
2304 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
2305 __ j(parity_even, instr->FalseLabel(chunk_));
2308 if (right->IsConstantOperand()) {
2309 value = ToInteger32(LConstantOperand::cast(right));
2310 if (instr->hydrogen_value()->representation().IsSmi()) {
2311 __ Cmp(ToRegister(left), Smi::FromInt(value));
2313 __ cmpl(ToRegister(left), Immediate(value));
2315 } else if (left->IsConstantOperand()) {
2316 value = ToInteger32(LConstantOperand::cast(left));
2317 if (instr->hydrogen_value()->representation().IsSmi()) {
2318 if (right->IsRegister()) {
2319 __ Cmp(ToRegister(right), Smi::FromInt(value));
2321 __ Cmp(ToOperand(right), Smi::FromInt(value));
2323 } else if (right->IsRegister()) {
2324 __ cmpl(ToRegister(right), Immediate(value));
2326 __ cmpl(ToOperand(right), Immediate(value));
2328 // We commuted the operands, so commute the condition.
2329 cc = CommuteCondition(cc);
2330 } else if (instr->hydrogen_value()->representation().IsSmi()) {
2331 if (right->IsRegister()) {
2332 __ cmpp(ToRegister(left), ToRegister(right));
2334 __ cmpp(ToRegister(left), ToOperand(right));
2337 if (right->IsRegister()) {
2338 __ cmpl(ToRegister(left), ToRegister(right));
2340 __ cmpl(ToRegister(left), ToOperand(right));
2344 EmitBranch(instr, cc);
2349 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2350 Register left = ToRegister(instr->left());
2352 if (instr->right()->IsConstantOperand()) {
2353 Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2354 __ Cmp(left, right);
2356 Register right = ToRegister(instr->right());
2357 __ cmpp(left, right);
2359 EmitBranch(instr, equal);
2363 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2364 if (instr->hydrogen()->representation().IsTagged()) {
2365 Register input_reg = ToRegister(instr->object());
2366 __ Cmp(input_reg, factory()->the_hole_value());
2367 EmitBranch(instr, equal);
2371 XMMRegister input_reg = ToDoubleRegister(instr->object());
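  // ucomisd of a value against itself is unordered only for NaN. The hole is
  // encoded as a NaN bit pattern, so any ordered value cannot be the hole.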
2372 __ ucomisd(input_reg, input_reg);
2373 EmitFalseBranch(instr, parity_odd);
2375 __ subp(rsp, Immediate(kDoubleSize));
2376 __ movsd(MemOperand(rsp, 0), input_reg);
2377 __ addp(rsp, Immediate(kDoubleSize));
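  // The double was just stored in the slot below the current rsp; compare the
  // upper 32 bits of that slot against the hole NaN's upper half.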
2379 int offset = sizeof(kHoleNanUpper32);
2380 __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
2381 EmitBranch(instr, equal);
2385 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2386 Representation rep = instr->hydrogen()->value()->representation();
2387 DCHECK(!rep.IsInteger32());
2389 if (rep.IsDouble()) {
2390 XMMRegister value = ToDoubleRegister(instr->value());
2391 XMMRegister xmm_scratch = double_scratch0();
2392 __ xorps(xmm_scratch, xmm_scratch);
2393 __ ucomisd(xmm_scratch, value);
2394 EmitFalseBranch(instr, not_equal);
2395 __ movmskpd(kScratchRegister, value);
2396 __ testl(kScratchRegister, Immediate(1));
2397 EmitBranch(instr, not_zero);
2399 Register value = ToRegister(instr->value());
2400 Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2401 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
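    // The exponent (upper) word of -0.0 is 0x80000000, i.e. kMinInt, the only
    // value for which the cmpl below sets the overflow flag; no_overflow
    // therefore rules out the upper half of -0.0.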
2402 __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
2404 EmitFalseBranch(instr, no_overflow);
2405 __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
2406 Immediate(0x00000000));
2407 EmitBranch(instr, equal);
2412 Condition LCodeGen::EmitIsObject(Register input,
2413 Label* is_not_object,
2415 DCHECK(!input.is(kScratchRegister));
2417 __ JumpIfSmi(input, is_not_object);
2419 __ CompareRoot(input, Heap::kNullValueRootIndex);
2420 __ j(equal, is_object);
2422 __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
2423 // Undetectable objects behave like undefined.
2424 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
2425 Immediate(1 << Map::kIsUndetectable));
2426 __ j(not_zero, is_not_object);
2428 __ movzxbl(kScratchRegister,
2429 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
2430 __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2431 __ j(below, is_not_object);
2432 __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2437 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2438 Register reg = ToRegister(instr->value());
2440 Condition true_cond = EmitIsObject(
2441 reg, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2443 EmitBranch(instr, true_cond);
2447 Condition LCodeGen::EmitIsString(Register input,
2449 Label* is_not_string,
2450 SmiCheck check_needed = INLINE_SMI_CHECK) {
2451 if (check_needed == INLINE_SMI_CHECK) {
2452 __ JumpIfSmi(input, is_not_string);
2455 Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2461 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2462 Register reg = ToRegister(instr->value());
2463 Register temp = ToRegister(instr->temp());
2465 SmiCheck check_needed =
2466 instr->hydrogen()->value()->type().IsHeapObject()
2467 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2469 Condition true_cond = EmitIsString(
2470 reg, temp, instr->FalseLabel(chunk_), check_needed);
2472 EmitBranch(instr, true_cond);
2476 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2478 if (instr->value()->IsRegister()) {
2479 Register input = ToRegister(instr->value());
2480 is_smi = masm()->CheckSmi(input);
2482 Operand input = ToOperand(instr->value());
2483 is_smi = masm()->CheckSmi(input);
2485 EmitBranch(instr, is_smi);
2489 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2490 Register input = ToRegister(instr->value());
2491 Register temp = ToRegister(instr->temp());
2493 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2494 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2496 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2497 __ testb(FieldOperand(temp, Map::kBitFieldOffset),
2498 Immediate(1 << Map::kIsUndetectable));
2499 EmitBranch(instr, not_zero);
2503 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2504 DCHECK(ToRegister(instr->context()).is(rsi));
2505 Token::Value op = instr->op();
2508 CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
2509 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2511 Condition condition = TokenToCondition(op, false);
2514 EmitBranch(instr, condition);
2518 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2519 InstanceType from = instr->from();
2520 InstanceType to = instr->to();
2521 if (from == FIRST_TYPE) return to;
2522 DCHECK(from == to || to == LAST_TYPE);
2527 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2528 InstanceType from = instr->from();
2529 InstanceType to = instr->to();
2530 if (from == to) return equal;
2531 if (to == LAST_TYPE) return above_equal;
2532 if (from == FIRST_TYPE) return below_equal;
2538 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2539 Register input = ToRegister(instr->value());
2541 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2542 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2545 __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
2546 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2550 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2551 Register input = ToRegister(instr->value());
2552 Register result = ToRegister(instr->result());
2554 __ AssertString(input);
2556 __ movl(result, FieldOperand(input, String::kHashFieldOffset));
2557 DCHECK(String::kHashShift >= kSmiTagSize);
2558 __ IndexFromHash(result, result);
2562 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2563 LHasCachedArrayIndexAndBranch* instr) {
2564 Register input = ToRegister(instr->value());
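  // These mask bits are all clear exactly when the hash field caches an array
  // index, so a zero test result (equal) takes the true branch.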
2566 __ testl(FieldOperand(input, String::kHashFieldOffset),
2567 Immediate(String::kContainsCachedArrayIndexMask));
2568 EmitBranch(instr, equal);
2572 // Branches to a label or falls through with the answer in the z flag.
2573 // Trashes the temp register.
2574 void LCodeGen::EmitClassOfTest(Label* is_true,
2576 Handle<String> class_name,
2580 DCHECK(!input.is(temp));
2581 DCHECK(!input.is(temp2));
2582 DCHECK(!temp.is(temp2));
2584 __ JumpIfSmi(input, is_false);
2586 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2587 // Assuming the following assertions, we can use the same compares to test
2588 // for both being a function type and being in the object type range.
2589 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2590 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2591 FIRST_SPEC_OBJECT_TYPE + 1);
2592 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2593 LAST_SPEC_OBJECT_TYPE - 1);
2594 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2595 __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2596 __ j(below, is_false);
2597 __ j(equal, is_true);
2598 __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2599 __ j(equal, is_true);
2601 // Faster code path to avoid two compares: subtract lower bound from the
2602 // actual type and do a signed compare with the width of the type range.
2603 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2604 __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2605 __ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2606 __ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2607 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2608 __ j(above, is_false);
2611 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2612 // Check if the constructor in the map is a function.
2613 __ GetMapConstructor(temp, temp, kScratchRegister);
2615 // Objects with a non-function constructor have class 'Object'.
2616 __ CmpInstanceType(kScratchRegister, JS_FUNCTION_TYPE);
2617 if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2618 __ j(not_equal, is_true);
2620 __ j(not_equal, is_false);
2623 // temp now contains the constructor function. Grab the
2624 // instance class name from there.
2625 __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2626 __ movp(temp, FieldOperand(temp,
2627 SharedFunctionInfo::kInstanceClassNameOffset));
2628 // The class name we are testing against is internalized since it's a literal.
2629 // The name in the constructor is internalized because of the way the context
2630 // is booted. This routine isn't expected to work for random API-created
2631 // classes and it doesn't have to because you can't access it with natives
2632 // syntax. Since both sides are internalized it is sufficient to use an
2633 // identity comparison.
2634 DCHECK(class_name->IsInternalizedString());
2635 __ Cmp(temp, class_name);
2636 // End with the answer in the z flag.
2640 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2641 Register input = ToRegister(instr->value());
2642 Register temp = ToRegister(instr->temp());
2643 Register temp2 = ToRegister(instr->temp2());
2644 Handle<String> class_name = instr->hydrogen()->class_name();
2646 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2647 class_name, input, temp, temp2);
2649 EmitBranch(instr, equal);
2653 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2654 Register reg = ToRegister(instr->value());
2656 __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2657 EmitBranch(instr, equal);
2661 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2662 DCHECK(ToRegister(instr->context()).is(rsi));
2663 InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
2664 __ Push(ToRegister(instr->left()));
2665 __ Push(ToRegister(instr->right()));
2666 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2667 Label true_value, done;
2669 __ j(zero, &true_value, Label::kNear);
2670 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2671 __ jmp(&done, Label::kNear);
2672 __ bind(&true_value);
2673 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2678 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2679 class DeferredInstanceOfKnownGlobal final : public LDeferredCode {
2681 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2682 LInstanceOfKnownGlobal* instr)
2683 : LDeferredCode(codegen), instr_(instr) { }
2684 void Generate() override {
2685 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2687 LInstruction* instr() override { return instr_; }
2688 Label* map_check() { return &map_check_; }
2690 LInstanceOfKnownGlobal* instr_;
2694 DCHECK(ToRegister(instr->context()).is(rsi));
2695 DeferredInstanceOfKnownGlobal* deferred;
2696 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2698 Label done, false_result;
2699 Register object = ToRegister(instr->value());
2701 // A Smi is not an instance of anything.
2702 __ JumpIfSmi(object, &false_result, Label::kNear);
2704   // This is the inlined call site instanceof cache. The two occurrences of the
2705   // hole value will be patched to the last map/result pair generated by the
2706   // instanceof stub.
2708 // Use a temp register to avoid memory operands with variable lengths.
2709 Register map = ToRegister(instr->temp());
2710 __ movp(map, FieldOperand(object, HeapObject::kMapOffset));
2711 __ bind(deferred->map_check()); // Label for calculating code patching.
2712 Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
2713 __ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
2714 __ cmpp(map, Operand(kScratchRegister, 0));
2715 __ j(not_equal, &cache_miss, Label::kNear);
2716 // Patched to load either true or false.
2717 __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
2719 // Check that the code size between patch label and patch sites is invariant.
2720 Label end_of_patched_code;
2721 __ bind(&end_of_patched_code);
2724 __ jmp(&done, Label::kNear);
2726 // The inlined call site cache did not match. Check for null and string
2727 // before calling the deferred code.
2728 __ bind(&cache_miss); // Null is not an instance of anything.
2729 __ CompareRoot(object, Heap::kNullValueRootIndex);
2730 __ j(equal, &false_result, Label::kNear);
2732 // String values are not instances of anything.
2733 __ JumpIfNotString(object, kScratchRegister, deferred->entry());
2735 __ bind(&false_result);
2736 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2738 __ bind(deferred->exit());
2743 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2746 PushSafepointRegistersScope scope(this);
2747 InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
2748 InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
2749 InstanceofStub stub(isolate(), flags);
2751 __ Push(ToRegister(instr->value()));
2752 __ Push(instr->function());
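  // Push the code-size delta from the inlined map check to this call site; the
  // stub uses it to locate and patch the cached map/result. The DCHECK after
  // the call verifies that kAdditionalDelta matches the code emitted here.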
2754 static const int kAdditionalDelta = kPointerSize == kInt64Size ? 10 : 16;
2756 masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
2758 __ PushImm32(delta);
2760 // We are pushing three values on the stack but recording a
2761   // safepoint with two arguments because the stub is going to
2762   // remove the third argument from the stack before jumping
2763   // to the instanceof builtin on the slow path.
2764 CallCodeGeneric(stub.GetCode(),
2765 RelocInfo::CODE_TARGET,
2767 RECORD_SAFEPOINT_WITH_REGISTERS,
2769 DCHECK(delta == masm_->SizeOfCodeGeneratedSince(map_check));
2770 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2771 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2772 // Move result to a register that survives the end of the
2773 // PushSafepointRegisterScope.
2774 __ movp(kScratchRegister, rax);
2776 __ testp(kScratchRegister, kScratchRegister);
2779 __ j(not_zero, &load_false, Label::kNear);
2780 __ LoadRoot(rax, Heap::kTrueValueRootIndex);
2781 __ jmp(&done, Label::kNear);
2782 __ bind(&load_false);
2783 __ LoadRoot(rax, Heap::kFalseValueRootIndex);
2788 void LCodeGen::DoCmpT(LCmpT* instr) {
2789 DCHECK(ToRegister(instr->context()).is(rsi));
2790 Token::Value op = instr->op();
2793 CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
2794 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2796 Condition condition = TokenToCondition(op, false);
2797 Label true_value, done;
2799 __ j(condition, &true_value, Label::kNear);
2800 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2801 __ jmp(&done, Label::kNear);
2802 __ bind(&true_value);
2803 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2808 void LCodeGen::DoReturn(LReturn* instr) {
2809 if (FLAG_trace && info()->IsOptimizing()) {
2810 // Preserve the return value on the stack and rely on the runtime call
2811 // to return the value in the same register. We're leaving the code
2812     // managed by the register allocator and tearing down the frame, so it's
2813 // safe to write to the context register.
2815 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
2816 __ CallRuntime(Runtime::kTraceExit, 1);
2818 if (info()->saves_caller_doubles()) {
2819 RestoreCallerDoubles();
2821 int no_frame_start = -1;
2822 if (NeedsEagerFrame()) {
2825 no_frame_start = masm_->pc_offset();
2827 if (instr->has_constant_parameter_count()) {
2828 __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
2831 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2832 Register reg = ToRegister(instr->parameter_count());
2833 // The argument count parameter is a smi
2834 __ SmiToInteger32(reg, reg);
2835 Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
2836 __ PopReturnAddressTo(return_addr_reg);
2837 __ shlp(reg, Immediate(kPointerSizeLog2));
2839 __ jmp(return_addr_reg);
2841 if (no_frame_start != -1) {
2842 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2848 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2849 Register vector_register = ToRegister(instr->temp_vector());
2850 Register slot_register = LoadWithVectorDescriptor::SlotRegister();
2851 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
2852 DCHECK(slot_register.is(rax));
2854 AllowDeferredHandleDereference vector_structure_check;
2855 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2856 __ Move(vector_register, vector);
2857 // No need to allocate this register.
2858 FeedbackVectorICSlot slot = instr->hydrogen()->slot();
2859 int index = vector->GetIndex(slot);
2860 __ Move(slot_register, Smi::FromInt(index));
2865 void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
2866 Register vector_register = ToRegister(instr->temp_vector());
2867 Register slot_register = ToRegister(instr->temp_slot());
2869 AllowDeferredHandleDereference vector_structure_check;
2870 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2871 __ Move(vector_register, vector);
2872 FeedbackVectorICSlot slot = instr->hydrogen()->slot();
2873 int index = vector->GetIndex(slot);
2874 __ Move(slot_register, Smi::FromInt(index));
2878 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2879 DCHECK(ToRegister(instr->context()).is(rsi));
2880 DCHECK(ToRegister(instr->global_object())
2881 .is(LoadDescriptor::ReceiverRegister()));
2882 DCHECK(ToRegister(instr->result()).is(rax));
2884 __ Move(LoadDescriptor::NameRegister(), instr->name());
2885 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2887 CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
2888 SLOPPY, PREMONOMORPHIC).code();
2889 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2893 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2894 Register context = ToRegister(instr->context());
2895 Register result = ToRegister(instr->result());
2896 __ movp(result, ContextOperand(context, instr->slot_index()));
2897 if (instr->hydrogen()->RequiresHoleCheck()) {
2898 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2899 if (instr->hydrogen()->DeoptimizesOnHole()) {
2900 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2903 __ j(not_equal, &is_not_hole, Label::kNear);
2904 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2905 __ bind(&is_not_hole);
2911 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2912 Register context = ToRegister(instr->context());
2913 Register value = ToRegister(instr->value());
2915 Operand target = ContextOperand(context, instr->slot_index());
2917 Label skip_assignment;
2918 if (instr->hydrogen()->RequiresHoleCheck()) {
2919 __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
2920 if (instr->hydrogen()->DeoptimizesOnHole()) {
2921 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2923 __ j(not_equal, &skip_assignment);
2926 __ movp(target, value);
2928 if (instr->hydrogen()->NeedsWriteBarrier()) {
2929 SmiCheck check_needed =
2930 instr->hydrogen()->value()->type().IsHeapObject()
2931 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2932 int offset = Context::SlotOffset(instr->slot_index());
2933 Register scratch = ToRegister(instr->temp());
2934 __ RecordWriteContextSlot(context,
2939 EMIT_REMEMBERED_SET,
2943 __ bind(&skip_assignment);
2947 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2948 HObjectAccess access = instr->hydrogen()->access();
2949 int offset = access.offset();
2951 if (access.IsExternalMemory()) {
2952 Register result = ToRegister(instr->result());
2953 if (instr->object()->IsConstantOperand()) {
2954 DCHECK(result.is(rax));
2955 __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
2957 Register object = ToRegister(instr->object());
2958 __ Load(result, MemOperand(object, offset), access.representation());
2963 Register object = ToRegister(instr->object());
2964 if (instr->hydrogen()->representation().IsDouble()) {
2965 DCHECK(access.IsInobject());
2966 XMMRegister result = ToDoubleRegister(instr->result());
2967 __ movsd(result, FieldOperand(object, offset));
2971 Register result = ToRegister(instr->result());
2972 if (!access.IsInobject()) {
2973 __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
2977 Representation representation = access.representation();
2978 if (representation.IsSmi() && SmiValuesAre32Bits() &&
2979 instr->hydrogen()->representation().IsInteger32()) {
2980 if (FLAG_debug_code) {
2981 Register scratch = kScratchRegister;
2982 __ Load(scratch, FieldOperand(object, offset), representation);
2983 __ AssertSmi(scratch);
2986 // Read int value directly from upper half of the smi.
2987 STATIC_ASSERT(kSmiTag == 0);
2988 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
2989 offset += kPointerSize / 2;
2990 representation = Representation::Integer32();
2992 __ Load(result, FieldOperand(object, offset), representation);
2996 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2997 DCHECK(ToRegister(instr->context()).is(rsi));
2998 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
2999 DCHECK(ToRegister(instr->result()).is(rax));
3001 __ Move(LoadDescriptor::NameRegister(), instr->name());
3002 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3004 CodeFactory::LoadICInOptimizedCode(
3005 isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
3006 instr->hydrogen()->initialization_state()).code();
3007 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3011 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3012 Register function = ToRegister(instr->function());
3013 Register result = ToRegister(instr->result());
3015 // Get the prototype or initial map from the function.
3017 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3019 // Check that the function has a prototype or an initial map.
3020 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3021 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
3023 // If the function does not have an initial map, we're done.
3025 __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
3026 __ j(not_equal, &done, Label::kNear);
3028 // Get the prototype from the initial map.
3029 __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
3036 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3037 Register result = ToRegister(instr->result());
3038 __ LoadRoot(result, instr->index());
3042 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3043 Register arguments = ToRegister(instr->arguments());
3044 Register result = ToRegister(instr->result());
3046 if (instr->length()->IsConstantOperand() &&
3047 instr->index()->IsConstantOperand()) {
3048 int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3049 int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3050 if (const_index >= 0 && const_index < const_length) {
3051 StackArgumentsAccessor args(arguments, const_length,
3052 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3053 __ movp(result, args.GetArgumentOperand(const_index));
3054 } else if (FLAG_debug_code) {
3058 Register length = ToRegister(instr->length());
3059 // There are two words between the frame pointer and the last argument.
3060     // Subtracting from length accounts for one of them; add one more.
3061 if (instr->index()->IsRegister()) {
3062 __ subl(length, ToRegister(instr->index()));
3064 __ subl(length, ToOperand(instr->index()));
3066 StackArgumentsAccessor args(arguments, length,
3067 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3068 __ movp(result, args.GetArgumentOperand(0));
3073 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3074 ElementsKind elements_kind = instr->elements_kind();
3075 LOperand* key = instr->key();
3076 if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
3077 Register key_reg = ToRegister(key);
3078 Representation key_representation =
3079 instr->hydrogen()->key()->representation();
3080 if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
3081 __ SmiToInteger64(key_reg, key_reg);
3082 } else if (instr->hydrogen()->IsDehoisted()) {
3083 // Sign extend key because it could be a 32 bit negative value
3084 // and the dehoisted address computation happens in 64 bits
3085 __ movsxlq(key_reg, key_reg);
3088 Operand operand(BuildFastArrayOperand(
3091 instr->hydrogen()->key()->representation(),
3093 instr->base_offset()));
3095 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3096 elements_kind == FLOAT32_ELEMENTS) {
3097 XMMRegister result(ToDoubleRegister(instr->result()));
3098 __ movss(result, operand);
3099 __ cvtss2sd(result, result);
3100 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3101 elements_kind == FLOAT64_ELEMENTS) {
3102 __ movsd(ToDoubleRegister(instr->result()), operand);
3104 Register result(ToRegister(instr->result()));
3105 switch (elements_kind) {
3106 case EXTERNAL_INT8_ELEMENTS:
3108 __ movsxbl(result, operand);
3110 case EXTERNAL_UINT8_ELEMENTS:
3111 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3112 case UINT8_ELEMENTS:
3113 case UINT8_CLAMPED_ELEMENTS:
3114 __ movzxbl(result, operand);
3116 case EXTERNAL_INT16_ELEMENTS:
3117 case INT16_ELEMENTS:
3118 __ movsxwl(result, operand);
3120 case EXTERNAL_UINT16_ELEMENTS:
3121 case UINT16_ELEMENTS:
3122 __ movzxwl(result, operand);
3124 case EXTERNAL_INT32_ELEMENTS:
3125 case INT32_ELEMENTS:
3126 __ movl(result, operand);
3128 case EXTERNAL_UINT32_ELEMENTS:
3129 case UINT32_ELEMENTS:
3130 __ movl(result, operand);
3131 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
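          // The result is consumed as a signed int32, so a uint32 value with
          // the sign bit set cannot be represented; deopt on negative.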
3132 __ testl(result, result);
3133 DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
3136 case EXTERNAL_FLOAT32_ELEMENTS:
3137 case EXTERNAL_FLOAT64_ELEMENTS:
3138 case FLOAT32_ELEMENTS:
3139 case FLOAT64_ELEMENTS:
3141 case FAST_SMI_ELEMENTS:
3142 case FAST_DOUBLE_ELEMENTS:
3143 case FAST_HOLEY_ELEMENTS:
3144 case FAST_HOLEY_SMI_ELEMENTS:
3145 case FAST_HOLEY_DOUBLE_ELEMENTS:
3146 case DICTIONARY_ELEMENTS:
3147 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
3148 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
3156 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3157 XMMRegister result(ToDoubleRegister(instr->result()));
3158 LOperand* key = instr->key();
3159 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
3160 instr->hydrogen()->IsDehoisted()) {
3161 // Sign extend key because it could be a 32 bit negative value
3162 // and the dehoisted address computation happens in 64 bits
3163 __ movsxlq(ToRegister(key), ToRegister(key));
3165 if (instr->hydrogen()->RequiresHoleCheck()) {
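    // The hole is stored as a NaN with a distinctive upper 32 bits; offsetting
    // by sizeof(kHoleNanLower32) addresses that upper half for the comparison.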
3166 Operand hole_check_operand = BuildFastArrayOperand(
3169 instr->hydrogen()->key()->representation(),
3170 FAST_DOUBLE_ELEMENTS,
3171 instr->base_offset() + sizeof(kHoleNanLower32));
3172 __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
3173 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
3176 Operand double_load_operand = BuildFastArrayOperand(
3179 instr->hydrogen()->key()->representation(),
3180 FAST_DOUBLE_ELEMENTS,
3181 instr->base_offset());
3182 __ movsd(result, double_load_operand);
3186 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3187 HLoadKeyed* hinstr = instr->hydrogen();
3188 Register result = ToRegister(instr->result());
3189 LOperand* key = instr->key();
3190 bool requires_hole_check = hinstr->RequiresHoleCheck();
3191 Representation representation = hinstr->representation();
3192 int offset = instr->base_offset();
3194 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
3195 instr->hydrogen()->IsDehoisted()) {
3196 // Sign extend key because it could be a 32 bit negative value
3197 // and the dehoisted address computation happens in 64 bits
3198 __ movsxlq(ToRegister(key), ToRegister(key));
3200 if (representation.IsInteger32() && SmiValuesAre32Bits() &&
3201 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3202 DCHECK(!requires_hole_check);
3203 if (FLAG_debug_code) {
3204 Register scratch = kScratchRegister;
3206 BuildFastArrayOperand(instr->elements(),
3208 instr->hydrogen()->key()->representation(),
3211 Representation::Smi());
3212 __ AssertSmi(scratch);
3214 // Read int value directly from upper half of the smi.
3215 STATIC_ASSERT(kSmiTag == 0);
3216 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
3217 offset += kPointerSize / 2;
3221 BuildFastArrayOperand(instr->elements(), key,
3222 instr->hydrogen()->key()->representation(),
3223 FAST_ELEMENTS, offset),
3226 // Check for the hole value.
3227 if (requires_hole_check) {
3228 if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3229 Condition smi = __ CheckSmi(result);
3230 DeoptimizeIf(NegateCondition(smi), instr, Deoptimizer::kNotASmi);
3232 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3233 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
3235 } else if (hinstr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
3236 DCHECK(hinstr->elements_kind() == FAST_HOLEY_ELEMENTS);
3238 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3239 __ j(not_equal, &done);
3240 if (info()->IsStub()) {
3241 // A stub can safely convert the hole to undefined only if the array
3242 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
3243 // it needs to bail out.
3244 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
3245 __ Cmp(FieldOperand(result, Cell::kValueOffset),
3246 Smi::FromInt(Isolate::kArrayProtectorValid));
3247 DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
3249 __ Move(result, isolate()->factory()->undefined_value());
3255 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3256 if (instr->is_typed_elements()) {
3257 DoLoadKeyedExternalArray(instr);
3258 } else if (instr->hydrogen()->representation().IsDouble()) {
3259 DoLoadKeyedFixedDoubleArray(instr);
3261 DoLoadKeyedFixedArray(instr);
3266 Operand LCodeGen::BuildFastArrayOperand(
3267 LOperand* elements_pointer,
3269 Representation key_representation,
3270 ElementsKind elements_kind,
3272 Register elements_pointer_reg = ToRegister(elements_pointer);
3273 int shift_size = ElementsKindToShiftSize(elements_kind);
3274 if (key->IsConstantOperand()) {
3275 int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
3276 if (constant_value & 0xF0000000) {
3277 Abort(kArrayIndexConstantValueTooBig);
3279 return Operand(elements_pointer_reg,
3280 (constant_value << shift_size) + offset);
3282 // Guaranteed by ArrayInstructionInterface::KeyedAccessIndexRequirement().
3283 DCHECK(key_representation.IsInteger32());
3285 ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3286 return Operand(elements_pointer_reg,
3294 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3295 DCHECK(ToRegister(instr->context()).is(rsi));
3296 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3297 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3299 if (instr->hydrogen()->HasVectorAndSlot()) {
3300 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3303 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
3304 isolate(), instr->hydrogen()->language_mode(),
3305 instr->hydrogen()->initialization_state()).code();
3306 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3310 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3311 Register result = ToRegister(instr->result());
3313 if (instr->hydrogen()->from_inlined()) {
3314 __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
3316 // Check for arguments adapter frame.
3317 Label done, adapted;
3318 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3319 __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
3320 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3321 __ j(equal, &adapted, Label::kNear);
3323 // No arguments adaptor frame.
3324 __ movp(result, rbp);
3325 __ jmp(&done, Label::kNear);
3327 // Arguments adaptor frame present.
3329 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3331 // Result is the frame pointer for the frame if not adapted and for the real
3332 // frame below the adaptor frame if adapted.
3338 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3339 Register result = ToRegister(instr->result());
3343   // If there is no arguments adaptor frame, the number of arguments is fixed.
3344 if (instr->elements()->IsRegister()) {
3345 __ cmpp(rbp, ToRegister(instr->elements()));
3347 __ cmpp(rbp, ToOperand(instr->elements()));
3349 __ movl(result, Immediate(scope()->num_parameters()));
3350 __ j(equal, &done, Label::kNear);
3352 // Arguments adaptor frame present. Get argument length from there.
3353 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3354 __ SmiToInteger32(result,
3356 ArgumentsAdaptorFrameConstants::kLengthOffset));
3358 // Argument length is in result register.
3363 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3364 Register receiver = ToRegister(instr->receiver());
3365 Register function = ToRegister(instr->function());
3367 // If the receiver is null or undefined, we have to pass the global
3368 // object as a receiver to normal functions. Values have to be
3369 // passed unchanged to builtins and strict-mode functions.
3370 Label global_object, receiver_ok;
3371 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3373 if (!instr->hydrogen()->known_function()) {
3374     // Do not transform the receiver to object for strict mode
3375     // functions.
3376 __ movp(kScratchRegister,
3377 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3378 __ testb(FieldOperand(kScratchRegister,
3379 SharedFunctionInfo::kStrictModeByteOffset),
3380 Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
3381 __ j(not_equal, &receiver_ok, dist);
3383 // Do not transform the receiver to object for builtins.
3384 __ testb(FieldOperand(kScratchRegister,
3385 SharedFunctionInfo::kNativeByteOffset),
3386 Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
3387 __ j(not_equal, &receiver_ok, dist);
3390 // Normal function. Replace undefined or null with global receiver.
3391 __ CompareRoot(receiver, Heap::kNullValueRootIndex);
3392 __ j(equal, &global_object, Label::kNear);
3393 __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
3394 __ j(equal, &global_object, Label::kNear);
3396 // The receiver should be a JS object.
3397 Condition is_smi = __ CheckSmi(receiver);
3398 DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
3399 __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
3400 DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
3402 __ jmp(&receiver_ok, Label::kNear);
3403 __ bind(&global_object);
3404 __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
3407 Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
3408 __ movp(receiver, FieldOperand(receiver, GlobalObject::kGlobalProxyOffset));
3410 __ bind(&receiver_ok);
3414 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3415 Register receiver = ToRegister(instr->receiver());
3416 Register function = ToRegister(instr->function());
3417 Register length = ToRegister(instr->length());
3418 Register elements = ToRegister(instr->elements());
3419 DCHECK(receiver.is(rax)); // Used for parameter count.
3420 DCHECK(function.is(rdi)); // Required by InvokeFunction.
3421 DCHECK(ToRegister(instr->result()).is(rax));
3423 // Copy the arguments to this function possibly from the
3424 // adaptor frame below it.
3425 const uint32_t kArgumentsLimit = 1 * KB;
3426 __ cmpp(length, Immediate(kArgumentsLimit));
3427 DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
3430 __ movp(receiver, length);
3432   // Loop through the arguments pushing them onto the execution
3433   // stack.
3435 // length is a small non-negative integer, due to the test above.
3436 __ testl(length, length);
3437 __ j(zero, &invoke, Label::kNear);
3439 StackArgumentsAccessor args(elements, length,
3440 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3441 __ Push(args.GetArgumentOperand(0));
3443 __ j(not_zero, &loop);
3445 // Invoke the function.
3447 DCHECK(instr->HasPointerMap());
3448 LPointerMap* pointers = instr->pointer_map();
3449 SafepointGenerator safepoint_generator(
3450 this, pointers, Safepoint::kLazyDeopt);
3451 ParameterCount actual(rax);
3452 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3456 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3457 LOperand* argument = instr->value();
3458 EmitPushTaggedOperand(argument);
3462 void LCodeGen::DoDrop(LDrop* instr) {
3463 __ Drop(instr->count());
3467 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3468 Register result = ToRegister(instr->result());
3469 __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
3473 void LCodeGen::DoContext(LContext* instr) {
3474 Register result = ToRegister(instr->result());
3475 if (info()->IsOptimizing()) {
3476 __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
3478 // If there is no frame, the context must be in rsi.
3479 DCHECK(result.is(rsi));
3484 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3485 DCHECK(ToRegister(instr->context()).is(rsi));
3486 __ Push(rsi); // The context is the first argument.
3487 __ Push(instr->hydrogen()->pairs());
3488 __ Push(Smi::FromInt(instr->hydrogen()->flags()));
3489 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3493 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3494 int formal_parameter_count, int arity,
3495 LInstruction* instr) {
3496 bool dont_adapt_arguments =
3497 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3498 bool can_invoke_directly =
3499 dont_adapt_arguments || formal_parameter_count == arity;
3501 Register function_reg = rdi;
3502 LPointerMap* pointers = instr->pointer_map();
3504 if (can_invoke_directly) {
3506 __ movp(rsi, FieldOperand(function_reg, JSFunction::kContextOffset));
3508     // Set rax to arguments count if adaptation is not needed. Assumes that rax
3509 // is available to write to at this point.
3510 if (dont_adapt_arguments) {
3515 if (function.is_identical_to(info()->closure())) {
3518 __ Call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
3521 // Set up deoptimization.
3522 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
3524 // We need to adapt arguments.
3525 SafepointGenerator generator(
3526 this, pointers, Safepoint::kLazyDeopt);
3527 ParameterCount count(arity);
3528 ParameterCount expected(formal_parameter_count);
3529 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
3534 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3535 DCHECK(ToRegister(instr->result()).is(rax));
3537 if (instr->hydrogen()->IsTailCall()) {
3538 if (NeedsEagerFrame()) __ leave();
3540 if (instr->target()->IsConstantOperand()) {
3541 LConstantOperand* target = LConstantOperand::cast(instr->target());
3542 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3543 __ jmp(code, RelocInfo::CODE_TARGET);
3545 DCHECK(instr->target()->IsRegister());
3546 Register target = ToRegister(instr->target());
3547 __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3551 LPointerMap* pointers = instr->pointer_map();
3552 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3554 if (instr->target()->IsConstantOperand()) {
3555 LConstantOperand* target = LConstantOperand::cast(instr->target());
3556 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3557 generator.BeforeCall(__ CallSize(code));
3558 __ call(code, RelocInfo::CODE_TARGET);
3560 DCHECK(instr->target()->IsRegister());
3561 Register target = ToRegister(instr->target());
3562 generator.BeforeCall(__ CallSize(target));
3563 __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3566 generator.AfterCall();
3571 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3572 DCHECK(ToRegister(instr->function()).is(rdi));
3573 DCHECK(ToRegister(instr->result()).is(rax));
3575 if (instr->hydrogen()->pass_argument_count()) {
3576 __ Set(rax, instr->arity());
3580 __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3582 LPointerMap* pointers = instr->pointer_map();
3583 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3585 bool is_self_call = false;
3586 if (instr->hydrogen()->function()->IsConstant()) {
3587 Handle<JSFunction> jsfun = Handle<JSFunction>::null();
3588 HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3589 jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
3590 is_self_call = jsfun.is_identical_to(info()->closure());
3596 Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
3597 generator.BeforeCall(__ CallSize(target));
3600 generator.AfterCall();
3604 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3605 Register input_reg = ToRegister(instr->value());
3606 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3607 Heap::kHeapNumberMapRootIndex);
3608 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
3610 Label slow, allocated, done;
3611 Register tmp = input_reg.is(rax) ? rcx : rax;
3612 Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
3614 // Preserve the value of all registers.
3615 PushSafepointRegistersScope scope(this);
3617 __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3618 // Check the sign of the argument. If the argument is positive, just
3619 // return it. We do not need to patch the stack since |input| and
3620 // |result| are the same register and |input| will be restored
3621 // unchanged by popping safepoint registers.
3622 __ testl(tmp, Immediate(HeapNumber::kSignMask));
3625 __ AllocateHeapNumber(tmp, tmp2, &slow);
3626 __ jmp(&allocated, Label::kNear);
3628 // Slow case: Call the runtime system to do the number allocation.
3630 CallRuntimeFromDeferred(
3631 Runtime::kAllocateHeapNumber, 0, instr, instr->context());
3632 // Set the pointer to the new heap number in tmp.
3633 if (!tmp.is(rax)) __ movp(tmp, rax);
3634 // Restore input_reg after call to runtime.
3635 __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3637 __ bind(&allocated);
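  // Compute the absolute value by clearing the sign bit of the raw double:
  // shift it out to the left and back in as zero.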
3638 __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
3639 __ shlq(tmp2, Immediate(1));
3640 __ shrq(tmp2, Immediate(1));
3641 __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
3642 __ StoreToSafepointRegisterSlot(input_reg, tmp);
3648 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3649 Register input_reg = ToRegister(instr->value());
3650 __ testl(input_reg, input_reg);
3652 __ j(not_sign, &is_positive, Label::kNear);
3653 __ negl(input_reg); // Sets flags.
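  // negl of kMinInt yields kMinInt again (still negative), so a set sign flag
  // here means the negation overflowed.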
3654 DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
3655 __ bind(&is_positive);
3659 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
3660 Register input_reg = ToRegister(instr->value());
3661 __ testp(input_reg, input_reg);
3663 __ j(not_sign, &is_positive, Label::kNear);
3664 __ negp(input_reg); // Sets flags.
3665 DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
3666 __ bind(&is_positive);
3670 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3671 // Class for deferred case.
3672 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3674 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3675 : LDeferredCode(codegen), instr_(instr) { }
3676 void Generate() override {
3677 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3679 LInstruction* instr() override { return instr_; }
3685 DCHECK(instr->value()->Equals(instr->result()));
3686 Representation r = instr->hydrogen()->value()->representation();
3689 XMMRegister scratch = double_scratch0();
3690 XMMRegister input_reg = ToDoubleRegister(instr->value());
3691 __ xorps(scratch, scratch);
3692 __ subsd(scratch, input_reg);
3693 __ andps(input_reg, scratch);
3694 } else if (r.IsInteger32()) {
3695 EmitIntegerMathAbs(instr);
3696 } else if (r.IsSmi()) {
3697 EmitSmiMathAbs(instr);
3698 } else { // Tagged case.
3699 DeferredMathAbsTaggedHeapNumber* deferred =
3700 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3701 Register input_reg = ToRegister(instr->value());
3703 __ JumpIfNotSmi(input_reg, deferred->entry());
3704 EmitSmiMathAbs(instr);
3705 __ bind(deferred->exit());
3710 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3711 XMMRegister xmm_scratch = double_scratch0();
3712 Register output_reg = ToRegister(instr->result());
3713 XMMRegister input_reg = ToDoubleRegister(instr->value());
3715 if (CpuFeatures::IsSupported(SSE4_1)) {
3716 CpuFeatureScope scope(masm(), SSE4_1);
3717 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3718 // Deoptimize if minus zero.
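      // movq copies the raw bits: -0.0 is 0x8000000000000000 (kMinInt64), the
      // only value for which subtracting 1 sets the overflow flag.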
3719 __ movq(output_reg, input_reg);
3720 __ subq(output_reg, Immediate(1));
3721 DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero);
3723 __ roundsd(xmm_scratch, input_reg, kRoundDown);
3724 __ cvttsd2si(output_reg, xmm_scratch);
3725 __ cmpl(output_reg, Immediate(0x1));
3726 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3728 Label negative_sign, done;
3729 // Deoptimize on unordered.
3730 __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3731 __ ucomisd(input_reg, xmm_scratch);
3732 DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
3733 __ j(below, &negative_sign, Label::kNear);
3735 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3736 // Check for negative zero.
3737 Label positive_sign;
3738 __ j(above, &positive_sign, Label::kNear);
3739 __ movmskpd(output_reg, input_reg);
3740 __ testq(output_reg, Immediate(1));
3741 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
3742 __ Set(output_reg, 0);
3744 __ bind(&positive_sign);
3747 // Use truncating instruction (OK because input is positive).
3748 __ cvttsd2si(output_reg, input_reg);
3749 // Overflow is signalled with minint.
3750 __ cmpl(output_reg, Immediate(0x1));
3751 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3752 __ jmp(&done, Label::kNear);
3754 // Non-zero negative reaches here.
3755 __ bind(&negative_sign);
3756 // Truncate, then compare and compensate.
3757 __ cvttsd2si(output_reg, input_reg);
3758 __ Cvtlsi2sd(xmm_scratch, output_reg);
3759 __ ucomisd(input_reg, xmm_scratch);
3760 __ j(equal, &done, Label::kNear);
3761 __ subl(output_reg, Immediate(1));
3762 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
    __ bind(&done);
  }
}
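// Worked example for the non-SSE4.1 floor path above: for an input of -2.5,
// cvttsd2si truncates toward zero and yields -2; converting back gives -2.0,
// which compares unequal to -2.5, so 1 is subtracted to produce -3, the
// correct Math.floor result. For exact negative integers the compare is
// equal and no compensation happens.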
3769 void LCodeGen::DoMathRound(LMathRound* instr) {
3770 const XMMRegister xmm_scratch = double_scratch0();
3771 Register output_reg = ToRegister(instr->result());
3772 XMMRegister input_reg = ToDoubleRegister(instr->value());
3773 XMMRegister input_temp = ToDoubleRegister(instr->temp());
3774 static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
3775 static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
3777 Label done, round_to_zero, below_one_half;
3778 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3779 __ movq(kScratchRegister, one_half);
3780 __ movq(xmm_scratch, kScratchRegister);
3781 __ ucomisd(xmm_scratch, input_reg);
3782 __ j(above, &below_one_half, Label::kNear);
3784 // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
3785 __ addsd(xmm_scratch, input_reg);
3786 __ cvttsd2si(output_reg, xmm_scratch);
3787 // Overflow is signalled with minint.
3788 __ cmpl(output_reg, Immediate(0x1));
3789 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3790 __ jmp(&done, dist);
3792 __ bind(&below_one_half);
3793 __ movq(kScratchRegister, minus_one_half);
3794 __ movq(xmm_scratch, kScratchRegister);
3795 __ ucomisd(xmm_scratch, input_reg);
3796 __ j(below_equal, &round_to_zero, Label::kNear);
3798 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
3799 // compare and compensate.
3800 __ movq(input_temp, input_reg); // Do not alter input_reg.
3801 __ subsd(input_temp, xmm_scratch);
3802 __ cvttsd2si(output_reg, input_temp);
3803 // Catch minint due to overflow, and to prevent overflow when compensating.
3804 __ cmpl(output_reg, Immediate(0x1));
3805 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3807 __ Cvtlsi2sd(xmm_scratch, output_reg);
3808 __ ucomisd(xmm_scratch, input_temp);
3809 __ j(equal, &done, dist);
3810 __ subl(output_reg, Immediate(1));
3811 // No overflow because we already ruled out minint.
3812 __ jmp(&done, dist);
3814 __ bind(&round_to_zero);
3815 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
3816 // we can ignore the difference between a result of -0 and +0.
3817 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3818 __ movq(output_reg, input_reg);
3819 __ testq(output_reg, output_reg);
3820 DeoptimizeIf(negative, instr, Deoptimizer::kMinusZero);
  }
3822 __ Set(output_reg, 0);
  __ bind(&done);
}
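// Worked examples for the rounding above: Math.round(2.5) takes the
// x >= 0.5 path, 2.5 + 0.5 = 3.0 truncates to 3. Math.round(-2.5) takes the
// below_one_half path: -2.5 - (-0.5) = -2.0 truncates to -2 and the
// compensation compare is equal, so the result is -2 (JS rounds halfway
// cases toward +Infinity).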
3827 void LCodeGen::DoMathFround(LMathFround* instr) {
3828 XMMRegister input_reg = ToDoubleRegister(instr->value());
3829 XMMRegister output_reg = ToDoubleRegister(instr->result());
3830 __ cvtsd2ss(output_reg, input_reg);
3831 __ cvtss2sd(output_reg, output_reg);
}
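// Math.fround rounds through the nearest float32: the cvtsd2ss/cvtss2sd pair
// above collapses the extra double precision, e.g. fround(1.1) becomes
// approximately 1.100000023841858, the closest representable float32.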
3835 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3836 XMMRegister output = ToDoubleRegister(instr->result());
3837 if (instr->value()->IsDoubleRegister()) {
3838 XMMRegister input = ToDoubleRegister(instr->value());
3839 __ sqrtsd(output, input);
  } else {
3841 Operand input = ToOperand(instr->value());
3842 __ sqrtsd(output, input);
  }
}
3847 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3848 XMMRegister xmm_scratch = double_scratch0();
3849 XMMRegister input_reg = ToDoubleRegister(instr->value());
3850 DCHECK(ToDoubleRegister(instr->result()).is(input_reg));

  Label done, sqrt;
3852 // Note that according to ECMA-262 15.8.2.13:
3853 // Math.pow(-Infinity, 0.5) == Infinity
3854 // Math.sqrt(-Infinity) == NaN
3856 // Check base for -Infinity. According to IEEE-754, double-precision
3857 // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
3858 __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
3859 __ movq(xmm_scratch, kScratchRegister);
3860 __ ucomisd(xmm_scratch, input_reg);
3861 // Comparing -Infinity with NaN results in "unordered", which sets the
3862 // zero flag as if both were equal. However, it also sets the carry flag.
3863 __ j(not_equal, &sqrt, Label::kNear);
3864 __ j(carry, &sqrt, Label::kNear);
3865 // If input is -Infinity, return Infinity.
3866 __ xorps(input_reg, input_reg);
3867 __ subsd(input_reg, xmm_scratch);
3868 __ jmp(&done, Label::kNear);

  // Square root.
  __ bind(&sqrt);
3872 __ xorps(xmm_scratch, xmm_scratch);
3873 __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
3874 __ sqrtsd(input_reg, input_reg);
  __ bind(&done);
}
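// The sqrt path first adds +0: IEEE-754 sqrt(-0) is -0, but Math.pow(-0, 0.5)
// must be +0, and -0 + (+0) = +0 under the default rounding mode, so the
// xorps/addsd pair normalizes the sign before sqrtsd.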
3879 void LCodeGen::DoPower(LPower* instr) {
3880 Representation exponent_type = instr->hydrogen()->right()->representation();
3881 // Having marked this as a call, we can use any registers.
3882 // Just make sure that the input/output registers are the expected ones.
3884 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3885 DCHECK(!instr->right()->IsRegister() ||
3886 ToRegister(instr->right()).is(tagged_exponent));
3887 DCHECK(!instr->right()->IsDoubleRegister() ||
3888 ToDoubleRegister(instr->right()).is(xmm1));
3889 DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
3890 DCHECK(ToDoubleRegister(instr->result()).is(xmm3));
3892 if (exponent_type.IsSmi()) {
3893 MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
3895 } else if (exponent_type.IsTagged()) {
    Label no_deopt;
3897 __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
3898 __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
3899 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
    __ bind(&no_deopt);
3901 MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
3903 } else if (exponent_type.IsInteger32()) {
3904 MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
3907 DCHECK(exponent_type.IsDouble());
3908 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}
3914 void LCodeGen::DoMathExp(LMathExp* instr) {
3915 XMMRegister input = ToDoubleRegister(instr->value());
3916 XMMRegister result = ToDoubleRegister(instr->result());
3917 XMMRegister temp0 = double_scratch0();
3918 Register temp1 = ToRegister(instr->temp1());
3919 Register temp2 = ToRegister(instr->temp2());
3921 MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
3925 void LCodeGen::DoMathLog(LMathLog* instr) {
3926 DCHECK(instr->value()->Equals(instr->result()));
3927 XMMRegister input_reg = ToDoubleRegister(instr->value());
3928 XMMRegister xmm_scratch = double_scratch0();
3929 Label positive, done, zero;
3930 __ xorps(xmm_scratch, xmm_scratch);
3931 __ ucomisd(input_reg, xmm_scratch);
3932 __ j(above, &positive, Label::kNear);
3933 __ j(not_carry, &zero, Label::kNear);
3934 __ pcmpeqd(input_reg, input_reg);
3935 __ jmp(&done, Label::kNear);
  __ bind(&zero);
3937 ExternalReference ninf =
3938 ExternalReference::address_of_negative_infinity();
3939 Operand ninf_operand = masm()->ExternalOperand(ninf);
3940 __ movsd(input_reg, ninf_operand);
3941 __ jmp(&done, Label::kNear);
  __ bind(&positive);
  __ fldln2();
3944 __ subp(rsp, Immediate(kDoubleSize));
3945 __ movsd(Operand(rsp, 0), input_reg);
3946 __ fld_d(Operand(rsp, 0));
  __ fyl2x();
3948 __ fstp_d(Operand(rsp, 0));
3949 __ movsd(input_reg, Operand(rsp, 0));
3950 __ addp(rsp, Immediate(kDoubleSize));
  __ bind(&done);
}
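// The x87 sequence above computes ln(x) as ln(2) * log2(x): fldln2 pushes
// ln(2) and fyl2x pops x, replacing it with ST(1) * log2(x). Negative or NaN
// inputs produce a NaN (pcmpeqd sets all bits, an all-ones quiet-NaN bit
// pattern) and +/-0 loads -Infinity, matching Math.log.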
3955 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3956 Register input = ToRegister(instr->value());
3957 Register result = ToRegister(instr->result());
3959 __ Lzcntl(result, input);
}
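// Math.clz32 examples: clz32(1) == 31, clz32(0x80000000) == 0 and, by
// definition, clz32(0) == 32. Lzcntl presumably expands to the lzcnt
// instruction when the CPU supports it and to an equivalent bsr-based
// sequence otherwise; either way the result register holds the
// leading-zero count.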
3963 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3964 DCHECK(ToRegister(instr->context()).is(rsi));
3965 DCHECK(ToRegister(instr->function()).is(rdi));
3966 DCHECK(instr->HasPointerMap());
3968 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3969 if (known_function.is_null()) {
3970 LPointerMap* pointers = instr->pointer_map();
3971 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3972 ParameterCount count(instr->arity());
3973 __ InvokeFunction(rdi, count, CALL_FUNCTION, generator);
  } else {
3975 CallKnownFunction(known_function,
3976 instr->hydrogen()->formal_parameter_count(),
3977 instr->arity(), instr);
3982 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3983 DCHECK(ToRegister(instr->context()).is(rsi));
3984 DCHECK(ToRegister(instr->function()).is(rdi));
3985 DCHECK(ToRegister(instr->result()).is(rax));
3987 int arity = instr->arity();
3988 CallFunctionFlags flags = instr->hydrogen()->function_flags();
3989 if (instr->hydrogen()->HasVectorAndSlot()) {
3990 Register slot_register = ToRegister(instr->temp_slot());
3991 Register vector_register = ToRegister(instr->temp_vector());
3992 DCHECK(slot_register.is(rdx));
3993 DCHECK(vector_register.is(rbx));
3995 AllowDeferredHandleDereference vector_structure_check;
3996 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
3997 int index = vector->GetIndex(instr->hydrogen()->slot());
3999 __ Move(vector_register, vector);
4000 __ Move(slot_register, Smi::FromInt(index));
4002 CallICState::CallType call_type =
4003 (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
    Handle<Code> ic =
4006 CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
4007 CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
4009 CallFunctionStub stub(isolate(), arity, flags);
4010 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4015 void LCodeGen::DoCallNew(LCallNew* instr) {
4016 DCHECK(ToRegister(instr->context()).is(rsi));
4017 DCHECK(ToRegister(instr->constructor()).is(rdi));
4018 DCHECK(ToRegister(instr->result()).is(rax));
4020 __ Set(rax, instr->arity());
4021 // No cell in ebx for construct type feedback in optimized code
4022 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
4023 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
4024 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4028 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4029 DCHECK(ToRegister(instr->context()).is(rsi));
4030 DCHECK(ToRegister(instr->constructor()).is(rdi));
4031 DCHECK(ToRegister(instr->result()).is(rax));
4033 __ Set(rax, instr->arity());
4034 if (instr->arity() == 1) {
4035 // We only need the allocation site for the case we have a length argument.
4036 // The case may bail out to the runtime, which will determine the correct
4037 // elements kind with the site.
4038 __ Move(rbx, instr->hydrogen()->site());
4040 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
4043 ElementsKind kind = instr->hydrogen()->elements_kind();
4044 AllocationSiteOverrideMode override_mode =
4045 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4046 ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;
4049 if (instr->arity() == 0) {
4050 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
4051 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4052 } else if (instr->arity() == 1) {
    Label done;
4054 if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
4056 // We might need a change here
4057 // look at the first argument
4058 __ movp(rcx, Operand(rsp, 0));
      __ testp(rcx, rcx);
4060 __ j(zero, &packed_case, Label::kNear);
4062 ElementsKind holey_kind = GetHoleyElementsKind(kind);
4063 ArraySingleArgumentConstructorStub stub(isolate(),
                                               holey_kind,
                                               override_mode);
4066 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4067 __ jmp(&done, Label::kNear);
4068 __ bind(&packed_case);
    }
4071 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
4072 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
4075 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
4076 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  }
}
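// Example for the single-argument case above: 'new Array(5)' allocates an
// array of five holes, so even when the allocation site recorded a packed
// elements kind the code dispatches to the holey-kind stub unless the length
// argument is zero (the packed_case label).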
4081 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4082 DCHECK(ToRegister(instr->context()).is(rsi));
4083 CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
4087 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4088 Register function = ToRegister(instr->function());
4089 Register code_object = ToRegister(instr->code_object());
4090 __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
4091 __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
4095 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4096 Register result = ToRegister(instr->result());
4097 Register base = ToRegister(instr->base_object());
4098 if (instr->offset()->IsConstantOperand()) {
4099 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4100 __ leap(result, Operand(base, ToInteger32(offset)));
4102 Register offset = ToRegister(instr->offset());
4103 __ leap(result, Operand(base, offset, times_1, 0));
4108 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4109 HStoreNamedField* hinstr = instr->hydrogen();
4110 Representation representation = instr->representation();
4112 HObjectAccess access = hinstr->access();
4113 int offset = access.offset();
4115 if (access.IsExternalMemory()) {
4116 DCHECK(!hinstr->NeedsWriteBarrier());
4117 Register value = ToRegister(instr->value());
4118 if (instr->object()->IsConstantOperand()) {
4119 DCHECK(value.is(rax));
4120 LConstantOperand* object = LConstantOperand::cast(instr->object());
4121 __ store_rax(ToExternalReference(object));
4123 Register object = ToRegister(instr->object());
4124 __ Store(MemOperand(object, offset), value, representation);
    }
    return;
  }
4129 Register object = ToRegister(instr->object());
4130 __ AssertNotSmi(object);
4132 DCHECK(!representation.IsSmi() ||
4133 !instr->value()->IsConstantOperand() ||
4134 IsInteger32Constant(LConstantOperand::cast(instr->value())));
4135 if (!FLAG_unbox_double_fields && representation.IsDouble()) {
4136 DCHECK(access.IsInobject());
4137 DCHECK(!hinstr->has_transition());
4138 DCHECK(!hinstr->NeedsWriteBarrier());
4139 XMMRegister value = ToDoubleRegister(instr->value());
4140 __ movsd(FieldOperand(object, offset), value);
    return;
  }
4144 if (hinstr->has_transition()) {
4145 Handle<Map> transition = hinstr->transition_map();
4146 AddDeprecationDependency(transition);
4147 if (!hinstr->NeedsWriteBarrierForMap()) {
4148 __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
4150 Register temp = ToRegister(instr->temp());
4151 __ Move(kScratchRegister, transition);
4152 __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
4153 // Update the write barrier for the map field.
4154 __ RecordWriteForMap(object,
                           kScratchRegister,
                           temp,
                           kSaveFPRegs);
    }
  }

  // Do the store.
4162 Register write_register = object;
4163 if (!access.IsInobject()) {
4164 write_register = ToRegister(instr->temp());
4165 __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
4168 if (representation.IsSmi() && SmiValuesAre32Bits() &&
4169 hinstr->value()->representation().IsInteger32()) {
4170 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4171 if (FLAG_debug_code) {
4172 Register scratch = kScratchRegister;
4173 __ Load(scratch, FieldOperand(write_register, offset), representation);
4174 __ AssertSmi(scratch);
4176 // Store int value directly to upper half of the smi.
4177 STATIC_ASSERT(kSmiTag == 0);
4178 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
4179 offset += kPointerSize / 2;
4180 representation = Representation::Integer32();
4183 Operand operand = FieldOperand(write_register, offset);
4185 if (FLAG_unbox_double_fields && representation.IsDouble()) {
4186 DCHECK(access.IsInobject());
4187 XMMRegister value = ToDoubleRegister(instr->value());
4188 __ movsd(operand, value);
4190 } else if (instr->value()->IsRegister()) {
4191 Register value = ToRegister(instr->value());
4192 __ Store(operand, value, representation);
4194 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4195 if (IsInteger32Constant(operand_value)) {
4196 DCHECK(!hinstr->NeedsWriteBarrier());
4197 int32_t value = ToInteger32(operand_value);
4198 if (representation.IsSmi()) {
4199 __ Move(operand, Smi::FromInt(value));
4202 __ movl(operand, Immediate(value));
4206 Handle<Object> handle_value = ToHandle(operand_value);
4207 DCHECK(!hinstr->NeedsWriteBarrier());
4208 __ Move(operand, handle_value);
4212 if (hinstr->NeedsWriteBarrier()) {
4213 Register value = ToRegister(instr->value());
4214 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
4215 // Update the write barrier for the object for in-object properties.
4216 __ RecordWriteField(write_register,
                        offset,
                        value,
                        temp,
                        kSaveFPRegs,
4221 EMIT_REMEMBERED_SET,
4222 hinstr->SmiCheckForWriteBarrier(),
4223 hinstr->PointersToHereCheckForValue());
  }
}
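// With 32-bit smis a smi stores its int32 payload in the upper half of the
// 64-bit word and zeros in the lower (tagged) half, e.g. the smi 7 is
// 0x0000000700000000. That is why an Integer32 value can be written straight
// to offset + kPointerSize / 2 above without retagging.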
4228 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4229 DCHECK(ToRegister(instr->context()).is(rsi));
4230 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4231 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4233 if (instr->hydrogen()->HasVectorAndSlot()) {
4234 EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
4237 __ Move(StoreDescriptor::NameRegister(), instr->hydrogen()->name());
4238 Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
4239 isolate(), instr->language_mode(),
4240 instr->hydrogen()->initialization_state()).code();
4241 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4245 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4246 Representation representation = instr->hydrogen()->length()->representation();
4247 DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
4248 DCHECK(representation.IsSmiOrInteger32());
4250 Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal;
4251 if (instr->length()->IsConstantOperand()) {
4252 int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
4253 Register index = ToRegister(instr->index());
4254 if (representation.IsSmi()) {
4255 __ Cmp(index, Smi::FromInt(length));
4257 __ cmpl(index, Immediate(length));
4259 cc = CommuteCondition(cc);
4260 } else if (instr->index()->IsConstantOperand()) {
4261 int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
4262 if (instr->length()->IsRegister()) {
4263 Register length = ToRegister(instr->length());
4264 if (representation.IsSmi()) {
4265 __ Cmp(length, Smi::FromInt(index));
4267 __ cmpl(length, Immediate(index));
4270 Operand length = ToOperand(instr->length());
4271 if (representation.IsSmi()) {
4272 __ Cmp(length, Smi::FromInt(index));
4274 __ cmpl(length, Immediate(index));
4278 Register index = ToRegister(instr->index());
4279 if (instr->length()->IsRegister()) {
4280 Register length = ToRegister(instr->length());
4281 if (representation.IsSmi()) {
4282 __ cmpp(length, index);
4284 __ cmpl(length, index);
4287 Operand length = ToOperand(instr->length());
4288 if (representation.IsSmi()) {
4289 __ cmpp(length, index);
4291 __ cmpl(length, index);
4295 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
4297 __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  } else {
4301 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
  }
}
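// The bounds check above uses unsigned conditions (below / below_equal), so a
// negative index, reinterpreted as a large unsigned value, also fails the
// check and deoptimizes with kOutOfBounds.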
4306 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4307 ElementsKind elements_kind = instr->elements_kind();
4308 LOperand* key = instr->key();
4309 if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
4310 Register key_reg = ToRegister(key);
4311 Representation key_representation =
4312 instr->hydrogen()->key()->representation();
4313 if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
4314 __ SmiToInteger64(key_reg, key_reg);
4315 } else if (instr->hydrogen()->IsDehoisted()) {
4316 // Sign extend key because it could be a 32 bit negative value
4317 // and the dehoisted address computation happens in 64 bits
4318 __ movsxlq(key_reg, key_reg);
4321 Operand operand(BuildFastArrayOperand(
4324 instr->hydrogen()->key()->representation(),
4326 instr->base_offset()));
4328 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4329 elements_kind == FLOAT32_ELEMENTS) {
4330 XMMRegister value(ToDoubleRegister(instr->value()));
4331 __ cvtsd2ss(value, value);
4332 __ movss(operand, value);
4333 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4334 elements_kind == FLOAT64_ELEMENTS) {
4335 __ movsd(operand, ToDoubleRegister(instr->value()));
4337 Register value(ToRegister(instr->value()));
4338 switch (elements_kind) {
4339 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4340 case EXTERNAL_INT8_ELEMENTS:
4341 case EXTERNAL_UINT8_ELEMENTS:
      case INT8_ELEMENTS:
4343 case UINT8_ELEMENTS:
4344 case UINT8_CLAMPED_ELEMENTS:
4345 __ movb(operand, value);
        break;
4347 case EXTERNAL_INT16_ELEMENTS:
4348 case EXTERNAL_UINT16_ELEMENTS:
4349 case INT16_ELEMENTS:
4350 case UINT16_ELEMENTS:
4351 __ movw(operand, value);
        break;
4353 case EXTERNAL_INT32_ELEMENTS:
4354 case EXTERNAL_UINT32_ELEMENTS:
4355 case INT32_ELEMENTS:
4356 case UINT32_ELEMENTS:
4357 __ movl(operand, value);
        break;
4359 case EXTERNAL_FLOAT32_ELEMENTS:
4360 case EXTERNAL_FLOAT64_ELEMENTS:
4361 case FLOAT32_ELEMENTS:
4362 case FLOAT64_ELEMENTS:
      case FAST_ELEMENTS:
4364 case FAST_SMI_ELEMENTS:
4365 case FAST_DOUBLE_ELEMENTS:
4366 case FAST_HOLEY_ELEMENTS:
4367 case FAST_HOLEY_SMI_ELEMENTS:
4368 case FAST_HOLEY_DOUBLE_ELEMENTS:
4369 case DICTIONARY_ELEMENTS:
4370 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
4371 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}
4379 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4380 XMMRegister value = ToDoubleRegister(instr->value());
4381 LOperand* key = instr->key();
4382 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
4383 instr->hydrogen()->IsDehoisted()) {
4384 // Sign extend key because it could be a 32 bit negative value
4385 // and the dehoisted address computation happens in 64 bits
4386 __ movsxlq(ToRegister(key), ToRegister(key));
4388 if (instr->NeedsCanonicalization()) {
4389 XMMRegister xmm_scratch = double_scratch0();
4390 // Turn potential sNaN value into qNaN.
4391 __ xorps(xmm_scratch, xmm_scratch);
4392 __ subsd(value, xmm_scratch);
4395 Operand double_store_operand = BuildFastArrayOperand(
4398 instr->hydrogen()->key()->representation(),
4399 FAST_DOUBLE_ELEMENTS,
4400 instr->base_offset());
4402 __ movsd(double_store_operand, value);
}
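// Canonicalization note: subtracting +0 from the value is an arithmetic
// operation, so a signalling NaN becomes a quiet NaN while every other
// double (including -0) is left unchanged. This keeps a single NaN bit
// pattern in FAST_DOUBLE_ELEMENTS backing stores.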
4406 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4407 HStoreKeyed* hinstr = instr->hydrogen();
4408 LOperand* key = instr->key();
4409 int offset = instr->base_offset();
4410 Representation representation = hinstr->value()->representation();
4412 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
4413 instr->hydrogen()->IsDehoisted()) {
4414 // Sign extend key because it could be a 32 bit negative value
4415 // and the dehoisted address computation happens in 64 bits
4416 __ movsxlq(ToRegister(key), ToRegister(key));
4418 if (representation.IsInteger32() && SmiValuesAre32Bits()) {
4419 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4420 DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
4421 if (FLAG_debug_code) {
4422 Register scratch = kScratchRegister;
4424 BuildFastArrayOperand(instr->elements(),
4426 instr->hydrogen()->key()->representation(),
4429 Representation::Smi());
4430 __ AssertSmi(scratch);
4432 // Store int value directly to upper half of the smi.
4433 STATIC_ASSERT(kSmiTag == 0);
4434 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
4435 offset += kPointerSize / 2;
  }

  Operand operand =
4439 BuildFastArrayOperand(instr->elements(),
                            key,
4441 instr->hydrogen()->key()->representation(),
                            FAST_ELEMENTS,
                            offset);
4444 if (instr->value()->IsRegister()) {
4445 __ Store(operand, ToRegister(instr->value()), representation);
4447 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4448 if (IsInteger32Constant(operand_value)) {
4449 int32_t value = ToInteger32(operand_value);
4450 if (representation.IsSmi()) {
4451 __ Move(operand, Smi::FromInt(value));
4454 __ movl(operand, Immediate(value));
4457 Handle<Object> handle_value = ToHandle(operand_value);
4458 __ Move(operand, handle_value);
4462 if (hinstr->NeedsWriteBarrier()) {
4463 Register elements = ToRegister(instr->elements());
4464 DCHECK(instr->value()->IsRegister());
4465 Register value = ToRegister(instr->value());
4466 DCHECK(!key->IsConstantOperand());
4467 SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
4468 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4469 // Compute address of modified element and store it into key register.
4470 Register key_reg(ToRegister(key));
4471 __ leap(key_reg, operand);
4472 __ RecordWrite(elements,
                   key_reg,
                   value,
                   kSaveFPRegs,
4476 EMIT_REMEMBERED_SET,
                   check_needed,
4478 hinstr->PointersToHereCheckForValue());
  }
}
4483 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4484 if (instr->is_typed_elements()) {
4485 DoStoreKeyedExternalArray(instr);
4486 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4487 DoStoreKeyedFixedDoubleArray(instr);
4489 DoStoreKeyedFixedArray(instr);
4494 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4495 DCHECK(ToRegister(instr->context()).is(rsi));
4496 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4497 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4498 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4500 if (instr->hydrogen()->HasVectorAndSlot()) {
4501 EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
4504 Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
4505 isolate(), instr->language_mode(),
4506 instr->hydrogen()->initialization_state()).code();
4507 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4511 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
4512 class DeferredMaybeGrowElements final : public LDeferredCode {
4514 DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
4515 : LDeferredCode(codegen), instr_(instr) {}
4516 void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
4517 LInstruction* instr() override { return instr_; }
4520 LMaybeGrowElements* instr_;
4523 Register result = rax;
4524 DeferredMaybeGrowElements* deferred =
4525 new (zone()) DeferredMaybeGrowElements(this, instr);
4526 LOperand* key = instr->key();
4527 LOperand* current_capacity = instr->current_capacity();
4529 DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
4530 DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
4531 DCHECK(key->IsConstantOperand() || key->IsRegister());
4532 DCHECK(current_capacity->IsConstantOperand() ||
4533 current_capacity->IsRegister());
4535 if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
4536 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4537 int32_t constant_capacity =
4538 ToInteger32(LConstantOperand::cast(current_capacity));
4539 if (constant_key >= constant_capacity) {
4541 __ jmp(deferred->entry());
4543 } else if (key->IsConstantOperand()) {
4544 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4545 __ cmpl(ToRegister(current_capacity), Immediate(constant_key));
4546 __ j(less_equal, deferred->entry());
4547 } else if (current_capacity->IsConstantOperand()) {
4548 int32_t constant_capacity =
4549 ToInteger32(LConstantOperand::cast(current_capacity));
4550 __ cmpl(ToRegister(key), Immediate(constant_capacity));
4551 __ j(greater_equal, deferred->entry());
4553 __ cmpl(ToRegister(key), ToRegister(current_capacity));
4554 __ j(greater_equal, deferred->entry());
4557 if (instr->elements()->IsRegister()) {
4558 __ movp(result, ToRegister(instr->elements()));
4560 __ movp(result, ToOperand(instr->elements()));
4563 __ bind(deferred->exit());
4567 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
4568 // TODO(3095996): Get rid of this. For now, we need to make the
4569 // result register contain a valid pointer because it is already
4570 // contained in the register pointer map.
4571 Register result = rax;
4572 __ Move(result, Smi::FromInt(0));
4574 // We have to call a stub.
4576 PushSafepointRegistersScope scope(this);
4577 if (instr->object()->IsConstantOperand()) {
4578 LConstantOperand* constant_object =
4579 LConstantOperand::cast(instr->object());
4580 if (IsSmiConstant(constant_object)) {
4581 Smi* immediate = ToSmi(constant_object);
4582 __ Move(result, immediate);
4584 Handle<Object> handle_value = ToHandle(constant_object);
4585 __ Move(result, handle_value);
4587 } else if (instr->object()->IsRegister()) {
4588 __ Move(result, ToRegister(instr->object()));
4590 __ movp(result, ToOperand(instr->object()));
4593 LOperand* key = instr->key();
4594 if (key->IsConstantOperand()) {
4595 __ Move(rbx, ToSmi(LConstantOperand::cast(key)));
4597 __ Move(rbx, ToRegister(key));
4598 __ Integer32ToSmi(rbx, rbx);
4601 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
4602 instr->hydrogen()->kind());
    __ CallStub(&stub);
4604 RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
4605 __ StoreToSafepointRegisterSlot(result, result);
4608 // Deopt on smi, which means the elements array changed to dictionary mode.
4609 Condition is_smi = __ CheckSmi(result);
4610 DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
4614 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4615 Register object_reg = ToRegister(instr->object());
4617 Handle<Map> from_map = instr->original_map();
4618 Handle<Map> to_map = instr->transitioned_map();
4619 ElementsKind from_kind = instr->from_kind();
4620 ElementsKind to_kind = instr->to_kind();
4622 Label not_applicable;
4623 __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
4624 __ j(not_equal, ¬_applicable);
4625 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4626 Register new_map_reg = ToRegister(instr->new_map_temp());
4627 __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
4628 __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
4630 __ RecordWriteForMap(object_reg, new_map_reg, ToRegister(instr->temp()),
                         kDontSaveFPRegs);
  } else {
4633 DCHECK(object_reg.is(rax));
4634 DCHECK(ToRegister(instr->context()).is(rsi));
4635 PushSafepointRegistersScope scope(this);
4636 __ Move(rbx, to_map);
4637 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4638 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4640 RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
4642 __ bind(¬_applicable);
4646 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4647 Register object = ToRegister(instr->object());
4648 Register temp = ToRegister(instr->temp());
4649 Label no_memento_found;
4650 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4651 DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
4652 __ bind(&no_memento_found);
4656 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4657 DCHECK(ToRegister(instr->context()).is(rsi));
4658 DCHECK(ToRegister(instr->left()).is(rdx));
4659 DCHECK(ToRegister(instr->right()).is(rax));
4660 StringAddStub stub(isolate(),
4661 instr->hydrogen()->flags(),
4662 instr->hydrogen()->pretenure_flag());
4663 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4667 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4668 class DeferredStringCharCodeAt final : public LDeferredCode {
4670 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4671 : LDeferredCode(codegen), instr_(instr) { }
4672 void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
4673 LInstruction* instr() override { return instr_; }
4676 LStringCharCodeAt* instr_;
4679 DeferredStringCharCodeAt* deferred =
4680 new(zone()) DeferredStringCharCodeAt(this, instr);
4682 StringCharLoadGenerator::Generate(masm(),
4683 ToRegister(instr->string()),
4684 ToRegister(instr->index()),
4685 ToRegister(instr->result()),
                                    deferred->entry());
4687 __ bind(deferred->exit());
4691 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4692 Register string = ToRegister(instr->string());
4693 Register result = ToRegister(instr->result());
4695 // TODO(3095996): Get rid of this. For now, we need to make the
4696 // result register contain a valid pointer because it is already
4697 // contained in the register pointer map.
  __ Set(result, 0);
4700 PushSafepointRegistersScope scope(this);
4702 // Push the index as a smi. This is safe because of the checks in
4703 // DoStringCharCodeAt above.
4704 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
4705 if (instr->index()->IsConstantOperand()) {
4706 int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4707 __ Push(Smi::FromInt(const_index));
  } else {
4709 Register index = ToRegister(instr->index());
4710 __ Integer32ToSmi(index, index);
    __ Push(index);
  }
4713 CallRuntimeFromDeferred(
4714 Runtime::kStringCharCodeAtRT, 2, instr, instr->context());
4716 __ SmiToInteger32(rax, rax);
4717 __ StoreToSafepointRegisterSlot(result, rax);
4721 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4722 class DeferredStringCharFromCode final : public LDeferredCode {
4724 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4725 : LDeferredCode(codegen), instr_(instr) { }
4726 void Generate() override {
4727 codegen()->DoDeferredStringCharFromCode(instr_);
4729 LInstruction* instr() override { return instr_; }
4732 LStringCharFromCode* instr_;
4735 DeferredStringCharFromCode* deferred =
4736 new(zone()) DeferredStringCharFromCode(this, instr);
4738 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4739 Register char_code = ToRegister(instr->char_code());
4740 Register result = ToRegister(instr->result());
4741 DCHECK(!char_code.is(result));
4743 __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
4744 __ j(above, deferred->entry());
4745 __ movsxlq(char_code, char_code);
4746 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4747 __ movp(result, FieldOperand(result,
4748 char_code, times_pointer_size,
4749 FixedArray::kHeaderSize));
4750 __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
4751 __ j(equal, deferred->entry());
4752 __ bind(deferred->exit());
4756 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4757 Register char_code = ToRegister(instr->char_code());
4758 Register result = ToRegister(instr->result());
4760 // TODO(3095996): Get rid of this. For now, we need to make the
4761 // result register contain a valid pointer because it is already
4762 // contained in the register pointer map.
  __ Set(result, 0);
4765 PushSafepointRegistersScope scope(this);
4766 __ Integer32ToSmi(char_code, char_code);
  __ Push(char_code);
4768 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4769 __ StoreToSafepointRegisterSlot(result, rax);
4773 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4774 LOperand* input = instr->value();
4775 DCHECK(input->IsRegister() || input->IsStackSlot());
4776 LOperand* output = instr->result();
4777 DCHECK(output->IsDoubleRegister());
4778 if (input->IsRegister()) {
4779 __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
4781 __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
4786 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4787 LOperand* input = instr->value();
4788 LOperand* output = instr->result();
4790 __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
4794 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4795 class DeferredNumberTagI final : public LDeferredCode {
4797 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4798 : LDeferredCode(codegen), instr_(instr) { }
4799 void Generate() override {
4800 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4801 instr_->temp2(), SIGNED_INT32);
4803 LInstruction* instr() override { return instr_; }
4806 LNumberTagI* instr_;
4809 LOperand* input = instr->value();
4810 DCHECK(input->IsRegister() && input->Equals(instr->result()));
4811 Register reg = ToRegister(input);
4813 if (SmiValuesAre32Bits()) {
4814 __ Integer32ToSmi(reg, reg);
4816 DCHECK(SmiValuesAre31Bits());
4817 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4818 __ Integer32ToSmi(reg, reg);
4819 __ j(overflow, deferred->entry());
4820 __ bind(deferred->exit());
4825 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4826 class DeferredNumberTagU final : public LDeferredCode {
4828 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4829 : LDeferredCode(codegen), instr_(instr) { }
4830 void Generate() override {
4831 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4832 instr_->temp2(), UNSIGNED_INT32);
4834 LInstruction* instr() override { return instr_; }
4837 LNumberTagU* instr_;
4840 LOperand* input = instr->value();
4841 DCHECK(input->IsRegister() && input->Equals(instr->result()));
4842 Register reg = ToRegister(input);
4844 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4845 __ cmpl(reg, Immediate(Smi::kMaxValue));
4846 __ j(above, deferred->entry());
4847 __ Integer32ToSmi(reg, reg);
4848 __ bind(deferred->exit());
}
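// With 31-bit smis the representable range is -2^30 .. 2^30 - 1, so
// Integer32ToSmi can overflow (e.g. for 2^30) and the deferred code allocates
// a heap number instead. With 32-bit smis every int32 fits and DoNumberTagI
// needs no deferred path at all.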
4852 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp1,
                                     LOperand* temp2,
4856 IntegerSignedness signedness) {
  Label done, slow;
4858 Register reg = ToRegister(value);
4859 Register tmp = ToRegister(temp1);
4860 XMMRegister temp_xmm = ToDoubleRegister(temp2);
4862 // Load value into temp_xmm which will be preserved across potential call to
4863 // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
4864 // XMM registers on x64).
4865 if (signedness == SIGNED_INT32) {
4866 DCHECK(SmiValuesAre31Bits());
4867 // There was overflow, so bits 30 and 31 of the original integer
4868 // disagree. Try to allocate a heap number in new space and store
4869 // the value in there. If that fails, call the runtime system.
4870 __ SmiToInteger32(reg, reg);
4871 __ xorl(reg, Immediate(0x80000000));
4872 __ cvtlsi2sd(temp_xmm, reg);
4874 DCHECK(signedness == UNSIGNED_INT32);
4875 __ LoadUint32(temp_xmm, reg);
4878 if (FLAG_inline_new) {
4879 __ AllocateHeapNumber(reg, tmp, &slow);
4880 __ jmp(&done, kPointerSize == kInt64Size ? Label::kNear : Label::kFar);
  }

4883 // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
4886 // Put a valid pointer value in the stack slot where the result
4887 // register is stored, as this register is in the pointer map, but contains
4888 // an integer value.
4891 // Preserve the value of all registers.
4892 PushSafepointRegistersScope scope(this);
4894 // NumberTagIU uses the context from the frame, rather than
4895 // the environment's HContext or HInlinedContext value.
4896 // They only call Runtime::kAllocateHeapNumber.
4897 // The corresponding HChange instructions are added in a phase that does
4898 // not have easy access to the local context.
4899 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
4900 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4901 RecordSafepointWithRegisters(
4902 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4903 __ StoreToSafepointRegisterSlot(reg, rax);
  }

4906 // Done. Put the value in temp_xmm into the value of the allocated heap
  // number.
  __ bind(&done);
4909 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
}
4913 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4914 class DeferredNumberTagD final : public LDeferredCode {
4916 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4917 : LDeferredCode(codegen), instr_(instr) { }
4918 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
4919 LInstruction* instr() override { return instr_; }
4922 LNumberTagD* instr_;
4925 XMMRegister input_reg = ToDoubleRegister(instr->value());
4926 Register reg = ToRegister(instr->result());
4927 Register tmp = ToRegister(instr->temp());
4929 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4930 if (FLAG_inline_new) {
4931 __ AllocateHeapNumber(reg, tmp, deferred->entry());
  } else {
4933 __ jmp(deferred->entry());
  }
4935 __ bind(deferred->exit());
4936 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
4940 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4941 // TODO(3095996): Get rid of this. For now, we need to make the
4942 // result register contain a valid pointer because it is already
4943 // contained in the register pointer map.
4944 Register reg = ToRegister(instr->result());
4945 __ Move(reg, Smi::FromInt(0));
4948 PushSafepointRegistersScope scope(this);
4949 // NumberTagD uses the context from the frame, rather than
4950 // the environment's HContext or HInlinedContext value.
4951 // They only call Runtime::kAllocateHeapNumber.
4952 // The corresponding HChange instructions are added in a phase that does
4953 // not have easy access to the local context.
4954 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
4955 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4956 RecordSafepointWithRegisters(
4957 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4958 __ movp(kScratchRegister, rax);
4960 __ movp(reg, kScratchRegister);
4964 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4965 HChange* hchange = instr->hydrogen();
4966 Register input = ToRegister(instr->value());
4967 Register output = ToRegister(instr->result());
4968 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4969 hchange->value()->CheckFlag(HValue::kUint32)) {
4970 Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
4971 DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kOverflow);
4973 __ Integer32ToSmi(output, input);
4974 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4975 !hchange->value()->CheckFlag(HValue::kUint32)) {
4976 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
4981 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4982 DCHECK(instr->value()->Equals(instr->result()));
4983 Register input = ToRegister(instr->value());
4984 if (instr->needs_check()) {
4985 Condition is_smi = __ CheckSmi(input);
4986 DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kNotASmi);
  } else {
4988 __ AssertSmi(input);
  }
4990 __ SmiToInteger32(input, input);
4994 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4995 XMMRegister result_reg, NumberUntagDMode mode) {
4996 bool can_convert_undefined_to_nan =
4997 instr->hydrogen()->can_convert_undefined_to_nan();
4998 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
5000 Label convert, load_smi, done;
5002 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
5004 __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
5006 // Heap number map check.
5007 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
5008 Heap::kHeapNumberMapRootIndex);
5010 // On x64 it is safe to load at heap number offset before evaluating the map
5011 // check, since all heap objects are at least two words long.
5012 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
5014 if (can_convert_undefined_to_nan) {
5015 __ j(not_equal, &convert, Label::kNear);
    } else {
5017 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
    }
5020 if (deoptimize_on_minus_zero) {
5021 XMMRegister xmm_scratch = double_scratch0();
5022 __ xorps(xmm_scratch, xmm_scratch);
5023 __ ucomisd(xmm_scratch, result_reg);
5024 __ j(not_equal, &done, Label::kNear);
5025 __ movmskpd(kScratchRegister, result_reg);
5026 __ testq(kScratchRegister, Immediate(1));
5027 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
5029 __ jmp(&done, Label::kNear);
5031 if (can_convert_undefined_to_nan) {
      __ bind(&convert);
5034 // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
5035 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
5036 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
5038 __ pcmpeqd(result_reg, result_reg);
5039 __ jmp(&done, Label::kNear);
    }
  } else {
5042 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }
5045 // Smi to XMM conversion
  __ bind(&load_smi);
5047 __ SmiToInteger32(kScratchRegister, input_reg);
5048 __ Cvtlsi2sd(result_reg, kScratchRegister);
  __ bind(&done);
}
5053 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
5054 Register input_reg = ToRegister(instr->value());
5056 if (instr->truncating()) {
5057 Label no_heap_number, check_bools, check_false;
5059 // Heap number map check.
5060 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
5061 Heap::kHeapNumberMapRootIndex);
5062 __ j(not_equal, &no_heap_number, Label::kNear);
5063 __ TruncateHeapNumberToI(input_reg, input_reg);
    __ jmp(done);
5066 __ bind(&no_heap_number);
5067 // Check for Oddballs. Undefined/False is converted to zero and True to one
5068 // for truncating conversions.
5069 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
5070 __ j(not_equal, &check_bools, Label::kNear);
5071 __ Set(input_reg, 0);
    __ jmp(done);
5074 __ bind(&check_bools);
5075 __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
5076 __ j(not_equal, &check_false, Label::kNear);
5077 __ Set(input_reg, 1);
    __ jmp(done);
5080 __ bind(&check_false);
5081 __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
5082 DeoptimizeIf(not_equal, instr,
5083 Deoptimizer::kNotAHeapNumberUndefinedBoolean);
5084 __ Set(input_reg, 0);
  } else {
5086 XMMRegister scratch = ToDoubleRegister(instr->temp());
5087 DCHECK(!scratch.is(xmm0));
5088 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
5089 Heap::kHeapNumberMapRootIndex);
5090 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
5091 __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
5092 __ cvttsd2si(input_reg, xmm0);
5093 __ Cvtlsi2sd(scratch, input_reg);
5094 __ ucomisd(xmm0, scratch);
5095 DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
5096 DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
5097 if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
5098 __ testl(input_reg, input_reg);
5099 __ j(not_zero, done);
5100 __ movmskpd(input_reg, xmm0);
5101 __ andl(input_reg, Immediate(1));
5102 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
    }
  }
}
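// Truncating tagged-to-int32 conversion mirrors JS ToInt32 on oddballs:
// undefined and false become 0, true becomes 1, and heap numbers go through
// TruncateHeapNumberToI, so e.g. (1.9 | 0) == 1 and (undefined | 0) == 0.
// The non-truncating path instead deoptimizes on any loss of precision.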
5108 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5109 class DeferredTaggedToI final : public LDeferredCode {
5111 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
5112 : LDeferredCode(codegen), instr_(instr) { }
5113 void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
5114 LInstruction* instr() override { return instr_; }
5120 LOperand* input = instr->value();
5121 DCHECK(input->IsRegister());
5122 DCHECK(input->Equals(instr->result()));
5123 Register input_reg = ToRegister(input);
5125 if (instr->hydrogen()->value()->representation().IsSmi()) {
5126 __ SmiToInteger32(input_reg, input_reg);
5128 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
5129 __ JumpIfNotSmi(input_reg, deferred->entry());
5130 __ SmiToInteger32(input_reg, input_reg);
5131 __ bind(deferred->exit());
5136 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5137 LOperand* input = instr->value();
5138 DCHECK(input->IsRegister());
5139 LOperand* result = instr->result();
5140 DCHECK(result->IsDoubleRegister());
5142 Register input_reg = ToRegister(input);
5143 XMMRegister result_reg = ToDoubleRegister(result);
5145 HValue* value = instr->hydrogen()->value();
5146 NumberUntagDMode mode = value->representation().IsSmi()
5147 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5149 EmitNumberUntagD(instr, input_reg, result_reg, mode);
5153 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5154 LOperand* input = instr->value();
5155 DCHECK(input->IsDoubleRegister());
5156 LOperand* result = instr->result();
5157 DCHECK(result->IsRegister());
5159 XMMRegister input_reg = ToDoubleRegister(input);
5160 Register result_reg = ToRegister(result);
5162 if (instr->truncating()) {
5163 __ TruncateDoubleToI(result_reg, input_reg);
5165 Label lost_precision, is_nan, minus_zero, done;
5166 XMMRegister xmm_scratch = double_scratch0();
5167 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5168 __ DoubleToI(result_reg, input_reg, xmm_scratch,
5169 instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
5170 &is_nan, &minus_zero, dist);
5171 __ jmp(&done, dist);
5172 __ bind(&lost_precision);
5173 DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
    __ bind(&is_nan);
5175 DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
5176 __ bind(&minus_zero);
5177 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
    __ bind(&done);
  }
}
5183 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5184 LOperand* input = instr->value();
5185 DCHECK(input->IsDoubleRegister());
5186 LOperand* result = instr->result();
5187 DCHECK(result->IsRegister());
5189 XMMRegister input_reg = ToDoubleRegister(input);
5190 Register result_reg = ToRegister(result);
5192 Label lost_precision, is_nan, minus_zero, done;
5193 XMMRegister xmm_scratch = double_scratch0();
5194 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5195 __ DoubleToI(result_reg, input_reg, xmm_scratch,
5196 instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
5198 __ jmp(&done, dist);
5199 __ bind(&lost_precision);
5200 DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
  __ bind(&is_nan);
5202 DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
5203 __ bind(&minus_zero);
5204 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
  __ bind(&done);
5206 __ Integer32ToSmi(result_reg, result_reg);
5207 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
5211 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5212 LOperand* input = instr->value();
5213 Condition cc = masm()->CheckSmi(ToRegister(input));
5214 DeoptimizeIf(NegateCondition(cc), instr, Deoptimizer::kNotASmi);
5218 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5219 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5220 LOperand* input = instr->value();
5221 Condition cc = masm()->CheckSmi(ToRegister(input));
5222 DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
5227 void LCodeGen::DoCheckArrayBufferNotNeutered(
5228 LCheckArrayBufferNotNeutered* instr) {
5229 Register view = ToRegister(instr->view());
5231 __ movp(kScratchRegister,
5232 FieldOperand(view, JSArrayBufferView::kBufferOffset));
5233 __ testb(FieldOperand(kScratchRegister, JSArrayBuffer::kBitFieldOffset),
5234 Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
5235 DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
5239 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5240 Register input = ToRegister(instr->value());
5242 __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
5244 if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
5247 instr->hydrogen()->GetCheckInterval(&first, &last);
5249 __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
5250 Immediate(static_cast<int8_t>(first)));
5252 // If there is only one type in the interval check for equality.
5253 if (first == last) {
5254 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
    } else {
5256 DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
5257 // Omit check for the last type.
5258 if (last != LAST_TYPE) {
5259 __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
5260 Immediate(static_cast<int8_t>(last)));
5261 DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
5267 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5269 if (base::bits::IsPowerOfTwo32(mask)) {
5270 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
5271 __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
               Immediate(mask));
5273 DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
5274 Deoptimizer::kWrongInstanceType);
    } else {
5276 __ movzxbl(kScratchRegister,
5277 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
5278 __ andb(kScratchRegister, Immediate(mask));
5279 __ cmpb(kScratchRegister, Immediate(tag));
5280 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
5286 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5287 Register reg = ToRegister(instr->value());
5288 __ Cmp(reg, instr->hydrogen()->object().handle());
5289 DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
5293 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5295 PushSafepointRegistersScope scope(this);
    __ Push(object);
    __ Set(rsi, 0);
5298 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5299 RecordSafepointWithRegisters(
5300 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5302 __ testp(rax, Immediate(kSmiTagMask));
5304 DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
5308 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5309 class DeferredCheckMaps final : public LDeferredCode {
5311 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5312 : LDeferredCode(codegen), instr_(instr), object_(object) {
5313 SetExit(check_maps());
5315 void Generate() override {
5316 codegen()->DoDeferredInstanceMigration(instr_, object_);
5318 Label* check_maps() { return &check_maps_; }
5319 LInstruction* instr() override { return instr_; }
5327 if (instr->hydrogen()->IsStabilityCheck()) {
5328 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5329 for (int i = 0; i < maps->size(); ++i) {
5330 AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }
5335 LOperand* input = instr->value();
5336 DCHECK(input->IsRegister());
5337 Register reg = ToRegister(input);
5339 DeferredCheckMaps* deferred = NULL;
5340 if (instr->hydrogen()->HasMigrationTarget()) {
5341 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5342 __ bind(deferred->check_maps());
  }

5345 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
5347 for (int i = 0; i < maps->size() - 1; i++) {
5348 Handle<Map> map = maps->at(i).handle();
5349 __ CompareMap(reg, map);
5350 __ j(equal, &success, Label::kNear);
5353 Handle<Map> map = maps->at(maps->size() - 1).handle();
5354 __ CompareMap(reg, map);
5355 if (instr->hydrogen()->HasMigrationTarget()) {
5356 __ j(not_equal, deferred->entry());
  } else {
5358 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
  }

  __ bind(&success);
}
5365 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5366 XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
5367 XMMRegister xmm_scratch = double_scratch0();
5368 Register result_reg = ToRegister(instr->result());
5369 __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
5373 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5374 DCHECK(instr->unclamped()->Equals(instr->result()));
5375 Register value_reg = ToRegister(instr->result());
5376 __ ClampUint8(value_reg);
5380 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5381 DCHECK(instr->unclamped()->Equals(instr->result()));
5382 Register input_reg = ToRegister(instr->unclamped());
5383 XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
5384 XMMRegister xmm_scratch = double_scratch0();
5385 Label is_smi, done, heap_number;
5386 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5387 __ JumpIfSmi(input_reg, &is_smi, dist);
5389 // Check for heap number
5390 __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5391 factory()->heap_number_map());
5392 __ j(equal, &heap_number, Label::kNear);
5394 // Check for undefined. Undefined is converted to zero for clamping
5396 __ Cmp(input_reg, factory()->undefined_value());
5397 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
5398 __ xorl(input_reg, input_reg);
5399 __ jmp(&done, Label::kNear);
5402 __ bind(&heap_number);
5403 __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
5404 __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
5405 __ jmp(&done, Label::kNear);

  // smi
  __ bind(&is_smi);
5409 __ SmiToInteger32(input_reg, input_reg);
5410 __ ClampUint8(input_reg);
  __ bind(&done);
}
5416 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5417 XMMRegister value_reg = ToDoubleRegister(instr->value());
5418 Register result_reg = ToRegister(instr->result());
5419 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5420 __ movq(result_reg, value_reg);
5421 __ shrq(result_reg, Immediate(32));
5423 __ movd(result_reg, value_reg);
5428 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5429 Register hi_reg = ToRegister(instr->hi());
5430 Register lo_reg = ToRegister(instr->lo());
5431 XMMRegister result_reg = ToDoubleRegister(instr->result());
5432 XMMRegister xmm_scratch = double_scratch0();
5433 __ movd(result_reg, hi_reg);
5434 __ psllq(result_reg, 32);
5435 __ movd(xmm_scratch, lo_reg);
5436 __ orps(result_reg, xmm_scratch);
}
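// DoConstructDouble assembles a double from two int32 halves: the hi word is
// moved in, shifted into bits 63..32 with psllq, then or'ed with the lo word.
// For example hi = 0x3FF00000 and lo = 0 produce the bit pattern
// 0x3FF0000000000000, i.e. the double 1.0.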
5440 void LCodeGen::DoAllocate(LAllocate* instr) {
5441 class DeferredAllocate final : public LDeferredCode {
5443 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5444 : LDeferredCode(codegen), instr_(instr) { }
5445 void Generate() override { codegen()->DoDeferredAllocate(instr_); }
5446 LInstruction* instr() override { return instr_; }
5452 DeferredAllocate* deferred =
5453 new(zone()) DeferredAllocate(this, instr);
5455 Register result = ToRegister(instr->result());
5456 Register temp = ToRegister(instr->temp());
5458 // Allocate memory for the object.
5459 AllocationFlags flags = TAG_OBJECT;
5460 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5461 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5463 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5464 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5465 flags = static_cast<AllocationFlags>(flags | PRETENURE);
5468 if (instr->size()->IsConstantOperand()) {
5469 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5470 if (size <= Page::kMaxRegularHeapObjectSize) {
5471 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
    } else {
5473 __ jmp(deferred->entry());
    }
  } else {
5476 Register size = ToRegister(instr->size());
5477 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5480 __ bind(deferred->exit());
5482 if (instr->hydrogen()->MustPrefillWithFiller()) {
5483 if (instr->size()->IsConstantOperand()) {
5484 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5485 __ movl(temp, Immediate((size / kPointerSize) - 1));
5487 temp = ToRegister(instr->size());
5488 __ sarp(temp, Immediate(kPointerSizeLog2));
5493 __ Move(FieldOperand(result, temp, times_pointer_size, 0),
5494 isolate()->factory()->one_pointer_filler_map());
5496 __ j(not_zero, &loop);
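// DoDeferredAllocate is the slow path for DoAllocate: it pushes the requested
// size and the target-space flags as smis and calls
// Runtime::kAllocateInTargetSpace.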
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ Integer32ToSmi(size, size);
    __ Push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Push(Smi::FromInt(size));
  }

  int flags = 0;
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).is(rax));
  __ Push(rax);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  Label materialized;
  // Registers will be used as follows:
  // rcx = literals array.
  // rbx = regexp literal.
  // rax = regexp literal clone.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ Move(rcx, instr->hydrogen()->literals());
  __ movp(rbx, FieldOperand(rcx, literal_offset));
  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &materialized, Label::kNear);

  // Create regexp literal using runtime function
  // Result will be in rax.
  __ Push(rcx);
  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
  __ Push(instr->hydrogen()->pattern());
  __ Push(instr->hydrogen()->flags());
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ movp(rbx, rax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated, Label::kNear);

  __ bind(&runtime_allocate);
  __ Push(rbx);
  __ Push(Smi::FromInt(size));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ Pop(rbx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ movp(rdx, FieldOperand(rbx, i));
    __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
    __ movp(FieldOperand(rax, i), rdx);
    __ movp(FieldOperand(rax, i + kPointerSize), rcx);
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
    __ movp(FieldOperand(rax, size - kPointerSize), rdx);
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
                            instr->hydrogen()->kind());
    __ Move(rbx, instr->hydrogen()->shared_info());
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ Push(rsi);
    __ Push(instr->hydrogen()->shared_info());
    __ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
                            Heap::kFalseValueRootIndex);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->value()).is(rbx));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
  // Smis are always "number"; everything else goes through the TypeofStub.
  __ JumpIfNotSmi(value_register, &do_call);
  __ Move(rax, isolate()->factory()->number_string());
  __ jmp(&end);
  __ bind(&do_call);
  TypeofStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}


void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  DCHECK(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    __ Push(ToHandle(LConstantOperand::cast(operand)));
  } else if (operand->IsRegister()) {
    __ Push(ToRegister(operand));
  } else {
    __ Push(ToOperand(operand));
  }
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Condition final_branch_condition = EmitTypeofIs(instr, input);
  if (final_branch_condition != no_condition) {
    EmitBranch(instr, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Handle<String> type_name = instr->type_literal();
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);
  int next_block = GetNextEmittedBlock();

  Label::Distance true_distance = left_block == next_block ? Label::kNear
                                                            : Label::kFar;
  Label::Distance false_distance = right_block == next_block ? Label::kNear
                                                              : Label::kFar;
  Condition final_branch_condition = no_condition;
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label, true_distance);
    __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label, false_distance);
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for undetectable objects => true.
    __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = not_zero;

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label, true_distance);
    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
    __ j(below, false_label, false_distance);
    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ j(above, false_label, false_distance);
    // Check for undetectable objects => false.
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else if (String::Equals(type_name, factory->float32x4_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FLOAT32X4_TYPE, input);
    final_branch_condition = equal;

  } else {
    __ jmp(false_label, false_distance);
  }

  return final_branch_condition;
}


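// DoIsConstructCallAndBranch branches on whether the current function was
// invoked as a constructor. EmitIsConstructCall sets the flags by comparing
// the calling frame's marker against StackFrame::CONSTRUCT, skipping an
// arguments adaptor frame if one is present.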
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->temp());

  EmitIsConstructCall(temp);
  EmitBranch(instr, equal);
}


void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ movp(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(not_equal, &check_frame_marker, Label::kNear);
  __ movp(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Smi::FromInt(StackFrame::CONSTRUCT));
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons
  // (the needed return address), even though the implementation of LAZY and
  // EAGER is now identical. When LAZY is eventually completely folded into
  // EAGER, remove the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }
  DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(above_equal, &done, Label::kNear);

    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(rsi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));

  Condition cc = masm()->CheckSmi(rax);
  DeoptimizeIf(cc, instr, Deoptimizer::kSmi);

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
  DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);

  Label use_cache, call_runtime;
  Register null_value = rdi;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ CheckEnumCache(null_value, &call_runtime);

  __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ Push(rax);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kMetaMapRootIndex);
  DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
  __ bind(&use_cache);
}


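// DoForInCacheArray loads the enum cache for a for-in loop. When the map's
// EnumLength is zero the empty fixed array is used; otherwise the cache is
// read from the map's instance descriptors. A smi result means there is no
// cache and triggers a deoptimization.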
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Cmp(result, Smi::FromInt(0));
  __ j(not_equal, &load_cache, Label::kNear);
  __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ movp(result,
          FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ movp(result,
          FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  Condition cc = masm()->CheckSmi(result);
  DeoptimizeIf(cc, instr, Deoptimizer::kNoCache);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmpp(ToRegister(instr->map()),
          FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
}


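// DoDeferredLoadMutableDouble is the slow path for DoLoadFieldByIndex: it
// calls Runtime::kLoadMutableDouble to load a field stored as a mutable
// heap-number box (flagged by the low bit of the encoded field index).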
void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object);
  __ Push(index);
  __ xorp(rsi, rsi);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(object, rax);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);

  Label out_of_object, done;
  // The low bit of the encoded index flags a mutable-double field, which is
  // loaded in the deferred code.
  __ Move(kScratchRegister, Smi::FromInt(1));
  __ testp(index, kScratchRegister);
  __ j(not_zero, deferred->entry());

  __ sarp(index, Immediate(1));

  __ SmiToInteger32(index, index);
  __ cmpl(index, Immediate(0));
  __ j(less, &out_of_object, Label::kNear);
  __ movp(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ negl(index);
  // Index is now equal to out of object property index plus 1.
  __ movp(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), context);
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ Push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X64