// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/x64/lithium-codegen-x64.h"
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
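// The class below is passed as the CallWrapper for such calls: the macro
// assembler invokes AfterCall() right after emitting the call instruction,
// which is where the safepoint and its pointer map get recorded.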
class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};
bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());

  // Open a frame scope to indicate that there is a frame on the stack. The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  return GeneratePrologue() &&
         GenerateDeferredCode() &&
         GenerateJumpTable() &&
         GenerateSafepointTable();
void LCodeGen::FinishCode(Handle<Code> code) {
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ movp(Operand(rsp, offset), rax);
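    // Touching one word per page below the current stack pointer forces the
    // OS to commit the whole reserved range up front, so later stores into
    // those slots cannot fault in the middle of the generated code.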
void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ movsd(MemOperand(rsp, count * kDoubleSize),
             XMMRegister::FromAllocationIndex(save_iterator.Current()));
    save_iterator.Advance();
void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
             MemOperand(rsp, count * kDoubleSize));
    save_iterator.Advance();
bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    if (strlen(FLAG_stop_at) > 0 &&
        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {

    // Sloppy mode functions need to replace the receiver with the global proxy
    // when called as functions (without an explicit receiver object).
    if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
      StackArgumentsAccessor args(rsp, scope()->num_parameters());
      __ movp(rcx, args.GetReceiverOperand());
      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
      __ j(not_equal, &ok, Label::kNear);
      __ movp(rcx, GlobalObjectOperand());
      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
      __ movp(args.GetReceiverOperand(), rcx);
  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    DCHECK(!frame_is_built_);
    frame_is_built_ = true;
    if (info()->IsStub()) {
      __ Prologue(info()->IsCodePreAgingActive());
    info()->AddNoFrameRange(0, masm_->pc_offset());

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (FLAG_debug_code) {
    __ subp(rsp, Immediate(slots * kPointerSize));
    MakeSureStackPagesMapped(slots * kPointerSize);
    __ Set(kScratchRegister, kSlotsZapValue);
    __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
            kScratchRegister);
    __ j(not_zero, &loop);
    __ subp(rsp, Immediate(slots * kPointerSize));
    MakeSureStackPagesMapped(slots * kPointerSize);

  if (info()->saves_caller_doubles()) {

  return !is_aborted();
void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info_->num_heap_slots() > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is still in rdi.
    int slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
      __ CallRuntime(Runtime::kNewScriptContext, 2);
      deopt_mode = Safepoint::kLazyDeopt;
    } else if (slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), slots);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    RecordSafepoint(deopt_mode);

    // Context is returned in rax. It replaces the context passed to us.
    // It's saved in the stack and kept live in rsi.
    __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);

    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ movp(rax, Operand(rbp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ movp(Operand(rsi, context_offset), rax);
        // Update the write barrier. This clobbers rax and rbx.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
        } else if (FLAG_debug_code) {
          __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear);
          __ Abort(kExpectedNewSpaceObject);
    Comment(";;; End allocate local context");

  Comment(";;; Prologue end");
void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  __ subp(rsp, Immediate(slots * kPointerSize));
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
  if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() &&
      instr->hydrogen_value()->representation().IsInteger32() &&
      instr->result()->IsRegister()) {
    __ AssertZeroExtended(ToRegister(instr->result()));

  if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
    // We sign extend the dehoisted key at the definition point when the pointer
    // size is 64-bit. For x32 port, we sign extend the dehoisted key at the use
    // points and MustSignExtendResult is always false. We can't use
    // STATIC_ASSERT here as the pointer size is 32-bit for x32.
    DCHECK(kPointerSize == kInt64Size);
    if (instr->result()->IsRegister()) {
      Register result_reg = ToRegister(instr->result());
      __ movsxlq(result_reg, result_reg);
      // Sign extend the 32bit result in the stack slots.
      DCHECK(instr->result()->IsStackSlot());
      Operand src = ToOperand(instr->result());
      __ movsxlq(kScratchRegister, src);
      __ movq(src, kScratchRegister);
bool LCodeGen::GenerateJumpTable() {
  if (jump_table_.length() == 0) return !is_aborted();

  Comment(";;; -------------------- Jump table --------------------");
  for (int i = 0; i < jump_table_.length(); i++) {
    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
    __ bind(&table_entry->label);
    Address entry = table_entry->address;
    DeoptComment(table_entry->deopt_info);
    if (table_entry->needs_frame) {
      DCHECK(!info()->saves_caller_doubles());
      __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
      __ call(&needs_frame);
      if (info()->saves_caller_doubles()) {
        DCHECK(info()->IsStub());
        RestoreCallerDoubles();
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    info()->LogDeoptCallPosition(masm()->pc_offset(),
                                 table_entry->deopt_info.inlining_id);

  if (needs_frame.is_linked()) {
    __ bind(&needs_frame);
    // Stack layout at this point: 4: return address  <-- rsp

    // Reserve space for context and stub marker.
    __ subp(rsp, Immediate(2 * kPointerSize));
    __ Push(MemOperand(rsp, 2 * kPointerSize));  // Copy return address.
    __ Push(kScratchRegister);  // Save entry address for ret(0)

    // Stack layout now: 0: entry address  <-- rsp

    // Remember context pointer.
    __ movp(kScratchRegister,
            MemOperand(rbp, StandardFrameConstants::kContextOffset));
    // Save context pointer into the stack frame.
    __ movp(MemOperand(rsp, 3 * kPointerSize), kScratchRegister);

    // Create a stack frame.
    __ movp(MemOperand(rsp, 4 * kPointerSize), rbp);
    __ leap(rbp, MemOperand(rsp, 4 * kPointerSize));

    // This variant of deopt can only be used with stubs. Since we don't
    // have a function pointer to install in the stack frame that we're
    // building, install a special marker there instead.
    DCHECK(info()->IsStub());
    __ Move(MemOperand(rsp, 2 * kPointerSize), Smi::FromInt(StackFrame::STUB));

    // Final stack layout: 0: entry address  <-- rsp

  return !is_aborted();
bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that esi isn't trashed.
        __ pushq(rbp);  // Caller's frame pointer.
        __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
        __ Push(Smi::FromInt(StackFrame::STUB));
        __ leap(rbp, Operand(rsp, 2 * kPointerSize));
      Comment(";;; Deferred code");
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        frame_is_built_ = false;
      __ jmp(code->exit());

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
bool LCodeGen::GenerateSafepointTable() {
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());


bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();


bool LCodeGen::IsExternalConstant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsExternal();


bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
         chunk_->IsDehoistedKey(chunk_->LookupConstant(op));


bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(SmiValuesAre31Bits() && r.IsSmiOrTagged());
  return static_cast<int32_t>(reinterpret_cast<intptr_t>(Smi::FromInt(value)));


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());


static int ArgumentsOffsetWithoutFrame(int index) {
  return -(index + 1) * kPointerSize + kPCOnStackSize;
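  // With no frame, arguments live directly above the return address: assuming
  // kPointerSize == kPCOnStackSize == 8 on x64, index -1 maps to rsp + 8 and
  // index -2 to rsp + 16.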
Operand LCodeGen::ToOperand(LOperand* op) const {
  // Does not handle registers. In X64 assembler, plain registers are not
  // representable as an Operand.
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(rbp, StackSlotOffset(op->index()));
  // Retrieve parameter without eager stack-frame relative to the
  // stack pointer.
  return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);

    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
      translation->BeginCapturedObject(object_length);
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);

  if (op->IsStackSlot()) {
    int index = op->index();
    index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
      translation->StoreInt32StackSlot(index);
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
      translation->StoreInt32Register(reg);
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               int argc) {
  DCHECK(instr != NULL);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);
  DCHECK(instr->HasPointerMap());

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(rsi)) {
      __ movp(rsi, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ movp(rsi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|
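    // The loop below walks the environment chain and counts frames; only
    // JS_FUNCTION frames contribute to jsframe_count, while every frame in
    // the chain contributes to the total frame count.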
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      if (e->frame_type() == JS_FUNCTION) {

    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, environment->zone());
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
    Abort(kBailoutWasNotPrepared);

  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
    __ movl(rax, count_operand);
    __ subl(rax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ movl(rax, Immediate(FLAG_deopt_every_n_times));
    __ movl(count_operand, rax);
    DCHECK(frame_is_built_);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    __ movl(count_operand, rax);

  if (info()->ShouldTrapOnDeopt()) {
    if (cc != no_condition) {
      __ j(NegateCondition(cc), &done, Label::kNear);

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (cc == no_condition && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
      __ j(cc, &jump_table_.last().label);
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
                                              ? Deoptimizer::LAZY
                                              : Deoptimizer::EAGER;
  DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  data->SetWeakCellCache(Smi::FromInt(0));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  code->set_deoptimization_data(*data);
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK_EQ(0, deoptimization_literals_.length());
  for (auto function : chunk()->inlined_functions()) {
    DefineDeoptimizationLiteral(function);
  inlined_function_count_ = deoptimization_literals_.length();


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode, int argc) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), argc, Safepoint::kLazyDeopt);
void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(kind == expected_safepoint_kind_);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();

  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(), LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {


void LCodeGen::DoParameter(LParameter* instr) {


void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->result()).is(rax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
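  // Example with divisor +/-8 (mask 7): a non-negative dividend is simply
  // masked, so 13 % 8 == 5; a negative dividend is negated, masked, and
  // negated again, so -13 % 8 == -5, keeping the sign of the dividend as
  // JS requires.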
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ testl(dividend, dividend);
    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
    // Note that this is correct even for kMinInt operands.
    __ andl(dividend, Immediate(mask));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
    __ jmp(&done, Label::kNear);

  __ bind(&dividend_is_not_negative);
  __ andl(dividend, Immediate(mask));
void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(rax));
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);

  __ TruncatingDiv(dividend, Abs(divisor));
  __ imull(rdx, rdx, Immediate(Abs(divisor)));
  __ movl(rax, dividend);
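  // TruncatingDiv leaves the truncated quotient in rdx, so after the imull
  // above rdx holds quotient * |divisor|; the remainder to be computed in rax
  // is dividend - quotient * |divisor|, which takes the sign of the dividend.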
  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ j(not_zero, &remainder_not_zero, Label::kNear);
    __ cmpl(dividend, Immediate(0));
    DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
    __ bind(&remainder_not_zero);
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();

  Register left_reg = ToRegister(instr->left());
  DCHECK(left_reg.is(rax));
  Register right_reg = ToRegister(instr->right());
  DCHECK(!right_reg.is(rax));
  DCHECK(!right_reg.is(rdx));
  Register result_reg = ToRegister(instr->result());
  DCHECK(result_reg.is(rdx));

  // Check for x % 0, idiv would signal a divide error. We have to
  // deopt in this case because we can't return a NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(right_reg, right_reg);
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);

  // Check for kMinInt % -1, idiv would signal a divide error. We
  // have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ cmpl(left_reg, Immediate(kMinInt));
    __ j(not_zero, &no_overflow_possible, Label::kNear);
    __ cmpl(right_reg, Immediate(-1));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ Set(result_reg, 0);
      __ jmp(&done, Label::kNear);
    __ bind(&no_overflow_possible);

  // Sign extend dividend in eax into edx:eax, since we are using only the low
  // 32 bits of the values.

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive_left;
    __ testl(left_reg, left_reg);
    __ j(not_sign, &positive_left, Label::kNear);
    __ idivl(right_reg);
    __ testl(result_reg, result_reg);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
    __ jmp(&done, Label::kNear);
    __ bind(&positive_left);
  __ idivl(right_reg);
void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  if (divisor == 1) return;
  int32_t shift = WhichPowerOf2Abs(divisor);
    __ sarl(dividend, Immediate(shift));

  // If the divisor is negative, we have to negate and handle edge cases.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ sarl(dividend, Immediate(shift));

  Label not_kmin_int, done;
  __ j(no_overflow, &not_kmin_int, Label::kNear);
  __ movl(dividend, Immediate(kMinInt / divisor));
  __ jmp(&done, Label::kNear);
  __ bind(&not_kmin_int);
  __ sarl(dividend, Immediate(shift));
void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(rdx));
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(dividend, Abs(divisor));
    if (divisor < 0) __ negl(rdx);

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp3());
  DCHECK(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
  Label needs_adjustment, done;
  __ cmpl(dividend, Immediate(0));
  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ negl(rdx);
  __ jmp(&done, Label::kNear);
  __ bind(&needs_adjustment);
  __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
  __ TruncatingDiv(temp, Abs(divisor));
  if (divisor < 0) __ negl(rdx);
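  // Flooring only differs from truncation when the mathematical quotient is
  // negative and the division is inexact; biasing the dividend by +/-1 before
  // the truncating division and then subtracting one from the quotient gives
  // the floored result, e.g. floor(-7 / 2) == trunc(-6 / 2) - 1 == -4.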
// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());
  DCHECK(dividend.is(rax));
  DCHECK(remainder.is(rdx));
  DCHECK(result.is(rax));
  DCHECK(!divisor.is(rax));
  DCHECK(!divisor.is(rdx));

  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(divisor, divisor);
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ testl(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ testl(divisor, divisor);
    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmpl(dividend, Immediate(kMinInt));
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmpl(divisor, Immediate(-1));
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
    __ bind(&dividend_not_min_int);

  // Sign extend to rdx (= remainder).

  __ testl(remainder, remainder);
  __ j(zero, &done, Label::kNear);
  __ xorl(remainder, divisor);
  __ sarl(remainder, Immediate(31));
  __ addl(result, remainder);
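  // remainder ^ divisor is negative exactly when the two have opposite signs,
  // i.e. when idivl truncated a negative quotient toward zero; sarl by 31 then
  // yields -1 (otherwise 0), and adding it floors the quotient. Example:
  // -7 / 2 gives quotient -3 and remainder -1, so the adjustment -1 makes -4.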
void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmpl(dividend, Immediate(kMinInt));
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ testl(dividend, Immediate(mask));
    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
  __ Move(result, dividend);
  int32_t shift = WhichPowerOf2Abs(divisor);
    // The arithmetic shift is always OK, the 'if' is an optimization only.
    if (shift > 1) __ sarl(result, Immediate(31));
    __ shrl(result, Immediate(32 - shift));
    __ addl(result, dividend);
    __ sarl(result, Immediate(shift));
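    // The sequence above adds (2^shift - 1) to negative dividends before the
    // arithmetic shift, so the shift rounds toward zero instead of toward
    // negative infinity: e.g. -6 / 4 becomes (-6 + 3) >> 2 == -1, not -2.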
  if (divisor < 0) __ negl(result);


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(rdx));
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);

  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ negl(rdx);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ imull(rax, rax, Immediate(divisor));
    __ subl(rax, dividend);
    DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
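    // Multiplying the computed quotient back by the divisor and comparing with
    // the dividend detects an inexact division; if they differ, precision
    // would be lost, so deoptimize.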
// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  DCHECK(dividend.is(rax));
  DCHECK(remainder.is(rdx));
  DCHECK(ToRegister(instr->result()).is(rax));
  DCHECK(!divisor.is(rax));
  DCHECK(!divisor.is(rdx));

  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(divisor, divisor);
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ testl(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ testl(divisor, divisor);
    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmpl(dividend, Immediate(kMinInt));
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmpl(divisor, Immediate(-1));
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
    __ bind(&dividend_not_min_int);

  // Sign extend to rdx (= remainder).

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ testl(remainder, remainder);
    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ movp(kScratchRegister, left);
      __ movl(kScratchRegister, left);

  bool can_overflow =
      instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  if (right->IsConstantOperand()) {
    int32_t right_value = ToInteger32(LConstantOperand::cast(right));
    if (right_value == -1) {
    } else if (right_value == 0) {
      __ xorl(left, left);
    } else if (right_value == 2) {
      __ addl(left, left);
    } else if (!can_overflow) {
      // If the multiplication is known to not overflow, we
      // can use operations that don't set the overflow flag
      switch (right_value) {
          __ leal(left, Operand(left, left, times_2, 0));
          __ shll(left, Immediate(2));
          __ leal(left, Operand(left, left, times_4, 0));
          __ shll(left, Immediate(3));
          __ leal(left, Operand(left, left, times_8, 0));
          __ shll(left, Immediate(4));
          __ imull(left, left, Immediate(right_value));
      __ imull(left, left, Immediate(right_value));
  } else if (right->IsStackSlot()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger64(left, left);
      __ imulp(left, ToOperand(right));
      __ imull(left, ToOperand(right));
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger64(left, left);
      __ imulp(left, ToRegister(right));
      __ imull(left, ToRegister(right));

    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ testp(left, left);
      __ testl(left, left);
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      // Constant can't be represented as 32-bit Smi due to immediate size limit.
      DCHECK(SmiValuesAre32Bits()
             ? !instr->hydrogen_value()->representation().IsSmi()
             : SmiValuesAre31Bits());
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        __ cmpl(kScratchRegister, Immediate(0));
        DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
    } else if (right->IsStackSlot()) {
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ orp(kScratchRegister, ToOperand(right));
        __ orl(kScratchRegister, ToOperand(right));
      DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
      // Test the non-zero operand for negative sign.
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ orp(kScratchRegister, ToRegister(right));
        __ orl(kScratchRegister, ToRegister(right));
      DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  DCHECK(left->IsRegister());

  if (right->IsConstantOperand()) {
    int32_t right_operand =
        ToRepresentation(LConstantOperand::cast(right),
                         instr->hydrogen()->right()->representation());
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andl(ToRegister(left), Immediate(right_operand));
        __ orl(ToRegister(left), Immediate(right_operand));
      case Token::BIT_XOR:
        if (right_operand == int32_t(~0)) {
          __ notl(ToRegister(left));
          __ xorl(ToRegister(left), Immediate(right_operand));
  } else if (right->IsStackSlot()) {
    switch (instr->op()) {
      case Token::BIT_AND:
        if (instr->IsInteger32()) {
          __ andl(ToRegister(left), ToOperand(right));
          __ andp(ToRegister(left), ToOperand(right));
        if (instr->IsInteger32()) {
          __ orl(ToRegister(left), ToOperand(right));
          __ orp(ToRegister(left), ToOperand(right));
      case Token::BIT_XOR:
        if (instr->IsInteger32()) {
          __ xorl(ToRegister(left), ToOperand(right));
          __ xorp(ToRegister(left), ToOperand(right));
    DCHECK(right->IsRegister());
    switch (instr->op()) {
      case Token::BIT_AND:
        if (instr->IsInteger32()) {
          __ andl(ToRegister(left), ToRegister(right));
          __ andp(ToRegister(left), ToRegister(right));
        if (instr->IsInteger32()) {
          __ orl(ToRegister(left), ToRegister(right));
          __ orp(ToRegister(left), ToRegister(right));
      case Token::BIT_XOR:
        if (instr->IsInteger32()) {
          __ xorl(ToRegister(left), ToRegister(right));
          __ xorp(ToRegister(left), ToRegister(right));
void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  DCHECK(left->IsRegister());
  if (right->IsRegister()) {
    DCHECK(ToRegister(right).is(rcx));

    switch (instr->op()) {
        __ rorl_cl(ToRegister(left));
        __ sarl_cl(ToRegister(left));
        __ shrl_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
        __ shll_cl(ToRegister(left));
    int32_t value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
        if (shift_count != 0) {
          __ rorl(ToRegister(left), Immediate(shift_count));
        if (shift_count != 0) {
          __ sarl(ToRegister(left), Immediate(shift_count));
        if (shift_count != 0) {
          __ shrl(ToRegister(left), Immediate(shift_count));
        } else if (instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi()) {
            if (SmiValuesAre32Bits()) {
              __ shlp(ToRegister(left), Immediate(shift_count));
              DCHECK(SmiValuesAre31Bits());
              if (instr->can_deopt()) {
                if (shift_count != 1) {
                  __ shll(ToRegister(left), Immediate(shift_count - 1));
                __ Integer32ToSmi(ToRegister(left), ToRegister(left));
                DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
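                // With 31-bit smis, shifting left by shift_count could leave
                // the smi range. Shifting by shift_count - 1 and letting the
                // smi-tagging step supply the final doubling means the
                // overflow flag reflects that last shift, so we can
                // deoptimize here instead of producing a wrong value.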
                __ shll(ToRegister(left), Immediate(shift_count));
            __ shll(ToRegister(left), Immediate(shift_count));


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    int32_t right_operand =
        ToRepresentation(LConstantOperand::cast(right),
                         instr->hydrogen()->right()->representation());
    __ subl(ToRegister(left), Immediate(right_operand));
  } else if (right->IsRegister()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ subp(ToRegister(left), ToRegister(right));
      __ subl(ToRegister(left), ToRegister(right));
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ subp(ToRegister(left), ToOperand(right));
      __ subl(ToRegister(left), ToOperand(right));

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);


void LCodeGen::DoConstantI(LConstantI* instr) {
  Register dst = ToRegister(instr->result());
  if (instr->value() == 0) {
    __ movl(dst, Immediate(instr->value()));


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Move(ToRegister(instr->result()), instr->value());


void LCodeGen::DoConstantD(LConstantD* instr) {
  __ Move(ToDoubleRegister(instr->result()), instr->bits());


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ LoadAddress(ToRegister(instr->result()), instr->value());


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ Move(ToRegister(instr->result()), object);
void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Smi* index = instr->index();
  DCHECK(object.is(result));
  DCHECK(object.is(rax));

  if (FLAG_debug_code) {
    __ AssertNotSmi(object);
    __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
    __ Check(equal, kOperandIsNotADate);

  if (index->value() == 0) {
    __ movp(result, FieldOperand(object, JSDate::kValueOffset));
    Label runtime, done;
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      Operand stamp_operand = __ ExternalOperand(stamp);
      __ movp(kScratchRegister, stamp_operand);
      __ cmpp(kScratchRegister, FieldOperand(object,
                                             JSDate::kCacheStampOffset));
      __ j(not_equal, &runtime, Label::kNear);
      __ movp(result, FieldOperand(object, JSDate::kValueOffset +
                                           kPointerSize * index->value()));
      __ jmp(&done, Label::kNear);
    __ PrepareCallCFunction(2);
    __ movp(arg_reg_1, object);
    __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
Operand LCodeGen::BuildSeqStringOperand(Register string,
                                        LOperand* index,
                                        String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    STATIC_ASSERT(kCharSize == 1);
    return FieldOperand(string, SeqString::kHeaderSize + offset);
  return FieldOperand(
      string, ToRegister(index),
      encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
      SeqString::kHeaderSize);


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register result = ToRegister(instr->result());
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
    __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));

    __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
                              ? one_byte_seq_type : two_byte_seq_type));
    __ Check(equal, kUnexpectedStringType);

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ movzxbl(result, operand);
    __ movzxwl(result, operand);
void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    Register value = ToRegister(instr->value());
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (instr->value()->IsConstantOperand()) {
    int value = ToInteger32(LConstantOperand::cast(instr->value()));
    DCHECK_LE(0, value);
    if (encoding == String::ONE_BYTE_ENCODING) {
      DCHECK_LE(value, String::kMaxOneByteCharCode);
      __ movb(operand, Immediate(value));
      DCHECK_LE(value, String::kMaxUtf16CodeUnit);
      __ movw(operand, Immediate(value));
    Register value = ToRegister(instr->value());
    if (encoding == String::ONE_BYTE_ENCODING) {
      __ movb(operand, value);
      __ movw(operand, value);
void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();

  Representation target_rep = instr->hydrogen()->representation();
  bool is_p = target_rep.IsSmi() || target_rep.IsExternal();

  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
    if (right->IsConstantOperand()) {
      // No support for smi-immediates for 32-bit SMI.
      DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
      int32_t offset =
          ToRepresentation(LConstantOperand::cast(right),
                           instr->hydrogen()->right()->representation());
        __ leap(ToRegister(instr->result()),
                MemOperand(ToRegister(left), offset));
        __ leal(ToRegister(instr->result()),
                MemOperand(ToRegister(left), offset));
      Operand address(ToRegister(left), ToRegister(right), times_1, 0);
        __ leap(ToRegister(instr->result()), address);
        __ leal(ToRegister(instr->result()), address);
    if (right->IsConstantOperand()) {
      // No support for smi-immediates for 32-bit SMI.
      DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
      int32_t right_operand =
          ToRepresentation(LConstantOperand::cast(right),
                           instr->hydrogen()->right()->representation());
        __ addp(ToRegister(left), Immediate(right_operand));
        __ addl(ToRegister(left), Immediate(right_operand));
    } else if (right->IsRegister()) {
        __ addp(ToRegister(left), ToRegister(right));
        __ addl(ToRegister(left), ToRegister(right));
        __ addp(ToRegister(left), ToOperand(right));
        __ addl(ToRegister(left), ToOperand(right));
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1924 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1925 LOperand* left = instr->left();
1926 LOperand* right = instr->right();
1927 DCHECK(left->Equals(instr->result()));
1928 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1929 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1931 Condition condition = (operation == HMathMinMax::kMathMin)
1934 Register left_reg = ToRegister(left);
1935 if (right->IsConstantOperand()) {
1936 Immediate right_imm = Immediate(
1937 ToRepresentation(LConstantOperand::cast(right),
1938 instr->hydrogen()->right()->representation()));
1939 DCHECK(SmiValuesAre32Bits()
1940 ? !instr->hydrogen()->representation().IsSmi()
1941 : SmiValuesAre31Bits());
1942 __ cmpl(left_reg, right_imm);
1943 __ j(condition, &return_left, Label::kNear);
1944 __ movp(left_reg, right_imm);
1945 } else if (right->IsRegister()) {
1946 Register right_reg = ToRegister(right);
1947 if (instr->hydrogen_value()->representation().IsSmi()) {
1948 __ cmpp(left_reg, right_reg);
1950 __ cmpl(left_reg, right_reg);
1952 __ j(condition, &return_left, Label::kNear);
1953 __ movp(left_reg, right_reg);
1955 Operand right_op = ToOperand(right);
1956 if (instr->hydrogen_value()->representation().IsSmi()) {
1957 __ cmpp(left_reg, right_op);
1959 __ cmpl(left_reg, right_op);
1961 __ j(condition, &return_left, Label::kNear);
1962 __ movp(left_reg, right_op);
1964 __ bind(&return_left);
1966 DCHECK(instr->hydrogen()->representation().IsDouble());
1967 Label check_nan_left, check_zero, return_left, return_right;
1968 Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
1969 XMMRegister left_reg = ToDoubleRegister(left);
1970 XMMRegister right_reg = ToDoubleRegister(right);
1971 __ ucomisd(left_reg, right_reg);
1972 __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
1973 __ j(equal, &check_zero, Label::kNear); // left == right.
1974 __ j(condition, &return_left, Label::kNear);
1975 __ jmp(&return_right, Label::kNear);
1977 __ bind(&check_zero);
1978 XMMRegister xmm_scratch = double_scratch0();
1979 __ xorps(xmm_scratch, xmm_scratch);
1980 __ ucomisd(left_reg, xmm_scratch);
1981 __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
1982 // At this point, both left and right are either 0 or -0.
1983 if (operation == HMathMinMax::kMathMin) {
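// For two zeros, OR-ing the bit patterns sets the sign bit iff at least one
// operand is -0, which is exactly the IEEE minimum of +0 and -0.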
1984 __ orps(left_reg, right_reg);
1985 } else {
1986 // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
1987 __ addsd(left_reg, right_reg);
1988 }
1989 __ jmp(&return_left, Label::kNear);
1991 __ bind(&check_nan_left);
1992 __ ucomisd(left_reg, left_reg); // NaN check.
1993 __ j(parity_even, &return_left, Label::kNear);
1994 __ bind(&return_right);
1995 __ movaps(left_reg, right_reg);
1997 __ bind(&return_left);
1998 }
1999 }
2002 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2003 XMMRegister left = ToDoubleRegister(instr->left());
2004 XMMRegister right = ToDoubleRegister(instr->right());
2005 XMMRegister result = ToDoubleRegister(instr->result());
2006 switch (instr->op()) {
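// The AVX paths below use the non-destructive three-operand VEX encodings
// (vaddsd etc.), so |result| need not alias |left|; the SSE2 fallbacks are
// two-operand and rely on the register allocator having placed the result
// in |left| (see the DCHECKs).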
2007 case Token::ADD:
2008 if (CpuFeatures::IsSupported(AVX)) {
2009 CpuFeatureScope scope(masm(), AVX);
2010 __ vaddsd(result, left, right);
2011 } else {
2012 DCHECK(result.is(left));
2013 __ addsd(left, right);
2014 }
2015 break;
2016 case Token::SUB:
2017 if (CpuFeatures::IsSupported(AVX)) {
2018 CpuFeatureScope scope(masm(), AVX);
2019 __ vsubsd(result, left, right);
2020 } else {
2021 DCHECK(result.is(left));
2022 __ subsd(left, right);
2023 }
2024 break;
2025 case Token::MUL:
2026 if (CpuFeatures::IsSupported(AVX)) {
2027 CpuFeatureScope scope(masm(), AVX);
2028 __ vmulsd(result, left, right);
2029 } else {
2030 DCHECK(result.is(left));
2031 __ mulsd(left, right);
2032 }
2033 break;
2034 case Token::DIV:
2035 if (CpuFeatures::IsSupported(AVX)) {
2036 CpuFeatureScope scope(masm(), AVX);
2037 __ vdivsd(result, left, right);
2038 } else {
2039 DCHECK(result.is(left));
2040 __ divsd(left, right);
2041 }
2042 // Don't delete this mov. It may improve performance on some CPUs,
2043 // when there is a (v)mulsd depending on the result
2044 __ movaps(result, result);
2045 break;
2046 case Token::MOD: {
2047 XMMRegister xmm_scratch = double_scratch0();
2048 __ PrepareCallCFunction(2);
2049 __ movaps(xmm_scratch, left);
2050 DCHECK(right.is(xmm1));
2051 __ CallCFunction(
2052 ExternalReference::mod_two_doubles_operation(isolate()), 2);
2053 __ movaps(result, xmm_scratch);
2054 break;
2055 }
2056 default:
2057 UNREACHABLE();
2058 break;
2059 }
2060 }
2063 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2064 DCHECK(ToRegister(instr->context()).is(rsi));
2065 DCHECK(ToRegister(instr->left()).is(rdx));
2066 DCHECK(ToRegister(instr->right()).is(rax));
2067 DCHECK(ToRegister(instr->result()).is(rax));
2070 CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
2071 CallCode(code, RelocInfo::CODE_TARGET, instr);
2075 template<class InstrType>
2076 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2077 int left_block = instr->TrueDestination(chunk_);
2078 int right_block = instr->FalseDestination(chunk_);
2080 int next_block = GetNextEmittedBlock();
2082 if (right_block == left_block || cc == no_condition) {
2083 EmitGoto(left_block);
2084 } else if (left_block == next_block) {
2085 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
2086 } else if (right_block == next_block) {
2087 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2089 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2091 __ jmp(chunk_->GetAssemblyLabel(right_block));
2097 template <class InstrType>
2098 void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
2099 int true_block = instr->TrueDestination(chunk_);
2100 __ j(cc, chunk_->GetAssemblyLabel(true_block));
2104 template <class InstrType>
2105 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2106 int false_block = instr->FalseDestination(chunk_);
2107 __ j(cc, chunk_->GetAssemblyLabel(false_block));
2111 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2116 void LCodeGen::DoBranch(LBranch* instr) {
2117 Representation r = instr->hydrogen()->value()->representation();
2118 if (r.IsInteger32()) {
2119 DCHECK(!info()->IsStub());
2120 Register reg = ToRegister(instr->value());
2122 EmitBranch(instr, not_zero);
2123 } else if (r.IsSmi()) {
2124 DCHECK(!info()->IsStub());
2125 Register reg = ToRegister(instr->value());
2127 EmitBranch(instr, not_zero);
2128 } else if (r.IsDouble()) {
2129 DCHECK(!info()->IsStub());
2130 XMMRegister reg = ToDoubleRegister(instr->value());
2131 XMMRegister xmm_scratch = double_scratch0();
2132 __ xorps(xmm_scratch, xmm_scratch);
2133 __ ucomisd(reg, xmm_scratch);
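// ucomisd sets ZF both for equality and for an unordered (NaN) result, so
// branching on not_equal sends NaN and +/-0 to the false block, matching
// ToBoolean semantics for doubles.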
2134 EmitBranch(instr, not_equal);
2136 DCHECK(r.IsTagged());
2137 Register reg = ToRegister(instr->value());
2138 HType type = instr->hydrogen()->value()->type();
2139 if (type.IsBoolean()) {
2140 DCHECK(!info()->IsStub());
2141 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2142 EmitBranch(instr, equal);
2143 } else if (type.IsSmi()) {
2144 DCHECK(!info()->IsStub());
2145 __ SmiCompare(reg, Smi::FromInt(0));
2146 EmitBranch(instr, not_equal);
2147 } else if (type.IsJSArray()) {
2148 DCHECK(!info()->IsStub());
2149 EmitBranch(instr, no_condition);
2150 } else if (type.IsHeapNumber()) {
2151 DCHECK(!info()->IsStub());
2152 XMMRegister xmm_scratch = double_scratch0();
2153 __ xorps(xmm_scratch, xmm_scratch);
2154 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2155 EmitBranch(instr, not_equal);
2156 } else if (type.IsString()) {
2157 DCHECK(!info()->IsStub());
2158 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2159 EmitBranch(instr, not_equal);
2161 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2162 // Avoid deopts in the case where we've never executed this path before.
2163 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2165 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2166 // undefined -> false.
2167 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2168 __ j(equal, instr->FalseLabel(chunk_));
2170 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2172 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2173 __ j(equal, instr->TrueLabel(chunk_));
2175 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2176 __ j(equal, instr->FalseLabel(chunk_));
2178 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2180 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2181 __ j(equal, instr->FalseLabel(chunk_));
2184 if (expected.Contains(ToBooleanStub::SMI)) {
2185 // Smis: 0 -> false, all other -> true.
2186 __ Cmp(reg, Smi::FromInt(0));
2187 __ j(equal, instr->FalseLabel(chunk_));
2188 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2189 } else if (expected.NeedsMap()) {
2190 // If we need a map later and have a Smi -> deopt.
2191 __ testb(reg, Immediate(kSmiTagMask));
2192 DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
2195 const Register map = kScratchRegister;
2196 if (expected.NeedsMap()) {
2197 __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
2199 if (expected.CanBeUndetectable()) {
2200 // Undetectable -> false.
2201 __ testb(FieldOperand(map, Map::kBitFieldOffset),
2202 Immediate(1 << Map::kIsUndetectable));
2203 __ j(not_zero, instr->FalseLabel(chunk_));
2207 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2208 // spec object -> true.
2209 __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2210 __ j(above_equal, instr->TrueLabel(chunk_));
2213 if (expected.Contains(ToBooleanStub::STRING)) {
2214 // String value -> false iff empty.
2215 Label not_string;
2216 __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2217 __ j(above_equal, &not_string, Label::kNear);
2218 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2219 __ j(not_zero, instr->TrueLabel(chunk_));
2220 __ jmp(instr->FalseLabel(chunk_));
2221 __ bind(&not_string);
2224 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2225 // Symbol value -> true.
2226 __ CmpInstanceType(map, SYMBOL_TYPE);
2227 __ j(equal, instr->TrueLabel(chunk_));
2230 if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
2231 // SIMD value -> true.
2232 __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
2233 __ j(equal, instr->TrueLabel(chunk_));
2236 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2237 // heap number -> false iff +0, -0, or NaN.
2238 Label not_heap_number;
2239 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2240 __ j(not_equal, &not_heap_number, Label::kNear);
2241 XMMRegister xmm_scratch = double_scratch0();
2242 __ xorps(xmm_scratch, xmm_scratch);
2243 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2244 __ j(zero, instr->FalseLabel(chunk_));
2245 __ jmp(instr->TrueLabel(chunk_));
2246 __ bind(&not_heap_number);
2249 if (!expected.IsGeneric()) {
2250 // We've seen something for the first time -> deopt.
2251 // This can only happen if we are not generic already.
2252 DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
2259 void LCodeGen::EmitGoto(int block) {
2260 if (!IsNextEmittedBlock(block)) {
2261 __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
2266 void LCodeGen::DoGoto(LGoto* instr) {
2267 EmitGoto(instr->block_id());
2271 inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2272 Condition cond = no_condition;
2275 case Token::EQ_STRICT:
2279 case Token::NE_STRICT:
2283 cond = is_unsigned ? below : less;
2286 cond = is_unsigned ? above : greater;
2289 cond = is_unsigned ? below_equal : less_equal;
2292 cond = is_unsigned ? above_equal : greater_equal;
2295 case Token::INSTANCEOF:
2303 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2304 LOperand* left = instr->left();
2305 LOperand* right = instr->right();
2307 instr->is_double() ||
2308 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2309 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2310 Condition cc = TokenToCondition(instr->op(), is_unsigned);
2312 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2313 // We can statically evaluate the comparison.
2314 double left_val = ToDouble(LConstantOperand::cast(left));
2315 double right_val = ToDouble(LConstantOperand::cast(right));
2316 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2317 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2318 EmitGoto(next_block);
2320 if (instr->is_double()) {
2321 // Don't base result on EFLAGS when a NaN is involved. Instead
2322 // jump to the false block.
2323 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
2324 __ j(parity_even, instr->FalseLabel(chunk_));
2327 if (right->IsConstantOperand()) {
2328 value = ToInteger32(LConstantOperand::cast(right));
2329 if (instr->hydrogen_value()->representation().IsSmi()) {
2330 __ Cmp(ToRegister(left), Smi::FromInt(value));
2332 __ cmpl(ToRegister(left), Immediate(value));
2334 } else if (left->IsConstantOperand()) {
2335 value = ToInteger32(LConstantOperand::cast(left));
2336 if (instr->hydrogen_value()->representation().IsSmi()) {
2337 if (right->IsRegister()) {
2338 __ Cmp(ToRegister(right), Smi::FromInt(value));
2340 __ Cmp(ToOperand(right), Smi::FromInt(value));
2342 } else if (right->IsRegister()) {
2343 __ cmpl(ToRegister(right), Immediate(value));
2345 __ cmpl(ToOperand(right), Immediate(value));
2347 // We commuted the operands, so commute the condition.
2348 cc = CommuteCondition(cc);
2349 } else if (instr->hydrogen_value()->representation().IsSmi()) {
2350 if (right->IsRegister()) {
2351 __ cmpp(ToRegister(left), ToRegister(right));
2353 __ cmpp(ToRegister(left), ToOperand(right));
2356 if (right->IsRegister()) {
2357 __ cmpl(ToRegister(left), ToRegister(right));
2359 __ cmpl(ToRegister(left), ToOperand(right));
2363 EmitBranch(instr, cc);
2368 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2369 Register left = ToRegister(instr->left());
2371 if (instr->right()->IsConstantOperand()) {
2372 Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2373 __ Cmp(left, right);
2375 Register right = ToRegister(instr->right());
2376 __ cmpp(left, right);
2378 EmitBranch(instr, equal);
2382 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2383 if (instr->hydrogen()->representation().IsTagged()) {
2384 Register input_reg = ToRegister(instr->object());
2385 __ Cmp(input_reg, factory()->the_hole_value());
2386 EmitBranch(instr, equal);
2390 XMMRegister input_reg = ToDoubleRegister(instr->object());
2391 __ ucomisd(input_reg, input_reg);
2392 EmitFalseBranch(instr, parity_odd);
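// Only NaNs reach this point (parity_odd above means the value compared
// ordered with itself). The hole is encoded as a NaN with a distinguished
// upper 32-bit pattern (kHoleNanUpper32), so spill the double and inspect
// its high word; it is still addressable just below rsp after the addp.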
2394 __ subp(rsp, Immediate(kDoubleSize));
2395 __ movsd(MemOperand(rsp, 0), input_reg);
2396 __ addp(rsp, Immediate(kDoubleSize));
2398 int offset = sizeof(kHoleNanUpper32);
2399 __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
2400 EmitBranch(instr, equal);
2404 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2405 Representation rep = instr->hydrogen()->value()->representation();
2406 DCHECK(!rep.IsInteger32());
2408 if (rep.IsDouble()) {
2409 XMMRegister value = ToDoubleRegister(instr->value());
2410 XMMRegister xmm_scratch = double_scratch0();
2411 __ xorps(xmm_scratch, xmm_scratch);
2412 __ ucomisd(xmm_scratch, value);
2413 EmitFalseBranch(instr, not_equal);
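// The value compared equal to +0.0, so it is either +0.0 or -0.0; movmskpd
// extracts the sign bit into bit 0 of the destination to tell them apart.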
2414 __ movmskpd(kScratchRegister, value);
2415 __ testl(kScratchRegister, Immediate(1));
2416 EmitBranch(instr, not_zero);
2418 Register value = ToRegister(instr->value());
2419 Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2420 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
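// -0.0 has the upper (exponent) word 0x80000000 == INT32_MIN. Subtracting 1
// from that word overflows only for INT32_MIN, which is what the no_overflow
// check below relies on; the mantissa word must additionally be zero.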
2421 __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
2422 Immediate(0x1));
2423 EmitFalseBranch(instr, no_overflow);
2424 __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
2425 Immediate(0x00000000));
2426 EmitBranch(instr, equal);
2431 Condition LCodeGen::EmitIsString(Register input,
2433 Label* is_not_string,
2434 SmiCheck check_needed = INLINE_SMI_CHECK) {
2435 if (check_needed == INLINE_SMI_CHECK) {
2436 __ JumpIfSmi(input, is_not_string);
2439 Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2445 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2446 Register reg = ToRegister(instr->value());
2447 Register temp = ToRegister(instr->temp());
2449 SmiCheck check_needed =
2450 instr->hydrogen()->value()->type().IsHeapObject()
2451 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2453 Condition true_cond = EmitIsString(
2454 reg, temp, instr->FalseLabel(chunk_), check_needed);
2456 EmitBranch(instr, true_cond);
2460 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2461 Condition is_smi;
2462 if (instr->value()->IsRegister()) {
2463 Register input = ToRegister(instr->value());
2464 is_smi = masm()->CheckSmi(input);
2466 Operand input = ToOperand(instr->value());
2467 is_smi = masm()->CheckSmi(input);
2469 EmitBranch(instr, is_smi);
2473 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2474 Register input = ToRegister(instr->value());
2475 Register temp = ToRegister(instr->temp());
2477 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2478 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2480 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2481 __ testb(FieldOperand(temp, Map::kBitFieldOffset),
2482 Immediate(1 << Map::kIsUndetectable));
2483 EmitBranch(instr, not_zero);
2487 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2488 DCHECK(ToRegister(instr->context()).is(rsi));
2489 DCHECK(ToRegister(instr->left()).is(rdx));
2490 DCHECK(ToRegister(instr->right()).is(rax));
2492 Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
2493 CallCode(code, RelocInfo::CODE_TARGET, instr);
2496 EmitBranch(instr, TokenToCondition(instr->op(), false));
2500 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2501 InstanceType from = instr->from();
2502 InstanceType to = instr->to();
2503 if (from == FIRST_TYPE) return to;
2504 DCHECK(from == to || to == LAST_TYPE);
2505 return from;
2506 }
2509 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2510 InstanceType from = instr->from();
2511 InstanceType to = instr->to();
2512 if (from == to) return equal;
2513 if (to == LAST_TYPE) return above_equal;
2514 if (from == FIRST_TYPE) return below_equal;
2515 UNREACHABLE();
2516 return equal;
2517 }
2520 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2521 Register input = ToRegister(instr->value());
2523 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2524 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2527 __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
2528 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2532 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2533 Register input = ToRegister(instr->value());
2534 Register result = ToRegister(instr->result());
2536 __ AssertString(input);
2538 __ movl(result, FieldOperand(input, String::kHashFieldOffset));
2539 DCHECK(String::kHashShift >= kSmiTagSize);
2540 __ IndexFromHash(result, result);
2544 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2545 LHasCachedArrayIndexAndBranch* instr) {
2546 Register input = ToRegister(instr->value());
2548 __ testl(FieldOperand(input, String::kHashFieldOffset),
2549 Immediate(String::kContainsCachedArrayIndexMask));
2550 EmitBranch(instr, equal);
2554 // Branches to a label or falls through with the answer in the z flag.
2555 // Trashes the temp register.
2556 void LCodeGen::EmitClassOfTest(Label* is_true,
2558 Handle<String> class_name,
2562 DCHECK(!input.is(temp));
2563 DCHECK(!input.is(temp2));
2564 DCHECK(!temp.is(temp2));
2566 __ JumpIfSmi(input, is_false);
2568 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2569 // Assuming the following assertions, we can use the same compares to test
2570 // for both being a function type and being in the object type range.
2571 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2572 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2573 FIRST_SPEC_OBJECT_TYPE + 1);
2574 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2575 LAST_SPEC_OBJECT_TYPE - 1);
2576 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2577 __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2578 __ j(below, is_false);
2579 __ j(equal, is_true);
2580 __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2581 __ j(equal, is_true);
2583 // Faster code path to avoid two compares: subtract lower bound from the
2584 // actual type and do a signed compare with the width of the type range.
2585 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2586 __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2587 __ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2588 __ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2589 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2590 __ j(above, is_false);
2593 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2594 // Check if the constructor in the map is a function.
2595 __ GetMapConstructor(temp, temp, kScratchRegister);
2597 // Objects with a non-function constructor have class 'Object'.
2598 __ CmpInstanceType(kScratchRegister, JS_FUNCTION_TYPE);
2599 if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2600 __ j(not_equal, is_true);
2602 __ j(not_equal, is_false);
2605 // temp now contains the constructor function. Grab the
2606 // instance class name from there.
2607 __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2608 __ movp(temp, FieldOperand(temp,
2609 SharedFunctionInfo::kInstanceClassNameOffset));
2610 // The class name we are testing against is internalized since it's a literal.
2611 // The name in the constructor is internalized because of the way the context
2612 // is booted. This routine isn't expected to work for random API-created
2613 // classes and it doesn't have to because you can't access it with natives
2614 // syntax. Since both sides are internalized it is sufficient to use an
2615 // identity comparison.
2616 DCHECK(class_name->IsInternalizedString());
2617 __ Cmp(temp, class_name);
2618 // End with the answer in the z flag.
2622 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2623 Register input = ToRegister(instr->value());
2624 Register temp = ToRegister(instr->temp());
2625 Register temp2 = ToRegister(instr->temp2());
2626 Handle<String> class_name = instr->hydrogen()->class_name();
2628 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2629 class_name, input, temp, temp2);
2631 EmitBranch(instr, equal);
2635 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2636 Register reg = ToRegister(instr->value());
2638 __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2639 EmitBranch(instr, equal);
2643 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2644 DCHECK(ToRegister(instr->context()).is(rsi));
2645 DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
2646 DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
2647 DCHECK(ToRegister(instr->result()).is(rax));
2648 InstanceOfStub stub(isolate());
2649 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2653 void LCodeGen::DoHasInPrototypeChainAndBranch(
2654 LHasInPrototypeChainAndBranch* instr) {
2655 Register const object = ToRegister(instr->object());
2656 Register const object_map = kScratchRegister;
2657 Register const object_prototype = object_map;
2658 Register const prototype = ToRegister(instr->prototype());
2660 // The {object} must be a spec object. It's sufficient to know that {object}
2661 // is not a smi, since all other non-spec objects have {null} prototypes and
2662 // will be ruled out below.
2663 if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
2664 Condition is_smi = __ CheckSmi(object);
2665 EmitFalseBranch(instr, is_smi);
2668 // Loop through the {object}s prototype chain looking for the {prototype}.
2669 __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
2672 __ movp(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
2673 __ cmpp(object_prototype, prototype);
2674 EmitTrueBranch(instr, equal);
2675 __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
2676 EmitFalseBranch(instr, equal);
2677 __ movp(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
2682 void LCodeGen::DoCmpT(LCmpT* instr) {
2683 DCHECK(ToRegister(instr->context()).is(rsi));
2684 Token::Value op = instr->op();
2687 CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
2688 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2690 Condition condition = TokenToCondition(op, false);
2691 Label true_value, done;
2692 __ testp(rax, rax);
2693 __ j(condition, &true_value, Label::kNear);
2694 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2695 __ jmp(&done, Label::kNear);
2696 __ bind(&true_value);
2697 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2698 __ bind(&done);
2699 }
2702 void LCodeGen::DoReturn(LReturn* instr) {
2703 if (FLAG_trace && info()->IsOptimizing()) {
2704 // Preserve the return value on the stack and rely on the runtime call
2705 // to return the value in the same register. We're leaving the code
2706 // managed by the register allocator and tearing down the frame, it's
2707 // safe to write to the context register.
2708 __ Push(rax);
2709 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
2710 __ CallRuntime(Runtime::kTraceExit, 1);
2712 if (info()->saves_caller_doubles()) {
2713 RestoreCallerDoubles();
2715 int no_frame_start = -1;
2716 if (NeedsEagerFrame()) {
2717 __ movp(rsp, rbp);
2718 __ popq(rbp);
2719 no_frame_start = masm_->pc_offset();
2720 }
2721 if (instr->has_constant_parameter_count()) {
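// The "+ 1" drops the receiver slot in addition to the parameters.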
2722 __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
2723 rcx);
2724 } else {
2725 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2726 Register reg = ToRegister(instr->parameter_count());
2727 // The argument count parameter is a smi
2728 __ SmiToInteger32(reg, reg);
2729 Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
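// Pop the return address into a scratch register so the dynamically sized
// argument area can be removed from the stack before jumping back to it.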
2730 __ PopReturnAddressTo(return_addr_reg);
2731 __ shlp(reg, Immediate(kPointerSizeLog2));
2732 __ addp(rsp, reg);
2733 __ jmp(return_addr_reg);
2735 if (no_frame_start != -1) {
2736 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2741 template <class T>
2742 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2743 Register vector_register = ToRegister(instr->temp_vector());
2744 Register slot_register = LoadWithVectorDescriptor::SlotRegister();
2745 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
2746 DCHECK(slot_register.is(rax));
2748 AllowDeferredHandleDereference vector_structure_check;
2749 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2750 __ Move(vector_register, vector);
2751 // No need to allocate this register.
2752 FeedbackVectorICSlot slot = instr->hydrogen()->slot();
2753 int index = vector->GetIndex(slot);
2754 __ Move(slot_register, Smi::FromInt(index));
2758 template <class T>
2759 void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
2760 Register vector_register = ToRegister(instr->temp_vector());
2761 Register slot_register = ToRegister(instr->temp_slot());
2763 AllowDeferredHandleDereference vector_structure_check;
2764 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2765 __ Move(vector_register, vector);
2766 FeedbackVectorICSlot slot = instr->hydrogen()->slot();
2767 int index = vector->GetIndex(slot);
2768 __ Move(slot_register, Smi::FromInt(index));
2772 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2773 DCHECK(ToRegister(instr->context()).is(rsi));
2774 DCHECK(ToRegister(instr->global_object())
2775 .is(LoadDescriptor::ReceiverRegister()));
2776 DCHECK(ToRegister(instr->result()).is(rax));
2778 __ Move(LoadDescriptor::NameRegister(), instr->name());
2779 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2780 Handle<Code> ic =
2781 CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
2782 SLOPPY, PREMONOMORPHIC).code();
2783 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2787 void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
2788 DCHECK(ToRegister(instr->context()).is(rsi));
2789 DCHECK(ToRegister(instr->result()).is(rax));
2790 int const slot = instr->slot_index();
2791 int const depth = instr->depth();
2792 if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
2793 __ Set(LoadGlobalViaContextDescriptor::SlotRegister(), slot);
2794 Handle<Code> stub =
2795 CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
2796 CallCode(stub, RelocInfo::CODE_TARGET, instr);
2798 __ Push(Smi::FromInt(slot));
2799 __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
2804 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2805 Register context = ToRegister(instr->context());
2806 Register result = ToRegister(instr->result());
2807 __ movp(result, ContextOperand(context, instr->slot_index()));
2808 if (instr->hydrogen()->RequiresHoleCheck()) {
2809 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2810 if (instr->hydrogen()->DeoptimizesOnHole()) {
2811 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2812 } else {
2813 Label is_not_hole;
2814 __ j(not_equal, &is_not_hole, Label::kNear);
2815 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2816 __ bind(&is_not_hole);
2822 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2823 Register context = ToRegister(instr->context());
2824 Register value = ToRegister(instr->value());
2826 Operand target = ContextOperand(context, instr->slot_index());
2828 Label skip_assignment;
2829 if (instr->hydrogen()->RequiresHoleCheck()) {
2830 __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
2831 if (instr->hydrogen()->DeoptimizesOnHole()) {
2832 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2833 } else {
2834 __ j(not_equal, &skip_assignment);
2835 }
2836 }
2837 __ movp(target, value);
2839 if (instr->hydrogen()->NeedsWriteBarrier()) {
2840 SmiCheck check_needed =
2841 instr->hydrogen()->value()->type().IsHeapObject()
2842 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2843 int offset = Context::SlotOffset(instr->slot_index());
2844 Register scratch = ToRegister(instr->temp());
2845 __ RecordWriteContextSlot(context,
2850 EMIT_REMEMBERED_SET,
2854 __ bind(&skip_assignment);
2858 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2859 HObjectAccess access = instr->hydrogen()->access();
2860 int offset = access.offset();
2862 if (access.IsExternalMemory()) {
2863 Register result = ToRegister(instr->result());
2864 if (instr->object()->IsConstantOperand()) {
2865 DCHECK(result.is(rax));
2866 __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
2868 Register object = ToRegister(instr->object());
2869 __ Load(result, MemOperand(object, offset), access.representation());
2874 Register object = ToRegister(instr->object());
2875 if (instr->hydrogen()->representation().IsDouble()) {
2876 DCHECK(access.IsInobject());
2877 XMMRegister result = ToDoubleRegister(instr->result());
2878 __ movsd(result, FieldOperand(object, offset));
2882 Register result = ToRegister(instr->result());
2883 if (!access.IsInobject()) {
2884 __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
2888 Representation representation = access.representation();
2889 if (representation.IsSmi() && SmiValuesAre32Bits() &&
2890 instr->hydrogen()->representation().IsInteger32()) {
2891 if (FLAG_debug_code) {
2892 Register scratch = kScratchRegister;
2893 __ Load(scratch, FieldOperand(object, offset), representation);
2894 __ AssertSmi(scratch);
2897 // Read int value directly from upper half of the smi.
2898 STATIC_ASSERT(kSmiTag == 0);
2899 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
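// With 32-bit smis the untagged value lives in the upper half of the word,
// so on little-endian x64 it can be read directly at offset + 4.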
2900 offset += kPointerSize / 2;
2901 representation = Representation::Integer32();
2903 __ Load(result, FieldOperand(object, offset), representation);
2907 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2908 DCHECK(ToRegister(instr->context()).is(rsi));
2909 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
2910 DCHECK(ToRegister(instr->result()).is(rax));
2912 __ Move(LoadDescriptor::NameRegister(), instr->name());
2913 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
2915 CodeFactory::LoadICInOptimizedCode(
2916 isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
2917 instr->hydrogen()->initialization_state()).code();
2918 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2922 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2923 Register function = ToRegister(instr->function());
2924 Register result = ToRegister(instr->result());
2926 // Get the prototype or initial map from the function.
2927 __ movp(result,
2928 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2930 // Check that the function has a prototype or an initial map.
2931 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2932 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2934 // If the function does not have an initial map, we're done.
2935 Label done;
2936 __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
2937 __ j(not_equal, &done, Label::kNear);
2939 // Get the prototype from the initial map.
2940 __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
2942 __ bind(&done);
2943 }
2947 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2948 Register result = ToRegister(instr->result());
2949 __ LoadRoot(result, instr->index());
2953 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2954 Register arguments = ToRegister(instr->arguments());
2955 Register result = ToRegister(instr->result());
2957 if (instr->length()->IsConstantOperand() &&
2958 instr->index()->IsConstantOperand()) {
2959 int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2960 int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
2961 if (const_index >= 0 && const_index < const_length) {
2962 StackArgumentsAccessor args(arguments, const_length,
2963 ARGUMENTS_DONT_CONTAIN_RECEIVER);
2964 __ movp(result, args.GetArgumentOperand(const_index));
2965 } else if (FLAG_debug_code) {
2969 Register length = ToRegister(instr->length());
2970 // There are two words between the frame pointer and the last argument.
2971 // Subtracting from length accounts for one of them add one more.
2972 if (instr->index()->IsRegister()) {
2973 __ subl(length, ToRegister(instr->index()));
2975 __ subl(length, ToOperand(instr->index()));
2977 StackArgumentsAccessor args(arguments, length,
2978 ARGUMENTS_DONT_CONTAIN_RECEIVER);
2979 __ movp(result, args.GetArgumentOperand(0));
2984 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
2985 ElementsKind elements_kind = instr->elements_kind();
2986 LOperand* key = instr->key();
2987 if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
2988 Register key_reg = ToRegister(key);
2989 Representation key_representation =
2990 instr->hydrogen()->key()->representation();
2991 if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
2992 __ SmiToInteger64(key_reg, key_reg);
2993 } else if (instr->hydrogen()->IsDehoisted()) {
2994 // Sign extend key because it could be a 32 bit negative value
2995 // and the dehoisted address computation happens in 64 bits
2996 __ movsxlq(key_reg, key_reg);
2999 Operand operand(BuildFastArrayOperand(
3000 instr->elements(),
3001 key,
3002 instr->hydrogen()->key()->representation(),
3003 elements_kind,
3004 instr->base_offset()));
3006 if (elements_kind == FLOAT32_ELEMENTS) {
3007 XMMRegister result(ToDoubleRegister(instr->result()));
3008 __ movss(result, operand);
3009 __ cvtss2sd(result, result);
3010 } else if (elements_kind == FLOAT64_ELEMENTS) {
3011 __ movsd(ToDoubleRegister(instr->result()), operand);
3012 } else {
3013 Register result(ToRegister(instr->result()));
3014 switch (elements_kind) {
3015 case INT8_ELEMENTS:
3016 __ movsxbl(result, operand);
3017 break;
3018 case UINT8_ELEMENTS:
3019 case UINT8_CLAMPED_ELEMENTS:
3020 __ movzxbl(result, operand);
3021 break;
3022 case INT16_ELEMENTS:
3023 __ movsxwl(result, operand);
3024 break;
3025 case UINT16_ELEMENTS:
3026 __ movzxwl(result, operand);
3027 break;
3028 case INT32_ELEMENTS:
3029 __ movl(result, operand);
3030 break;
3031 case UINT32_ELEMENTS:
3032 __ movl(result, operand);
3033 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3034 __ testl(result, result);
3035 DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
3036 }
3037 break;
3038 case FLOAT32_ELEMENTS:
3039 case FLOAT64_ELEMENTS:
3040 case FAST_ELEMENTS:
3041 case FAST_SMI_ELEMENTS:
3042 case FAST_DOUBLE_ELEMENTS:
3043 case FAST_HOLEY_ELEMENTS:
3044 case FAST_HOLEY_SMI_ELEMENTS:
3045 case FAST_HOLEY_DOUBLE_ELEMENTS:
3046 case DICTIONARY_ELEMENTS:
3047 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
3048 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
3049 UNREACHABLE();
3050 break;
3051 }
3052 }
3053 }
3056 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3057 XMMRegister result(ToDoubleRegister(instr->result()));
3058 LOperand* key = instr->key();
3059 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
3060 instr->hydrogen()->IsDehoisted()) {
3061 // Sign extend key because it could be a 32 bit negative value
3062 // and the dehoisted address computation happens in 64 bits
3063 __ movsxlq(ToRegister(key), ToRegister(key));
3065 if (instr->hydrogen()->RequiresHoleCheck()) {
3066 Operand hole_check_operand = BuildFastArrayOperand(
3069 instr->hydrogen()->key()->representation(),
3070 FAST_DOUBLE_ELEMENTS,
3071 instr->base_offset() + sizeof(kHoleNanLower32));
3072 __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
3073 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
3076 Operand double_load_operand = BuildFastArrayOperand(
3079 instr->hydrogen()->key()->representation(),
3080 FAST_DOUBLE_ELEMENTS,
3081 instr->base_offset());
3082 __ movsd(result, double_load_operand);
3086 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3087 HLoadKeyed* hinstr = instr->hydrogen();
3088 Register result = ToRegister(instr->result());
3089 LOperand* key = instr->key();
3090 bool requires_hole_check = hinstr->RequiresHoleCheck();
3091 Representation representation = hinstr->representation();
3092 int offset = instr->base_offset();
3094 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
3095 instr->hydrogen()->IsDehoisted()) {
3096 // Sign extend key because it could be a 32 bit negative value
3097 // and the dehoisted address computation happens in 64 bits
3098 __ movsxlq(ToRegister(key), ToRegister(key));
3100 if (representation.IsInteger32() && SmiValuesAre32Bits() &&
3101 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3102 DCHECK(!requires_hole_check);
3103 if (FLAG_debug_code) {
3104 Register scratch = kScratchRegister;
3106 BuildFastArrayOperand(instr->elements(),
3108 instr->hydrogen()->key()->representation(),
3111 Representation::Smi());
3112 __ AssertSmi(scratch);
3114 // Read int value directly from upper half of the smi.
3115 STATIC_ASSERT(kSmiTag == 0);
3116 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
3117 offset += kPointerSize / 2;
3121 BuildFastArrayOperand(instr->elements(), key,
3122 instr->hydrogen()->key()->representation(),
3123 FAST_ELEMENTS, offset),
3126 // Check for the hole value.
3127 if (requires_hole_check) {
3128 if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3129 Condition smi = __ CheckSmi(result);
3130 DeoptimizeIf(NegateCondition(smi), instr, Deoptimizer::kNotASmi);
3132 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3133 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
3135 } else if (hinstr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
3136 DCHECK(hinstr->elements_kind() == FAST_HOLEY_ELEMENTS);
3138 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3139 __ j(not_equal, &done);
3140 if (info()->IsStub()) {
3141 // A stub can safely convert the hole to undefined only if the array
3142 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
3143 // it needs to bail out.
3144 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
3145 __ Cmp(FieldOperand(result, Cell::kValueOffset),
3146 Smi::FromInt(Isolate::kArrayProtectorValid));
3147 DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
3149 __ Move(result, isolate()->factory()->undefined_value());
3155 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3156 if (instr->is_fixed_typed_array()) {
3157 DoLoadKeyedExternalArray(instr);
3158 } else if (instr->hydrogen()->representation().IsDouble()) {
3159 DoLoadKeyedFixedDoubleArray(instr);
3161 DoLoadKeyedFixedArray(instr);
3166 Operand LCodeGen::BuildFastArrayOperand(
3167 LOperand* elements_pointer,
3169 Representation key_representation,
3170 ElementsKind elements_kind,
3172 Register elements_pointer_reg = ToRegister(elements_pointer);
3173 int shift_size = ElementsKindToShiftSize(elements_kind);
3174 if (key->IsConstantOperand()) {
3175 int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
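// The constant key is folded into the displacement below; shift_size can be
// up to 3, so any index with one of the top four bits set (including
// negative indices) could overflow the 32-bit displacement and is rejected.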
3176 if (constant_value & 0xF0000000) {
3177 Abort(kArrayIndexConstantValueTooBig);
3178 }
3179 return Operand(elements_pointer_reg,
3180 (constant_value << shift_size) + offset);
3182 // Guaranteed by ArrayInstructionInterface::KeyedAccessIndexRequirement().
3183 DCHECK(key_representation.IsInteger32());
3185 ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3186 return Operand(elements_pointer_reg,
3194 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3195 DCHECK(ToRegister(instr->context()).is(rsi));
3196 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3197 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3199 if (instr->hydrogen()->HasVectorAndSlot()) {
3200 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3203 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
3204 isolate(), instr->hydrogen()->language_mode(),
3205 instr->hydrogen()->initialization_state()).code();
3206 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3210 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3211 Register result = ToRegister(instr->result());
3213 if (instr->hydrogen()->from_inlined()) {
3214 __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
3216 // Check for arguments adapter frame.
3217 Label done, adapted;
3218 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3219 __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
3220 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3221 __ j(equal, &adapted, Label::kNear);
3223 // No arguments adaptor frame.
3224 __ movp(result, rbp);
3225 __ jmp(&done, Label::kNear);
3227 // Arguments adaptor frame present.
3229 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3231 // Result is the frame pointer for the frame if not adapted and for the real
3232 // frame below the adaptor frame if adapted.
3238 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3239 Register result = ToRegister(instr->result());
3243 // If no arguments adaptor frame the number of arguments is fixed.
3244 if (instr->elements()->IsRegister()) {
3245 __ cmpp(rbp, ToRegister(instr->elements()));
3247 __ cmpp(rbp, ToOperand(instr->elements()));
3249 __ movl(result, Immediate(scope()->num_parameters()));
3250 __ j(equal, &done, Label::kNear);
3252 // Arguments adaptor frame present. Get argument length from there.
3253 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3254 __ SmiToInteger32(result,
3256 ArgumentsAdaptorFrameConstants::kLengthOffset));
3258 // Argument length is in result register.
3263 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3264 Register receiver = ToRegister(instr->receiver());
3265 Register function = ToRegister(instr->function());
3267 // If the receiver is null or undefined, we have to pass the global
3268 // object as a receiver to normal functions. Values have to be
3269 // passed unchanged to builtins and strict-mode functions.
3270 Label global_object, receiver_ok;
3271 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3273 if (!instr->hydrogen()->known_function()) {
3274 // Do not transform the receiver to object for strict mode
3276 __ movp(kScratchRegister,
3277 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3278 __ testb(FieldOperand(kScratchRegister,
3279 SharedFunctionInfo::kStrictModeByteOffset),
3280 Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
3281 __ j(not_equal, &receiver_ok, dist);
3283 // Do not transform the receiver to object for builtins.
3284 __ testb(FieldOperand(kScratchRegister,
3285 SharedFunctionInfo::kNativeByteOffset),
3286 Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
3287 __ j(not_equal, &receiver_ok, dist);
3290 // Normal function. Replace undefined or null with global receiver.
3291 __ CompareRoot(receiver, Heap::kNullValueRootIndex);
3292 __ j(equal, &global_object, Label::kNear);
3293 __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
3294 __ j(equal, &global_object, Label::kNear);
3296 // The receiver should be a JS object.
3297 Condition is_smi = __ CheckSmi(receiver);
3298 DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
3299 __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
3300 DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
3302 __ jmp(&receiver_ok, Label::kNear);
3303 __ bind(&global_object);
3304 __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
3307 Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
3308 __ movp(receiver, FieldOperand(receiver, GlobalObject::kGlobalProxyOffset));
3310 __ bind(&receiver_ok);
3314 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3315 Register receiver = ToRegister(instr->receiver());
3316 Register function = ToRegister(instr->function());
3317 Register length = ToRegister(instr->length());
3318 Register elements = ToRegister(instr->elements());
3319 DCHECK(receiver.is(rax)); // Used for parameter count.
3320 DCHECK(function.is(rdi)); // Required by InvokeFunction.
3321 DCHECK(ToRegister(instr->result()).is(rax));
3323 // Copy the arguments to this function possibly from the
3324 // adaptor frame below it.
3325 const uint32_t kArgumentsLimit = 1 * KB;
3326 __ cmpp(length, Immediate(kArgumentsLimit));
3327 DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
3330 __ movp(receiver, length);
3332 // Loop through the arguments pushing them onto the execution
3333 // stack.
3334 Label invoke, loop;
3335 // length is a small non-negative integer, due to the test above.
3336 __ testl(length, length);
3337 __ j(zero, &invoke, Label::kNear);
3338 __ bind(&loop);
3339 StackArgumentsAccessor args(elements, length,
3340 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3341 __ Push(args.GetArgumentOperand(0));
3342 __ decl(length);
3343 __ j(not_zero, &loop);
3345 // Invoke the function.
3346 __ bind(&invoke);
3347 DCHECK(instr->HasPointerMap());
3348 LPointerMap* pointers = instr->pointer_map();
3349 SafepointGenerator safepoint_generator(
3350 this, pointers, Safepoint::kLazyDeopt);
3351 ParameterCount actual(rax);
3352 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3356 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3357 LOperand* argument = instr->value();
3358 EmitPushTaggedOperand(argument);
3362 void LCodeGen::DoDrop(LDrop* instr) {
3363 __ Drop(instr->count());
3367 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3368 Register result = ToRegister(instr->result());
3369 __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
3373 void LCodeGen::DoContext(LContext* instr) {
3374 Register result = ToRegister(instr->result());
3375 if (info()->IsOptimizing()) {
3376 __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
3378 // If there is no frame, the context must be in rsi.
3379 DCHECK(result.is(rsi));
3384 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3385 DCHECK(ToRegister(instr->context()).is(rsi));
3386 __ Push(instr->hydrogen()->pairs());
3387 __ Push(Smi::FromInt(instr->hydrogen()->flags()));
3388 CallRuntime(Runtime::kDeclareGlobals, 2, instr);
3392 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3393 int formal_parameter_count, int arity,
3394 LInstruction* instr) {
3395 bool dont_adapt_arguments =
3396 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3397 bool can_invoke_directly =
3398 dont_adapt_arguments || formal_parameter_count == arity;
3400 Register function_reg = rdi;
3401 LPointerMap* pointers = instr->pointer_map();
3403 if (can_invoke_directly) {
3405 __ movp(rsi, FieldOperand(function_reg, JSFunction::kContextOffset));
3407 // Always initialize rax to the number of actual arguments.
3411 if (function.is_identical_to(info()->closure())) {
3414 __ Call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
3417 // Set up deoptimization.
3418 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
3420 // We need to adapt arguments.
3421 SafepointGenerator generator(
3422 this, pointers, Safepoint::kLazyDeopt);
3423 ParameterCount count(arity);
3424 ParameterCount expected(formal_parameter_count);
3425 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
3430 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3431 DCHECK(ToRegister(instr->result()).is(rax));
3433 if (instr->hydrogen()->IsTailCall()) {
3434 if (NeedsEagerFrame()) __ leave();
3436 if (instr->target()->IsConstantOperand()) {
3437 LConstantOperand* target = LConstantOperand::cast(instr->target());
3438 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3439 __ jmp(code, RelocInfo::CODE_TARGET);
3441 DCHECK(instr->target()->IsRegister());
3442 Register target = ToRegister(instr->target());
3443 __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3447 LPointerMap* pointers = instr->pointer_map();
3448 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3450 if (instr->target()->IsConstantOperand()) {
3451 LConstantOperand* target = LConstantOperand::cast(instr->target());
3452 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3453 generator.BeforeCall(__ CallSize(code));
3454 __ call(code, RelocInfo::CODE_TARGET);
3456 DCHECK(instr->target()->IsRegister());
3457 Register target = ToRegister(instr->target());
3458 generator.BeforeCall(__ CallSize(target));
3459 __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3462 generator.AfterCall();
3467 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3468 DCHECK(ToRegister(instr->function()).is(rdi));
3469 DCHECK(ToRegister(instr->result()).is(rax));
3471 __ Set(rax, instr->arity());
3474 __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3476 LPointerMap* pointers = instr->pointer_map();
3477 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3479 bool is_self_call = false;
3480 if (instr->hydrogen()->function()->IsConstant()) {
3481 Handle<JSFunction> jsfun = Handle<JSFunction>::null();
3482 HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3483 jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
3484 is_self_call = jsfun.is_identical_to(info()->closure());
3490 Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
3491 generator.BeforeCall(__ CallSize(target));
3494 generator.AfterCall();
3498 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3499 Register input_reg = ToRegister(instr->value());
3500 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3501 Heap::kHeapNumberMapRootIndex);
3502 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
3504 Label slow, allocated, done;
3505 Register tmp = input_reg.is(rax) ? rcx : rax;
3506 Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
3508 // Preserve the value of all registers.
3509 PushSafepointRegistersScope scope(this);
3511 __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3512 // Check the sign of the argument. If the argument is positive, just
3513 // return it. We do not need to patch the stack since |input| and
3514 // |result| are the same register and |input| will be restored
3515 // unchanged by popping safepoint registers.
3516 __ testl(tmp, Immediate(HeapNumber::kSignMask));
3517 __ j(not_sign, &done);
3519 __ AllocateHeapNumber(tmp, tmp2, &slow);
3520 __ jmp(&allocated, Label::kNear);
3522 // Slow case: Call the runtime system to do the number allocation.
3524 CallRuntimeFromDeferred(
3525 Runtime::kAllocateHeapNumber, 0, instr, instr->context());
3526 // Set the pointer to the new heap number in tmp.
3527 if (!tmp.is(rax)) __ movp(tmp, rax);
3528 // Restore input_reg after call to runtime.
3529 __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3531 __ bind(&allocated);
3532 __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
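// Clear the sign bit of the raw 64-bit double representation by shifting it
// out and back in; the result is the bit pattern of the absolute value.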
3533 __ shlq(tmp2, Immediate(1));
3534 __ shrq(tmp2, Immediate(1));
3535 __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
3536 __ StoreToSafepointRegisterSlot(input_reg, tmp);
3537 __ bind(&done);
3538 }
3542 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3543 Register input_reg = ToRegister(instr->value());
3544 __ testl(input_reg, input_reg);
3545 Label is_positive;
3546 __ j(not_sign, &is_positive, Label::kNear);
3547 __ negl(input_reg); // Sets flags.
3548 DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
3549 __ bind(&is_positive);
3553 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
3554 Register input_reg = ToRegister(instr->value());
3555 __ testp(input_reg, input_reg);
3556 Label is_positive;
3557 __ j(not_sign, &is_positive, Label::kNear);
3558 __ negp(input_reg); // Sets flags.
3559 DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
3560 __ bind(&is_positive);
3564 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3565 // Class for deferred case.
3566 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3568 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3569 : LDeferredCode(codegen), instr_(instr) { }
3570 void Generate() override {
3571 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3573 LInstruction* instr() override { return instr_; }
3579 DCHECK(instr->value()->Equals(instr->result()));
3580 Representation r = instr->hydrogen()->value()->representation();
3582 if (r.IsDouble()) {
3583 XMMRegister scratch = double_scratch0();
3584 XMMRegister input_reg = ToDoubleRegister(instr->value());
3585 __ xorps(scratch, scratch);
3586 __ subsd(scratch, input_reg);
3587 __ andps(input_reg, scratch);
3588 } else if (r.IsInteger32()) {
3589 EmitIntegerMathAbs(instr);
3590 } else if (r.IsSmi()) {
3591 EmitSmiMathAbs(instr);
3592 } else { // Tagged case.
3593 DeferredMathAbsTaggedHeapNumber* deferred =
3594 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3595 Register input_reg = ToRegister(instr->value());
3597 __ JumpIfNotSmi(input_reg, deferred->entry());
3598 EmitSmiMathAbs(instr);
3599 __ bind(deferred->exit());
3604 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3605 XMMRegister xmm_scratch = double_scratch0();
3606 Register output_reg = ToRegister(instr->result());
3607 XMMRegister input_reg = ToDoubleRegister(instr->value());
3609 if (CpuFeatures::IsSupported(SSE4_1)) {
3610 CpuFeatureScope scope(masm(), SSE4_1);
3611 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3612 // Deoptimize if minus zero.
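// The raw bits of -0.0 are 0x8000000000000000 == INT64_MIN; subtracting 1
// overflows only for that value, so the overflow flag identifies -0 exactly.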
3613 __ movq(output_reg, input_reg);
3614 __ subq(output_reg, Immediate(1));
3615 DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero);
3617 __ roundsd(xmm_scratch, input_reg, kRoundDown);
3618 __ cvttsd2si(output_reg, xmm_scratch);
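// cvttsd2si produces 0x80000000 (INT32_MIN) for NaN or out-of-range inputs;
// comparing with 1 sets the overflow flag only for that value, so the deopt
// below catches exactly those cases.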
3619 __ cmpl(output_reg, Immediate(0x1));
3620 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3622 Label negative_sign, done;
3623 // Deoptimize on unordered.
3624 __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3625 __ ucomisd(input_reg, xmm_scratch);
3626 DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
3627 __ j(below, &negative_sign, Label::kNear);
3629 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3630 // Check for negative zero.
3631 Label positive_sign;
3632 __ j(above, &positive_sign, Label::kNear);
3633 __ movmskpd(output_reg, input_reg);
3634 __ testq(output_reg, Immediate(1));
3635 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
3636 __ Set(output_reg, 0);
3638 __ bind(&positive_sign);
3641 // Use truncating instruction (OK because input is positive).
3642 __ cvttsd2si(output_reg, input_reg);
3643 // Overflow is signalled with minint.
3644 __ cmpl(output_reg, Immediate(0x1));
3645 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3646 __ jmp(&done, Label::kNear);
3648 // Non-zero negative reaches here.
3649 __ bind(&negative_sign);
3650 // Truncate, then compare and compensate.
3651 __ cvttsd2si(output_reg, input_reg);
3652 __ Cvtlsi2sd(xmm_scratch, output_reg);
3653 __ ucomisd(input_reg, xmm_scratch);
3654 __ j(equal, &done, Label::kNear);
3655 __ subl(output_reg, Immediate(1));
3656 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3663 void LCodeGen::DoMathRound(LMathRound* instr) {
3664 const XMMRegister xmm_scratch = double_scratch0();
3665 Register output_reg = ToRegister(instr->result());
3666 XMMRegister input_reg = ToDoubleRegister(instr->value());
3667 XMMRegister input_temp = ToDoubleRegister(instr->temp());
3668 static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
3669 static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
3671 Label done, round_to_zero, below_one_half;
3672 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3673 __ movq(kScratchRegister, one_half);
3674 __ movq(xmm_scratch, kScratchRegister);
3675 __ ucomisd(xmm_scratch, input_reg);
3676 __ j(above, &below_one_half, Label::kNear);
3678 // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
3679 __ addsd(xmm_scratch, input_reg);
3680 __ cvttsd2si(output_reg, xmm_scratch);
3681 // Overflow is signalled with minint.
3682 __ cmpl(output_reg, Immediate(0x1));
3683 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3684 __ jmp(&done, dist);
3686 __ bind(&below_one_half);
3687 __ movq(kScratchRegister, minus_one_half);
3688 __ movq(xmm_scratch, kScratchRegister);
3689 __ ucomisd(xmm_scratch, input_reg);
3690 __ j(below_equal, &round_to_zero, Label::kNear);
3692 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
3693 // compare and compensate.
3694 __ movq(input_temp, input_reg); // Do not alter input_reg.
3695 __ subsd(input_temp, xmm_scratch);
3696 __ cvttsd2si(output_reg, input_temp);
3697 // Catch minint due to overflow, and to prevent overflow when compensating.
3698 __ cmpl(output_reg, Immediate(0x1));
3699 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3701 __ Cvtlsi2sd(xmm_scratch, output_reg);
3702 __ ucomisd(xmm_scratch, input_temp);
3703 __ j(equal, &done, dist);
3704 __ subl(output_reg, Immediate(1));
3705 // No overflow because we already ruled out minint.
3706 __ jmp(&done, dist);
3708 __ bind(&round_to_zero);
3709 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
3710 // we can ignore the difference between a result of -0 and +0.
3711 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3712 __ movq(output_reg, input_reg);
3713 __ testq(output_reg, output_reg);
3714 DeoptimizeIf(negative, instr, Deoptimizer::kMinusZero);
3716 __ Set(output_reg, 0);
3721 void LCodeGen::DoMathFround(LMathFround* instr) {
3722 XMMRegister input_reg = ToDoubleRegister(instr->value());
3723 XMMRegister output_reg = ToDoubleRegister(instr->result());
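// Math.fround: round to float32 (round-to-nearest by default) and widen
// back, so the result is the float32-representable double closest to the
// input.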
3724 __ cvtsd2ss(output_reg, input_reg);
3725 __ cvtss2sd(output_reg, output_reg);
3729 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3730 XMMRegister output = ToDoubleRegister(instr->result());
3731 if (instr->value()->IsDoubleRegister()) {
3732 XMMRegister input = ToDoubleRegister(instr->value());
3733 __ sqrtsd(output, input);
3735 Operand input = ToOperand(instr->value());
3736 __ sqrtsd(output, input);
3741 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3742 XMMRegister xmm_scratch = double_scratch0();
3743 XMMRegister input_reg = ToDoubleRegister(instr->value());
3744 DCHECK(ToDoubleRegister(instr->result()).is(input_reg));
3746 // Note that according to ECMA-262 15.8.2.13:
3747 // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN

  Label done, sqrt;
3750 // Check base for -Infinity. According to IEEE-754, double-precision
3751 // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
3752 __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
3753 __ movq(xmm_scratch, kScratchRegister);
3754 __ ucomisd(xmm_scratch, input_reg);
3755 // Comparing -Infinity with NaN results in "unordered", which sets the
3756 // zero flag as if both were equal. However, it also sets the carry flag.
3757 __ j(not_equal, &sqrt, Label::kNear);
3758 __ j(carry, &sqrt, Label::kNear);
3759 // If input is -Infinity, return Infinity.
3760 __ xorps(input_reg, input_reg);
3761 __ subsd(input_reg, xmm_scratch);
  __ jmp(&done, Label::kNear);

  __ bind(&sqrt);
  __ xorps(xmm_scratch, xmm_scratch);
  __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
  __ sqrtsd(input_reg, input_reg);
  __ bind(&done);
}
3773 void LCodeGen::DoPower(LPower* instr) {
3774 Representation exponent_type = instr->hydrogen()->right()->representation();
3775 // Having marked this as a call, we can use any registers.
3776 // Just make sure that the input/output registers are the expected ones.
3778 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3779 DCHECK(!instr->right()->IsRegister() ||
3780 ToRegister(instr->right()).is(tagged_exponent));
3781 DCHECK(!instr->right()->IsDoubleRegister() ||
3782 ToDoubleRegister(instr->right()).is(xmm1));
3783 DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
3784 DCHECK(ToDoubleRegister(instr->result()).is(xmm3));
  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
    __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
    DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}
3808 void LCodeGen::DoMathExp(LMathExp* instr) {
3809 XMMRegister input = ToDoubleRegister(instr->value());
3810 XMMRegister result = ToDoubleRegister(instr->result());
3811 XMMRegister temp0 = double_scratch0();
3812 Register temp1 = ToRegister(instr->temp1());
3813 Register temp2 = ToRegister(instr->temp2());
3815 MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
3819 void LCodeGen::DoMathLog(LMathLog* instr) {
3820 DCHECK(instr->value()->Equals(instr->result()));
3821 XMMRegister input_reg = ToDoubleRegister(instr->value());
3822 XMMRegister xmm_scratch = double_scratch0();
3823 Label positive, done, zero;
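  // Math.log(x): NaN for negative inputs, -Infinity for +-0, and otherwise
  // computed on the x87 FPU stack as ln(2) * log2(x).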
3824 __ xorps(xmm_scratch, xmm_scratch);
3825 __ ucomisd(input_reg, xmm_scratch);
3826 __ j(above, &positive, Label::kNear);
3827 __ j(not_carry, &zero, Label::kNear);
3828 __ pcmpeqd(input_reg, input_reg);
  __ jmp(&done, Label::kNear);
  __ bind(&zero);
  ExternalReference ninf =
      ExternalReference::address_of_negative_infinity();
  Operand ninf_operand = masm()->ExternalOperand(ninf);
  __ movsd(input_reg, ninf_operand);
  __ jmp(&done, Label::kNear);
  __ bind(&positive);
  __ fldln2();
  __ subp(rsp, Immediate(kDoubleSize));
  __ movsd(Operand(rsp, 0), input_reg);
  __ fld_d(Operand(rsp, 0));
  __ fyl2x();
  __ fstp_d(Operand(rsp, 0));
  __ movsd(input_reg, Operand(rsp, 0));
  __ addp(rsp, Immediate(kDoubleSize));
  __ bind(&done);
}
3849 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3850 Register input = ToRegister(instr->value());
3851 Register result = ToRegister(instr->result());
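  // Lzcntl counts leading zero bits; when the LZCNT instruction is not
  // available the macro assembler is expected to fall back to a BSR-based
  // sequence with the same result.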
3853 __ Lzcntl(result, input);
3857 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3858 DCHECK(ToRegister(instr->context()).is(rsi));
3859 DCHECK(ToRegister(instr->function()).is(rdi));
3860 DCHECK(instr->HasPointerMap());
3862 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3863 if (known_function.is_null()) {
3864 LPointerMap* pointers = instr->pointer_map();
3865 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3866 ParameterCount count(instr->arity());
    __ InvokeFunction(rdi, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(), instr);
  }
}
3876 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3877 DCHECK(ToRegister(instr->context()).is(rsi));
3878 DCHECK(ToRegister(instr->function()).is(rdi));
3879 DCHECK(ToRegister(instr->result()).is(rax));
3881 int arity = instr->arity();
3882 CallFunctionFlags flags = instr->hydrogen()->function_flags();
3883 if (instr->hydrogen()->HasVectorAndSlot()) {
3884 Register slot_register = ToRegister(instr->temp_slot());
3885 Register vector_register = ToRegister(instr->temp_vector());
3886 DCHECK(slot_register.is(rdx));
3887 DCHECK(vector_register.is(rbx));
3889 AllowDeferredHandleDereference vector_structure_check;
3890 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
3891 int index = vector->GetIndex(instr->hydrogen()->slot());
3893 __ Move(vector_register, vector);
3894 __ Move(slot_register, Smi::FromInt(index));
    CallICState::CallType call_type =
        (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
    Handle<Code> ic =
        CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    CallFunctionStub stub(isolate(), arity, flags);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}
3909 void LCodeGen::DoCallNew(LCallNew* instr) {
3910 DCHECK(ToRegister(instr->context()).is(rsi));
3911 DCHECK(ToRegister(instr->constructor()).is(rdi));
3912 DCHECK(ToRegister(instr->result()).is(rax));
3914 __ Set(rax, instr->arity());
  // No cell in rbx for construct type feedback in optimized code.
3916 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
3917 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
3918 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3922 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3923 DCHECK(ToRegister(instr->context()).is(rsi));
3924 DCHECK(ToRegister(instr->constructor()).is(rdi));
3925 DCHECK(ToRegister(instr->result()).is(rax));
3927 __ Set(rax, instr->arity());
3928 if (instr->arity() == 1) {
3929 // We only need the allocation site for the case we have a length argument.
3930 // The case may bail out to the runtime, which will determine the correct
3931 // elements kind with the site.
    __ Move(rbx, instr->hydrogen()->site());
  } else {
    __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
  }
3937 ElementsKind kind = instr->hydrogen()->elements_kind();
3938 AllocationSiteOverrideMode override_mode =
3939 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
3944 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
3945 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
3950 // We might need a change here
3951 // look at the first argument
      __ movp(rcx, Operand(rsp, 0));
      __ testp(rcx, rcx);
      __ j(zero, &packed_case, Label::kNear);
3956 ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
3960 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3961 __ jmp(&done, Label::kNear);
      __ bind(&packed_case);
    }
    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  }
}
3975 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3976 DCHECK(ToRegister(instr->context()).is(rsi));
3977 CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
3981 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3982 Register function = ToRegister(instr->function());
3983 Register code_object = ToRegister(instr->code_object());
3984 __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
3985 __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
3989 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3990 Register result = ToRegister(instr->result());
3991 Register base = ToRegister(instr->base_object());
3992 if (instr->offset()->IsConstantOperand()) {
3993 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
3994 __ leap(result, Operand(base, ToInteger32(offset)));
3996 Register offset = ToRegister(instr->offset());
3997 __ leap(result, Operand(base, offset, times_1, 0));
4002 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4003 HStoreNamedField* hinstr = instr->hydrogen();
4004 Representation representation = instr->representation();
4006 HObjectAccess access = hinstr->access();
4007 int offset = access.offset();
4009 if (access.IsExternalMemory()) {
4010 DCHECK(!hinstr->NeedsWriteBarrier());
4011 Register value = ToRegister(instr->value());
4012 if (instr->object()->IsConstantOperand()) {
4013 DCHECK(value.is(rax));
4014 LConstantOperand* object = LConstantOperand::cast(instr->object());
      __ store_rax(ToExternalReference(object));
    } else {
      Register object = ToRegister(instr->object());
      __ Store(MemOperand(object, offset), value, representation);
    }
    return;
  }
4023 Register object = ToRegister(instr->object());
4024 __ AssertNotSmi(object);
4026 DCHECK(!representation.IsSmi() ||
4027 !instr->value()->IsConstantOperand() ||
4028 IsInteger32Constant(LConstantOperand::cast(instr->value())));
4029 if (!FLAG_unbox_double_fields && representation.IsDouble()) {
4030 DCHECK(access.IsInobject());
4031 DCHECK(!hinstr->has_transition());
4032 DCHECK(!hinstr->NeedsWriteBarrier());
4033 XMMRegister value = ToDoubleRegister(instr->value());
    __ movsd(FieldOperand(object, offset), value);
    return;
  }
4038 if (hinstr->has_transition()) {
4039 Handle<Map> transition = hinstr->transition_map();
4040 AddDeprecationDependency(transition);
4041 if (!hinstr->NeedsWriteBarrierForMap()) {
      __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
    } else {
4044 Register temp = ToRegister(instr->temp());
4045 __ Move(kScratchRegister, transition);
4046 __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
4047 // Update the write barrier for the map field.
      __ RecordWriteForMap(object, kScratchRegister, temp, kSaveFPRegs);
    }
  }

  // Do the store.
4056 Register write_register = object;
4057 if (!access.IsInobject()) {
4058 write_register = ToRegister(instr->temp());
    __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
  }
4062 if (representation.IsSmi() && SmiValuesAre32Bits() &&
4063 hinstr->value()->representation().IsInteger32()) {
4064 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4065 if (FLAG_debug_code) {
4066 Register scratch = kScratchRegister;
4067 __ Load(scratch, FieldOperand(write_register, offset), representation);
      __ AssertSmi(scratch);
    }
4070 // Store int value directly to upper half of the smi.
4071 STATIC_ASSERT(kSmiTag == 0);
4072 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
4073 offset += kPointerSize / 2;
    representation = Representation::Integer32();
  }
4077 Operand operand = FieldOperand(write_register, offset);
4079 if (FLAG_unbox_double_fields && representation.IsDouble()) {
4080 DCHECK(access.IsInobject());
4081 XMMRegister value = ToDoubleRegister(instr->value());
4082 __ movsd(operand, value);
4084 } else if (instr->value()->IsRegister()) {
4085 Register value = ToRegister(instr->value());
    __ Store(operand, value, representation);
  } else {
4088 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4089 if (IsInteger32Constant(operand_value)) {
4090 DCHECK(!hinstr->NeedsWriteBarrier());
4091 int32_t value = ToInteger32(operand_value);
4092 if (representation.IsSmi()) {
        __ Move(operand, Smi::FromInt(value));
      } else {
        __ movl(operand, Immediate(value));
      }
4099 } else if (IsExternalConstant(operand_value)) {
4100 DCHECK(!hinstr->NeedsWriteBarrier());
4101 ExternalReference ptr = ToExternalReference(operand_value);
4102 __ Move(kScratchRegister, ptr);
4103 __ movp(operand, kScratchRegister);
4105 Handle<Object> handle_value = ToHandle(operand_value);
4106 DCHECK(!hinstr->NeedsWriteBarrier());
      __ Move(operand, handle_value);
    }
  }
4111 if (hinstr->NeedsWriteBarrier()) {
4112 Register value = ToRegister(instr->value());
4113 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
4114 // Update the write barrier for the object for in-object properties.
    __ RecordWriteField(write_register, offset, value, temp, kSaveFPRegs,
                        EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
                        hinstr->PointersToHereCheckForValue());
  }
}
4127 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4128 DCHECK(ToRegister(instr->context()).is(rsi));
4129 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4130 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4132 if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
  }
4136 __ Move(StoreDescriptor::NameRegister(), instr->hydrogen()->name());
4137 Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
4138 isolate(), instr->language_mode(),
4139 instr->hydrogen()->initialization_state()).code();
4140 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4144 void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
4145 DCHECK(ToRegister(instr->context()).is(rsi));
4146 DCHECK(ToRegister(instr->value())
4147 .is(StoreGlobalViaContextDescriptor::ValueRegister()));
4148 int const slot = instr->slot_index();
4149 int const depth = instr->depth();
4150 if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
4151 __ Set(StoreGlobalViaContextDescriptor::SlotRegister(), slot);
    Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
                            isolate(), depth, instr->language_mode())
                            .code();
    CallCode(stub, RelocInfo::CODE_TARGET, instr);
  } else {
4157 __ Push(Smi::FromInt(slot));
4158 __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
4159 __ CallRuntime(is_strict(instr->language_mode())
4160 ? Runtime::kStoreGlobalViaContext_Strict
                       : Runtime::kStoreGlobalViaContext_Sloppy,
                   2);
  }
}
4167 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4168 Representation representation = instr->hydrogen()->length()->representation();
4169 DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
4170 DCHECK(representation.IsSmiOrInteger32());
4172 Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal;
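  // When the length is a constant the operands below are emitted in swapped
  // order (index compared against length), so the condition is commuted to
  // keep deoptimizing exactly when the index is out of bounds.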
4173 if (instr->length()->IsConstantOperand()) {
4174 int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
4175 Register index = ToRegister(instr->index());
4176 if (representation.IsSmi()) {
      __ Cmp(index, Smi::FromInt(length));
    } else {
      __ cmpl(index, Immediate(length));
    }
    cc = CommuteCondition(cc);
  } else if (instr->index()->IsConstantOperand()) {
    int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
    if (instr->length()->IsRegister()) {
      Register length = ToRegister(instr->length());
      if (representation.IsSmi()) {
        __ Cmp(length, Smi::FromInt(index));
      } else {
        __ cmpl(length, Immediate(index));
      }
    } else {
      Operand length = ToOperand(instr->length());
      if (representation.IsSmi()) {
        __ Cmp(length, Smi::FromInt(index));
      } else {
        __ cmpl(length, Immediate(index));
      }
    }
  } else {
    Register index = ToRegister(instr->index());
    if (instr->length()->IsRegister()) {
      Register length = ToRegister(instr->length());
      if (representation.IsSmi()) {
        __ cmpp(length, index);
      } else {
        __ cmpl(length, index);
      }
    } else {
      Operand length = ToOperand(instr->length());
      if (representation.IsSmi()) {
        __ cmpp(length, index);
      } else {
        __ cmpl(length, index);
      }
    }
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
  }
}
4228 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4229 ElementsKind elements_kind = instr->elements_kind();
4230 LOperand* key = instr->key();
4231 if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
4232 Register key_reg = ToRegister(key);
4233 Representation key_representation =
4234 instr->hydrogen()->key()->representation();
4235 if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
4236 __ SmiToInteger64(key_reg, key_reg);
4237 } else if (instr->hydrogen()->IsDehoisted()) {
4238 // Sign extend key because it could be a 32 bit negative value
4239 // and the dehoisted address computation happens in 64 bits
      __ movsxlq(key_reg, key_reg);
    }
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(), key, instr->hydrogen()->key()->representation(),
      elements_kind, instr->base_offset()));
4250 if (elements_kind == FLOAT32_ELEMENTS) {
4251 XMMRegister value(ToDoubleRegister(instr->value()));
4252 __ cvtsd2ss(value, value);
4253 __ movss(operand, value);
4254 } else if (elements_kind == FLOAT64_ELEMENTS) {
    __ movsd(operand, ToDoubleRegister(instr->value()));
  } else {
    Register value(ToRegister(instr->value()));
    switch (elements_kind) {
      case INT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ movb(operand, value);
        break;
4264 case INT16_ELEMENTS:
4265 case UINT16_ELEMENTS:
        __ movw(operand, value);
        break;
4268 case INT32_ELEMENTS:
4269 case UINT32_ELEMENTS:
        __ movl(operand, value);
        break;
4272 case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_ELEMENTS:
4275 case FAST_SMI_ELEMENTS:
4276 case FAST_DOUBLE_ELEMENTS:
4277 case FAST_HOLEY_ELEMENTS:
4278 case FAST_HOLEY_SMI_ELEMENTS:
4279 case FAST_HOLEY_DOUBLE_ELEMENTS:
4280 case DICTIONARY_ELEMENTS:
4281 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}
4290 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4291 XMMRegister value = ToDoubleRegister(instr->value());
4292 LOperand* key = instr->key();
4293 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
4294 instr->hydrogen()->IsDehoisted()) {
4295 // Sign extend key because it could be a 32 bit negative value
4296 // and the dehoisted address computation happens in 64 bits
4297 __ movsxlq(ToRegister(key), ToRegister(key));
4299 if (instr->NeedsCanonicalization()) {
4300 XMMRegister xmm_scratch = double_scratch0();
4301 // Turn potential sNaN value into qNaN.
4302 __ xorps(xmm_scratch, xmm_scratch);
4303 __ subsd(value, xmm_scratch);
  Operand double_store_operand = BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      instr->base_offset());
4313 __ movsd(double_store_operand, value);
4317 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4318 HStoreKeyed* hinstr = instr->hydrogen();
4319 LOperand* key = instr->key();
4320 int offset = instr->base_offset();
4321 Representation representation = hinstr->value()->representation();
4323 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
4324 instr->hydrogen()->IsDehoisted()) {
4325 // Sign extend key because it could be a 32 bit negative value
4326 // and the dehoisted address computation happens in 64 bits
4327 __ movsxlq(ToRegister(key), ToRegister(key));
4329 if (representation.IsInteger32() && SmiValuesAre32Bits()) {
4330 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4331 DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
4332 if (FLAG_debug_code) {
      Register scratch = kScratchRegister;
      __ Load(scratch,
              BuildFastArrayOperand(instr->elements(), key,
                                    instr->hydrogen()->key()->representation(),
                                    FAST_ELEMENTS, offset),
              Representation::Smi());
      __ AssertSmi(scratch);
    }
4343 // Store int value directly to upper half of the smi.
4344 STATIC_ASSERT(kSmiTag == 0);
4345 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
    offset += kPointerSize / 2;
  }

  Operand operand =
      BuildFastArrayOperand(instr->elements(), key,
                            instr->hydrogen()->key()->representation(),
                            FAST_ELEMENTS, offset);
4355 if (instr->value()->IsRegister()) {
    __ Store(operand, ToRegister(instr->value()), representation);
  } else {
4358 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4359 if (IsInteger32Constant(operand_value)) {
4360 int32_t value = ToInteger32(operand_value);
4361 if (representation.IsSmi()) {
        __ Move(operand, Smi::FromInt(value));
      } else {
        __ movl(operand, Immediate(value));
      }
    } else {
      Handle<Object> handle_value = ToHandle(operand_value);
      __ Move(operand, handle_value);
    }
  }
4373 if (hinstr->NeedsWriteBarrier()) {
4374 Register elements = ToRegister(instr->elements());
4375 DCHECK(instr->value()->IsRegister());
4376 Register value = ToRegister(instr->value());
4377 DCHECK(!key->IsConstantOperand());
4378 SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
4379 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4380 // Compute address of modified element and store it into key register.
4381 Register key_reg(ToRegister(key));
4382 __ leap(key_reg, operand);
    __ RecordWrite(elements, key_reg, value, kSaveFPRegs, EMIT_REMEMBERED_SET,
                   check_needed, hinstr->PointersToHereCheckForValue());
  }
}
4394 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4395 if (instr->is_fixed_typed_array()) {
4396 DoStoreKeyedExternalArray(instr);
4397 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}
4405 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4406 DCHECK(ToRegister(instr->context()).is(rsi));
4407 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4408 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4409 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4411 if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
  }
4415 Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
4416 isolate(), instr->language_mode(),
4417 instr->hydrogen()->initialization_state()).code();
4418 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4422 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
4423 class DeferredMaybeGrowElements final : public LDeferredCode {
4425 DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
4426 : LDeferredCode(codegen), instr_(instr) {}
4427 void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LMaybeGrowElements* instr_;
  };
4434 Register result = rax;
4435 DeferredMaybeGrowElements* deferred =
4436 new (zone()) DeferredMaybeGrowElements(this, instr);
4437 LOperand* key = instr->key();
4438 LOperand* current_capacity = instr->current_capacity();
4440 DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
4441 DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
4442 DCHECK(key->IsConstantOperand() || key->IsRegister());
4443 DCHECK(current_capacity->IsConstantOperand() ||
4444 current_capacity->IsRegister());
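  // Jump to the deferred code (which grows the backing store via
  // GrowArrayElementsStub) whenever key >= current_capacity.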
4446 if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
4447 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4448 int32_t constant_capacity =
4449 ToInteger32(LConstantOperand::cast(current_capacity));
4450 if (constant_key >= constant_capacity) {
4452 __ jmp(deferred->entry());
4454 } else if (key->IsConstantOperand()) {
4455 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4456 __ cmpl(ToRegister(current_capacity), Immediate(constant_key));
4457 __ j(less_equal, deferred->entry());
4458 } else if (current_capacity->IsConstantOperand()) {
4459 int32_t constant_capacity =
4460 ToInteger32(LConstantOperand::cast(current_capacity));
4461 __ cmpl(ToRegister(key), Immediate(constant_capacity));
4462 __ j(greater_equal, deferred->entry());
4464 __ cmpl(ToRegister(key), ToRegister(current_capacity));
4465 __ j(greater_equal, deferred->entry());
4468 if (instr->elements()->IsRegister()) {
    __ movp(result, ToRegister(instr->elements()));
  } else {
    __ movp(result, ToOperand(instr->elements()));
  }
4474 __ bind(deferred->exit());
4478 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
4479 // TODO(3095996): Get rid of this. For now, we need to make the
4480 // result register contain a valid pointer because it is already
4481 // contained in the register pointer map.
4482 Register result = rax;
4483 __ Move(result, Smi::FromInt(0));
4485 // We have to call a stub.
4487 PushSafepointRegistersScope scope(this);
4488 if (instr->object()->IsConstantOperand()) {
4489 LConstantOperand* constant_object =
4490 LConstantOperand::cast(instr->object());
4491 if (IsSmiConstant(constant_object)) {
4492 Smi* immediate = ToSmi(constant_object);
      __ Move(result, immediate);
    } else {
      Handle<Object> handle_value = ToHandle(constant_object);
      __ Move(result, handle_value);
    }
  } else if (instr->object()->IsRegister()) {
    __ Move(result, ToRegister(instr->object()));
  } else {
    __ movp(result, ToOperand(instr->object()));
  }
4504 LOperand* key = instr->key();
4505 if (key->IsConstantOperand()) {
    __ Move(rbx, ToSmi(LConstantOperand::cast(key)));
  } else {
    __ Move(rbx, ToRegister(key));
    __ Integer32ToSmi(rbx, rbx);
  }
4512 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
                             instr->hydrogen()->kind());
  __ CallStub(&stub);
4515 RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
4516 __ StoreToSafepointRegisterSlot(result, result);
4519 // Deopt on smi, which means the elements array changed to dictionary mode.
4520 Condition is_smi = __ CheckSmi(result);
4521 DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
4525 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4526 Register object_reg = ToRegister(instr->object());
4528 Handle<Map> from_map = instr->original_map();
4529 Handle<Map> to_map = instr->transitioned_map();
4530 ElementsKind from_kind = instr->from_kind();
4531 ElementsKind to_kind = instr->to_kind();
4533 Label not_applicable;
4534 __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
4535 __ j(not_equal, ¬_applicable);
4536 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4537 Register new_map_reg = ToRegister(instr->new_map_temp());
4538 __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
4539 __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
    __ RecordWriteForMap(object_reg, new_map_reg, ToRegister(instr->temp()),
                         kDontSaveFPRegs);
  } else {
4544 DCHECK(object_reg.is(rax));
4545 DCHECK(ToRegister(instr->context()).is(rsi));
4546 PushSafepointRegistersScope scope(this);
4547 __ Move(rbx, to_map);
4548 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
4551 RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
4553 __ bind(¬_applicable);
4557 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4558 Register object = ToRegister(instr->object());
4559 Register temp = ToRegister(instr->temp());
4560 Label no_memento_found;
4561 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4562 DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
4563 __ bind(&no_memento_found);
4567 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4568 DCHECK(ToRegister(instr->context()).is(rsi));
4569 DCHECK(ToRegister(instr->left()).is(rdx));
4570 DCHECK(ToRegister(instr->right()).is(rax));
4571 StringAddStub stub(isolate(),
4572 instr->hydrogen()->flags(),
4573 instr->hydrogen()->pretenure_flag());
4574 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4578 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4579 class DeferredStringCharCodeAt final : public LDeferredCode {
4581 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4582 : LDeferredCode(codegen), instr_(instr) { }
4583 void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharCodeAt* instr_;
  };
4590 DeferredStringCharCodeAt* deferred =
4591 new(zone()) DeferredStringCharCodeAt(this, instr);
4593 StringCharLoadGenerator::Generate(masm(),
4594 ToRegister(instr->string()),
4595 ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
4598 __ bind(deferred->exit());
4602 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4603 Register string = ToRegister(instr->string());
4604 Register result = ToRegister(instr->result());
4606 // TODO(3095996): Get rid of this. For now, we need to make the
4607 // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);
4611 PushSafepointRegistersScope scope(this);
4613 // Push the index as a smi. This is safe because of the checks in
4614 // DoStringCharCodeAt above.
4615 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
4616 if (instr->index()->IsConstantOperand()) {
4617 int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4618 __ Push(Smi::FromInt(const_index));
4620 Register index = ToRegister(instr->index());
    __ Integer32ToSmi(index, index);
    __ Push(index);
  }
4624 CallRuntimeFromDeferred(
4625 Runtime::kStringCharCodeAtRT, 2, instr, instr->context());
4627 __ SmiToInteger32(rax, rax);
4628 __ StoreToSafepointRegisterSlot(result, rax);
4632 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4633 class DeferredStringCharFromCode final : public LDeferredCode {
4635 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4636 : LDeferredCode(codegen), instr_(instr) { }
4637 void Generate() override {
4638 codegen()->DoDeferredStringCharFromCode(instr_);
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharFromCode* instr_;
  };
4646 DeferredStringCharFromCode* deferred =
4647 new(zone()) DeferredStringCharFromCode(this, instr);
4649 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4650 Register char_code = ToRegister(instr->char_code());
4651 Register result = ToRegister(instr->result());
4652 DCHECK(!char_code.is(result));
4654 __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
4655 __ j(above, deferred->entry());
4656 __ movsxlq(char_code, char_code);
4657 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4658 __ movp(result, FieldOperand(result,
4659 char_code, times_pointer_size,
4660 FixedArray::kHeaderSize));
4661 __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
4662 __ j(equal, deferred->entry());
4663 __ bind(deferred->exit());
4667 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4668 Register char_code = ToRegister(instr->char_code());
4669 Register result = ToRegister(instr->result());
4671 // TODO(3095996): Get rid of this. For now, we need to make the
4672 // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);
4676 PushSafepointRegistersScope scope(this);
  __ Integer32ToSmi(char_code, char_code);
  __ Push(char_code);
4679 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4680 __ StoreToSafepointRegisterSlot(result, rax);
4684 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4685 LOperand* input = instr->value();
4686 DCHECK(input->IsRegister() || input->IsStackSlot());
4687 LOperand* output = instr->result();
4688 DCHECK(output->IsDoubleRegister());
4689 if (input->IsRegister()) {
    __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
  } else {
    __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
  }
}
4697 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4698 LOperand* input = instr->value();
4699 LOperand* output = instr->result();
4701 __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
4705 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4706 class DeferredNumberTagI final : public LDeferredCode {
4708 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4709 : LDeferredCode(codegen), instr_(instr) { }
4710 void Generate() override {
4711 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                       instr_->temp2(), SIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagI* instr_;
  };
4720 LOperand* input = instr->value();
4721 DCHECK(input->IsRegister() && input->Equals(instr->result()));
4722 Register reg = ToRegister(input);
4724 if (SmiValuesAre32Bits()) {
4725 __ Integer32ToSmi(reg, reg);
4727 DCHECK(SmiValuesAre31Bits());
4728 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4729 __ Integer32ToSmi(reg, reg);
4730 __ j(overflow, deferred->entry());
4731 __ bind(deferred->exit());
4736 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4737 class DeferredNumberTagU final : public LDeferredCode {
4739 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4740 : LDeferredCode(codegen), instr_(instr) { }
4741 void Generate() override {
4742 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                       instr_->temp2(), UNSIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagU* instr_;
  };
4751 LOperand* input = instr->value();
4752 DCHECK(input->IsRegister() && input->Equals(instr->result()));
4753 Register reg = ToRegister(input);
4755 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4756 __ cmpl(reg, Immediate(Smi::kMaxValue));
4757 __ j(above, deferred->entry());
4758 __ Integer32ToSmi(reg, reg);
4759 __ bind(deferred->exit());
void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp1,
                                     LOperand* temp2,
                                     IntegerSignedness signedness) {
  Label done, slow;
4769 Register reg = ToRegister(value);
4770 Register tmp = ToRegister(temp1);
4771 XMMRegister temp_xmm = ToDoubleRegister(temp2);
4773 // Load value into temp_xmm which will be preserved across potential call to
4774 // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
4775 // XMM registers on x64).
4776 if (signedness == SIGNED_INT32) {
4777 DCHECK(SmiValuesAre31Bits());
4778 // There was overflow, so bits 30 and 31 of the original integer
4779 // disagree. Try to allocate a heap number in new space and store
4780 // the value in there. If that fails, call the runtime system.
4781 __ SmiToInteger32(reg, reg);
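    // The smi tagging dropped the original bit 31; since bits 30 and 31
    // disagreed, flipping the sign bit restores the original input value.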
4782 __ xorl(reg, Immediate(0x80000000));
4783 __ cvtlsi2sd(temp_xmm, reg);
4785 DCHECK(signedness == UNSIGNED_INT32);
4786 __ LoadUint32(temp_xmm, reg);
4789 if (FLAG_inline_new) {
4790 __ AllocateHeapNumber(reg, tmp, &slow);
    __ jmp(&done, kPointerSize == kInt64Size ? Label::kNear : Label::kFar);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
4797 // Put a valid pointer value in the stack slot where the result
4798 // register is stored, as this register is in the pointer map, but contains
    // an integer value.
    __ Set(reg, 0);
4802 // Preserve the value of all registers.
4803 PushSafepointRegistersScope scope(this);
4805 // NumberTagIU uses the context from the frame, rather than
4806 // the environment's HContext or HInlinedContext value.
4807 // They only call Runtime::kAllocateHeapNumber.
4808 // The corresponding HChange instructions are added in a phase that does
4809 // not have easy access to the local context.
4810 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
4811 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4812 RecordSafepointWithRegisters(
4813 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(reg, rax);
  }

  // Done. Put the value in temp_xmm into the value of the allocated heap
  // number.
  __ bind(&done);
4820 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
4824 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4825 class DeferredNumberTagD final : public LDeferredCode {
4827 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4828 : LDeferredCode(codegen), instr_(instr) { }
4829 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagD* instr_;
  };
4836 XMMRegister input_reg = ToDoubleRegister(instr->value());
4837 Register reg = ToRegister(instr->result());
4838 Register tmp = ToRegister(instr->temp());
4840 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4841 if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
4846 __ bind(deferred->exit());
4847 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
4851 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4852 // TODO(3095996): Get rid of this. For now, we need to make the
4853 // result register contain a valid pointer because it is already
4854 // contained in the register pointer map.
4855 Register reg = ToRegister(instr->result());
4856 __ Move(reg, Smi::FromInt(0));
4859 PushSafepointRegistersScope scope(this);
4860 // NumberTagD uses the context from the frame, rather than
4861 // the environment's HContext or HInlinedContext value.
4862 // They only call Runtime::kAllocateHeapNumber.
4863 // The corresponding HChange instructions are added in a phase that does
4864 // not have easy access to the local context.
4865 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
4866 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4867 RecordSafepointWithRegisters(
4868 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4869 __ movp(kScratchRegister, rax);
4871 __ movp(reg, kScratchRegister);
4875 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4876 HChange* hchange = instr->hydrogen();
4877 Register input = ToRegister(instr->value());
4878 Register output = ToRegister(instr->result());
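  // A uint32 input cannot rely on the overflow flag (its top bit is not a
  // sign bit), so its smi validity is checked explicitly before tagging;
  // signed inputs simply tag and deoptimize on overflow.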
4879 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4880 hchange->value()->CheckFlag(HValue::kUint32)) {
4881 Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
4882 DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kOverflow);
4884 __ Integer32ToSmi(output, input);
4885 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4886 !hchange->value()->CheckFlag(HValue::kUint32)) {
4887 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
4892 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4893 DCHECK(instr->value()->Equals(instr->result()));
4894 Register input = ToRegister(instr->value());
4895 if (instr->needs_check()) {
4896 Condition is_smi = __ CheckSmi(input);
    DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kNotASmi);
  } else {
    __ AssertSmi(input);
  }
4901 __ SmiToInteger32(input, input);
4905 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4906 XMMRegister result_reg, NumberUntagDMode mode) {
4907 bool can_convert_undefined_to_nan =
4908 instr->hydrogen()->can_convert_undefined_to_nan();
4909 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4911 Label convert, load_smi, done;
4913 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4915 __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
4917 // Heap number map check.
4918 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
4919 Heap::kHeapNumberMapRootIndex);
4921 // On x64 it is safe to load at heap number offset before evaluating the map
4922 // check, since all heap objects are at least two words long.
4923 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
4925 if (can_convert_undefined_to_nan) {
      __ j(not_equal, &convert, Label::kNear);
    } else {
      DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
    }
4931 if (deoptimize_on_minus_zero) {
4932 XMMRegister xmm_scratch = double_scratch0();
4933 __ xorps(xmm_scratch, xmm_scratch);
4934 __ ucomisd(xmm_scratch, result_reg);
4935 __ j(not_equal, &done, Label::kNear);
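    // The value compared equal to +0; use the sign bit (via movmskpd) to
    // tell -0 apart and deoptimize on it.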
4936 __ movmskpd(kScratchRegister, result_reg);
4937 __ testq(kScratchRegister, Immediate(1));
4938 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
4940 __ jmp(&done, Label::kNear);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
4945 // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
4946 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
4947 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
4949 __ pcmpeqd(result_reg, result_reg);
      __ jmp(&done, Label::kNear);
    }
  } else {
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }

  // Smi to XMM conversion
  __ bind(&load_smi);
4958 __ SmiToInteger32(kScratchRegister, input_reg);
  __ Cvtlsi2sd(result_reg, kScratchRegister);
  __ bind(&done);
}
4964 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
4965 Register input_reg = ToRegister(instr->value());
4967 if (instr->truncating()) {
4968 Label no_heap_number, check_bools, check_false;
4970 // Heap number map check.
4971 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
4972 Heap::kHeapNumberMapRootIndex);
4973 __ j(not_equal, &no_heap_number, Label::kNear);
    __ TruncateHeapNumberToI(input_reg, input_reg);
    __ jmp(done);
4977 __ bind(&no_heap_number);
4978 // Check for Oddballs. Undefined/False is converted to zero and True to one
4979 // for truncating conversions.
4980 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
4981 __ j(not_equal, &check_bools, Label::kNear);
    __ Set(input_reg, 0);
    __ jmp(done);
4985 __ bind(&check_bools);
4986 __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
4987 __ j(not_equal, &check_false, Label::kNear);
    __ Set(input_reg, 1);
    __ jmp(done);
4991 __ bind(&check_false);
4992 __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
4993 DeoptimizeIf(not_equal, instr,
4994 Deoptimizer::kNotAHeapNumberUndefinedBoolean);
    __ Set(input_reg, 0);
  } else {
4997 XMMRegister scratch = ToDoubleRegister(instr->temp());
4998 DCHECK(!scratch.is(xmm0));
4999 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
5000 Heap::kHeapNumberMapRootIndex);
5001 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
5002 __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
5003 __ cvttsd2si(input_reg, xmm0);
5004 __ Cvtlsi2sd(scratch, input_reg);
5005 __ ucomisd(xmm0, scratch);
5006 DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
5007 DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
5008 if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
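      // A zero integer result may have come from -0; inspect the sign bit of
      // the original double and deoptimize in that case.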
5009 __ testl(input_reg, input_reg);
5010 __ j(not_zero, done);
5011 __ movmskpd(input_reg, xmm0);
5012 __ andl(input_reg, Immediate(1));
5013 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
5019 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5020 class DeferredTaggedToI final : public LDeferredCode {
5022 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
5023 : LDeferredCode(codegen), instr_(instr) { }
5024 void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
    LInstruction* instr() override { return instr_; }

   private:
    LTaggedToI* instr_;
  };
5031 LOperand* input = instr->value();
5032 DCHECK(input->IsRegister());
5033 DCHECK(input->Equals(instr->result()));
5034 Register input_reg = ToRegister(input);
5036 if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiToInteger32(input_reg, input_reg);
  } else {
5039 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
5040 __ JumpIfNotSmi(input_reg, deferred->entry());
5041 __ SmiToInteger32(input_reg, input_reg);
5042 __ bind(deferred->exit());
5047 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5048 LOperand* input = instr->value();
5049 DCHECK(input->IsRegister());
5050 LOperand* result = instr->result();
5051 DCHECK(result->IsDoubleRegister());
5053 Register input_reg = ToRegister(input);
5054 XMMRegister result_reg = ToDoubleRegister(result);
5056 HValue* value = instr->hydrogen()->value();
5057 NumberUntagDMode mode = value->representation().IsSmi()
5058 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5060 EmitNumberUntagD(instr, input_reg, result_reg, mode);
5064 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5065 LOperand* input = instr->value();
5066 DCHECK(input->IsDoubleRegister());
5067 LOperand* result = instr->result();
5068 DCHECK(result->IsRegister());
5070 XMMRegister input_reg = ToDoubleRegister(input);
5071 Register result_reg = ToRegister(result);
5073 if (instr->truncating()) {
5074 __ TruncateDoubleToI(result_reg, input_reg);
5076 Label lost_precision, is_nan, minus_zero, done;
5077 XMMRegister xmm_scratch = double_scratch0();
5078 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5079 __ DoubleToI(result_reg, input_reg, xmm_scratch,
5080 instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
5081 &is_nan, &minus_zero, dist);
5082 __ jmp(&done, dist);
5083 __ bind(&lost_precision);
    DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
    __ bind(&is_nan);
5086 DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
5087 __ bind(&minus_zero);
    DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
    __ bind(&done);
  }
}
5094 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5095 LOperand* input = instr->value();
5096 DCHECK(input->IsDoubleRegister());
5097 LOperand* result = instr->result();
5098 DCHECK(result->IsRegister());
5100 XMMRegister input_reg = ToDoubleRegister(input);
5101 Register result_reg = ToRegister(result);
5103 Label lost_precision, is_nan, minus_zero, done;
5104 XMMRegister xmm_scratch = double_scratch0();
5105 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5106 __ DoubleToI(result_reg, input_reg, xmm_scratch,
               instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
               &minus_zero, dist);
5109 __ jmp(&done, dist);
5110 __ bind(&lost_precision);
  DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
  __ bind(&is_nan);
5113 DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
5114 __ bind(&minus_zero);
  DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
  __ bind(&done);
5117 __ Integer32ToSmi(result_reg, result_reg);
5118 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
5122 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5123 LOperand* input = instr->value();
5124 Condition cc = masm()->CheckSmi(ToRegister(input));
5125 DeoptimizeIf(NegateCondition(cc), instr, Deoptimizer::kNotASmi);
5129 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5130 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5131 LOperand* input = instr->value();
5132 Condition cc = masm()->CheckSmi(ToRegister(input));
5133 DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
5138 void LCodeGen::DoCheckArrayBufferNotNeutered(
5139 LCheckArrayBufferNotNeutered* instr) {
5140 Register view = ToRegister(instr->view());
5142 __ movp(kScratchRegister,
5143 FieldOperand(view, JSArrayBufferView::kBufferOffset));
5144 __ testb(FieldOperand(kScratchRegister, JSArrayBuffer::kBitFieldOffset),
5145 Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
5146 DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
5150 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5151 Register input = ToRegister(instr->value());
5153 __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
5158 instr->hydrogen()->GetCheckInterval(&first, &last);
5160 __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
5161 Immediate(static_cast<int8_t>(first)));
5163 // If there is only one type in the interval check for equality.
5164 if (first == last) {
      DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
    } else {
      DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
5168 // Omit check for the last type.
5169 if (last != LAST_TYPE) {
5170 __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
5171 Immediate(static_cast<int8_t>(last)));
        DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5180 if (base::bits::IsPowerOfTwo32(mask)) {
5181 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
               Immediate(mask));
5184 DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
                   Deoptimizer::kWrongInstanceType);
    } else {
5187 __ movzxbl(kScratchRegister,
5188 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
5189 __ andb(kScratchRegister, Immediate(mask));
5190 __ cmpb(kScratchRegister, Immediate(tag));
5191 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
5197 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5198 Register reg = ToRegister(instr->value());
5199 __ Cmp(reg, instr->hydrogen()->object().handle());
5200 DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
5204 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
    PushSafepointRegistersScope scope(this);
    __ Push(object);
    __ Set(rsi, 0);
5209 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5210 RecordSafepointWithRegisters(
5211 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5213 __ testp(rax, Immediate(kSmiTagMask));
5215 DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
5219 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5220 class DeferredCheckMaps final : public LDeferredCode {
5222 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5223 : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
5226 void Generate() override {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
5229 Label* check_maps() { return &check_maps_; }
    LInstruction* instr() override { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };
5238 if (instr->hydrogen()->IsStabilityCheck()) {
5239 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5240 for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }
5246 LOperand* input = instr->value();
5247 DCHECK(input->IsRegister());
5248 Register reg = ToRegister(input);
5250 DeferredCheckMaps* deferred = NULL;
5251 if (instr->hydrogen()->HasMigrationTarget()) {
5252 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
5258 for (int i = 0; i < maps->size() - 1; i++) {
5259 Handle<Map> map = maps->at(i).handle();
5260 __ CompareMap(reg, map);
    __ j(equal, &success, Label::kNear);
  }

  Handle<Map> map = maps->at(maps->size() - 1).handle();
5265 __ CompareMap(reg, map);
5266 if (instr->hydrogen()->HasMigrationTarget()) {
    __ j(not_equal, deferred->entry());
  } else {
    DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
  }

  __ bind(&success);
}
5276 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5277 XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
5278 XMMRegister xmm_scratch = double_scratch0();
5279 Register result_reg = ToRegister(instr->result());
5280 __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
5284 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5285 DCHECK(instr->unclamped()->Equals(instr->result()));
5286 Register value_reg = ToRegister(instr->result());
5287 __ ClampUint8(value_reg);
5291 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5292 DCHECK(instr->unclamped()->Equals(instr->result()));
5293 Register input_reg = ToRegister(instr->unclamped());
5294 XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
5295 XMMRegister xmm_scratch = double_scratch0();
5296 Label is_smi, done, heap_number;
5297 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5298 __ JumpIfSmi(input_reg, &is_smi, dist);
5300 // Check for heap number
5301 __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5302 factory()->heap_number_map());
5303 __ j(equal, &heap_number, Label::kNear);
  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
5307 __ Cmp(input_reg, factory()->undefined_value());
5308 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
5309 __ xorl(input_reg, input_reg);
5310 __ jmp(&done, Label::kNear);
5313 __ bind(&heap_number);
5314 __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
5315 __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // smi
  __ bind(&is_smi);
5320 __ SmiToInteger32(input_reg, input_reg);
  __ ClampUint8(input_reg);

  __ bind(&done);
}
5327 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5328 XMMRegister value_reg = ToDoubleRegister(instr->value());
5329 Register result_reg = ToRegister(instr->result());
5330 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5331 __ movq(result_reg, value_reg);
    __ shrq(result_reg, Immediate(32));
  } else {
    __ movd(result_reg, value_reg);
  }
}
5339 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5340 Register hi_reg = ToRegister(instr->hi());
5341 Register lo_reg = ToRegister(instr->lo());
5342 XMMRegister result_reg = ToDoubleRegister(instr->result());
5343 XMMRegister xmm_scratch = double_scratch0();
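  // Assemble the double from its two 32-bit halves: the hi word is shifted
  // into bits 63..32 and then combined with the lo word.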
5344 __ movd(result_reg, hi_reg);
5345 __ psllq(result_reg, 32);
5346 __ movd(xmm_scratch, lo_reg);
5347 __ orps(result_reg, xmm_scratch);
5351 void LCodeGen::DoAllocate(LAllocate* instr) {
5352 class DeferredAllocate final : public LDeferredCode {
5354 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5355 : LDeferredCode(codegen), instr_(instr) { }
5356 void Generate() override { codegen()->DoDeferredAllocate(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LAllocate* instr_;
  };
5363 DeferredAllocate* deferred =
5364 new(zone()) DeferredAllocate(this, instr);
5366 Register result = ToRegister(instr->result());
5367 Register temp = ToRegister(instr->temp());
5369 // Allocate memory for the object.
5370 AllocationFlags flags = TAG_OBJECT;
5371 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5372 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5374 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5375 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5376 flags = static_cast<AllocationFlags>(flags | PRETENURE);
5379 if (instr->size()->IsConstantOperand()) {
5380 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5381 if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
    } else {
      __ jmp(deferred->entry());
    }
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  }
5391 __ bind(deferred->exit());
5393 if (instr->hydrogen()->MustPrefillWithFiller()) {
5394 if (instr->size()->IsConstantOperand()) {
5395 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ movl(temp, Immediate((size / kPointerSize) - 1));
    } else {
      temp = ToRegister(instr->size());
      __ sarp(temp, Immediate(kPointerSizeLog2));
      __ decl(temp);
    }
    Label loop;
    __ bind(&loop);
5404 __ Move(FieldOperand(result, temp, times_pointer_size, 0),
            isolate()->factory()->one_pointer_filler_map());
    __ decl(temp);
5407 __ j(not_zero, &loop);
5412 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5413 Register result = ToRegister(instr->result());
5415 // TODO(3095996): Get rid of this. For now, we need to make the
5416 // result register contain a valid pointer because it is already
5417 // contained in the register pointer map.
5418 __ Move(result, Smi::FromInt(0));
5420 PushSafepointRegistersScope scope(this);
5421 if (instr->size()->IsRegister()) {
5422 Register size = ToRegister(instr->size());
5423 DCHECK(!size.is(result));
    __ Integer32ToSmi(size, size);
    __ Push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Push(Smi::FromInt(size));
  }

  int flags = 0;
5432 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5433 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
5438 __ Push(Smi::FromInt(flags));
5440 CallRuntimeFromDeferred(
5441 Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5442 __ StoreToSafepointRegisterSlot(result, rax);
5446 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5447 DCHECK(ToRegister(instr->value()).is(rax));
5449 CallRuntime(Runtime::kToFastProperties, 1, instr);
5453 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  Label materialized;
5456 // Registers will be used as follows:
5457 // rcx = literals array.
5458 // rbx = regexp literal.
5459 // rax = regexp literal clone.
5460 int literal_offset =
5461 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5462 __ Move(rcx, instr->hydrogen()->literals());
5463 __ movp(rbx, FieldOperand(rcx, literal_offset));
5464 __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
5465 __ j(not_equal, &materialized, Label::kNear);
5467 // Create regexp literal using runtime function
  // Result will be in rax.
  __ Push(rcx);
5470 __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
5471 __ Push(instr->hydrogen()->pattern());
5472 __ Push(instr->hydrogen()->flags());
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ movp(rbx, rax);
5476 __ bind(&materialized);
5477 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5478 Label allocated, runtime_allocate;
5479 __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
5480 __ jmp(&allocated, Label::kNear);
  __ bind(&runtime_allocate);
  __ Push(rbx);
  __ Push(Smi::FromInt(size));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ Pop(rbx);
5488 __ bind(&allocated);
5489 // Copy the content into the newly allocated memory.
5490 // (Unroll copy loop once for better throughput).
5491 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5492 __ movp(rdx, FieldOperand(rbx, i));
5493 __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
5494 __ movp(FieldOperand(rax, i), rdx);
5495 __ movp(FieldOperand(rax, i + kPointerSize), rcx);
5496 }
5497 if ((size % (2 * kPointerSize)) != 0) {
5498 __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
5499 __ movp(FieldOperand(rax, size - kPointerSize), rdx);
5500 }
5501 }
5504 void LCodeGen::DoTypeof(LTypeof* instr) {
5505 DCHECK(ToRegister(instr->context()).is(rsi));
5506 DCHECK(ToRegister(instr->value()).is(rbx));
5507 Label end, do_call;
5508 Register value_register = ToRegister(instr->value());
5509 __ JumpIfNotSmi(value_register, &do_call);
5510 __ Move(rax, isolate()->factory()->number_string());
5511 __ jmp(&end);
5512 __ bind(&do_call);
5513 TypeofStub stub(isolate());
5514 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5515 __ bind(&end);
5516 }
5519 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
5520 DCHECK(!operand->IsDoubleRegister());
5521 if (operand->IsConstantOperand()) {
5522 __ Push(ToHandle(LConstantOperand::cast(operand)));
5523 } else if (operand->IsRegister()) {
5524 __ Push(ToRegister(operand));
5525 } else {
5526 __ Push(ToOperand(operand));
5527 }
5528 }
5531 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5532 Register input = ToRegister(instr->value());
5533 Condition final_branch_condition = EmitTypeofIs(instr, input);
5534 if (final_branch_condition != no_condition) {
5535 EmitBranch(instr, final_branch_condition);
5536 }
5537 }
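// Emits the comparison for a typeof test against instr->type_literal() and
// returns the condition the caller should branch on. When the literal names
// no known type, no_condition is returned and EmitTypeofIs itself emits an
// unconditional jump to the false label.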
5540 Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
5541 Label* true_label = instr->TrueLabel(chunk_);
5542 Label* false_label = instr->FalseLabel(chunk_);
5543 Handle<String> type_name = instr->type_literal();
5544 int left_block = instr->TrueDestination(chunk_);
5545 int right_block = instr->FalseDestination(chunk_);
5546 int next_block = GetNextEmittedBlock();
5548 Label::Distance true_distance = left_block == next_block ? Label::kNear
5549 : Label::kFar;
5550 Label::Distance false_distance = right_block == next_block ? Label::kNear
5551 : Label::kFar;
5552 Condition final_branch_condition = no_condition;
5553 Factory* factory = isolate()->factory();
5554 if (String::Equals(type_name, factory->number_string())) {
5555 __ JumpIfSmi(input, true_label, true_distance);
5556 __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
5557 Heap::kHeapNumberMapRootIndex);
5559 final_branch_condition = equal;
5561 } else if (String::Equals(type_name, factory->string_string())) {
5562 __ JumpIfSmi(input, false_label, false_distance);
5563 __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
5564 final_branch_condition = below;
5566 } else if (String::Equals(type_name, factory->symbol_string())) {
5567 __ JumpIfSmi(input, false_label, false_distance);
5568 __ CmpObjectType(input, SYMBOL_TYPE, input);
5569 final_branch_condition = equal;
5571 } else if (String::Equals(type_name, factory->boolean_string())) {
5572 __ CompareRoot(input, Heap::kTrueValueRootIndex);
5573 __ j(equal, true_label, true_distance);
5574 __ CompareRoot(input, Heap::kFalseValueRootIndex);
5575 final_branch_condition = equal;
5577 } else if (String::Equals(type_name, factory->undefined_string())) {
5578 __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
5579 __ j(equal, true_label, true_distance);
5580 __ JumpIfSmi(input, false_label, false_distance);
5581 // Check for undetectable objects => true.
5582 __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
5583 __ testb(FieldOperand(input, Map::kBitFieldOffset),
5584 Immediate(1 << Map::kIsUndetectable));
5585 final_branch_condition = not_zero;
5587 } else if (String::Equals(type_name, factory->function_string())) {
5588 __ JumpIfSmi(input, false_label, false_distance);
5589 // Check for callable and not undetectable objects => true.
5590 __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
5591 __ movzxbl(input, FieldOperand(input, Map::kBitFieldOffset));
5592 __ andb(input,
5593 Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5594 __ cmpb(input, Immediate(1 << Map::kIsCallable));
5595 final_branch_condition = equal;
5597 } else if (String::Equals(type_name, factory->object_string())) {
5598 __ JumpIfSmi(input, false_label, false_distance);
5599 __ CompareRoot(input, Heap::kNullValueRootIndex);
5600 __ j(equal, true_label, true_distance);
5601 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
5602 __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, input);
5603 __ j(below, false_label, false_distance);
5604 // Check for callable or undetectable objects => false.
5605 __ testb(FieldOperand(input, Map::kBitFieldOffset),
5606 Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5607 final_branch_condition = zero;
5609 // clang-format off
5610 #define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
5611 } else if (String::Equals(type_name, factory->type##_string())) { \
5612 __ JumpIfSmi(input, false_label, false_distance); \
5613 __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset), \
5614 Heap::k##Type##MapRootIndex); \
5615 final_branch_condition = equal;
5616 SIMD128_TYPES(SIMD128_TYPE)
5617 #undef SIMD128_TYPE
5618 // clang-format on
5620 } else {
5621 __ jmp(false_label, false_distance);
5622 }
5624 return final_branch_condition;
5625 }
5628 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5629 Register temp = ToRegister(instr->temp());
5631 EmitIsConstructCall(temp);
5632 EmitBranch(instr, equal);
5633 }
5636 void LCodeGen::EmitIsConstructCall(Register temp) {
5637 // Get the frame pointer for the calling frame.
5638 __ movp(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
5640 // Skip the arguments adaptor frame if it exists.
5641 Label check_frame_marker;
5642 __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
5643 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
5644 __ j(not_equal, &check_frame_marker, Label::kNear);
5645 __ movp(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
5647 // Check the marker in the calling frame.
5648 __ bind(&check_frame_marker);
5649 __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
5650 Smi::FromInt(StackFrame::CONSTRUCT));
5651 }
5654 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5655 if (info()->ShouldEnsureSpaceForLazyDeopt()) {
5656 // Ensure that we have enough space after the previous lazy-bailout
5657 // instruction for patching the code here.
5658 int current_pc = masm()->pc_offset();
5659 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5660 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5661 __ Nop(padding_size);
5662 }
5663 }
5664 last_lazy_deopt_pc_ = masm()->pc_offset();
5665 }
5668 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5669 last_lazy_deopt_pc_ = masm()->pc_offset();
5670 DCHECK(instr->HasEnvironment());
5671 LEnvironment* env = instr->environment();
5672 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5673 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5674 }
5677 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5678 Deoptimizer::BailoutType type = instr->hydrogen()->type();
5679 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5680 // needed return address), even though the implementation of LAZY and EAGER is
5681 // now identical. When LAZY is eventually completely folded into EAGER, remove
5682 // the special case below.
5683 if (info()->IsStub() && type == Deoptimizer::EAGER) {
5684 type = Deoptimizer::LAZY;
5685 }
5686 DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
5687 }
5690 void LCodeGen::DoDummy(LDummy* instr) {
5691 // Nothing to see here, move on!
5692 }
5695 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5696 // Nothing to see here, move on!
5697 }
5700 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5701 PushSafepointRegistersScope scope(this);
5702 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
5703 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5704 RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
5705 DCHECK(instr->HasEnvironment());
5706 LEnvironment* env = instr->environment();
5707 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5708 }
5711 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5712 class DeferredStackCheck final : public LDeferredCode {
5713 public:
5714 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5715 : LDeferredCode(codegen), instr_(instr) { }
5716 void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
5717 LInstruction* instr() override { return instr_; }
5719 private:
5720 LStackCheck* instr_;
5721 };
5723 DCHECK(instr->HasEnvironment());
5724 LEnvironment* env = instr->environment();
5725 // There is no LLazyBailout instruction for stack-checks. We have to
5726 // prepare for lazy deoptimization explicitly here.
5727 if (instr->hydrogen()->is_function_entry()) {
5728 // Perform stack overflow check.
5729 Label done;
5730 __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
5731 __ j(above_equal, &done, Label::kNear);
5733 DCHECK(instr->context()->IsRegister());
5734 DCHECK(ToRegister(instr->context()).is(rsi));
5735 CallCode(isolate()->builtins()->StackCheck(),
5736 RelocInfo::CODE_TARGET,
5737 instr);
5738 __ bind(&done);
5739 } else {
5740 DCHECK(instr->hydrogen()->is_backwards_branch());
5741 // Perform stack overflow check if this goto needs it before jumping.
5742 DeferredStackCheck* deferred_stack_check =
5743 new(zone()) DeferredStackCheck(this, instr);
5744 __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
5745 __ j(below, deferred_stack_check->entry());
5746 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5747 __ bind(instr->done_label());
5748 deferred_stack_check->SetExit(instr->done_label());
5749 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5750 // Don't record a deoptimization index for the safepoint here.
5751 // This will be done explicitly when emitting the call and the safepoint in
5752 // the deferred code.
5753 }
5754 }
5757 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5758 // This is a pseudo-instruction that ensures that the environment here is
5759 // properly registered for deoptimization and records the assembler's PC
5760 // offset.
5761 LEnvironment* environment = instr->environment();
5763 // If the environment were already registered, we would have no way of
5764 // backpatching it with the spill slot operands.
5765 DCHECK(!environment->HasBeenRegistered());
5766 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5768 GenerateOsrPrologue();
5769 }
5772 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5773 DCHECK(ToRegister(instr->context()).is(rsi));
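// The enumerated object arrives in rax; deoptimize when it is a smi or a
// JS proxy, since only ordinary spec objects can take the enum-cache fast
// path below.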
5775 Condition cc = masm()->CheckSmi(rax);
5776 DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
5778 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5779 __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
5780 DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
5782 Label use_cache, call_runtime;
5783 Register null_value = rdi;
5784 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5785 __ CheckEnumCache(null_value, &call_runtime);
5787 __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
5788 __ jmp(&use_cache, Label::kNear);
5790 // Get the set of properties to enumerate.
5791 __ bind(&call_runtime);
5792 __ Push(rax);
5793 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5795 __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
5796 Heap::kMetaMapRootIndex);
5797 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
5798 __ bind(&use_cache);
5799 }
5802 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5803 Register map = ToRegister(instr->map());
5804 Register result = ToRegister(instr->result());
5805 Label load_cache, done;
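// A zero EnumLength means there is no enum cache for this map; fall back to
// the empty fixed array, otherwise load the cache from the instance
// descriptors.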
5806 __ EnumLength(result, map);
5807 __ Cmp(result, Smi::FromInt(0));
5808 __ j(not_equal, &load_cache, Label::kNear);
5809 __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
5810 __ jmp(&done, Label::kNear);
5811 __ bind(&load_cache);
5812 __ LoadInstanceDescriptors(map, result);
5813 __ movp(result,
5814 FieldOperand(result, DescriptorArray::kEnumCacheOffset));
5815 __ movp(result,
5816 FieldOperand(result, FixedArray::SizeFor(instr->idx())));
5817 __ bind(&done);
5818 Condition cc = masm()->CheckSmi(result);
5819 DeoptimizeIf(cc, instr, Deoptimizer::kNoCache);
5820 }
5823 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5824 Register object = ToRegister(instr->value());
5825 __ cmpp(ToRegister(instr->map()),
5826 FieldOperand(object, HeapObject::kMapOffset));
5827 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
5828 }
5831 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5832 Register object,
5833 Register index) {
5834 PushSafepointRegistersScope scope(this);
5835 __ Push(object);
5836 __ Push(index);
5837 __ xorp(rsi, rsi);
5838 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5839 RecordSafepointWithRegisters(
5840 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5841 __ StoreToSafepointRegisterSlot(object, rax);
5842 }
5845 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5846 class DeferredLoadMutableDouble final : public LDeferredCode {
5847 public:
5848 DeferredLoadMutableDouble(LCodeGen* codegen,
5849 LLoadFieldByIndex* instr,
5850 Register object,
5851 Register index)
5852 : LDeferredCode(codegen),
5853 instr_(instr), object_(object), index_(index) {}
5857 void Generate() override {
5858 codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
5859 }
5860 LInstruction* instr() override { return instr_; }
5862 private:
5863 LLoadFieldByIndex* instr_;
5864 Register object_;
5865 Register index_;
5866 };
5868 Register object = ToRegister(instr->object());
5869 Register index = ToRegister(instr->index());
5871 DeferredLoadMutableDouble* deferred;
5872 deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);
5874 Label out_of_object, done;
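// The smi index carries extra encoding: its lowest payload bit appears to
// flag a field stored as a mutable heap number (handled via the deferred
// runtime call), and once that bit is shifted out the sign of the index
// selects in-object versus out-of-object properties.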
5875 __ Move(kScratchRegister, Smi::FromInt(1));
5876 __ testp(index, kScratchRegister);
5877 __ j(not_zero, deferred->entry());
5879 __ sarp(index, Immediate(1));
5881 __ SmiToInteger32(index, index);
5882 __ cmpl(index, Immediate(0));
5883 __ j(less, &out_of_object, Label::kNear);
5884 __ movp(object, FieldOperand(object,
5885 index,
5886 times_pointer_size,
5887 JSObject::kHeaderSize));
5888 __ jmp(&done, Label::kNear);
5890 __ bind(&out_of_object);
5891 __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
5892 __ negl(index);
5893 // Index is now equal to out of object property index plus 1.
5894 __ movp(object, FieldOperand(object,
5895 index,
5896 times_pointer_size,
5897 FixedArray::kHeaderSize - kPointerSize));
5898 __ bind(deferred->exit());
5899 __ bind(&done);
5900 }
5903 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
5904 Register context = ToRegister(instr->context());
5905 __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), context);
5906 }
5909 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
5910 Handle<ScopeInfo> scope_info = instr->scope_info();
5911 __ Push(scope_info);
5912 __ Push(ToRegister(instr->function()));
5913 CallRuntime(Runtime::kPushBlockContext, 2, instr);
5914 RecordSafepoint(Safepoint::kNoLazyDeopt);
5915 }
5918 #undef __
5920 }  // namespace internal
5921 }  // namespace v8
5923 #endif  // V8_TARGET_ARCH_X64