// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/x64/lithium-codegen-x64.h"

namespace v8 {
namespace internal {

// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const OVERRIDE {}

  void AfterCall() const OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};

#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  return GeneratePrologue() &&
         GenerateBody() &&
         GenerateDeferredCode() &&
         GenerateJumpTable() &&
         GenerateSafepointTable();
}

void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}

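// Touching each page in order matters: Windows commits stack pages lazily,
// one guard page at a time, so the dummy stores below fault the pages in
// sequence and let rsp safely move past them in one large adjustment.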
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ movp(Operand(rsp, offset), rax);
  }
}

void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ movsd(MemOperand(rsp, count * kDoubleSize),
             XMMRegister::FromAllocationIndex(save_iterator.Current()));
    save_iterator.Advance();
    count++;
  }
}

void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
             MemOperand(rsp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}

bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ int3();
    }

    // Sloppy mode functions need to replace the receiver with the global proxy
    // when called as functions (without an explicit receiver object).
    if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
        !info_->is_native()) {
      Label ok;
      StackArgumentsAccessor args(rsp, scope()->num_parameters());
      __ movp(rcx, args.GetReceiverOperand());

      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
      __ j(not_equal, &ok, Label::kNear);

      __ movp(rcx, GlobalObjectOperand());
      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));

      __ movp(args.GetReceiverOperand(), rcx);

      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    DCHECK(!frame_is_built_);
    frame_is_built_ = true;
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->IsCodePreAgingActive());
    }
    info()->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ subp(rsp, Immediate(slots * kPointerSize));
      MakeSureStackPagesMapped(slots * kPointerSize);
      __ Push(rax);
      __ Set(rax, slots);
      __ Set(kScratchRegister, kSlotsZapValue);
      Label loop;
      __ bind(&loop);
      __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
              kScratchRegister);
      __ decl(rax);
      __ j(not_zero, &loop);
      __ Pop(rax);
    } else {
      __ subp(rsp, Immediate(slots * kPointerSize));
      MakeSureStackPagesMapped(slots * kPointerSize);
    }

    if (info()->saves_caller_doubles()) {
      SaveCallerDoubles();
    }
  }

  // Possibly allocate a local context.
  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is still in rdi.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ Push(rdi);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in rax. It replaces the context passed to us.
    // It's saved in the stack and kept live in rsi.
    __ movp(rsi, rax);
    __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);

    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ movp(rax, Operand(rbp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ movp(Operand(rsi, context_offset), rax);
        // Update the write barrier. This clobbers rax and rbx.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}

void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ subp(rsp, Immediate(slots * kPointerSize));
}

void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}

void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
  if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() &&
      instr->hydrogen_value()->representation().IsInteger32() &&
      instr->result()->IsRegister()) {
    __ AssertZeroExtended(ToRegister(instr->result()));
  }

  if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
    // We sign extend the dehoisted key at the definition point when the pointer
    // size is 64-bit. For x32 port, we sign extend the dehoisted key at the use
    // points and MustSignExtendResult is always false. We can't use
    // STATIC_ASSERT here as the pointer size is 32-bit for x32.
    DCHECK(kPointerSize == kInt64Size);
    if (instr->result()->IsRegister()) {
      Register result_reg = ToRegister(instr->result());
      __ movsxlq(result_reg, result_reg);
    } else {
      // Sign extend the 32bit result in the stack slots.
      DCHECK(instr->result()->IsStackSlot());
      Operand src = ToOperand(instr->result());
      __ movsxlq(kScratchRegister, src);
      __ movq(src, kScratchRegister);
    }
  }
}

bool LCodeGen::GenerateJumpTable() {
  Label needs_frame;
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  for (int i = 0; i < jump_table_.length(); i++) {
    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
    __ bind(&table_entry->label);
    Address entry = table_entry->address;
    DeoptComment(table_entry->deopt_info);
    if (table_entry->needs_frame) {
      DCHECK(!info()->saves_caller_doubles());
      __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ movp(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
        __ pushq(rbp);
        __ movp(rbp, rsp);
        __ Push(rsi);
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        DCHECK(info()->IsStub());
        __ Move(rsi, Smi::FromInt(StackFrame::STUB));
        __ Push(rsi);
        __ movp(rsi, MemOperand(rsp, kPointerSize));
        __ call(kScratchRegister);
      }
    } else {
      if (info()->saves_caller_doubles()) {
        DCHECK(info()->IsStub());
        RestoreCallerDoubles();
      }
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
  return !is_aborted();
}

bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that rsi isn't trashed.
        __ pushq(rbp);  // Caller's frame pointer.
        __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
        __ Push(Smi::FromInt(StackFrame::STUB));
        __ leap(rbp, Operand(rsp, 2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        frame_is_built_ = false;
        __ movp(rsp, rbp);
        __ popq(rbp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}

bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}

Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}

XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}

Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}

XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}

bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}

bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
         chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
}

bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}

int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}

int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(SmiValuesAre31Bits() && r.IsSmiOrTagged());
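  // With 31-bit Smis the whole Smi fits in 32 bits: Smi::FromInt(v) is just v
  // shifted left by one (tag bit 0), e.g. Smi::FromInt(3) has bit pattern 0x6.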
  return static_cast<int32_t>(reinterpret_cast<intptr_t>(Smi::FromInt(value)));
}

Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}

double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}

ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}

Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}

static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
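  // E.g. index == -1 yields kPCOnStackSize: with no frame built, that slot
  // sits just above the return address that is still on the stack.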
  return -(index + 1) * kPointerSize + kPCOnStackSize;
}

Operand LCodeGen::ToOperand(LOperand* op) const {
  // Does not handle registers. In X64 assembler, plain registers are not
  // representable as an Operand.
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(rbp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}

void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      DCHECK(translation_size == 1);
      DCHECK(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      DCHECK(translation_size == 2);
      DCHECK(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}

void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               int argc) {
  DCHECK(instr != NULL);
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}

void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
}

void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);
  DCHECK(instr->HasPointerMap());

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
}

void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(rsi)) {
      __ movp(rsi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ movp(rsi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}

void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|
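    // For example, a JS frame with two parameters and one local that deopts
    // with an empty expression stack has translation_size == 3 and
    // height == 1 (parameters are excluded from the output frame height).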

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, environment->zone());
  }
}

void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  DCHECK(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ pushfq();
    __ pushq(rax);
    Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
    __ movl(rax, count_operand);
    __ subl(rax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ movl(rax, Immediate(FLAG_deopt_every_n_times));
    __ movl(count_operand, rax);
    __ popq(rax);
    __ popfq();
    DCHECK(frame_is_built_);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ movl(count_operand, rax);
    __ popq(rax);
    __ popfq();
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) {
      __ j(NegateCondition(cc), &done, Label::kNear);
    }
    __ int3();
    __ bind(&done);
  }

  Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
                                    instr->Mnemonic(), deopt_reason);
  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (cc == no_condition && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}

void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
}

void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }
  data->SetWeakCellCache(Smi::FromInt(0));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}

int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}

void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}

void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode, int argc) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), argc, Safepoint::kLazyDeopt);
  }
}

void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(kind == expected_safepoint_kind_);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();

  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}

void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}

void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}

void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}

static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}

void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}

void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}

void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}

void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}

void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}

void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  DCHECK(ToRegister(instr->result()).is(rax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}

void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}

void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
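  // For example, with divisor == 8 the mask is 7: dividend == -13 takes the
  // negative path, computing -((13) & 7) == -5 == -13 % 8, while dividend ==
  // 13 takes the positive path, computing 13 & 7 == 5, so the result always
  // carries the dividend's sign, as JS requires.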
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ testl(dividend, dividend);
    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
    // Note that this is correct even for kMinInt operands.
    __ negl(dividend);
    __ andl(dividend, Immediate(mask));
    __ negl(dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
    }
    __ jmp(&done, Label::kNear);
  }

  __ bind(&dividend_is_not_negative);
  __ andl(dividend, Immediate(mask));
  __ bind(&done);
}

void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(rax));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
    return;
  }
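
  // The remainder below is computed as dividend - (dividend / |divisor|) *
  // |divisor|; TruncatingDiv leaves the truncated quotient in rdx. The
  // divisor's sign cannot change the result: in JS, -7 % 3 == -7 % -3 == -1.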
  __ TruncatingDiv(dividend, Abs(divisor));
  __ imull(rdx, rdx, Immediate(Abs(divisor)));
  __ movl(rax, dividend);
  __ subl(rax, rdx);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ j(not_zero, &remainder_not_zero, Label::kNear);
    __ cmpl(dividend, Immediate(0));
    DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}

void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();

  Register left_reg = ToRegister(instr->left());
  DCHECK(left_reg.is(rax));
  Register right_reg = ToRegister(instr->right());
  DCHECK(!right_reg.is(rax));
  DCHECK(!right_reg.is(rdx));
  Register result_reg = ToRegister(instr->result());
  DCHECK(result_reg.is(rdx));

  Label done;
  // Check for x % 0, idiv would signal a divide error. We have to
  // deopt in this case because we can't return a NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(right_reg, right_reg);
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for kMinInt % -1, idiv would signal a divide error. We
  // have to deopt if we care about -0, because we can't return that.
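  // (kMinInt / -1 is the one overflowing case: +2^31 is not representable in
  // 32 bits, so idivl raises #DE even though the remainder, 0, would be fine.)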
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ cmpl(left_reg, Immediate(kMinInt));
    __ j(not_zero, &no_overflow_possible, Label::kNear);
    __ cmpl(right_reg, Immediate(-1));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
    } else {
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ Set(result_reg, 0);
      __ jmp(&done, Label::kNear);
    }
    __ bind(&no_overflow_possible);
  }

  // Sign extend dividend in eax into edx:eax, since we are using only the low
  // 32 bits of the values.
  __ cdq();

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive_left;
    __ testl(left_reg, left_reg);
    __ j(not_sign, &positive_left, Label::kNear);
    __ idivl(right_reg);
    __ testl(result_reg, result_reg);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
    __ jmp(&done, Label::kNear);
    __ bind(&positive_left);
  }
  __ idivl(right_reg);
  __ bind(&done);
}

void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  if (divisor == 1) return;
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ sarl(dividend, Immediate(shift));
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ negl(dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ sarl(dividend, Immediate(shift));
    return;
  }

  Label not_kmin_int, done;
  __ j(no_overflow, &not_kmin_int, Label::kNear);
  __ movl(dividend, Immediate(kMinInt / divisor));
  __ jmp(&done, Label::kNear);
  __ bind(&not_kmin_int);
  __ sarl(dividend, Immediate(shift));
  __ bind(&done);
}

void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(rdx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(dividend, Abs(divisor));
    if (divisor < 0) __ negl(rdx);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp3());
  DCHECK(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
  Label needs_adjustment, done;
  __ cmpl(dividend, Immediate(0));
  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ negl(rdx);
  __ jmp(&done, Label::kNear);
  __ bind(&needs_adjustment);
  __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
  __ TruncatingDiv(temp, Abs(divisor));
  if (divisor < 0) __ negl(rdx);
  __ subl(rdx, Immediate(1));
  __ bind(&done);
}

// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());
  DCHECK(dividend.is(rax));
  DCHECK(remainder.is(rdx));
  DCHECK(result.is(rax));
  DCHECK(!divisor.is(rax));
  DCHECK(!divisor.is(rdx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(divisor, divisor);
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ testl(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ testl(divisor, divisor);
    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmpl(dividend, Immediate(kMinInt));
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmpl(divisor, Immediate(-1));
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to rdx (= remainder).
  __ cdq();
  __ idivl(divisor);
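
  // idivl truncates toward zero; flooring requires subtracting one from the
  // quotient whenever the remainder is nonzero and the operands' signs
  // differ. The xor below moves that sign comparison into the remainder's
  // sign bit, and sarl(31) turns it into 0 or -1. E.g. for -7 / 2: quotient
  // -3, remainder -1, signs differ, so -3 + (-1) == -4 == floor(-3.5).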
  Label done;
  __ testl(remainder, remainder);
  __ j(zero, &done, Label::kNear);
  __ xorl(remainder, divisor);
  __ sarl(remainder, Immediate(31));
  __ addl(result, remainder);
  __ bind(&done);
}

void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmpl(dividend, Immediate(kMinInt));
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ testl(dividend, Immediate(mask));
    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
  }
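
  // The shift sequence below rounds toward zero, as JS requires: the two
  // shifts materialize (2^shift - 1) for a negative dividend (and 0 for a
  // positive one) and use it to bias the final arithmetic shift. E.g. for
  // -7 / 4 (shift == 2): -7 + 3 == -4, and -4 >> 2 == -1, not -2.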
  __ Move(result, dividend);
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift > 0) {
    // The arithmetic shift is always OK, the 'if' is an optimization only.
    if (shift > 1) __ sarl(result, Immediate(31));
    __ shrl(result, Immediate(32 - shift));
    __ addl(result, dividend);
    __ sarl(result, Immediate(shift));
  }
  if (divisor < 0) __ negl(result);
}

void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(rdx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ negl(rdx);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ movl(rax, rdx);
    __ imull(rax, rax, Immediate(divisor));
    __ subl(rax, dividend);
    DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
  }
}

// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  DCHECK(dividend.is(rax));
  DCHECK(remainder.is(rdx));
  DCHECK(ToRegister(instr->result()).is(rax));
  DCHECK(!divisor.is(rax));
  DCHECK(!divisor.is(rdx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(divisor, divisor);
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ testl(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ testl(divisor, divisor);
    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmpl(dividend, Immediate(kMinInt));
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmpl(divisor, Immediate(-1));
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to rdx (= remainder).
  __ cdq();
  __ idivl(divisor);

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ testl(remainder, remainder);
    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
  }
}

void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ movp(kScratchRegister, left);
    } else {
      __ movl(kScratchRegister, left);
    }
  }

  bool can_overflow =
      instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  if (right->IsConstantOperand()) {
    int32_t right_value = ToInteger32(LConstantOperand::cast(right));
    if (right_value == -1) {
      __ negl(left);
    } else if (right_value == 0) {
      __ xorl(left, left);
    } else if (right_value == 2) {
      __ addl(left, left);
    } else if (!can_overflow) {
      // If the multiplication is known to not overflow, we
      // can use operations that don't set the overflow flag
      // correctly.
      switch (right_value) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ leal(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shll(left, Immediate(2));
          break;
        case 5:
          __ leal(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shll(left, Immediate(3));
          break;
        case 9:
          __ leal(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shll(left, Immediate(4));
          break;
        default:
          __ imull(left, left, Immediate(right_value));
          break;
      }
    } else {
      __ imull(left, left, Immediate(right_value));
    }
  } else if (right->IsStackSlot()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger64(left, left);
      __ imulp(left, ToOperand(right));
    } else {
      __ imull(left, ToOperand(right));
    }
  } else {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger64(left, left);
      __ imulp(left, ToRegister(right));
    } else {
      __ imull(left, ToRegister(right));
    }
  }

  if (can_overflow) {
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ testp(left, left);
    } else {
      __ testl(left, left);
    }
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      // Constant can't be represented as 32-bit Smi due to immediate size
      // limit.
      DCHECK(SmiValuesAre32Bits()
          ? !instr->hydrogen_value()->representation().IsSmi()
          : SmiValuesAre31Bits());
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        __ cmpl(kScratchRegister, Immediate(0));
        DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
      }
    } else if (right->IsStackSlot()) {
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ orp(kScratchRegister, ToOperand(right));
      } else {
        __ orl(kScratchRegister, ToOperand(right));
      }
      DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    } else {
      // Test the non-zero operand for negative sign.
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ orp(kScratchRegister, ToRegister(right));
      } else {
        __ orl(kScratchRegister, ToRegister(right));
      }
      DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    }
    __ bind(&done);
  }
}

void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  DCHECK(left->IsRegister());

  if (right->IsConstantOperand()) {
    int32_t right_operand =
        ToRepresentation(LConstantOperand::cast(right),
                         instr->hydrogen()->right()->representation());
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_OR:
        __ orl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_XOR:
        if (right_operand == int32_t(~0)) {
          __ notl(ToRegister(left));
        } else {
          __ xorl(ToRegister(left), Immediate(right_operand));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else if (right->IsStackSlot()) {
    switch (instr->op()) {
      case Token::BIT_AND:
        if (instr->IsInteger32()) {
          __ andl(ToRegister(left), ToOperand(right));
        } else {
          __ andp(ToRegister(left), ToOperand(right));
        }
        break;
      case Token::BIT_OR:
        if (instr->IsInteger32()) {
          __ orl(ToRegister(left), ToOperand(right));
        } else {
          __ orp(ToRegister(left), ToOperand(right));
        }
        break;
      case Token::BIT_XOR:
        if (instr->IsInteger32()) {
          __ xorl(ToRegister(left), ToOperand(right));
        } else {
          __ xorp(ToRegister(left), ToOperand(right));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    DCHECK(right->IsRegister());
    switch (instr->op()) {
      case Token::BIT_AND:
        if (instr->IsInteger32()) {
          __ andl(ToRegister(left), ToRegister(right));
        } else {
          __ andp(ToRegister(left), ToRegister(right));
        }
        break;
      case Token::BIT_OR:
        if (instr->IsInteger32()) {
          __ orl(ToRegister(left), ToRegister(right));
        } else {
          __ orp(ToRegister(left), ToRegister(right));
        }
        break;
      case Token::BIT_XOR:
        if (instr->IsInteger32()) {
          __ xorl(ToRegister(left), ToRegister(right));
        } else {
          __ xorp(ToRegister(left), ToRegister(right));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  DCHECK(left->IsRegister());
  if (right->IsRegister()) {
    DCHECK(ToRegister(right).is(rcx));

    switch (instr->op()) {
      case Token::ROR:
        __ rorl_cl(ToRegister(left));
        break;
      case Token::SAR:
        __ sarl_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shrl_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
        }
        break;
      case Token::SHL:
        __ shll_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int32_t value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ rorl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sarl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ shrl(ToRegister(left), Immediate(shift_count));
        } else if (instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi()) {
            if (SmiValuesAre32Bits()) {
              __ shlp(ToRegister(left), Immediate(shift_count));
            } else {
              DCHECK(SmiValuesAre31Bits());
              if (instr->can_deopt()) {
                if (shift_count != 1) {
                  __ shll(ToRegister(left), Immediate(shift_count - 1));
                }
                __ Integer32ToSmi(ToRegister(left), ToRegister(left));
                DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
              } else {
                __ shll(ToRegister(left), Immediate(shift_count));
              }
            }
          } else {
            __ shll(ToRegister(left), Immediate(shift_count));
          }
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    int32_t right_operand =
        ToRepresentation(LConstantOperand::cast(right),
                         instr->hydrogen()->right()->representation());
    __ subl(ToRegister(left), Immediate(right_operand));
  } else if (right->IsRegister()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ subp(ToRegister(left), ToRegister(right));
    } else {
      __ subl(ToRegister(left), ToRegister(right));
    }
  } else {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ subp(ToRegister(left), ToOperand(right));
    } else {
      __ subl(ToRegister(left), ToOperand(right));
    }
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
  }
}

void LCodeGen::DoConstantI(LConstantI* instr) {
  Register dst = ToRegister(instr->result());
  if (instr->value() == 0) {
    __ xorl(dst, dst);
  } else {
    __ movl(dst, Immediate(instr->value()));
  }
}

void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Move(ToRegister(instr->result()), instr->value());
}

void LCodeGen::DoConstantD(LConstantD* instr) {
  __ Move(ToDoubleRegister(instr->result()), instr->bits());
}

void LCodeGen::DoConstantE(LConstantE* instr) {
  __ LoadAddress(ToRegister(instr->result()), instr->value());
}

void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ Move(ToRegister(instr->result()), object);
}

void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}

void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Smi* index = instr->index();
  Label runtime, done, not_date_object;
  DCHECK(object.is(result));
  DCHECK(object.is(rax));

  Condition cc = masm()->CheckSmi(object);
  DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
  __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
  DeoptimizeIf(not_equal, instr, Deoptimizer::kNotADateObject);

  if (index->value() == 0) {
    __ movp(result, FieldOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      Operand stamp_operand = __ ExternalOperand(stamp);
      __ movp(kScratchRegister, stamp_operand);
      __ cmpp(kScratchRegister, FieldOperand(object,
                                             JSDate::kCacheStampOffset));
      __ j(not_equal, &runtime, Label::kNear);
      __ movp(result, FieldOperand(object, JSDate::kValueOffset +
                                           kPointerSize * index->value()));
      __ jmp(&done, Label::kNear);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2);
    __ movp(arg_reg_1, object);
    __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}

Operand LCodeGen::BuildSeqStringOperand(Register string,
                                        LOperand* index,
                                        String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldOperand(string, SeqString::kHeaderSize + offset);
  }
  return FieldOperand(
      string, ToRegister(index),
      encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
      SeqString::kHeaderSize);
}

void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register result = ToRegister(instr->result());
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    __ Push(string);
    __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
    __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));

    __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
                              ? one_byte_seq_type : two_byte_seq_type));
    __ Check(equal, kUnexpectedStringType);
    __ Pop(string);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ movzxbl(result, operand);
  } else {
    __ movzxwl(result, operand);
  }
}

void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    Register value = ToRegister(instr->value());
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (instr->value()->IsConstantOperand()) {
    int value = ToInteger32(LConstantOperand::cast(instr->value()));
    DCHECK_LE(0, value);
    if (encoding == String::ONE_BYTE_ENCODING) {
      DCHECK_LE(value, String::kMaxOneByteCharCode);
      __ movb(operand, Immediate(value));
    } else {
      DCHECK_LE(value, String::kMaxUtf16CodeUnit);
      __ movw(operand, Immediate(value));
    }
  } else {
    Register value = ToRegister(instr->value());
    if (encoding == String::ONE_BYTE_ENCODING) {
      __ movb(operand, value);
    } else {
      __ movw(operand, value);
    }
  }
}

void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();

  Representation target_rep = instr->hydrogen()->representation();
  bool is_p = target_rep.IsSmi() || target_rep.IsExternal();

  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
    if (right->IsConstantOperand()) {
      // No support for smi-immediates for 32-bit SMI.
      DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
      int32_t offset =
          ToRepresentation(LConstantOperand::cast(right),
                           instr->hydrogen()->right()->representation());
      if (is_p) {
        __ leap(ToRegister(instr->result()),
                MemOperand(ToRegister(left), offset));
      } else {
        __ leal(ToRegister(instr->result()),
                MemOperand(ToRegister(left), offset));
      }
    } else {
      Operand address(ToRegister(left), ToRegister(right), times_1, 0);
      if (is_p) {
        __ leap(ToRegister(instr->result()), address);
      } else {
        __ leal(ToRegister(instr->result()), address);
      }
    }
  } else {
    if (right->IsConstantOperand()) {
      // No support for smi-immediates for 32-bit SMI.
      DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
      int32_t right_operand =
          ToRepresentation(LConstantOperand::cast(right),
                           instr->hydrogen()->right()->representation());
      if (is_p) {
        __ addp(ToRegister(left), Immediate(right_operand));
      } else {
        __ addl(ToRegister(left), Immediate(right_operand));
      }
    } else if (right->IsRegister()) {
      if (is_p) {
        __ addp(ToRegister(left), ToRegister(right));
      } else {
        __ addl(ToRegister(left), ToRegister(right));
      }
    } else {
      if (is_p) {
        __ addp(ToRegister(left), ToOperand(right));
      } else {
        __ addl(ToRegister(left), ToOperand(right));
      }
    }
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
    }
  }
}

void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Label return_left;
    Condition condition = (operation == HMathMinMax::kMathMin)
        ? less_equal
        : greater_equal;
    Register left_reg = ToRegister(left);
    if (right->IsConstantOperand()) {
      Immediate right_imm = Immediate(
          ToRepresentation(LConstantOperand::cast(right),
                           instr->hydrogen()->right()->representation()));
      DCHECK(SmiValuesAre32Bits()
          ? !instr->hydrogen()->representation().IsSmi()
          : SmiValuesAre31Bits());
      __ cmpl(left_reg, right_imm);
      __ j(condition, &return_left, Label::kNear);
      __ movp(left_reg, right_imm);
    } else if (right->IsRegister()) {
      Register right_reg = ToRegister(right);
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ cmpp(left_reg, right_reg);
      } else {
        __ cmpl(left_reg, right_reg);
      }
      __ j(condition, &return_left, Label::kNear);
      __ movp(left_reg, right_reg);
    } else {
      Operand right_op = ToOperand(right);
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ cmpp(left_reg, right_op);
      } else {
        __ cmpl(left_reg, right_op);
      }
      __ j(condition, &return_left, Label::kNear);
      __ movp(left_reg, right_op);
    }
    __ bind(&return_left);
  } else {
    DCHECK(instr->hydrogen()->representation().IsDouble());
    Label check_nan_left, check_zero, return_left, return_right;
    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
    XMMRegister left_reg = ToDoubleRegister(left);
    XMMRegister right_reg = ToDoubleRegister(right);
    __ ucomisd(left_reg, right_reg);
    __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
    __ j(equal, &check_zero, Label::kNear);  // left == right.
    __ j(condition, &return_left, Label::kNear);
    __ jmp(&return_right, Label::kNear);
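
    // ucomisd reports +0 and -0 as equal, so the zero case is disambiguated
    // with bit operations below: orps keeps a set sign bit if either operand
    // has one (min(+0, -0) == -0), while for max, +0 + -0 == +0.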
1977 __ bind(&check_zero);
1978 XMMRegister xmm_scratch = double_scratch0();
1979 __ xorps(xmm_scratch, xmm_scratch);
1980 __ ucomisd(left_reg, xmm_scratch);
1981 __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
1982 // At this point, both left and right are either 0 or -0.
1983 if (operation == HMathMinMax::kMathMin) {
1984 __ orps(left_reg, right_reg);
1985 } else {
1986 // Since we operate on +0 and/or -0, addsd and andpd have the same effect.
1987 __ addsd(left_reg, right_reg);
1989 __ jmp(&return_left, Label::kNear);
1991 __ bind(&check_nan_left);
1992 __ ucomisd(left_reg, left_reg); // NaN check.
1993 __ j(parity_even, &return_left, Label::kNear);
1994 __ bind(&return_right);
1995 __ movaps(left_reg, right_reg);
1997 __ bind(&return_left);
2002 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2003 XMMRegister left = ToDoubleRegister(instr->left());
2004 XMMRegister right = ToDoubleRegister(instr->right());
2005 XMMRegister result = ToDoubleRegister(instr->result());
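// With AVX, the three-operand VEX forms (vaddsd etc.) can write the result
// register directly; the two-operand SSE2 forms overwrite their left input,
// which is why the non-AVX paths below assert result.is(left).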
2006 switch (instr->op()) {
2007 case Token::ADD:
2008 if (CpuFeatures::IsSupported(AVX)) {
2009 CpuFeatureScope scope(masm(), AVX);
2010 __ vaddsd(result, left, right);
2011 } else {
2012 DCHECK(result.is(left));
2013 __ addsd(left, right);
2014 }
2015 break;
2016 case Token::SUB:
2017 if (CpuFeatures::IsSupported(AVX)) {
2018 CpuFeatureScope scope(masm(), AVX);
2019 __ vsubsd(result, left, right);
2020 } else {
2021 DCHECK(result.is(left));
2022 __ subsd(left, right);
2023 }
2024 break;
2025 case Token::MUL:
2026 if (CpuFeatures::IsSupported(AVX)) {
2027 CpuFeatureScope scope(masm(), AVX);
2028 __ vmulsd(result, left, right);
2029 } else {
2030 DCHECK(result.is(left));
2031 __ mulsd(left, right);
2032 }
2033 break;
2034 case Token::DIV:
2035 if (CpuFeatures::IsSupported(AVX)) {
2036 CpuFeatureScope scope(masm(), AVX);
2037 __ vdivsd(result, left, right);
2038 } else {
2039 DCHECK(result.is(left));
2040 __ divsd(left, right);
2041 // Don't delete this mov. It may improve performance on some CPUs
2042 // when there is a mulsd depending on the result.
2043 __ movaps(left, left);
2044 }
2045 break;
2046 case Token::MOD: {
2047 XMMRegister xmm_scratch = double_scratch0();
2048 __ PrepareCallCFunction(2);
2049 __ movaps(xmm_scratch, left);
2050 DCHECK(right.is(xmm1));
2051 __ CallCFunction(
2052 ExternalReference::mod_two_doubles_operation(isolate()), 2);
2053 __ movaps(result, xmm_scratch);
2054 break;
2055 }
2056 default:
2057 UNREACHABLE();
2058 break;
2059 }
2060 }
2063 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2064 DCHECK(ToRegister(instr->context()).is(rsi));
2065 DCHECK(ToRegister(instr->left()).is(rdx));
2066 DCHECK(ToRegister(instr->right()).is(rax));
2067 DCHECK(ToRegister(instr->result()).is(rax));
2069 Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
2070 CallCode(code, RelocInfo::CODE_TARGET, instr);
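// EmitBranch emits at most one jump per branch: when one successor is the
// next block to be emitted, it falls through to it and only jumps (possibly
// on the negated condition) to the other successor.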
2074 template<class InstrType>
2075 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2076 int left_block = instr->TrueDestination(chunk_);
2077 int right_block = instr->FalseDestination(chunk_);
2079 int next_block = GetNextEmittedBlock();
2081 if (right_block == left_block || cc == no_condition) {
2082 EmitGoto(left_block);
2083 } else if (left_block == next_block) {
2084 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
2085 } else if (right_block == next_block) {
2086 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2087 } else {
2088 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2090 __ jmp(chunk_->GetAssemblyLabel(right_block));
2096 template<class InstrType>
2097 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2098 int false_block = instr->FalseDestination(chunk_);
2099 __ j(cc, chunk_->GetAssemblyLabel(false_block));
2103 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2104 __ int3();
2105 }
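// DoBranch turns a value into control flow. Known representations (int32,
// Smi, double, and tagged values with a known type) get a direct test; the
// generic tagged case below dispatches on the ToBooleanStub's recorded
// expected input types and deoptimizes on anything not yet seen.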
2108 void LCodeGen::DoBranch(LBranch* instr) {
2109 Representation r = instr->hydrogen()->value()->representation();
2110 if (r.IsInteger32()) {
2111 DCHECK(!info()->IsStub());
2112 Register reg = ToRegister(instr->value());
2113 __ testl(reg, reg);
2114 EmitBranch(instr, not_zero);
2115 } else if (r.IsSmi()) {
2116 DCHECK(!info()->IsStub());
2117 Register reg = ToRegister(instr->value());
2118 __ testp(reg, reg);
2119 EmitBranch(instr, not_zero);
2120 } else if (r.IsDouble()) {
2121 DCHECK(!info()->IsStub());
2122 XMMRegister reg = ToDoubleRegister(instr->value());
2123 XMMRegister xmm_scratch = double_scratch0();
2124 __ xorps(xmm_scratch, xmm_scratch);
2125 __ ucomisd(reg, xmm_scratch);
2126 EmitBranch(instr, not_equal);
2127 } else {
2128 DCHECK(r.IsTagged());
2129 Register reg = ToRegister(instr->value());
2130 HType type = instr->hydrogen()->value()->type();
2131 if (type.IsBoolean()) {
2132 DCHECK(!info()->IsStub());
2133 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2134 EmitBranch(instr, equal);
2135 } else if (type.IsSmi()) {
2136 DCHECK(!info()->IsStub());
2137 __ SmiCompare(reg, Smi::FromInt(0));
2138 EmitBranch(instr, not_equal);
2139 } else if (type.IsJSArray()) {
2140 DCHECK(!info()->IsStub());
2141 EmitBranch(instr, no_condition);
2142 } else if (type.IsHeapNumber()) {
2143 DCHECK(!info()->IsStub());
2144 XMMRegister xmm_scratch = double_scratch0();
2145 __ xorps(xmm_scratch, xmm_scratch);
2146 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2147 EmitBranch(instr, not_equal);
2148 } else if (type.IsString()) {
2149 DCHECK(!info()->IsStub());
2150 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2151 EmitBranch(instr, not_equal);
2152 } else {
2153 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2154 // Avoid deopts in the case where we've never executed this path before.
2155 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2157 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2158 // undefined -> false.
2159 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2160 __ j(equal, instr->FalseLabel(chunk_));
2161 }
2162 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2163 // true -> true.
2164 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2165 __ j(equal, instr->TrueLabel(chunk_));
2166 // false -> false.
2167 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2168 __ j(equal, instr->FalseLabel(chunk_));
2169 }
2170 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2171 // 'null' -> false.
2172 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2173 __ j(equal, instr->FalseLabel(chunk_));
2174 }
2176 if (expected.Contains(ToBooleanStub::SMI)) {
2177 // Smis: 0 -> false, all others -> true.
2178 __ Cmp(reg, Smi::FromInt(0));
2179 __ j(equal, instr->FalseLabel(chunk_));
2180 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2181 } else if (expected.NeedsMap()) {
2182 // If we need a map later and have a Smi -> deopt.
2183 __ testb(reg, Immediate(kSmiTagMask));
2184 DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
2187 const Register map = kScratchRegister;
2188 if (expected.NeedsMap()) {
2189 __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
2191 if (expected.CanBeUndetectable()) {
2192 // Undetectable -> false.
2193 __ testb(FieldOperand(map, Map::kBitFieldOffset),
2194 Immediate(1 << Map::kIsUndetectable));
2195 __ j(not_zero, instr->FalseLabel(chunk_));
2199 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2200 // spec object -> true.
2201 __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2202 __ j(above_equal, instr->TrueLabel(chunk_));
2205 if (expected.Contains(ToBooleanStub::STRING)) {
2206 // String value -> false iff empty.
2207 Label not_string;
2208 __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2209 __ j(above_equal, &not_string, Label::kNear);
2210 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2211 __ j(not_zero, instr->TrueLabel(chunk_));
2212 __ jmp(instr->FalseLabel(chunk_));
2213 __ bind(&not_string);
2214 }
2216 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2217 // Symbol value -> true.
2218 __ CmpInstanceType(map, SYMBOL_TYPE);
2219 __ j(equal, instr->TrueLabel(chunk_));
2222 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2223 // heap number -> false iff +0, -0, or NaN.
2224 Label not_heap_number;
2225 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2226 __ j(not_equal, &not_heap_number, Label::kNear);
2227 XMMRegister xmm_scratch = double_scratch0();
2228 __ xorps(xmm_scratch, xmm_scratch);
2229 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2230 __ j(zero, instr->FalseLabel(chunk_));
2231 __ jmp(instr->TrueLabel(chunk_));
2232 __ bind(&not_heap_number);
2233 }
2235 if (!expected.IsGeneric()) {
2236 // We've seen something for the first time -> deopt.
2237 // This can only happen if we are not generic already.
2238 DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
2245 void LCodeGen::EmitGoto(int block) {
2246 if (!IsNextEmittedBlock(block)) {
2247 __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
2252 void LCodeGen::DoGoto(LGoto* instr) {
2253 EmitGoto(instr->block_id());
2257 inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2258 Condition cond = no_condition;
2259 switch (op) {
2260 case Token::EQ:
2261 case Token::EQ_STRICT:
2262 cond = equal;
2263 break;
2264 case Token::NE:
2265 case Token::NE_STRICT:
2266 cond = not_equal;
2267 break;
2268 case Token::LT:
2269 cond = is_unsigned ? below : less;
2270 break;
2271 case Token::GT:
2272 cond = is_unsigned ? above : greater;
2273 break;
2274 case Token::LTE:
2275 cond = is_unsigned ? below_equal : less_equal;
2276 break;
2277 case Token::GTE:
2278 cond = is_unsigned ? above_equal : greater_equal;
2279 break;
2280 case Token::IN:
2281 case Token::INSTANCEOF:
2282 default:
2283 UNREACHABLE();
2284 }
2285 return cond;
2286 }
2289 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2290 LOperand* left = instr->left();
2291 LOperand* right = instr->right();
2292 bool is_unsigned =
2293 instr->is_double() ||
2294 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2295 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2296 Condition cc = TokenToCondition(instr->op(), is_unsigned);
2298 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2299 // We can statically evaluate the comparison.
2300 double left_val = ToDouble(LConstantOperand::cast(left));
2301 double right_val = ToDouble(LConstantOperand::cast(right));
2302 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2303 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2304 EmitGoto(next_block);
2305 } else {
2306 if (instr->is_double()) {
2307 // Don't base result on EFLAGS when a NaN is involved. Instead
2308 // jump to the false block.
2309 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
2310 __ j(parity_even, instr->FalseLabel(chunk_));
2311 } else {
2312 int32_t value;
2313 if (right->IsConstantOperand()) {
2314 value = ToInteger32(LConstantOperand::cast(right));
2315 if (instr->hydrogen_value()->representation().IsSmi()) {
2316 __ Cmp(ToRegister(left), Smi::FromInt(value));
2317 } else {
2318 __ cmpl(ToRegister(left), Immediate(value));
2319 }
2320 } else if (left->IsConstantOperand()) {
2321 value = ToInteger32(LConstantOperand::cast(left));
2322 if (instr->hydrogen_value()->representation().IsSmi()) {
2323 if (right->IsRegister()) {
2324 __ Cmp(ToRegister(right), Smi::FromInt(value));
2325 } else {
2326 __ Cmp(ToOperand(right), Smi::FromInt(value));
2327 }
2328 } else if (right->IsRegister()) {
2329 __ cmpl(ToRegister(right), Immediate(value));
2330 } else {
2331 __ cmpl(ToOperand(right), Immediate(value));
2332 }
2333 // We commuted the operands, so commute the condition.
2334 cc = CommuteCondition(cc);
2335 } else if (instr->hydrogen_value()->representation().IsSmi()) {
2336 if (right->IsRegister()) {
2337 __ cmpp(ToRegister(left), ToRegister(right));
2338 } else {
2339 __ cmpp(ToRegister(left), ToOperand(right));
2340 }
2341 } else {
2342 if (right->IsRegister()) {
2343 __ cmpl(ToRegister(left), ToRegister(right));
2344 } else {
2345 __ cmpl(ToRegister(left), ToOperand(right));
2346 }
2347 }
2348 }
2349 EmitBranch(instr, cc);
2350 }
2351 }
2354 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2355 Register left = ToRegister(instr->left());
2357 if (instr->right()->IsConstantOperand()) {
2358 Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2359 __ Cmp(left, right);
2360 } else {
2361 Register right = ToRegister(instr->right());
2362 __ cmpp(left, right);
2364 EmitBranch(instr, equal);
2368 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2369 if (instr->hydrogen()->representation().IsTagged()) {
2370 Register input_reg = ToRegister(instr->object());
2371 __ Cmp(input_reg, factory()->the_hole_value());
2372 EmitBranch(instr, equal);
2373 return;
2374 }
2376 XMMRegister input_reg = ToDoubleRegister(instr->object());
2377 __ ucomisd(input_reg, input_reg);
2378 EmitFalseBranch(instr, parity_odd);
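// The hole is encoded as a NaN with a fixed bit pattern, so an ordinary NaN
// check is not enough: spill the double and compare its upper 32 bits
// against kHoleNanUpper32.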
2380 __ subp(rsp, Immediate(kDoubleSize));
2381 __ movsd(MemOperand(rsp, 0), input_reg);
2382 __ addp(rsp, Immediate(kDoubleSize));
2384 int offset = sizeof(kHoleNanUpper32);
2385 __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
2386 EmitBranch(instr, equal);
2390 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2391 Representation rep = instr->hydrogen()->value()->representation();
2392 DCHECK(!rep.IsInteger32());
2394 if (rep.IsDouble()) {
2395 XMMRegister value = ToDoubleRegister(instr->value());
2396 XMMRegister xmm_scratch = double_scratch0();
2397 __ xorps(xmm_scratch, xmm_scratch);
2398 __ ucomisd(xmm_scratch, value);
2399 EmitFalseBranch(instr, not_equal);
2400 __ movmskpd(kScratchRegister, value);
2401 __ testl(kScratchRegister, Immediate(1));
2402 EmitBranch(instr, not_zero);
2403 } else {
2404 Register value = ToRegister(instr->value());
2405 Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2406 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
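// A -0 heap number has an upper word of 0x80000000 (sign bit set, all else
// zero). cmpl against 1 sets the overflow flag only for that value
// (INT32_MIN), so no_overflow filters out everything but a possible -0
// before the mantissa check below.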
2407 __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
2408 Immediate(0x1));
2409 EmitFalseBranch(instr, no_overflow);
2410 __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
2411 Immediate(0x00000000));
2412 EmitBranch(instr, equal);
2417 Condition LCodeGen::EmitIsObject(Register input,
2418 Label* is_not_object,
2419 Label* is_object) {
2420 DCHECK(!input.is(kScratchRegister));
2422 __ JumpIfSmi(input, is_not_object);
2424 __ CompareRoot(input, Heap::kNullValueRootIndex);
2425 __ j(equal, is_object);
2427 __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
2428 // Undetectable objects behave like undefined.
2429 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
2430 Immediate(1 << Map::kIsUndetectable));
2431 __ j(not_zero, is_not_object);
2433 __ movzxbl(kScratchRegister,
2434 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
2435 __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2436 __ j(below, is_not_object);
2437 __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2438 return below_equal;
2439 }
2442 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2443 Register reg = ToRegister(instr->value());
2445 Condition true_cond = EmitIsObject(
2446 reg, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2448 EmitBranch(instr, true_cond);
2452 Condition LCodeGen::EmitIsString(Register input,
2453 Register temp1,
2454 Label* is_not_string,
2455 SmiCheck check_needed = INLINE_SMI_CHECK) {
2456 if (check_needed == INLINE_SMI_CHECK) {
2457 __ JumpIfSmi(input, is_not_string);
2458 }
2460 Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2461 return cond;
2462 }
2466 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2467 Register reg = ToRegister(instr->value());
2468 Register temp = ToRegister(instr->temp());
2470 SmiCheck check_needed =
2471 instr->hydrogen()->value()->type().IsHeapObject()
2472 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2474 Condition true_cond = EmitIsString(
2475 reg, temp, instr->FalseLabel(chunk_), check_needed);
2477 EmitBranch(instr, true_cond);
2481 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2482 Condition is_smi;
2483 if (instr->value()->IsRegister()) {
2484 Register input = ToRegister(instr->value());
2485 is_smi = masm()->CheckSmi(input);
2486 } else {
2487 Operand input = ToOperand(instr->value());
2488 is_smi = masm()->CheckSmi(input);
2489 }
2490 EmitBranch(instr, is_smi);
2494 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2495 Register input = ToRegister(instr->value());
2496 Register temp = ToRegister(instr->temp());
2498 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2499 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2500 }
2501 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2502 __ testb(FieldOperand(temp, Map::kBitFieldOffset),
2503 Immediate(1 << Map::kIsUndetectable));
2504 EmitBranch(instr, not_zero);
2508 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2509 DCHECK(ToRegister(instr->context()).is(rsi));
2510 Token::Value op = instr->op();
2512 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2513 CallCode(ic, RelocInfo::CODE_TARGET, instr);
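// The CompareIC leaves its result in rax; testing it against zero below
// materializes the flags that the conditional branch consumes.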
2515 Condition condition = TokenToCondition(op, false);
2516 __ testp(rax, rax);
2518 EmitBranch(instr, condition);
2522 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2523 InstanceType from = instr->from();
2524 InstanceType to = instr->to();
2525 if (from == FIRST_TYPE) return to;
2526 DCHECK(from == to || to == LAST_TYPE);
2527 return from;
2528 }
2531 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2532 InstanceType from = instr->from();
2533 InstanceType to = instr->to();
2534 if (from == to) return equal;
2535 if (to == LAST_TYPE) return above_equal;
2536 if (from == FIRST_TYPE) return below_equal;
2537 UNREACHABLE();
2538 return equal;
2539 }
2542 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2543 Register input = ToRegister(instr->value());
2545 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2546 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2547 }
2549 __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
2550 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2554 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2555 Register input = ToRegister(instr->value());
2556 Register result = ToRegister(instr->result());
2558 __ AssertString(input);
2560 __ movl(result, FieldOperand(input, String::kHashFieldOffset));
2561 DCHECK(String::kHashShift >= kSmiTagSize);
2562 __ IndexFromHash(result, result);
2566 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2567 LHasCachedArrayIndexAndBranch* instr) {
2568 Register input = ToRegister(instr->value());
2570 __ testl(FieldOperand(input, String::kHashFieldOffset),
2571 Immediate(String::kContainsCachedArrayIndexMask));
2572 EmitBranch(instr, equal);
2576 // Branches to a label or falls through with the answer in the z flag.
2577 // Trashes the temp registers.
2578 void LCodeGen::EmitClassOfTest(Label* is_true,
2579 Label* is_false,
2580 Handle<String> class_name,
2581 Register input,
2582 Register temp,
2583 Register temp2) {
2584 DCHECK(!input.is(temp));
2585 DCHECK(!input.is(temp2));
2586 DCHECK(!temp.is(temp2));
2588 __ JumpIfSmi(input, is_false);
2590 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2591 // Assuming the following assertions, we can use the same compares to test
2592 // for both being a function type and being in the object type range.
2593 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2594 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2595 FIRST_SPEC_OBJECT_TYPE + 1);
2596 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2597 LAST_SPEC_OBJECT_TYPE - 1);
2598 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2599 __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2600 __ j(below, is_false);
2601 __ j(equal, is_true);
2602 __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2603 __ j(equal, is_true);
2604 } else {
2605 // Faster code path to avoid two compares: subtract lower bound from the
2606 // actual type and do a signed compare with the width of the type range.
2607 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2608 __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2609 __ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2610 __ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2611 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2612 __ j(above, is_false);
2615 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2616 // Check if the constructor in the map is a function.
2617 __ movp(temp, FieldOperand(temp, Map::kConstructorOffset));
2619 // Objects with a non-function constructor have class 'Object'.
2620 __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
2621 if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2622 __ j(not_equal, is_true);
2623 } else {
2624 __ j(not_equal, is_false);
2625 }
2627 // temp now contains the constructor function. Grab the
2628 // instance class name from there.
2629 __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2630 __ movp(temp, FieldOperand(temp,
2631 SharedFunctionInfo::kInstanceClassNameOffset));
2632 // The class name we are testing against is internalized since it's a literal.
2633 // The name in the constructor is internalized because of the way the context
2634 // is booted. This routine isn't expected to work for random API-created
2635 // classes and it doesn't have to because you can't access it with natives
2636 // syntax. Since both sides are internalized it is sufficient to use an
2637 // identity comparison.
2638 DCHECK(class_name->IsInternalizedString());
2639 __ Cmp(temp, class_name);
2640 // End with the answer in the z flag.
2644 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2645 Register input = ToRegister(instr->value());
2646 Register temp = ToRegister(instr->temp());
2647 Register temp2 = ToRegister(instr->temp2());
2648 Handle<String> class_name = instr->hydrogen()->class_name();
2650 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2651 class_name, input, temp, temp2);
2653 EmitBranch(instr, equal);
2657 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2658 Register reg = ToRegister(instr->value());
2660 __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2661 EmitBranch(instr, equal);
2665 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2666 DCHECK(ToRegister(instr->context()).is(rsi));
2667 InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
2668 __ Push(ToRegister(instr->left()));
2669 __ Push(ToRegister(instr->right()));
2670 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2671 Label true_value, done;
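// The InstanceofStub signals "is an instance" with a zero result, hence the
// branch on the zero flag below.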
2672 __ testp(rax, rax);
2673 __ j(zero, &true_value, Label::kNear);
2674 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2675 __ jmp(&done, Label::kNear);
2676 __ bind(&true_value);
2677 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2678 __ bind(&done);
2682 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2683 class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
2684 public:
2685 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2686 LInstanceOfKnownGlobal* instr)
2687 : LDeferredCode(codegen), instr_(instr) { }
2688 void Generate() OVERRIDE {
2689 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2691 LInstruction* instr() OVERRIDE { return instr_; }
2692 Label* map_check() { return &map_check_; }
2693 private:
2694 LInstanceOfKnownGlobal* instr_;
2695 Label map_check_;
2696 };
2698 DCHECK(ToRegister(instr->context()).is(rsi));
2699 DeferredInstanceOfKnownGlobal* deferred;
2700 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2702 Label done, false_result;
2703 Register object = ToRegister(instr->value());
2705 // A Smi is not an instance of anything.
2706 __ JumpIfSmi(object, &false_result, Label::kNear);
2708 // This is the inlined call site instanceof cache. The two occurrences of the
2709 // hole value will be patched to the last map/result pair generated by the
2710 // instanceof stub.
2711 Label cache_miss;
2712 // Use a temp register to avoid memory operands with variable lengths.
2713 Register map = ToRegister(instr->temp());
2714 __ movp(map, FieldOperand(object, HeapObject::kMapOffset));
2715 __ bind(deferred->map_check()); // Label for calculating code patching.
2716 Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
2717 __ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
2718 __ cmpp(map, Operand(kScratchRegister, 0));
2719 __ j(not_equal, &cache_miss, Label::kNear);
2720 // Patched to load either true or false.
2721 __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
2723 // Check that the code size between patch label and patch sites is invariant.
2724 Label end_of_patched_code;
2725 __ bind(&end_of_patched_code);
2728 __ jmp(&done, Label::kNear);
2730 // The inlined call site cache did not match. Check for null and string
2731 // before calling the deferred code.
2732 __ bind(&cache_miss); // Null is not an instance of anything.
2733 __ CompareRoot(object, Heap::kNullValueRootIndex);
2734 __ j(equal, &false_result, Label::kNear);
2736 // String values are not instances of anything.
2737 __ JumpIfNotString(object, kScratchRegister, deferred->entry());
2739 __ bind(&false_result);
2740 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2742 __ bind(deferred->exit());
2743 __ bind(&done);
2747 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2748 Label* map_check) {
2749 {
2750 PushSafepointRegistersScope scope(this);
2751 InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
2752 InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
2753 InstanceofStub stub(isolate(), flags);
2755 __ Push(ToRegister(instr->value()));
2756 __ Push(instr->function());
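// Push the code-offset delta between the patchable map check and this call
// site; with the kCallSiteInlineCheck flag the stub uses it to locate and
// patch the inlined cache (the map cell and the boolean result loaded at
// the check site).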
2758 static const int kAdditionalDelta = kPointerSize == kInt64Size ? 10 : 16;
2759 int delta =
2760 masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
2762 __ PushImm32(delta);
2764 // We are pushing three values on the stack but recording a
2765 // safepoint with two arguments because stub is going to
2766 // remove the third argument from the stack before jumping
2767 // to instanceof builtin on the slow path.
2768 CallCodeGeneric(stub.GetCode(),
2769 RelocInfo::CODE_TARGET,
2770 instr,
2771 RECORD_SAFEPOINT_WITH_REGISTERS,
2772 2);
2773 DCHECK(delta == masm_->SizeOfCodeGeneratedSince(map_check));
2774 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2775 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2776 // Move result to a register that survives the end of the
2777 // PushSafepointRegisterScope.
2778 __ movp(kScratchRegister, rax);
2779 }
2780 __ testp(kScratchRegister, kScratchRegister);
2781 Label load_false;
2782 Label done;
2783 __ j(not_zero, &load_false, Label::kNear);
2784 __ LoadRoot(rax, Heap::kTrueValueRootIndex);
2785 __ jmp(&done, Label::kNear);
2786 __ bind(&load_false);
2787 __ LoadRoot(rax, Heap::kFalseValueRootIndex);
2788 __ bind(&done);
2792 void LCodeGen::DoCmpT(LCmpT* instr) {
2793 DCHECK(ToRegister(instr->context()).is(rsi));
2794 Token::Value op = instr->op();
2796 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2797 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2799 Condition condition = TokenToCondition(op, false);
2800 Label true_value, done;
2801 __ testp(rax, rax);
2802 __ j(condition, &true_value, Label::kNear);
2803 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2804 __ jmp(&done, Label::kNear);
2805 __ bind(&true_value);
2806 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2807 __ bind(&done);
2811 void LCodeGen::DoReturn(LReturn* instr) {
2812 if (FLAG_trace && info()->IsOptimizing()) {
2813 // Preserve the return value on the stack and rely on the runtime call
2814 // to return the value in the same register. We're leaving the code
2815 // managed by the register allocator and tearing down the frame, it's
2816 // safe to write to the context register.
2817 __ Push(rax);
2818 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
2819 __ CallRuntime(Runtime::kTraceExit, 1);
2820 }
2821 if (info()->saves_caller_doubles()) {
2822 RestoreCallerDoubles();
2823 }
2824 int no_frame_start = -1;
2825 if (NeedsEagerFrame()) {
2826 __ movp(rsp, rbp);
2827 __ popq(rbp);
2828 no_frame_start = masm_->pc_offset();
2829 }
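// Emit the actual return. With a constant parameter count the arguments can
// be dropped as part of the ret; otherwise the count is only known at run
// time, so the return address is popped, the arguments are dropped manually,
// and we jump back through a register.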
2830 if (instr->has_constant_parameter_count()) {
2831 __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
2832 rcx);
2833 } else {
2834 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2835 Register reg = ToRegister(instr->parameter_count());
2836 // The argument count parameter is a smi.
2837 __ SmiToInteger32(reg, reg);
2838 Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
2839 __ PopReturnAddressTo(return_addr_reg);
2840 __ shlp(reg, Immediate(kPointerSizeLog2));
2841 __ addp(rsp, reg);
2842 __ jmp(return_addr_reg);
2843 }
2844 if (no_frame_start != -1) {
2845 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2850 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2851 Register result = ToRegister(instr->result());
2852 __ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
2853 if (instr->hydrogen()->RequiresHoleCheck()) {
2854 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2855 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2860 template <class T>
2861 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2862 DCHECK(FLAG_vector_ics);
2863 Register vector_register = ToRegister(instr->temp_vector());
2864 Register slot_register = VectorLoadICDescriptor::SlotRegister();
2865 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
2866 DCHECK(slot_register.is(rax));
2868 AllowDeferredHandleDereference vector_structure_check;
2869 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2870 __ Move(vector_register, vector);
2871 // No need to allocate this register.
2872 FeedbackVectorICSlot slot = instr->hydrogen()->slot();
2873 int index = vector->GetIndex(slot);
2874 __ Move(slot_register, Smi::FromInt(index));
2878 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2879 DCHECK(ToRegister(instr->context()).is(rsi));
2880 DCHECK(ToRegister(instr->global_object())
2881 .is(LoadDescriptor::ReceiverRegister()));
2882 DCHECK(ToRegister(instr->result()).is(rax));
2884 __ Move(LoadDescriptor::NameRegister(), instr->name());
2885 if (FLAG_vector_ics) {
2886 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2887 }
2888 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
2889 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode).code();
2890 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2894 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2895 Register value = ToRegister(instr->value());
2896 Handle<Cell> cell_handle = instr->hydrogen()->cell().handle();
2898 // If the cell we are storing to contains the hole it could have
2899 // been deleted from the property dictionary. In that case, we need
2900 // to update the property details in the property dictionary to mark
2901 // it as no longer deleted. We deoptimize in that case.
2902 if (instr->hydrogen()->RequiresHoleCheck()) {
2903 // We have a temp because CompareRoot might clobber kScratchRegister.
2904 Register cell = ToRegister(instr->temp());
2905 DCHECK(!value.is(cell));
2906 __ Move(cell, cell_handle, RelocInfo::CELL);
2907 __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
2908 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2909 // Store the value.
2910 __ movp(Operand(cell, 0), value);
2911 } else {
2912 // Store the value.
2913 __ Move(kScratchRegister, cell_handle, RelocInfo::CELL);
2914 __ movp(Operand(kScratchRegister, 0), value);
2916 // Cells are always rescanned, so no write barrier here.
2920 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2921 Register context = ToRegister(instr->context());
2922 Register result = ToRegister(instr->result());
2923 __ movp(result, ContextOperand(context, instr->slot_index()));
2924 if (instr->hydrogen()->RequiresHoleCheck()) {
2925 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2926 if (instr->hydrogen()->DeoptimizesOnHole()) {
2927 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2928 } else {
2929 Label is_not_hole;
2930 __ j(not_equal, &is_not_hole, Label::kNear);
2931 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2932 __ bind(&is_not_hole);
2938 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2939 Register context = ToRegister(instr->context());
2940 Register value = ToRegister(instr->value());
2942 Operand target = ContextOperand(context, instr->slot_index());
2944 Label skip_assignment;
2945 if (instr->hydrogen()->RequiresHoleCheck()) {
2946 __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
2947 if (instr->hydrogen()->DeoptimizesOnHole()) {
2948 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2949 } else {
2950 __ j(not_equal, &skip_assignment);
2951 }
2952 }
2953 __ movp(target, value);
2955 if (instr->hydrogen()->NeedsWriteBarrier()) {
2956 SmiCheck check_needed =
2957 instr->hydrogen()->value()->type().IsHeapObject()
2958 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2959 int offset = Context::SlotOffset(instr->slot_index());
2960 Register scratch = ToRegister(instr->temp());
2961 __ RecordWriteContextSlot(context,
2962 offset,
2963 value,
2964 scratch,
2965 kSaveFPRegs,
2966 EMIT_REMEMBERED_SET,
2967 check_needed);
2968 }
2970 __ bind(&skip_assignment);
2974 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2975 HObjectAccess access = instr->hydrogen()->access();
2976 int offset = access.offset();
2978 if (access.IsExternalMemory()) {
2979 Register result = ToRegister(instr->result());
2980 if (instr->object()->IsConstantOperand()) {
2981 DCHECK(result.is(rax));
2982 __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
2983 } else {
2984 Register object = ToRegister(instr->object());
2985 __ Load(result, MemOperand(object, offset), access.representation());
2986 }
2987 return;
2988 }
2990 Register object = ToRegister(instr->object());
2991 if (instr->hydrogen()->representation().IsDouble()) {
2992 DCHECK(access.IsInobject());
2993 XMMRegister result = ToDoubleRegister(instr->result());
2994 __ movsd(result, FieldOperand(object, offset));
2995 return;
2996 }
2998 Register result = ToRegister(instr->result());
2999 if (!access.IsInobject()) {
3000 __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
3001 object = result;
3002 }
3004 Representation representation = access.representation();
3005 if (representation.IsSmi() && SmiValuesAre32Bits() &&
3006 instr->hydrogen()->representation().IsInteger32()) {
3007 if (FLAG_debug_code) {
3008 Register scratch = kScratchRegister;
3009 __ Load(scratch, FieldOperand(object, offset), representation);
3010 __ AssertSmi(scratch);
3011 }
3013 // Read int value directly from upper half of the smi.
3014 STATIC_ASSERT(kSmiTag == 0);
3015 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
3016 offset += kPointerSize / 2;
3017 representation = Representation::Integer32();
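// On little-endian x64 with 32-bit Smis the payload lives in the upper half
// of the word, so bumping the offset by kPointerSize / 2 lets a plain 32-bit
// load read the integer value without untagging.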
3019 __ Load(result, FieldOperand(object, offset), representation);
3023 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3024 DCHECK(ToRegister(instr->context()).is(rsi));
3025 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3026 DCHECK(ToRegister(instr->result()).is(rax));
3028 __ Move(LoadDescriptor::NameRegister(), instr->name());
3029 if (FLAG_vector_ics) {
3030 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3031 }
3032 Handle<Code> ic =
3033 CodeFactory::LoadICInOptimizedCode(isolate(), NOT_CONTEXTUAL).code();
3034 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3038 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3039 Register function = ToRegister(instr->function());
3040 Register result = ToRegister(instr->result());
3042 // Get the prototype or initial map from the function.
3043 __ movp(result,
3044 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3046 // Check that the function has a prototype or an initial map.
3047 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3048 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
3050 // If the function does not have an initial map, we're done.
3051 Label done;
3052 __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
3053 __ j(not_equal, &done, Label::kNear);
3055 // Get the prototype from the initial map.
3056 __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
3058 // All done.
3059 __ bind(&done);
3063 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3064 Register result = ToRegister(instr->result());
3065 __ LoadRoot(result, instr->index());
3069 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3070 Register arguments = ToRegister(instr->arguments());
3071 Register result = ToRegister(instr->result());
3073 if (instr->length()->IsConstantOperand() &&
3074 instr->index()->IsConstantOperand()) {
3075 int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3076 int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3077 if (const_index >= 0 && const_index < const_length) {
3078 StackArgumentsAccessor args(arguments, const_length,
3079 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3080 __ movp(result, args.GetArgumentOperand(const_index));
3081 } else if (FLAG_debug_code) {
3082 __ int3();
3083 }
3084 } else {
3085 Register length = ToRegister(instr->length());
3086 // There are two words between the frame pointer and the last argument.
3087 // Subtracting from length accounts for one of them; add one more.
3088 if (instr->index()->IsRegister()) {
3089 __ subl(length, ToRegister(instr->index()));
3090 } else {
3091 __ subl(length, ToOperand(instr->index()));
3092 }
3093 StackArgumentsAccessor args(arguments, length,
3094 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3095 __ movp(result, args.GetArgumentOperand(0));
3100 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3101 ElementsKind elements_kind = instr->elements_kind();
3102 LOperand* key = instr->key();
3103 if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
3104 Register key_reg = ToRegister(key);
3105 Representation key_representation =
3106 instr->hydrogen()->key()->representation();
3107 if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
3108 __ SmiToInteger64(key_reg, key_reg);
3109 } else if (instr->hydrogen()->IsDehoisted()) {
3110 // Sign extend the key because it could be a 32-bit negative value and the
3111 // dehoisted address computation happens in 64 bits.
3112 __ movsxlq(key_reg, key_reg);
3113 }
3114 }
3115 Operand operand(BuildFastArrayOperand(
3116 instr->elements(),
3117 key,
3118 instr->hydrogen()->key()->representation(),
3119 elements_kind,
3120 instr->base_offset()));
3122 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3123 elements_kind == FLOAT32_ELEMENTS) {
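// Float32 elements are loaded with movss and widened to double, since the
// optimizing compiler keeps all unboxed floating-point values as doubles.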
3124 XMMRegister result(ToDoubleRegister(instr->result()));
3125 __ movss(result, operand);
3126 __ cvtss2sd(result, result);
3127 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3128 elements_kind == FLOAT64_ELEMENTS) {
3129 __ movsd(ToDoubleRegister(instr->result()), operand);
3131 Register result(ToRegister(instr->result()));
3132 switch (elements_kind) {
3133 case EXTERNAL_INT8_ELEMENTS:
3134 case INT8_ELEMENTS:
3135 __ movsxbl(result, operand);
3136 break;
3137 case EXTERNAL_UINT8_ELEMENTS:
3138 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3139 case UINT8_ELEMENTS:
3140 case UINT8_CLAMPED_ELEMENTS:
3141 __ movzxbl(result, operand);
3142 break;
3143 case EXTERNAL_INT16_ELEMENTS:
3144 case INT16_ELEMENTS:
3145 __ movsxwl(result, operand);
3146 break;
3147 case EXTERNAL_UINT16_ELEMENTS:
3148 case UINT16_ELEMENTS:
3149 __ movzxwl(result, operand);
3150 break;
3151 case EXTERNAL_INT32_ELEMENTS:
3152 case INT32_ELEMENTS:
3153 __ movl(result, operand);
3154 break;
3155 case EXTERNAL_UINT32_ELEMENTS:
3156 case UINT32_ELEMENTS:
3157 __ movl(result, operand);
3158 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3159 __ testl(result, result);
3160 DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
3161 }
3162 break;
3163 case EXTERNAL_FLOAT32_ELEMENTS:
3164 case EXTERNAL_FLOAT64_ELEMENTS:
3165 case FLOAT32_ELEMENTS:
3166 case FLOAT64_ELEMENTS:
3167 case FAST_ELEMENTS:
3168 case FAST_SMI_ELEMENTS:
3169 case FAST_DOUBLE_ELEMENTS:
3170 case FAST_HOLEY_ELEMENTS:
3171 case FAST_HOLEY_SMI_ELEMENTS:
3172 case FAST_HOLEY_DOUBLE_ELEMENTS:
3173 case DICTIONARY_ELEMENTS:
3174 case SLOPPY_ARGUMENTS_ELEMENTS:
3175 UNREACHABLE();
3176 break;
3177 }
3178 }
3179 }
3182 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3183 XMMRegister result(ToDoubleRegister(instr->result()));
3184 LOperand* key = instr->key();
3185 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
3186 instr->hydrogen()->IsDehoisted()) {
3187 // Sign extend the key because it could be a 32-bit negative value and the
3188 // dehoisted address computation happens in 64 bits.
3189 __ movsxlq(ToRegister(key), ToRegister(key));
3190 }
3191 if (instr->hydrogen()->RequiresHoleCheck()) {
3192 Operand hole_check_operand = BuildFastArrayOperand(
3193 instr->elements(),
3194 key,
3195 instr->hydrogen()->key()->representation(),
3196 FAST_DOUBLE_ELEMENTS,
3197 instr->base_offset() + sizeof(kHoleNanLower32));
3198 __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
3199 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
3202 Operand double_load_operand = BuildFastArrayOperand(
3203 instr->elements(),
3204 key,
3205 instr->hydrogen()->key()->representation(),
3206 FAST_DOUBLE_ELEMENTS,
3207 instr->base_offset());
3208 __ movsd(result, double_load_operand);
3212 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3213 HLoadKeyed* hinstr = instr->hydrogen();
3214 Register result = ToRegister(instr->result());
3215 LOperand* key = instr->key();
3216 bool requires_hole_check = hinstr->RequiresHoleCheck();
3217 Representation representation = hinstr->representation();
3218 int offset = instr->base_offset();
3220 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
3221 instr->hydrogen()->IsDehoisted()) {
3222 // Sign extend the key because it could be a 32-bit negative value and the
3223 // dehoisted address computation happens in 64 bits.
3224 __ movsxlq(ToRegister(key), ToRegister(key));
3225 }
3226 if (representation.IsInteger32() && SmiValuesAre32Bits() &&
3227 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3228 DCHECK(!requires_hole_check);
3229 if (FLAG_debug_code) {
3230 Register scratch = kScratchRegister;
3231 __ Load(scratch,
3232 BuildFastArrayOperand(instr->elements(),
3233 key,
3234 instr->hydrogen()->key()->representation(),
3235 FAST_ELEMENTS,
3236 offset),
3237 Representation::Smi());
3238 __ AssertSmi(scratch);
3239 }
3240 // Read int value directly from upper half of the smi.
3241 STATIC_ASSERT(kSmiTag == 0);
3242 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
3243 offset += kPointerSize / 2;
3244 representation = Representation::Integer32();
3245 }
3246 __ Load(result,
3247 BuildFastArrayOperand(instr->elements(), key,
3248 instr->hydrogen()->key()->representation(),
3249 FAST_ELEMENTS, offset),
3250 representation);
3252 // Check for the hole value.
3253 if (requires_hole_check) {
3254 if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3255 Condition smi = __ CheckSmi(result);
3256 DeoptimizeIf(NegateCondition(smi), instr, Deoptimizer::kNotASmi);
3258 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3259 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
3265 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3266 if (instr->is_typed_elements()) {
3267 DoLoadKeyedExternalArray(instr);
3268 } else if (instr->hydrogen()->representation().IsDouble()) {
3269 DoLoadKeyedFixedDoubleArray(instr);
3270 } else {
3271 DoLoadKeyedFixedArray(instr);
3272 }
3273 }
3276 Operand LCodeGen::BuildFastArrayOperand(
3277 LOperand* elements_pointer,
3278 LOperand* key,
3279 Representation key_representation,
3280 ElementsKind elements_kind,
3281 uint32_t offset) {
3282 Register elements_pointer_reg = ToRegister(elements_pointer);
3283 int shift_size = ElementsKindToShiftSize(elements_kind);
3284 if (key->IsConstantOperand()) {
3285 int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
3286 if (constant_value & 0xF0000000) {
3287 Abort(kArrayIndexConstantValueTooBig);
3289 return Operand(elements_pointer_reg,
3290 (constant_value << shift_size) + offset);
3291 } else {
3292 // Guaranteed by ArrayInstructionInterface::KeyedAccessIndexRequirement().
3293 DCHECK(key_representation.IsInteger32());
3295 ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3296 return Operand(elements_pointer_reg,
3297 ToRegister(key),
3298 scale_factor,
3299 offset);
3300 }
3301 }
3304 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3305 DCHECK(ToRegister(instr->context()).is(rsi));
3306 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3307 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3309 if (FLAG_vector_ics) {
3310 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3311 }
3313 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
3314 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3318 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3319 Register result = ToRegister(instr->result());
3321 if (instr->hydrogen()->from_inlined()) {
3322 __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
3323 } else {
3324 // Check for arguments adaptor frame.
3325 Label done, adapted;
3326 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3327 __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
3328 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3329 __ j(equal, &adapted, Label::kNear);
3331 // No arguments adaptor frame.
3332 __ movp(result, rbp);
3333 __ jmp(&done, Label::kNear);
3335 // Arguments adaptor frame present.
3336 __ bind(&adapted);
3337 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3339 // Result is the frame pointer for the frame if not adapted and for the real
3340 // frame below the adaptor frame if adapted.
3341 __ bind(&done);
3346 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3347 Register result = ToRegister(instr->result());
3349 Label done;
3351 // If there is no arguments adaptor frame, the number of arguments is fixed.
3352 if (instr->elements()->IsRegister()) {
3353 __ cmpp(rbp, ToRegister(instr->elements()));
3354 } else {
3355 __ cmpp(rbp, ToOperand(instr->elements()));
3356 }
3357 __ movl(result, Immediate(scope()->num_parameters()));
3358 __ j(equal, &done, Label::kNear);
3360 // Arguments adaptor frame present. Get argument length from there.
3361 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3362 __ SmiToInteger32(result,
3363 Operand(result,
3364 ArgumentsAdaptorFrameConstants::kLengthOffset));
3366 // Argument length is in result register.
3367 __ bind(&done);
3371 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3372 Register receiver = ToRegister(instr->receiver());
3373 Register function = ToRegister(instr->function());
3375 // If the receiver is null or undefined, we have to pass the global
3376 // object as a receiver to normal functions. Values have to be
3377 // passed unchanged to builtins and strict-mode functions.
3378 Label global_object, receiver_ok;
3379 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3381 if (!instr->hydrogen()->known_function()) {
3382 // Do not transform the receiver to object for strict mode
3383 // functions.
3384 __ movp(kScratchRegister,
3385 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3386 __ testb(FieldOperand(kScratchRegister,
3387 SharedFunctionInfo::kStrictModeByteOffset),
3388 Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
3389 __ j(not_equal, &receiver_ok, dist);
3391 // Do not transform the receiver to object for builtins.
3392 __ testb(FieldOperand(kScratchRegister,
3393 SharedFunctionInfo::kNativeByteOffset),
3394 Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
3395 __ j(not_equal, &receiver_ok, dist);
3396 }
3398 // Normal function. Replace undefined or null with global receiver.
3399 __ CompareRoot(receiver, Heap::kNullValueRootIndex);
3400 __ j(equal, &global_object, Label::kNear);
3401 __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
3402 __ j(equal, &global_object, Label::kNear);
3404 // The receiver should be a JS object.
3405 Condition is_smi = __ CheckSmi(receiver);
3406 DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
3407 __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
3408 DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
3410 __ jmp(&receiver_ok, Label::kNear);
3411 __ bind(&global_object);
3412 __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
3413 __ movp(receiver,
3414 Operand(receiver,
3415 Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
3416 __ movp(receiver, FieldOperand(receiver, GlobalObject::kGlobalProxyOffset));
3418 __ bind(&receiver_ok);
3422 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3423 Register receiver = ToRegister(instr->receiver());
3424 Register function = ToRegister(instr->function());
3425 Register length = ToRegister(instr->length());
3426 Register elements = ToRegister(instr->elements());
3427 DCHECK(receiver.is(rax)); // Used for parameter count.
3428 DCHECK(function.is(rdi)); // Required by InvokeFunction.
3429 DCHECK(ToRegister(instr->result()).is(rax));
3431 // Copy the arguments to this function possibly from the
3432 // adaptor frame below it.
3433 const uint32_t kArgumentsLimit = 1 * KB;
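// Cap the number of copied arguments; anything larger deoptimizes (see the
// check below) instead of risking stack overflow while pushing them one by
// one in the loop that follows.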
3434 __ cmpp(length, Immediate(kArgumentsLimit));
3435 DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
3437 __ Push(receiver);
3438 __ movp(receiver, length);
3440 // Loop through the arguments pushing them onto the execution
3441 // stack.
3442 Label invoke, loop;
3443 // length is a small non-negative integer, due to the test above.
3444 __ testl(length, length);
3445 __ j(zero, &invoke, Label::kNear);
3446 __ bind(&loop);
3447 StackArgumentsAccessor args(elements, length,
3448 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3449 __ Push(args.GetArgumentOperand(0));
3450 __ decl(length);
3451 __ j(not_zero, &loop);
3453 // Invoke the function.
3454 __ bind(&invoke);
3455 DCHECK(instr->HasPointerMap());
3456 LPointerMap* pointers = instr->pointer_map();
3457 SafepointGenerator safepoint_generator(
3458 this, pointers, Safepoint::kLazyDeopt);
3459 ParameterCount actual(rax);
3460 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3464 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3465 LOperand* argument = instr->value();
3466 EmitPushTaggedOperand(argument);
3470 void LCodeGen::DoDrop(LDrop* instr) {
3471 __ Drop(instr->count());
3475 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3476 Register result = ToRegister(instr->result());
3477 __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
3481 void LCodeGen::DoContext(LContext* instr) {
3482 Register result = ToRegister(instr->result());
3483 if (info()->IsOptimizing()) {
3484 __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
3486 // If there is no frame, the context must be in rsi.
3487 DCHECK(result.is(rsi));
3492 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3493 DCHECK(ToRegister(instr->context()).is(rsi));
3494 __ Push(rsi); // The context is the first argument.
3495 __ Push(instr->hydrogen()->pairs());
3496 __ Push(Smi::FromInt(instr->hydrogen()->flags()));
3497 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3501 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3502 int formal_parameter_count, int arity,
3503 LInstruction* instr) {
3504 bool dont_adapt_arguments =
3505 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3506 bool can_invoke_directly =
3507 dont_adapt_arguments || formal_parameter_count == arity;
3509 Register function_reg = rdi;
3510 LPointerMap* pointers = instr->pointer_map();
3512 if (can_invoke_directly) {
3514 __ movp(rsi, FieldOperand(function_reg, JSFunction::kContextOffset));
3516 // Set rax to arguments count if adaptation is not needed. Assumes that rax
3517 // is available to write to at this point.
3518 if (dont_adapt_arguments) {
3519 __ Set(rax, arity);
3520 }
3522 // Invoke function.
3523 if (function.is_identical_to(info()->closure())) {
3524 __ CallSelf();
3525 } else {
3526 __ Call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
3527 }
3529 // Set up deoptimization.
3530 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
3531 } else {
3532 // We need to adapt arguments.
3533 SafepointGenerator generator(
3534 this, pointers, Safepoint::kLazyDeopt);
3535 ParameterCount count(arity);
3536 ParameterCount expected(formal_parameter_count);
3537 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
3542 void LCodeGen::DoTailCallThroughMegamorphicCache(
3543 LTailCallThroughMegamorphicCache* instr) {
3544 Register receiver = ToRegister(instr->receiver());
3545 Register name = ToRegister(instr->name());
3546 DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
3547 DCHECK(name.is(LoadDescriptor::NameRegister()));
3548 Register scratch = rdi;
3549 DCHECK(!scratch.is(receiver) && !scratch.is(name));
3550 DCHECK(!FLAG_vector_ics ||
3551 !AreAliased(ToRegister(instr->slot()), ToRegister(instr->vector()),
3552 scratch));
3554 // Important for the tail-call.
3555 bool must_teardown_frame = NeedsEagerFrame();
3557 if (!instr->hydrogen()->is_just_miss()) {
3558 // The probe will tail call to a handler if found.
3559 DCHECK(!instr->hydrogen()->is_keyed_load());
3560 isolate()->stub_cache()->GenerateProbe(
3561 masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
3562 receiver, name, scratch, no_reg);
3565 // Tail call to miss if we ended up here.
3566 if (must_teardown_frame) __ leave();
3567 if (instr->hydrogen()->is_keyed_load()) {
3568 KeyedLoadIC::GenerateMiss(masm());
3569 } else {
3570 LoadIC::GenerateMiss(masm());
3571 }
3572 }
3575 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3576 DCHECK(ToRegister(instr->result()).is(rax));
3578 if (instr->hydrogen()->IsTailCall()) {
3579 if (NeedsEagerFrame()) __ leave();
3581 if (instr->target()->IsConstantOperand()) {
3582 LConstantOperand* target = LConstantOperand::cast(instr->target());
3583 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3584 __ jmp(code, RelocInfo::CODE_TARGET);
3585 } else {
3586 DCHECK(instr->target()->IsRegister());
3587 Register target = ToRegister(instr->target());
3588 __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3589 __ jmp(target);
3590 }
3591 } else {
3592 LPointerMap* pointers = instr->pointer_map();
3593 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3595 if (instr->target()->IsConstantOperand()) {
3596 LConstantOperand* target = LConstantOperand::cast(instr->target());
3597 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3598 generator.BeforeCall(__ CallSize(code));
3599 __ call(code, RelocInfo::CODE_TARGET);
3600 } else {
3601 DCHECK(instr->target()->IsRegister());
3602 Register target = ToRegister(instr->target());
3603 generator.BeforeCall(__ CallSize(target));
3604 __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3605 __ call(target);
3606 }
3607 generator.AfterCall();
3612 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3613 DCHECK(ToRegister(instr->function()).is(rdi));
3614 DCHECK(ToRegister(instr->result()).is(rax));
3616 if (instr->hydrogen()->pass_argument_count()) {
3617 __ Set(rax, instr->arity());
3618 }
3620 // Change context.
3621 __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3623 LPointerMap* pointers = instr->pointer_map();
3624 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3626 bool is_self_call = false;
3627 if (instr->hydrogen()->function()->IsConstant()) {
3628 Handle<JSFunction> jsfun = Handle<JSFunction>::null();
3629 HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3630 jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
3631 is_self_call = jsfun.is_identical_to(info()->closure());
3632 }
3634 if (is_self_call) {
3635 __ CallSelf();
3636 } else {
3637 Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
3638 generator.BeforeCall(__ CallSize(target));
3639 __ Call(target);
3640 }
3641 generator.AfterCall();
3645 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3646 Register input_reg = ToRegister(instr->value());
3647 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3648 Heap::kHeapNumberMapRootIndex);
3649 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
3651 Label slow, allocated, done;
3652 Register tmp = input_reg.is(rax) ? rcx : rax;
3653 Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
3655 // Preserve the value of all registers.
3656 PushSafepointRegistersScope scope(this);
3658 __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3659 // Check the sign of the argument. If the argument is positive, just
3660 // return it. We do not need to patch the stack since |input| and
3661 // |result| are the same register and |input| will be restored
3662 // unchanged by popping safepoint registers.
3663 __ testl(tmp, Immediate(HeapNumber::kSignMask));
3664 __ j(zero, &done);
3666 __ AllocateHeapNumber(tmp, tmp2, &slow);
3667 __ jmp(&allocated, Label::kNear);
3669 // Slow case: Call the runtime system to do the number allocation.
3670 __ bind(&slow);
3671 CallRuntimeFromDeferred(
3672 Runtime::kAllocateHeapNumber, 0, instr, instr->context());
3673 // Set the pointer to the new heap number in tmp.
3674 if (!tmp.is(rax)) __ movp(tmp, rax);
3675 // Restore input_reg after call to runtime.
3676 __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3678 __ bind(&allocated);
3679 __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
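// Clear the IEEE-754 sign bit of the 64-bit payload by shifting it out and
// back in, then store the result into the freshly allocated heap number.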
3680 __ shlq(tmp2, Immediate(1));
3681 __ shrq(tmp2, Immediate(1));
3682 __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
3683 __ StoreToSafepointRegisterSlot(input_reg, tmp);
3685 __ bind(&done);
3689 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3690 Register input_reg = ToRegister(instr->value());
3691 __ testl(input_reg, input_reg);
3692 Label is_positive;
3693 __ j(not_sign, &is_positive, Label::kNear);
3694 __ negl(input_reg); // Sets flags.
3695 DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
3696 __ bind(&is_positive);
3700 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
3701 Register input_reg = ToRegister(instr->value());
3702 __ testp(input_reg, input_reg);
3703 Label is_positive;
3704 __ j(not_sign, &is_positive, Label::kNear);
3705 __ negp(input_reg); // Sets flags.
3706 DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
3707 __ bind(&is_positive);
3711 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3712 // Class for deferred case.
3713 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
3714 public:
3715 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3716 : LDeferredCode(codegen), instr_(instr) { }
3717 void Generate() OVERRIDE {
3718 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3720 LInstruction* instr() OVERRIDE { return instr_; }
3721 private:
3722 LMathAbs* instr_;
3723 };
3726 DCHECK(instr->value()->Equals(instr->result()));
3727 Representation r = instr->hydrogen()->value()->representation();
3728 if (r.IsDouble()) {
3730 XMMRegister scratch = double_scratch0();
3731 XMMRegister input_reg = ToDoubleRegister(instr->value());
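// abs(x) without a memory-resident sign mask: scratch = 0.0 - x differs
// from x only in the sign bit, so ANDing the two keeps exponent and
// mantissa and clears the sign (this handles -0 correctly as well).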
3732 __ xorps(scratch, scratch);
3733 __ subsd(scratch, input_reg);
3734 __ andps(input_reg, scratch);
3735 } else if (r.IsInteger32()) {
3736 EmitIntegerMathAbs(instr);
3737 } else if (r.IsSmi()) {
3738 EmitSmiMathAbs(instr);
3739 } else { // Tagged case.
3740 DeferredMathAbsTaggedHeapNumber* deferred =
3741 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3742 Register input_reg = ToRegister(instr->value());
3744 __ JumpIfNotSmi(input_reg, deferred->entry());
3745 EmitSmiMathAbs(instr);
3746 __ bind(deferred->exit());
3751 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3752 XMMRegister xmm_scratch = double_scratch0();
3753 Register output_reg = ToRegister(instr->result());
3754 XMMRegister input_reg = ToDoubleRegister(instr->value());
3756 if (CpuFeatures::IsSupported(SSE4_1)) {
3757 CpuFeatureScope scope(masm(), SSE4_1);
3758 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3759 // Deoptimize if minus zero.
3760 __ movq(output_reg, input_reg);
3761 __ subq(output_reg, Immediate(1));
3762 DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero);
3763 }
3764 __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
3765 __ cvttsd2si(output_reg, xmm_scratch);
3766 __ cmpl(output_reg, Immediate(0x1));
3767 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3768 } else {
3769 Label negative_sign, done;
3770 // Deoptimize on unordered.
3771 __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3772 __ ucomisd(input_reg, xmm_scratch);
3773 DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
3774 __ j(below, &negative_sign, Label::kNear);
3776 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3777 // Check for negative zero.
3778 Label positive_sign;
3779 __ j(above, &positive_sign, Label::kNear);
3780 __ movmskpd(output_reg, input_reg);
3781 __ testq(output_reg, Immediate(1));
3782 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
3783 __ Set(output_reg, 0);
3784 __ jmp(&done, Label::kNear);
3785 __ bind(&positive_sign);
3786 }
3788 // Use truncating instruction (OK because input is positive).
3789 __ cvttsd2si(output_reg, input_reg);
3790 // Overflow is signalled with minint.
3791 __ cmpl(output_reg, Immediate(0x1));
3792 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3793 __ jmp(&done, Label::kNear);
3795 // Non-zero negative reaches here.
3796 __ bind(&negative_sign);
3797 // Truncate, then compare and compensate.
3798 __ cvttsd2si(output_reg, input_reg);
3799 __ Cvtlsi2sd(xmm_scratch, output_reg);
3800 __ ucomisd(input_reg, xmm_scratch);
3801 __ j(equal, &done, Label::kNear);
3802 __ subl(output_reg, Immediate(1));
3803 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3804 __ bind(&done);
3805 }
3806 }
3810 void LCodeGen::DoMathRound(LMathRound* instr) {
3811 const XMMRegister xmm_scratch = double_scratch0();
3812 Register output_reg = ToRegister(instr->result());
3813 XMMRegister input_reg = ToDoubleRegister(instr->value());
3814 XMMRegister input_temp = ToDoubleRegister(instr->temp());
3815 static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
3816 static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
3818 Label done, round_to_zero, below_one_half;
3819 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3820 __ movq(kScratchRegister, one_half);
3821 __ movq(xmm_scratch, kScratchRegister);
3822 __ ucomisd(xmm_scratch, input_reg);
3823 __ j(above, &below_one_half, Label::kNear);
3825 // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
3826 __ addsd(xmm_scratch, input_reg);
3827 __ cvttsd2si(output_reg, xmm_scratch);
3828 // Overflow is signalled with minint.
3829 __ cmpl(output_reg, Immediate(0x1));
3830 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3831 __ jmp(&done, dist);
3833 __ bind(&below_one_half);
3834 __ movq(kScratchRegister, minus_one_half);
3835 __ movq(xmm_scratch, kScratchRegister);
3836 __ ucomisd(xmm_scratch, input_reg);
3837 __ j(below_equal, &round_to_zero, Label::kNear);
3839 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
3840 // compare and compensate.
3841 __ movq(input_temp, input_reg); // Do not alter input_reg.
3842 __ subsd(input_temp, xmm_scratch);
3843 __ cvttsd2si(output_reg, input_temp);
3844 // Catch minint due to overflow, and to prevent overflow when compensating.
3845 __ cmpl(output_reg, Immediate(0x1));
3846 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3848 __ Cvtlsi2sd(xmm_scratch, output_reg);
3849 __ ucomisd(xmm_scratch, input_temp);
3850 __ j(equal, &done, dist);
3851 __ subl(output_reg, Immediate(1));
3852 // No overflow because we already ruled out minint.
3853 __ jmp(&done, dist);
3855 __ bind(&round_to_zero);
3856 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
3857 // we can ignore the difference between a result of -0 and +0.
3858 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3859 __ movq(output_reg, input_reg);
3860 __ testq(output_reg, output_reg);
3861 DeoptimizeIf(negative, instr, Deoptimizer::kMinusZero);
3862 }
3863 __ Set(output_reg, 0);
3864 __ bind(&done);
3865 }
3868 void LCodeGen::DoMathFround(LMathFround* instr) {
3869 XMMRegister input_reg = ToDoubleRegister(instr->value());
3870 XMMRegister output_reg = ToDoubleRegister(instr->result());
3871 __ cvtsd2ss(output_reg, input_reg);
3872 __ cvtss2sd(output_reg, output_reg);
3873 }
3876 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3877 XMMRegister output = ToDoubleRegister(instr->result());
3878 if (instr->value()->IsDoubleRegister()) {
3879 XMMRegister input = ToDoubleRegister(instr->value());
3880 __ sqrtsd(output, input);
3881 } else {
3882 Operand input = ToOperand(instr->value());
3883 __ sqrtsd(output, input);
3884 }
3885 }
3888 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3889 XMMRegister xmm_scratch = double_scratch0();
3890 XMMRegister input_reg = ToDoubleRegister(instr->value());
3891 DCHECK(ToDoubleRegister(instr->result()).is(input_reg));
3893 // Note that according to ECMA-262 15.8.2.13:
3894 // Math.pow(-Infinity, 0.5) == Infinity
3895 // Math.sqrt(-Infinity) == NaN
3896 Label done, sqrt;
3897 // Check base for -Infinity. According to IEEE-754, double-precision
3898 // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
3899 __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
3900 __ movq(xmm_scratch, kScratchRegister);
3901 __ ucomisd(xmm_scratch, input_reg);
3902 // Comparing -Infinity with NaN results in "unordered", which sets the
3903 // zero flag as if both were equal. However, it also sets the carry flag.
3904 __ j(not_equal, &sqrt, Label::kNear);
3905 __ j(carry, &sqrt, Label::kNear);
3906 // If input is -Infinity, return Infinity.
3907 __ xorps(input_reg, input_reg);
3908 __ subsd(input_reg, xmm_scratch);
3909 __ jmp(&done, Label::kNear);
3911 // Square root.
3912 __ bind(&sqrt);
3913 __ xorps(xmm_scratch, xmm_scratch);
3914 __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
3915 __ sqrtsd(input_reg, input_reg);
3916 __ bind(&done);
3917 }
3920 void LCodeGen::DoPower(LPower* instr) {
3921 Representation exponent_type = instr->hydrogen()->right()->representation();
3922 // Having marked this as a call, we can use any registers.
3923 // Just make sure that the input/output registers are the expected ones.
3925 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3926 DCHECK(!instr->right()->IsRegister() ||
3927 ToRegister(instr->right()).is(tagged_exponent));
3928 DCHECK(!instr->right()->IsDoubleRegister() ||
3929 ToDoubleRegister(instr->right()).is(xmm1));
3930 DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
3931 DCHECK(ToDoubleRegister(instr->result()).is(xmm3));
3933 if (exponent_type.IsSmi()) {
3934 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3935 __ CallStub(&stub);
3936 } else if (exponent_type.IsTagged()) {
3937 Label no_deopt;
3938 __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
3939 __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
3940 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
3941 __ bind(&no_deopt);
3942 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3943 __ CallStub(&stub);
3944 } else if (exponent_type.IsInteger32()) {
3945 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3946 __ CallStub(&stub);
3947 } else {
3948 DCHECK(exponent_type.IsDouble());
3949 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3950 __ CallStub(&stub);
3951 }
3952 }
3955 void LCodeGen::DoMathExp(LMathExp* instr) {
3956 XMMRegister input = ToDoubleRegister(instr->value());
3957 XMMRegister result = ToDoubleRegister(instr->result());
3958 XMMRegister temp0 = double_scratch0();
3959 Register temp1 = ToRegister(instr->temp1());
3960 Register temp2 = ToRegister(instr->temp2());
3962 MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
3963 }
3966 void LCodeGen::DoMathLog(LMathLog* instr) {
3967 DCHECK(instr->value()->Equals(instr->result()));
3968 XMMRegister input_reg = ToDoubleRegister(instr->value());
3969 XMMRegister xmm_scratch = double_scratch0();
3970 Label positive, done, zero;
3971 __ xorps(xmm_scratch, xmm_scratch);
3972 __ ucomisd(input_reg, xmm_scratch);
3973 __ j(above, &positive, Label::kNear);
3974 __ j(not_carry, &zero, Label::kNear);
3975 __ pcmpeqd(input_reg, input_reg);
3976 __ jmp(&done, Label::kNear);
3977 __ bind(&zero);
3978 ExternalReference ninf =
3979 ExternalReference::address_of_negative_infinity();
3980 Operand ninf_operand = masm()->ExternalOperand(ninf);
3981 __ movsd(input_reg, ninf_operand);
3982 __ jmp(&done, Label::kNear);
3983 __ bind(&positive);
3984 __ fldln2();
3985 __ subp(rsp, Immediate(kDoubleSize));
3986 __ movsd(Operand(rsp, 0), input_reg);
3987 __ fld_d(Operand(rsp, 0));
3988 __ fyl2x();
3989 __ fstp_d(Operand(rsp, 0));
3990 __ movsd(input_reg, Operand(rsp, 0));
3991 __ addp(rsp, Immediate(kDoubleSize));
3992 __ bind(&done);
3993 }
3996 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3997 Register input = ToRegister(instr->value());
3998 Register result = ToRegister(instr->result());
3999 Label not_zero_input;
4000 __ bsrl(result, input);
4002 __ j(not_zero, &not_zero_input);
4003 __ Set(result, 63); // 63^31 == 32
4005 __ bind(&not_zero_input);
4006 __ xorl(result, Immediate(31));  // for x in [0..31], 31^x == 31-x.
4007 }
4010 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
4011 DCHECK(ToRegister(instr->context()).is(rsi));
4012 DCHECK(ToRegister(instr->function()).is(rdi));
4013 DCHECK(instr->HasPointerMap());
4015 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
4016 if (known_function.is_null()) {
4017 LPointerMap* pointers = instr->pointer_map();
4018 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
4019 ParameterCount count(instr->arity());
4020 __ InvokeFunction(rdi, count, CALL_FUNCTION, generator);
4021 } else {
4022 CallKnownFunction(known_function,
4023 instr->hydrogen()->formal_parameter_count(),
4024 instr->arity(), instr);
4025 }
4026 }
4029 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4030 DCHECK(ToRegister(instr->context()).is(rsi));
4031 DCHECK(ToRegister(instr->function()).is(rdi));
4032 DCHECK(ToRegister(instr->result()).is(rax));
4034 int arity = instr->arity();
4035 CallFunctionFlags flags = instr->hydrogen()->function_flags();
4036 if (instr->hydrogen()->HasVectorAndSlot()) {
4037 Register slot_register = ToRegister(instr->temp_slot());
4038 Register vector_register = ToRegister(instr->temp_vector());
4039 DCHECK(slot_register.is(rdx));
4040 DCHECK(vector_register.is(rbx));
4042 AllowDeferredHandleDereference vector_structure_check;
4043 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
4044 int index = vector->GetIndex(instr->hydrogen()->slot());
4046 __ Move(vector_register, vector);
4047 __ Move(slot_register, Smi::FromInt(index));
4049 CallICState::CallType call_type =
4050 (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
4052 Handle<Code> ic =
4053 CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
4054 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4055 } else {
4056 CallFunctionStub stub(isolate(), arity, flags);
4057 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4058 }
4059 }
4062 void LCodeGen::DoCallNew(LCallNew* instr) {
4063 DCHECK(ToRegister(instr->context()).is(rsi));
4064 DCHECK(ToRegister(instr->constructor()).is(rdi));
4065 DCHECK(ToRegister(instr->result()).is(rax));
4067 __ Set(rax, instr->arity());
4068 // No cell in rbx for construct type feedback in optimized code.
4069 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
4070 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
4071 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4072 }
4075 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4076 DCHECK(ToRegister(instr->context()).is(rsi));
4077 DCHECK(ToRegister(instr->constructor()).is(rdi));
4078 DCHECK(ToRegister(instr->result()).is(rax));
4080 __ Set(rax, instr->arity());
4081 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
4082 ElementsKind kind = instr->hydrogen()->elements_kind();
4083 AllocationSiteOverrideMode override_mode =
4084 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4085 ? DISABLE_ALLOCATION_SITES
4086 : DONT_OVERRIDE;
4088 if (instr->arity() == 0) {
4089 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
4090 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4091 } else if (instr->arity() == 1) {
4092 Label done;
4093 if (IsFastPackedElementsKind(kind)) {
4094 Label packed_case;
4095 // We might need a holey array: look at the first argument (the requested
4096 // length); a nonzero length forces the holey variant.
4097 __ movp(rcx, Operand(rsp, 0));
4098 __ testp(rcx, rcx);
4099 __ j(zero, &packed_case, Label::kNear);
4101 ElementsKind holey_kind = GetHoleyElementsKind(kind);
4102 ArraySingleArgumentConstructorStub stub(isolate(),
4103 holey_kind,
4104 override_mode);
4105 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4106 __ jmp(&done, Label::kNear);
4107 __ bind(&packed_case);
4108 }
4110 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
4111 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4112 __ bind(&done);
4113 } else {
4114 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
4115 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4116 }
4117 }
4120 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4121 DCHECK(ToRegister(instr->context()).is(rsi));
4122 CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
4123 }
4126 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4127 Register function = ToRegister(instr->function());
4128 Register code_object = ToRegister(instr->code_object());
4129 __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
4130 __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
4131 }
4134 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4135 Register result = ToRegister(instr->result());
4136 Register base = ToRegister(instr->base_object());
4137 if (instr->offset()->IsConstantOperand()) {
4138 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4139 __ leap(result, Operand(base, ToInteger32(offset)));
4140 } else {
4141 Register offset = ToRegister(instr->offset());
4142 __ leap(result, Operand(base, offset, times_1, 0));
4143 }
4144 }
4147 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4148 HStoreNamedField* hinstr = instr->hydrogen();
4149 Representation representation = instr->representation();
4151 HObjectAccess access = hinstr->access();
4152 int offset = access.offset();
4154 if (access.IsExternalMemory()) {
4155 DCHECK(!hinstr->NeedsWriteBarrier());
4156 Register value = ToRegister(instr->value());
4157 if (instr->object()->IsConstantOperand()) {
4158 DCHECK(value.is(rax));
4159 LConstantOperand* object = LConstantOperand::cast(instr->object());
4160 __ store_rax(ToExternalReference(object));
4161 } else {
4162 Register object = ToRegister(instr->object());
4163 __ Store(MemOperand(object, offset), value, representation);
4164 }
4165 return;
4166 }
4168 Register object = ToRegister(instr->object());
4169 __ AssertNotSmi(object);
4171 DCHECK(!representation.IsSmi() ||
4172 !instr->value()->IsConstantOperand() ||
4173 IsInteger32Constant(LConstantOperand::cast(instr->value())));
4174 if (!FLAG_unbox_double_fields && representation.IsDouble()) {
4175 DCHECK(access.IsInobject());
4176 DCHECK(!hinstr->has_transition());
4177 DCHECK(!hinstr->NeedsWriteBarrier());
4178 XMMRegister value = ToDoubleRegister(instr->value());
4179 __ movsd(FieldOperand(object, offset), value);
4180 return;
4181 }
4183 if (hinstr->has_transition()) {
4184 Handle<Map> transition = hinstr->transition_map();
4185 AddDeprecationDependency(transition);
4186 if (!hinstr->NeedsWriteBarrierForMap()) {
4187 __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
4188 } else {
4189 Register temp = ToRegister(instr->temp());
4190 __ Move(kScratchRegister, transition);
4191 __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
4192 // Update the write barrier for the map field.
4193 __ RecordWriteForMap(object,
4194 kScratchRegister,
4195 temp,
4196 kSaveFPRegs);
4197 }
4198 }
4200 // Do the store.
4201 Register write_register = object;
4202 if (!access.IsInobject()) {
4203 write_register = ToRegister(instr->temp());
4204 __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
4205 }
4207 if (representation.IsSmi() && SmiValuesAre32Bits() &&
4208 hinstr->value()->representation().IsInteger32()) {
4209 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4210 if (FLAG_debug_code) {
4211 Register scratch = kScratchRegister;
4212 __ Load(scratch, FieldOperand(write_register, offset), representation);
4213 __ AssertSmi(scratch);
4214 }
4215 // Store int value directly to upper half of the smi.
4216 STATIC_ASSERT(kSmiTag == 0);
4217 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
4218 offset += kPointerSize / 2;
4219 representation = Representation::Integer32();
4220 }
4222 Operand operand = FieldOperand(write_register, offset);
4224 if (FLAG_unbox_double_fields && representation.IsDouble()) {
4225 DCHECK(access.IsInobject());
4226 XMMRegister value = ToDoubleRegister(instr->value());
4227 __ movsd(operand, value);
4229 } else if (instr->value()->IsRegister()) {
4230 Register value = ToRegister(instr->value());
4231 __ Store(operand, value, representation);
4232 } else {
4233 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4234 if (IsInteger32Constant(operand_value)) {
4235 DCHECK(!hinstr->NeedsWriteBarrier());
4236 int32_t value = ToInteger32(operand_value);
4237 if (representation.IsSmi()) {
4238 __ Move(operand, Smi::FromInt(value));
4240 } else {
4241 __ movl(operand, Immediate(value));
4242 }
4244 } else {
4245 Handle<Object> handle_value = ToHandle(operand_value);
4246 DCHECK(!hinstr->NeedsWriteBarrier());
4247 __ Move(operand, handle_value);
4248 }
4249 }
4251 if (hinstr->NeedsWriteBarrier()) {
4252 Register value = ToRegister(instr->value());
4253 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
4254 // Update the write barrier for the object for in-object properties.
4255 __ RecordWriteField(write_register,
4256 offset,
4257 value,
4258 temp,
4259 kSaveFPRegs,
4260 EMIT_REMEMBERED_SET,
4261 hinstr->SmiCheckForWriteBarrier(),
4262 hinstr->PointersToHereCheckForValue());
4263 }
4264 }
4267 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4268 DCHECK(ToRegister(instr->context()).is(rsi));
4269 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4270 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4272 __ Move(StoreDescriptor::NameRegister(), instr->hydrogen()->name());
4273 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
4274 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4275 }
4278 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4279 Representation representation = instr->hydrogen()->length()->representation();
4280 DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
4281 DCHECK(representation.IsSmiOrInteger32());
4283 Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal;
4284 if (instr->length()->IsConstantOperand()) {
4285 int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
4286 Register index = ToRegister(instr->index());
4287 if (representation.IsSmi()) {
4288 __ Cmp(index, Smi::FromInt(length));
4289 } else {
4290 __ cmpl(index, Immediate(length));
4291 }
4292 cc = CommuteCondition(cc);
4293 } else if (instr->index()->IsConstantOperand()) {
4294 int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
4295 if (instr->length()->IsRegister()) {
4296 Register length = ToRegister(instr->length());
4297 if (representation.IsSmi()) {
4298 __ Cmp(length, Smi::FromInt(index));
4299 } else {
4300 __ cmpl(length, Immediate(index));
4301 }
4302 } else {
4303 Operand length = ToOperand(instr->length());
4304 if (representation.IsSmi()) {
4305 __ Cmp(length, Smi::FromInt(index));
4306 } else {
4307 __ cmpl(length, Immediate(index));
4308 }
4309 }
4310 } else {
4311 Register index = ToRegister(instr->index());
4312 if (instr->length()->IsRegister()) {
4313 Register length = ToRegister(instr->length());
4314 if (representation.IsSmi()) {
4315 __ cmpp(length, index);
4316 } else {
4317 __ cmpl(length, index);
4318 }
4319 } else {
4320 Operand length = ToOperand(instr->length());
4321 if (representation.IsSmi()) {
4322 __ cmpp(length, index);
4323 } else {
4324 __ cmpl(length, index);
4325 }
4326 }
4327 }
4328 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4329 Label done;
4330 __ j(NegateCondition(cc), &done, Label::kNear);
4331 __ int3();
4332 __ bind(&done);
4333 } else {
4334 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
4335 }
4336 }
4339 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4340 ElementsKind elements_kind = instr->elements_kind();
4341 LOperand* key = instr->key();
4342 if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
4343 Register key_reg = ToRegister(key);
4344 Representation key_representation =
4345 instr->hydrogen()->key()->representation();
4346 if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
4347 __ SmiToInteger64(key_reg, key_reg);
4348 } else if (instr->hydrogen()->IsDehoisted()) {
4349 // Sign extend key because it could be a 32 bit negative value
4350 // and the dehoisted address computation happens in 64 bits
4351 __ movsxlq(key_reg, key_reg);
4352 }
4353 }
4354 Operand operand(BuildFastArrayOperand(
4355 instr->elements(),
4356 key,
4357 instr->hydrogen()->key()->representation(),
4358 elements_kind,
4359 instr->base_offset()));
4361 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4362 elements_kind == FLOAT32_ELEMENTS) {
4363 XMMRegister value(ToDoubleRegister(instr->value()));
4364 __ cvtsd2ss(value, value);
4365 __ movss(operand, value);
4366 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4367 elements_kind == FLOAT64_ELEMENTS) {
4368 __ movsd(operand, ToDoubleRegister(instr->value()));
4369 } else {
4370 Register value(ToRegister(instr->value()));
4371 switch (elements_kind) {
4372 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4373 case EXTERNAL_INT8_ELEMENTS:
4374 case EXTERNAL_UINT8_ELEMENTS:
4375 case INT8_ELEMENTS:
4376 case UINT8_ELEMENTS:
4377 case UINT8_CLAMPED_ELEMENTS:
4378 __ movb(operand, value);
4379 break;
4380 case EXTERNAL_INT16_ELEMENTS:
4381 case EXTERNAL_UINT16_ELEMENTS:
4382 case INT16_ELEMENTS:
4383 case UINT16_ELEMENTS:
4384 __ movw(operand, value);
4385 break;
4386 case EXTERNAL_INT32_ELEMENTS:
4387 case EXTERNAL_UINT32_ELEMENTS:
4388 case INT32_ELEMENTS:
4389 case UINT32_ELEMENTS:
4390 __ movl(operand, value);
4391 break;
4392 case EXTERNAL_FLOAT32_ELEMENTS:
4393 case EXTERNAL_FLOAT64_ELEMENTS:
4394 case FLOAT32_ELEMENTS:
4395 case FLOAT64_ELEMENTS:
4396 case FAST_ELEMENTS:
4397 case FAST_SMI_ELEMENTS:
4398 case FAST_DOUBLE_ELEMENTS:
4399 case FAST_HOLEY_ELEMENTS:
4400 case FAST_HOLEY_SMI_ELEMENTS:
4401 case FAST_HOLEY_DOUBLE_ELEMENTS:
4402 case DICTIONARY_ELEMENTS:
4403 case SLOPPY_ARGUMENTS_ELEMENTS:
4404 UNREACHABLE();
4405 break;
4406 }
4407 }
4408 }
4411 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4412 XMMRegister value = ToDoubleRegister(instr->value());
4413 LOperand* key = instr->key();
4414 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
4415 instr->hydrogen()->IsDehoisted()) {
4416 // Sign extend key because it could be a 32 bit negative value
4417 // and the dehoisted address computation happens in 64 bits
4418 __ movsxlq(ToRegister(key), ToRegister(key));
4419 }
4420 if (instr->NeedsCanonicalization()) {
4421 XMMRegister xmm_scratch = double_scratch0();
4422 // Turn potential sNaN value into qNaN.
4423 __ xorps(xmm_scratch, xmm_scratch);
4424 __ subsd(value, xmm_scratch);
4425 }
4427 Operand double_store_operand = BuildFastArrayOperand(
4428 instr->elements(),
4429 key,
4430 instr->hydrogen()->key()->representation(),
4431 FAST_DOUBLE_ELEMENTS,
4432 instr->base_offset());
4434 __ movsd(double_store_operand, value);
4435 }
4438 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4439 HStoreKeyed* hinstr = instr->hydrogen();
4440 LOperand* key = instr->key();
4441 int offset = instr->base_offset();
4442 Representation representation = hinstr->value()->representation();
4444 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
4445 instr->hydrogen()->IsDehoisted()) {
4446 // Sign extend key because it could be a 32 bit negative value
4447 // and the dehoisted address computation happens in 64 bits
4448 __ movsxlq(ToRegister(key), ToRegister(key));
4449 }
4450 if (representation.IsInteger32() && SmiValuesAre32Bits()) {
4451 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4452 DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
4453 if (FLAG_debug_code) {
4454 Register scratch = kScratchRegister;
4455 __ Load(scratch,
4456 BuildFastArrayOperand(instr->elements(),
4457 key,
4458 instr->hydrogen()->key()->representation(),
4459 FAST_ELEMENTS,
4460 offset),
4461 Representation::Smi());
4462 __ AssertSmi(scratch);
4464 // Store int value directly to upper half of the smi.
4465 STATIC_ASSERT(kSmiTag == 0);
4466 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
4467 offset += kPointerSize / 2;
4468 }
4470 Operand operand =
4471 BuildFastArrayOperand(instr->elements(),
4472 key,
4473 instr->hydrogen()->key()->representation(),
4474 FAST_ELEMENTS,
4475 offset);
4476 if (instr->value()->IsRegister()) {
4477 __ Store(operand, ToRegister(instr->value()), representation);
4478 } else {
4479 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4480 if (IsInteger32Constant(operand_value)) {
4481 int32_t value = ToInteger32(operand_value);
4482 if (representation.IsSmi()) {
4483 __ Move(operand, Smi::FromInt(value));
4485 } else {
4486 __ movl(operand, Immediate(value));
4487 }
4488 } else {
4489 Handle<Object> handle_value = ToHandle(operand_value);
4490 __ Move(operand, handle_value);
4491 }
4492 }
4494 if (hinstr->NeedsWriteBarrier()) {
4495 Register elements = ToRegister(instr->elements());
4496 DCHECK(instr->value()->IsRegister());
4497 Register value = ToRegister(instr->value());
4498 DCHECK(!key->IsConstantOperand());
4499 SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
4500 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4501 // Compute address of modified element and store it into key register.
4502 Register key_reg(ToRegister(key));
4503 __ leap(key_reg, operand);
4504 __ RecordWrite(elements,
4505 key_reg,
4506 value,
4507 kSaveFPRegs,
4508 EMIT_REMEMBERED_SET,
4509 check_needed,
4510 hinstr->PointersToHereCheckForValue());
4511 }
4512 }
4515 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4516 if (instr->is_typed_elements()) {
4517 DoStoreKeyedExternalArray(instr);
4518 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4519 DoStoreKeyedFixedDoubleArray(instr);
4520 } else {
4521 DoStoreKeyedFixedArray(instr);
4522 }
4523 }
4526 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4527 DCHECK(ToRegister(instr->context()).is(rsi));
4528 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4529 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4530 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4532 Handle<Code> ic =
4533 CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
4534 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4535 }
4538 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4539 Register object_reg = ToRegister(instr->object());
4541 Handle<Map> from_map = instr->original_map();
4542 Handle<Map> to_map = instr->transitioned_map();
4543 ElementsKind from_kind = instr->from_kind();
4544 ElementsKind to_kind = instr->to_kind();
4546 Label not_applicable;
4547 __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
4548 __ j(not_equal, &not_applicable);
4549 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4550 Register new_map_reg = ToRegister(instr->new_map_temp());
4551 __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
4552 __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
4554 __ RecordWriteForMap(object_reg, new_map_reg, ToRegister(instr->temp()),
4555 kDontSaveFPRegs);
4556 } else {
4557 DCHECK(object_reg.is(rax));
4558 DCHECK(ToRegister(instr->context()).is(rsi));
4559 PushSafepointRegistersScope scope(this);
4560 __ Move(rbx, to_map);
4561 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4562 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4563 __ CallStub(&stub);
4564 RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
4565 }
4566 __ bind(&not_applicable);
4567 }
4570 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4571 Register object = ToRegister(instr->object());
4572 Register temp = ToRegister(instr->temp());
4573 Label no_memento_found;
4574 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4575 DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
4576 __ bind(&no_memento_found);
4577 }
4580 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4581 DCHECK(ToRegister(instr->context()).is(rsi));
4582 DCHECK(ToRegister(instr->left()).is(rdx));
4583 DCHECK(ToRegister(instr->right()).is(rax));
4584 StringAddStub stub(isolate(),
4585 instr->hydrogen()->flags(),
4586 instr->hydrogen()->pretenure_flag());
4587 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4588 }
4591 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4592 class DeferredStringCharCodeAt FINAL : public LDeferredCode {
4593  public:
4594 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4595 : LDeferredCode(codegen), instr_(instr) { }
4596 void Generate() OVERRIDE { codegen()->DoDeferredStringCharCodeAt(instr_); }
4597 LInstruction* instr() OVERRIDE { return instr_; }
4599  private:
4600 LStringCharCodeAt* instr_;
4601 };
4603 DeferredStringCharCodeAt* deferred =
4604 new(zone()) DeferredStringCharCodeAt(this, instr);
4606 StringCharLoadGenerator::Generate(masm(),
4607 ToRegister(instr->string()),
4608 ToRegister(instr->index()),
4609 ToRegister(instr->result()),
4610 deferred->entry());
4611 __ bind(deferred->exit());
4612 }
4615 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4616 Register string = ToRegister(instr->string());
4617 Register result = ToRegister(instr->result());
4619 // TODO(3095996): Get rid of this. For now, we need to make the
4620 // result register contain a valid pointer because it is already
4621 // contained in the register pointer map.
4622 __ Set(result, 0);
4624 PushSafepointRegistersScope scope(this);
4626 // Push the index as a smi. This is safe because of the checks in
4627 // DoStringCharCodeAt above.
4628 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
4629 if (instr->index()->IsConstantOperand()) {
4630 int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4631 __ Push(Smi::FromInt(const_index));
4632 } else {
4633 Register index = ToRegister(instr->index());
4634 __ Integer32ToSmi(index, index);
4635 __ Push(index);
4636 }
4637 CallRuntimeFromDeferred(
4638 Runtime::kStringCharCodeAtRT, 2, instr, instr->context());
4640 __ SmiToInteger32(rax, rax);
4641 __ StoreToSafepointRegisterSlot(result, rax);
4642 }
4645 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4646 class DeferredStringCharFromCode FINAL : public LDeferredCode {
4647  public:
4648 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4649 : LDeferredCode(codegen), instr_(instr) { }
4650 void Generate() OVERRIDE {
4651 codegen()->DoDeferredStringCharFromCode(instr_);
4652 }
4653 LInstruction* instr() OVERRIDE { return instr_; }
4655  private:
4656 LStringCharFromCode* instr_;
4657 };
4659 DeferredStringCharFromCode* deferred =
4660 new(zone()) DeferredStringCharFromCode(this, instr);
4662 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4663 Register char_code = ToRegister(instr->char_code());
4664 Register result = ToRegister(instr->result());
4665 DCHECK(!char_code.is(result));
4667 __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
4668 __ j(above, deferred->entry());
4669 __ movsxlq(char_code, char_code);
4670 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4671 __ movp(result, FieldOperand(result,
4672 char_code, times_pointer_size,
4673 FixedArray::kHeaderSize));
4674 __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
4675 __ j(equal, deferred->entry());
4676 __ bind(deferred->exit());
4677 }
4680 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4681 Register char_code = ToRegister(instr->char_code());
4682 Register result = ToRegister(instr->result());
4684 // TODO(3095996): Get rid of this. For now, we need to make the
4685 // result register contain a valid pointer because it is already
4686 // contained in the register pointer map.
4687 __ Set(result, 0);
4689 PushSafepointRegistersScope scope(this);
4690 __ Integer32ToSmi(char_code, char_code);
4691 __ Push(char_code);
4692 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4693 __ StoreToSafepointRegisterSlot(result, rax);
4694 }
4697 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4698 LOperand* input = instr->value();
4699 DCHECK(input->IsRegister() || input->IsStackSlot());
4700 LOperand* output = instr->result();
4701 DCHECK(output->IsDoubleRegister());
4702 if (input->IsRegister()) {
4703 __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
4704 } else {
4705 __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
4706 }
4707 }
4710 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4711 LOperand* input = instr->value();
4712 LOperand* output = instr->result();
4714 __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
4715 }
4718 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4719 class DeferredNumberTagI FINAL : public LDeferredCode {
4720  public:
4721 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4722 : LDeferredCode(codegen), instr_(instr) { }
4723 void Generate() OVERRIDE {
4724 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4725 instr_->temp2(), SIGNED_INT32);
4726 }
4727 LInstruction* instr() OVERRIDE { return instr_; }
4729  private:
4730 LNumberTagI* instr_;
4731 };
4733 LOperand* input = instr->value();
4734 DCHECK(input->IsRegister() && input->Equals(instr->result()));
4735 Register reg = ToRegister(input);
4737 if (SmiValuesAre32Bits()) {
4738 __ Integer32ToSmi(reg, reg);
4739 } else {
4740 DCHECK(SmiValuesAre31Bits());
4741 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4742 __ Integer32ToSmi(reg, reg);
4743 __ j(overflow, deferred->entry());
4744 __ bind(deferred->exit());
4745 }
4746 }
4749 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4750 class DeferredNumberTagU FINAL : public LDeferredCode {
4751  public:
4752 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4753 : LDeferredCode(codegen), instr_(instr) { }
4754 void Generate() OVERRIDE {
4755 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4756 instr_->temp2(), UNSIGNED_INT32);
4757 }
4758 LInstruction* instr() OVERRIDE { return instr_; }
4760  private:
4761 LNumberTagU* instr_;
4762 };
4764 LOperand* input = instr->value();
4765 DCHECK(input->IsRegister() && input->Equals(instr->result()));
4766 Register reg = ToRegister(input);
4768 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4769 __ cmpl(reg, Immediate(Smi::kMaxValue));
4770 __ j(above, deferred->entry());
4771 __ Integer32ToSmi(reg, reg);
4772 __ bind(deferred->exit());
4773 }
4776 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4777 LOperand* value,
4778 LOperand* temp1,
4779 LOperand* temp2,
4780 IntegerSignedness signedness) {
4781 Label done, slow;
4782 Register reg = ToRegister(value);
4783 Register tmp = ToRegister(temp1);
4784 XMMRegister temp_xmm = ToDoubleRegister(temp2);
4786 // Load value into temp_xmm which will be preserved across potential call to
4787 // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
4788 // XMM registers on x64).
4789 if (signedness == SIGNED_INT32) {
4790 DCHECK(SmiValuesAre31Bits());
4791 // There was overflow, so bits 30 and 31 of the original integer
4792 // disagree. Try to allocate a heap number in new space and store
4793 // the value in there. If that fails, call the runtime system.
4794 __ SmiToInteger32(reg, reg);
4795 __ xorl(reg, Immediate(0x80000000));
4796 __ cvtlsi2sd(temp_xmm, reg);
4797 } else {
4798 DCHECK(signedness == UNSIGNED_INT32);
4799 __ LoadUint32(temp_xmm, reg);
4800 }
4802 if (FLAG_inline_new) {
4803 __ AllocateHeapNumber(reg, tmp, &slow);
4804 __ jmp(&done, kPointerSize == kInt64Size ? Label::kNear : Label::kFar);
4805 }
4807 // Slow case: Call the runtime system to do the number allocation.
4808 __ bind(&slow);
4809 {
4810 // Put a valid pointer value in the stack slot where the result
4811 // register is stored, as this register is in the pointer map, but contains
4812 // an integer value.
4813 __ Set(reg, 0);
4815 // Preserve the value of all registers.
4816 PushSafepointRegistersScope scope(this);
4818 // NumberTagIU uses the context from the frame, rather than
4819 // the environment's HContext or HInlinedContext value.
4820 // They only call Runtime::kAllocateHeapNumber.
4821 // The corresponding HChange instructions are added in a phase that does
4822 // not have easy access to the local context.
4823 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
4824 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4825 RecordSafepointWithRegisters(
4826 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4827 __ StoreToSafepointRegisterSlot(reg, rax);
4828 }
4830 // Done. Put the value in temp_xmm into the value of the allocated heap
4831 // number.
4832 __ bind(&done);
4833 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
4834 }
4837 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4838 class DeferredNumberTagD FINAL : public LDeferredCode {
4839  public:
4840 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4841 : LDeferredCode(codegen), instr_(instr) { }
4842 void Generate() OVERRIDE { codegen()->DoDeferredNumberTagD(instr_); }
4843 LInstruction* instr() OVERRIDE { return instr_; }
4845  private:
4846 LNumberTagD* instr_;
4847 };
4849 XMMRegister input_reg = ToDoubleRegister(instr->value());
4850 Register reg = ToRegister(instr->result());
4851 Register tmp = ToRegister(instr->temp());
4853 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4854 if (FLAG_inline_new) {
4855 __ AllocateHeapNumber(reg, tmp, deferred->entry());
4856 } else {
4857 __ jmp(deferred->entry());
4858 }
4859 __ bind(deferred->exit());
4860 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
4861 }
4864 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4865 // TODO(3095996): Get rid of this. For now, we need to make the
4866 // result register contain a valid pointer because it is already
4867 // contained in the register pointer map.
4868 Register reg = ToRegister(instr->result());
4869 __ Move(reg, Smi::FromInt(0));
4871 {
4872 PushSafepointRegistersScope scope(this);
4873 // NumberTagD uses the context from the frame, rather than
4874 // the environment's HContext or HInlinedContext value.
4875 // They only call Runtime::kAllocateHeapNumber.
4876 // The corresponding HChange instructions are added in a phase that does
4877 // not have easy access to the local context.
4878 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
4879 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4880 RecordSafepointWithRegisters(
4881 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4882 __ movp(kScratchRegister, rax);
4883 }
4884 __ movp(reg, kScratchRegister);
4885 }
4888 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4889 HChange* hchange = instr->hydrogen();
4890 Register input = ToRegister(instr->value());
4891 Register output = ToRegister(instr->result());
4892 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4893 hchange->value()->CheckFlag(HValue::kUint32)) {
4894 Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
4895 DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kOverflow);
4896 }
4897 __ Integer32ToSmi(output, input);
4898 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4899 !hchange->value()->CheckFlag(HValue::kUint32)) {
4900 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
4901 }
4902 }
4905 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4906 DCHECK(instr->value()->Equals(instr->result()));
4907 Register input = ToRegister(instr->value());
4908 if (instr->needs_check()) {
4909 Condition is_smi = __ CheckSmi(input);
4910 DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kNotASmi);
4911 } else {
4912 __ AssertSmi(input);
4913 }
4914 __ SmiToInteger32(input, input);
4915 }
4918 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4919 XMMRegister result_reg, NumberUntagDMode mode) {
4920 bool can_convert_undefined_to_nan =
4921 instr->hydrogen()->can_convert_undefined_to_nan();
4922 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4924 Label convert, load_smi, done;
4926 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4928 __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
4930 // Heap number map check.
4931 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
4932 Heap::kHeapNumberMapRootIndex);
4934 // On x64 it is safe to load at heap number offset before evaluating the map
4935 // check, since all heap objects are at least two words long.
4936 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
4938 if (can_convert_undefined_to_nan) {
4939 __ j(not_equal, &convert, Label::kNear);
4940 } else {
4941 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
4942 }
4944 if (deoptimize_on_minus_zero) {
4945 XMMRegister xmm_scratch = double_scratch0();
4946 __ xorps(xmm_scratch, xmm_scratch);
4947 __ ucomisd(xmm_scratch, result_reg);
4948 __ j(not_equal, &done, Label::kNear);
4949 __ movmskpd(kScratchRegister, result_reg);
4950 __ testq(kScratchRegister, Immediate(1));
4951 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
4952 }
4953 __ jmp(&done, Label::kNear);
4955 if (can_convert_undefined_to_nan) {
4956 __ bind(&convert);
4958 // Convert undefined to NaN: pcmpeqd below sets all bits, a quiet NaN.
4959 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
4960 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
4962 __ pcmpeqd(result_reg, result_reg);
4963 __ jmp(&done, Label::kNear);
4964 }
4965 } else {
4966 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4967 }
4969 // Smi to XMM conversion.
4970 __ bind(&load_smi);
4971 __ SmiToInteger32(kScratchRegister, input_reg);
4972 __ Cvtlsi2sd(result_reg, kScratchRegister);
4973 __ bind(&done);
4974 }
4977 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
4978 Register input_reg = ToRegister(instr->value());
4980 if (instr->truncating()) {
4981 Label no_heap_number, check_bools, check_false;
4983 // Heap number map check.
4984 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
4985 Heap::kHeapNumberMapRootIndex);
4986 __ j(not_equal, &no_heap_number, Label::kNear);
4987 __ TruncateHeapNumberToI(input_reg, input_reg);
4988 __ jmp(done);
4990 __ bind(&no_heap_number);
4991 // Check for Oddballs. Undefined/False is converted to zero and True to one
4992 // for truncating conversions.
4993 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
4994 __ j(not_equal, &check_bools, Label::kNear);
4995 __ Set(input_reg, 0);
4996 __ jmp(done);
4998 __ bind(&check_bools);
4999 __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
5000 __ j(not_equal, &check_false, Label::kNear);
5001 __ Set(input_reg, 1);
5002 __ jmp(done);
5004 __ bind(&check_false);
5005 __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
5006 DeoptimizeIf(not_equal, instr,
5007 Deoptimizer::kNotAHeapNumberUndefinedBoolean);
5008 __ Set(input_reg, 0);
5009 } else {
5010 XMMRegister scratch = ToDoubleRegister(instr->temp());
5011 DCHECK(!scratch.is(xmm0));
5012 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
5013 Heap::kHeapNumberMapRootIndex);
5014 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
5015 __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
5016 __ cvttsd2si(input_reg, xmm0);
5017 __ Cvtlsi2sd(scratch, input_reg);
5018 __ ucomisd(xmm0, scratch);
5019 DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
5020 DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
5021 if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
5022 __ testl(input_reg, input_reg);
5023 __ j(not_zero, done);
5024 __ movmskpd(input_reg, xmm0);
5025 __ andl(input_reg, Immediate(1));
5026 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
5027 }
5028 }
5029 }
5032 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5033 class DeferredTaggedToI FINAL : public LDeferredCode {
5034  public:
5035 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
5036 : LDeferredCode(codegen), instr_(instr) { }
5037 void Generate() OVERRIDE { codegen()->DoDeferredTaggedToI(instr_, done()); }
5038 LInstruction* instr() OVERRIDE { return instr_; }
5039  private:
5040 LTaggedToI* instr_;
5041 };
5044 LOperand* input = instr->value();
5045 DCHECK(input->IsRegister());
5046 DCHECK(input->Equals(instr->result()));
5047 Register input_reg = ToRegister(input);
5049 if (instr->hydrogen()->value()->representation().IsSmi()) {
5050 __ SmiToInteger32(input_reg, input_reg);
5051 } else {
5052 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
5053 __ JumpIfNotSmi(input_reg, deferred->entry());
5054 __ SmiToInteger32(input_reg, input_reg);
5055 __ bind(deferred->exit());
5056 }
5057 }
5060 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5061 LOperand* input = instr->value();
5062 DCHECK(input->IsRegister());
5063 LOperand* result = instr->result();
5064 DCHECK(result->IsDoubleRegister());
5066 Register input_reg = ToRegister(input);
5067 XMMRegister result_reg = ToDoubleRegister(result);
5069 HValue* value = instr->hydrogen()->value();
5070 NumberUntagDMode mode = value->representation().IsSmi()
5071 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5073 EmitNumberUntagD(instr, input_reg, result_reg, mode);
5074 }
5077 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5078 LOperand* input = instr->value();
5079 DCHECK(input->IsDoubleRegister());
5080 LOperand* result = instr->result();
5081 DCHECK(result->IsRegister());
5083 XMMRegister input_reg = ToDoubleRegister(input);
5084 Register result_reg = ToRegister(result);
5086 if (instr->truncating()) {
5087 __ TruncateDoubleToI(result_reg, input_reg);
5088 } else {
5089 Label lost_precision, is_nan, minus_zero, done;
5090 XMMRegister xmm_scratch = double_scratch0();
5091 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5092 __ DoubleToI(result_reg, input_reg, xmm_scratch,
5093 instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
5094 &is_nan, &minus_zero, dist);
5095 __ jmp(&done, dist);
5096 __ bind(&lost_precision);
5097 DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
5098 __ bind(&is_nan);
5099 DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
5100 __ bind(&minus_zero);
5101 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
5102 __ bind(&done);
5103 }
5104 }
5107 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5108 LOperand* input = instr->value();
5109 DCHECK(input->IsDoubleRegister());
5110 LOperand* result = instr->result();
5111 DCHECK(result->IsRegister());
5113 XMMRegister input_reg = ToDoubleRegister(input);
5114 Register result_reg = ToRegister(result);
5116 Label lost_precision, is_nan, minus_zero, done;
5117 XMMRegister xmm_scratch = double_scratch0();
5118 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5119 __ DoubleToI(result_reg, input_reg, xmm_scratch,
5120 instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
5121 &minus_zero, dist);
5122 __ jmp(&done, dist);
5123 __ bind(&lost_precision);
5124 DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
5125 __ bind(&is_nan);
5126 DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
5127 __ bind(&minus_zero);
5128 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
5129 __ bind(&done);
5130 __ Integer32ToSmi(result_reg, result_reg);
5131 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
5132 }
5135 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5136 LOperand* input = instr->value();
5137 Condition cc = masm()->CheckSmi(ToRegister(input));
5138 DeoptimizeIf(NegateCondition(cc), instr, Deoptimizer::kNotASmi);
5139 }
5142 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5143 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5144 LOperand* input = instr->value();
5145 Condition cc = masm()->CheckSmi(ToRegister(input));
5146 DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
5147 }
5148 }
5151 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5152 Register input = ToRegister(instr->value());
5154 __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
5156 if (instr->hydrogen()->is_interval_check()) {
5157 InstanceType first;
5158 InstanceType last;
5159 instr->hydrogen()->GetCheckInterval(&first, &last);
5161 __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
5162 Immediate(static_cast<int8_t>(first)));
5164 // If there is only one type in the interval check for equality.
5165 if (first == last) {
5166 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
5167 } else {
5168 DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
5169 // Omit check for the last type.
5170 if (last != LAST_TYPE) {
5171 __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
5172 Immediate(static_cast<int8_t>(last)));
5173 DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
5174 }
5175 }
5176 } else {
5177 uint8_t mask;
5178 uint8_t tag;
5179 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5181 if (base::bits::IsPowerOfTwo32(mask)) {
5182 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
5183 __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
5184 Immediate(mask));
5185 DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
5186 Deoptimizer::kWrongInstanceType);
5187 } else {
5188 __ movzxbl(kScratchRegister,
5189 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
5190 __ andb(kScratchRegister, Immediate(mask));
5191 __ cmpb(kScratchRegister, Immediate(tag));
5192 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
5193 }
5194 }
5195 }
5198 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5199 Register reg = ToRegister(instr->value());
5200 __ Cmp(reg, instr->hydrogen()->object().handle());
5201 DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
5202 }
5205 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5206 {
5207 PushSafepointRegistersScope scope(this);
5208 __ Push(object);
5209 __ Set(rsi, 0);
5210 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5211 RecordSafepointWithRegisters(
5212 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5214 __ testp(rax, Immediate(kSmiTagMask));
5215 }
5216 DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
5217 }
5220 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5221 class DeferredCheckMaps FINAL : public LDeferredCode {
5222  public:
5223 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5224 : LDeferredCode(codegen), instr_(instr), object_(object) {
5225 SetExit(check_maps());
5226 }
5227 void Generate() OVERRIDE {
5228 codegen()->DoDeferredInstanceMigration(instr_, object_);
5229 }
5230 Label* check_maps() { return &check_maps_; }
5231 LInstruction* instr() OVERRIDE { return instr_; }
5233  private:
5234 LCheckMaps* instr_;
5235 Label check_maps_;
5236 Register object_;
5237 };
5239 if (instr->hydrogen()->IsStabilityCheck()) {
5240 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5241 for (int i = 0; i < maps->size(); ++i) {
5242 AddStabilityDependency(maps->at(i).handle());
5243 }
5244 return;
5245 }
5247 LOperand* input = instr->value();
5248 DCHECK(input->IsRegister());
5249 Register reg = ToRegister(input);
5251 DeferredCheckMaps* deferred = NULL;
5252 if (instr->hydrogen()->HasMigrationTarget()) {
5253 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5254 __ bind(deferred->check_maps());
5255 }
5257 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5258 Label success;
5259 for (int i = 0; i < maps->size() - 1; i++) {
5260 Handle<Map> map = maps->at(i).handle();
5261 __ CompareMap(reg, map);
5262 __ j(equal, &success, Label::kNear);
5263 }
5265 Handle<Map> map = maps->at(maps->size() - 1).handle();
5266 __ CompareMap(reg, map);
5267 if (instr->hydrogen()->HasMigrationTarget()) {
5268 __ j(not_equal, deferred->entry());
5270 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
5271 }
5273 __ bind(&success);
5274 }
5277 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5278 XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
5279 XMMRegister xmm_scratch = double_scratch0();
5280 Register result_reg = ToRegister(instr->result());
5281 __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
5282 }
5285 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5286 DCHECK(instr->unclamped()->Equals(instr->result()));
5287 Register value_reg = ToRegister(instr->result());
5288 __ ClampUint8(value_reg);
5289 }
5292 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5293 DCHECK(instr->unclamped()->Equals(instr->result()));
5294 Register input_reg = ToRegister(instr->unclamped());
5295 XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
5296 XMMRegister xmm_scratch = double_scratch0();
5297 Label is_smi, done, heap_number;
5298 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5299 __ JumpIfSmi(input_reg, &is_smi, dist);
5301 // Check for heap number
5302 __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5303 factory()->heap_number_map());
5304 __ j(equal, &heap_number, Label::kNear);
5306 // Check for undefined. Undefined is converted to zero for clamping
5307 // conversions.
5308 __ Cmp(input_reg, factory()->undefined_value());
5309 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
5310 __ xorl(input_reg, input_reg);
5311 __ jmp(&done, Label::kNear);
5314 __ bind(&heap_number);
5315 __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
5316 __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
5317 __ jmp(&done, Label::kNear);
5319 // Smi case.
5320 __ bind(&is_smi);
5321 __ SmiToInteger32(input_reg, input_reg);
5322 __ ClampUint8(input_reg);
5323 __ bind(&done);
5324 }
5328 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5329 XMMRegister value_reg = ToDoubleRegister(instr->value());
5330 Register result_reg = ToRegister(instr->result());
5331 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5332 __ movq(result_reg, value_reg);
5333 __ shrq(result_reg, Immediate(32));
5334 } else {
5335 __ movd(result_reg, value_reg);
5336 }
5337 }
5340 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5341 Register hi_reg = ToRegister(instr->hi());
5342 Register lo_reg = ToRegister(instr->lo());
5343 XMMRegister result_reg = ToDoubleRegister(instr->result());
5344 XMMRegister xmm_scratch = double_scratch0();
5345 __ movd(result_reg, hi_reg);
5346 __ psllq(result_reg, 32);
5347 __ movd(xmm_scratch, lo_reg);
5348 __ orps(result_reg, xmm_scratch);
5349 }
5352 void LCodeGen::DoAllocate(LAllocate* instr) {
5353 class DeferredAllocate FINAL : public LDeferredCode {
5354  public:
5355 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5356 : LDeferredCode(codegen), instr_(instr) { }
5357 void Generate() OVERRIDE { codegen()->DoDeferredAllocate(instr_); }
5358 LInstruction* instr() OVERRIDE { return instr_; }
5359  private:
5360 LAllocate* instr_;
5361 };
5364 DeferredAllocate* deferred =
5365 new(zone()) DeferredAllocate(this, instr);
5367 Register result = ToRegister(instr->result());
5368 Register temp = ToRegister(instr->temp());
5370 // Allocate memory for the object.
5371 AllocationFlags flags = TAG_OBJECT;
5372 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5373 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5374 }
5375 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5376 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5377 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5378 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5379 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5380 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5381 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5382 }
5384 if (instr->size()->IsConstantOperand()) {
5385 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5386 if (size <= Page::kMaxRegularHeapObjectSize) {
5387 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5389 __ jmp(deferred->entry());
5390 }
5391 } else {
5392 Register size = ToRegister(instr->size());
5393 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5394 }
5396 __ bind(deferred->exit());
5398 if (instr->hydrogen()->MustPrefillWithFiller()) {
5399 if (instr->size()->IsConstantOperand()) {
5400 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5401 __ movl(temp, Immediate((size / kPointerSize) - 1));
5402 } else {
5403 temp = ToRegister(instr->size());
5404 __ sarp(temp, Immediate(kPointerSizeLog2));
5405 __ decl(temp);
5406 }
5407 Label loop;
5408 __ bind(&loop);
5409 __ Move(FieldOperand(result, temp, times_pointer_size, 0),
5410 isolate()->factory()->one_pointer_filler_map());
5411 __ decl(temp);
5412 __ j(not_zero, &loop);
5413 }
5414 }
5417 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5418 Register result = ToRegister(instr->result());
5420 // TODO(3095996): Get rid of this. For now, we need to make the
5421 // result register contain a valid pointer because it is already
5422 // contained in the register pointer map.
5423 __ Move(result, Smi::FromInt(0));
5425 PushSafepointRegistersScope scope(this);
5426 if (instr->size()->IsRegister()) {
5427 Register size = ToRegister(instr->size());
5428 DCHECK(!size.is(result));
5429 __ Integer32ToSmi(size, size);
5430 __ Push(size);
5431 } else {
5432 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5433 __ Push(Smi::FromInt(size));
5434 }
5436 int flags = 0;
5437 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5438 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5439 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5440 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
5441 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5442 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5443 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
5444 } else {
5445 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5446 }
5447 __ Push(Smi::FromInt(flags));
5449 CallRuntimeFromDeferred(
5450 Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5451 __ StoreToSafepointRegisterSlot(result, rax);
5452 }
5455 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5456 DCHECK(ToRegister(instr->value()).is(rax));
5458 CallRuntime(Runtime::kToFastProperties, 1, instr);
5459 }
5462 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5463 DCHECK(ToRegister(instr->context()).is(rsi));
5464 Label materialized;
5465 // Registers will be used as follows:
5466 // rcx = literals array.
5467 // rbx = regexp literal.
5468 // rax = regexp literal clone.
5469 int literal_offset =
5470 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5471 __ Move(rcx, instr->hydrogen()->literals());
5472 __ movp(rbx, FieldOperand(rcx, literal_offset));
5473 __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
5474 __ j(not_equal, &materialized, Label::kNear);
5476 // Create regexp literal using runtime function
5477 // Result will be in rax.
5478 __ Push(rcx);
5479 __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
5480 __ Push(instr->hydrogen()->pattern());
5481 __ Push(instr->hydrogen()->flags());
5482 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5483 __ movp(rbx, rax);
5485 __ bind(&materialized);
5486 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5487 Label allocated, runtime_allocate;
5488 __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
5489 __ jmp(&allocated, Label::kNear);
5491 __ bind(&runtime_allocate);
5493 __ Push(Smi::FromInt(size));
5494 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5497 __ bind(&allocated);
5498 // Copy the content into the newly allocated memory.
5499 // (Unroll copy loop once for better throughput).
5500 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5501 __ movp(rdx, FieldOperand(rbx, i));
5502 __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
5503 __ movp(FieldOperand(rax, i), rdx);
5504 __ movp(FieldOperand(rax, i + kPointerSize), rcx);
5506 if ((size % (2 * kPointerSize)) != 0) {
5507 __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
5508 __ movp(FieldOperand(rax, size - kPointerSize), rdx);
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
                            instr->hydrogen()->kind());
    __ Move(rbx, instr->hydrogen()->shared_info());
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ Push(rsi);
    __ Push(instr->hydrogen()->shared_info());
    __ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
                            Heap::kFalseValueRootIndex);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}

void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  LOperand* input = instr->value();
  EmitPushTaggedOperand(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}

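// Push a tagged operand (handle constant, register, or stack slot) as an
// argument for a subsequent runtime or builtin call.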
void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  DCHECK(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    __ Push(ToHandle(LConstantOperand::cast(operand)));
  } else if (operand->IsRegister()) {
    __ Push(ToRegister(operand));
  } else {
    __ Push(ToOperand(operand));
  }
}

void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Condition final_branch_condition = EmitTypeofIs(instr, input);
  if (final_branch_condition != no_condition) {
    EmitBranch(instr, final_branch_condition);
  }
}

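// Emit the comparison for typeof(input) == type_name and return the
// condition on which to branch to the true target. Returns no_condition if
// the answer is statically false; in that case the code emitted below has
// already jumped to the false target.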
Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Handle<String> type_name = instr->type_literal();
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);
  int next_block = GetNextEmittedBlock();

  Label::Distance true_distance = left_block == next_block ? Label::kNear
                                                           : Label::kFar;
  Label::Distance false_distance = right_block == next_block ? Label::kNear
                                                              : Label::kFar;
  Condition final_branch_condition = no_condition;
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label, true_distance);
    __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);

    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label, false_distance);
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for undetectable objects => true.
    __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = not_zero;

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label, true_distance);
    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
    __ j(below, false_label, false_distance);
    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ j(above, false_label, false_distance);
    // Check for undetectable objects => false.
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else {
    __ jmp(false_label, false_distance);
  }

  return final_branch_condition;
}

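// Branch depending on whether the current function was invoked as a
// constructor, i.e. through a 'new' expression.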
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->temp());

  EmitIsConstructCall(temp);
  EmitBranch(instr, equal);
}

void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ movp(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(not_equal, &check_frame_marker, Label::kNear);
  __ movp(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Smi::FromInt(StackFrame::CONSTRUCT));
}

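// Emit padding so that a lazy-deopt call can later be patched into the
// instruction stream at this point without overlapping the patch site of
// the previous lazy bailout.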
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}

void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }
  DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
}

void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}

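// Deferred (slow) path for LStackCheck: call the StackGuard runtime with
// all registers saved and record the safepoint for lazy deoptimization.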
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() OVERRIDE { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(above_equal, &done, Label::kNear);

    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(rsi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}

void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}

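// Prepare the receiver in rax for a for-in loop: deoptimize on undefined,
// null, smi, or proxy receivers, then use the enum cache if it is valid and
// fall back to a runtime call otherwise.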
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(equal, instr, Deoptimizer::kUndefined);

  Register null_value = rdi;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmpp(rax, null_value);
  DeoptimizeIf(equal, instr, Deoptimizer::kNull);

  Condition cc = masm()->CheckSmi(rax);
  DeoptimizeIf(cc, instr, Deoptimizer::kSmi);

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
  DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);

  __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ Push(rax);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kMetaMapRootIndex);
  DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
  __ bind(&use_cache);
}

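// Load the enum cache backing store for the given map, or the empty fixed
// array if the map's enum length is zero; deoptimize if there is no valid
// cache (the loaded value is a smi).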
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Cmp(result, Smi::FromInt(0));
  __ j(not_equal, &load_cache, Label::kNear);
  __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ movp(result,
          FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ movp(result,
          FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  Condition cc = masm()->CheckSmi(result);
  DeoptimizeIf(cc, instr, Deoptimizer::kNoCache);
}

void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmpp(ToRegister(instr->map()),
          FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
}

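// Deferred (slow) path for LLoadFieldByIndex: the field holds a mutable
// HeapNumber, so let the runtime return a fresh boxed copy of its value.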
void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object);
  __ Push(index);
  __ xorp(rsi, rsi);  // Pass Smi zero as a dummy context.
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(object, rax);
}

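// The index operand is smi-encoded with an extra flag in its low payload
// bit: if that bit is set, the field holds a mutable double and is loaded
// through the deferred path above. After the flag bit is shifted out, a
// non-negative index selects an in-object field and a negative index selects
// a slot in the out-of-object properties array.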
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          object_(object),
          index_(index) {
    }
    void Generate() OVERRIDE {
      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
    }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);

  Label out_of_object, done;
  __ Move(kScratchRegister, Smi::FromInt(1));
  __ testp(index, kScratchRegister);
  __ j(not_zero, deferred->entry());
  __ sarp(index, Immediate(1));

  __ SmiToInteger32(index, index);
  __ cmpl(index, Immediate(0));
  __ j(less, &out_of_object, Label::kNear);
  __ movp(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ negl(index);
  // Index is now equal to out of object property index plus 1.
  __ movp(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}

void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), context);
}

void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ Push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64