1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
9 #include "src/code-stubs.h"
10 #include "src/hydrogen-osr.h"
11 #include "src/x64/lithium-codegen-x64.h"
17 // When invoking builtins, we need to record the safepoint in the middle of
18 // the invoke instruction sequence generated by the macro assembler.
19 class SafepointGenerator V8_FINAL : public CallWrapper {
21 SafepointGenerator(LCodeGen* codegen,
22 LPointerMap* pointers,
23 Safepoint::DeoptMode mode)
27 virtual ~SafepointGenerator() {}
29 virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
31 virtual void AfterCall() const V8_OVERRIDE {
32 codegen_->RecordSafepoint(pointers_, deopt_mode_);
37 LPointerMap* pointers_;
38 Safepoint::DeoptMode deopt_mode_;
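// Instances of SafepointGenerator are passed as the CallWrapper argument to
// the macro assembler's invoke helpers, so AfterCall() runs right after the
// emitted call instruction and records a safepoint for the stored pointer
// map and deopt mode.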
44 bool LCodeGen::GenerateCode() {
45 LPhase phase("Z_Code generation", chunk());
49 // Open a frame scope to indicate that there is a frame on the stack. The
50 // MANUAL indicates that the scope shouldn't actually generate code to set up
51 // the frame (that is done in GeneratePrologue).
52 FrameScope frame_scope(masm_, StackFrame::MANUAL);
54 return GeneratePrologue() &&
56 GenerateDeferredCode() &&
57 GenerateJumpTable() &&
58 GenerateSafepointTable();
62 void LCodeGen::FinishCode(Handle<Code> code) {
64 code->set_stack_slots(GetStackSlotCount());
65 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
66 if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
67 PopulateDeoptimizationData(code);
72 void LCodeGen::MakeSureStackPagesMapped(int offset) {
73 const int kPageSize = 4 * KB;
74 for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
75 __ movp(Operand(rsp, offset), rax);
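// The loop above stores one (arbitrary) word per 4 KB page, walking down
// from just below the requested offset towards rsp. Touching every page in
// order keeps the OS stack guard mechanism satisfied (notably on Windows)
// before the prologue stores into arbitrary slots of the newly reserved
// area.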
81 void LCodeGen::SaveCallerDoubles() {
82 DCHECK(info()->saves_caller_doubles());
83 DCHECK(NeedsEagerFrame());
84 Comment(";;; Save clobbered callee double registers");
86 BitVector* doubles = chunk()->allocated_double_registers();
87 BitVector::Iterator save_iterator(doubles);
88 while (!save_iterator.Done()) {
89 __ movsd(MemOperand(rsp, count * kDoubleSize),
90 XMMRegister::FromAllocationIndex(save_iterator.Current()));
91 save_iterator.Advance();
97 void LCodeGen::RestoreCallerDoubles() {
98 DCHECK(info()->saves_caller_doubles());
99 DCHECK(NeedsEagerFrame());
100 Comment(";;; Restore clobbered callee double registers");
101 BitVector* doubles = chunk()->allocated_double_registers();
102 BitVector::Iterator save_iterator(doubles);
104 while (!save_iterator.Done()) {
105 __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
106 MemOperand(rsp, count * kDoubleSize));
107 save_iterator.Advance();
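// SaveCallerDoubles and RestoreCallerDoubles mirror each other: both walk
// the chunk's allocated double registers in the same order, so the i-th
// register is reloaded from the same rsp-relative slot (count * kDoubleSize,
// with the slot counter advanced alongside the iterator) it was stored to.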
113 bool LCodeGen::GeneratePrologue() {
114 DCHECK(is_generating());
116 if (info()->IsOptimizing()) {
117 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
120 if (strlen(FLAG_stop_at) > 0 &&
121 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
126 // Sloppy mode functions need to replace the receiver with the global proxy
127 // when called as functions (without an explicit receiver object).
128 if (info_->this_has_uses() &&
129 info_->strict_mode() == SLOPPY &&
130 !info_->is_native()) {
132 StackArgumentsAccessor args(rsp, scope()->num_parameters());
133 __ movp(rcx, args.GetReceiverOperand());
135 __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
136 __ j(not_equal, &ok, Label::kNear);
138 __ movp(rcx, GlobalObjectOperand());
139 __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
141 __ movp(args.GetReceiverOperand(), rcx);
147 info()->set_prologue_offset(masm_->pc_offset());
148 if (NeedsEagerFrame()) {
149 DCHECK(!frame_is_built_);
150 frame_is_built_ = true;
151 if (info()->IsStub()) {
154 __ Prologue(info()->IsCodePreAgingActive());
156 info()->AddNoFrameRange(0, masm_->pc_offset());
159 // Reserve space for the stack slots needed by the code.
160 int slots = GetStackSlotCount();
162 if (FLAG_debug_code) {
163 __ subp(rsp, Immediate(slots * kPointerSize));
165 MakeSureStackPagesMapped(slots * kPointerSize);
169 __ Set(kScratchRegister, kSlotsZapValue);
172 __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
175 __ j(not_zero, &loop);
178 __ subp(rsp, Immediate(slots * kPointerSize));
180 MakeSureStackPagesMapped(slots * kPointerSize);
184 if (info()->saves_caller_doubles()) {
189 // Possibly allocate a local context.
190 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
191 if (heap_slots > 0) {
192 Comment(";;; Allocate local context");
193 bool need_write_barrier = true;
194 // Argument to NewContext is the function, which is still in rdi.
195 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
196 FastNewContextStub stub(isolate(), heap_slots);
198 // Result of FastNewContextStub is always in new space.
199 need_write_barrier = false;
202 __ CallRuntime(Runtime::kNewFunctionContext, 1);
204 RecordSafepoint(Safepoint::kNoLazyDeopt);
205 // Context is returned in rax. It replaces the context passed to us.
206 // It's saved in the stack and kept live in rsi.
208 __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);
210 // Copy any necessary parameters into the context.
211 int num_parameters = scope()->num_parameters();
212 for (int i = 0; i < num_parameters; i++) {
213 Variable* var = scope()->parameter(i);
214 if (var->IsContextSlot()) {
215 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
216 (num_parameters - 1 - i) * kPointerSize;
217 // Load parameter from stack.
218 __ movp(rax, Operand(rbp, parameter_offset));
219 // Store it in the context.
220 int context_offset = Context::SlotOffset(var->index());
221 __ movp(Operand(rsi, context_offset), rax);
222 // Update the write barrier. This clobbers rax and rbx.
223 if (need_write_barrier) {
224 __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
225 } else if (FLAG_debug_code) {
227 __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear);
228 __ Abort(kExpectedNewSpaceObject);
233 Comment(";;; End allocate local context");
237 if (FLAG_trace && info()->IsOptimizing()) {
238 __ CallRuntime(Runtime::kTraceEnter, 0);
240 return !is_aborted();
244 void LCodeGen::GenerateOsrPrologue() {
245 // Generate the OSR entry prologue at the first unknown OSR value, or if there
246 // are none, at the OSR entrypoint instruction.
247 if (osr_pc_offset_ >= 0) return;
249 osr_pc_offset_ = masm()->pc_offset();
251 // Adjust the frame size, subsuming the unoptimized frame into the
// optimized frame.
253 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
255 __ subp(rsp, Immediate(slots * kPointerSize));
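// The OSR entry only has to grow the frame by the difference between what
// the optimized code needs and what the unoptimized frame already provides.
// For example, with GetStackSlotCount() == 10 and 4 unoptimized frame slots,
// the subp above reserves an additional 6 * kPointerSize bytes.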
259 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
260 if (instr->IsCall()) {
261 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
263 if (!instr->IsLazyBailout() && !instr->IsGap()) {
264 safepoints_.BumpLastLazySafepointIndex();
269 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
270 if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() &&
271 instr->hydrogen_value()->representation().IsInteger32() &&
272 instr->result()->IsRegister()) {
273 __ AssertZeroExtended(ToRegister(instr->result()));
276 if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
277 // We sign extend the dehoisted key at the definition point when the pointer
278 // size is 64-bit. For x32 port, we sign extend the dehoisted key at the use
279 // points and MustSignExtendResult is always false. We can't use
280 // STATIC_ASSERT here as the pointer size is 32-bit for x32.
281 DCHECK(kPointerSize == kInt64Size);
282 if (instr->result()->IsRegister()) {
283 Register result_reg = ToRegister(instr->result());
284 __ movsxlq(result_reg, result_reg);
286 // Sign extend the 32-bit result in the stack slots.
287 DCHECK(instr->result()->IsStackSlot());
288 Operand src = ToOperand(instr->result());
289 __ movsxlq(kScratchRegister, src);
290 __ movq(src, kScratchRegister);
296 bool LCodeGen::GenerateJumpTable() {
298 if (jump_table_.length() > 0) {
299 Comment(";;; -------------------- Jump table --------------------");
301 for (int i = 0; i < jump_table_.length(); i++) {
302 __ bind(&jump_table_[i].label);
303 Address entry = jump_table_[i].address;
304 Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
305 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
306 if (id == Deoptimizer::kNotDeoptimizationEntry) {
307 Comment(";;; jump table entry %d.", i);
309 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
311 if (jump_table_[i].needs_frame) {
312 DCHECK(!info()->saves_caller_doubles());
313 __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
314 if (needs_frame.is_bound()) {
315 __ jmp(&needs_frame);
317 __ bind(&needs_frame);
318 __ movp(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
322 // This variant of deopt can only be used with stubs. Since we don't
323 // have a function pointer to install in the stack frame that we're
324 // building, install a special marker there instead.
325 DCHECK(info()->IsStub());
326 __ Move(rsi, Smi::FromInt(StackFrame::STUB));
328 __ movp(rsi, MemOperand(rsp, kPointerSize));
329 __ call(kScratchRegister);
332 if (info()->saves_caller_doubles()) {
333 DCHECK(info()->IsStub());
334 RestoreCallerDoubles();
336 __ call(entry, RelocInfo::RUNTIME_ENTRY);
339 return !is_aborted();
343 bool LCodeGen::GenerateDeferredCode() {
344 DCHECK(is_generating());
345 if (deferred_.length() > 0) {
346 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
347 LDeferredCode* code = deferred_[i];
350 instructions_->at(code->instruction_index())->hydrogen_value();
351 RecordAndWritePosition(
352 chunk()->graph()->SourcePositionToScriptPosition(value->position()));
354 Comment(";;; <@%d,#%d> "
355 "-------------------- Deferred %s --------------------",
356 code->instruction_index(),
357 code->instr()->hydrogen_value()->id(),
358 code->instr()->Mnemonic());
359 __ bind(code->entry());
360 if (NeedsDeferredFrame()) {
361 Comment(";;; Build frame");
362 DCHECK(!frame_is_built_);
363 DCHECK(info()->IsStub());
364 frame_is_built_ = true;
365 // Build the frame in such a way that rsi isn't trashed.
366 __ pushq(rbp); // Caller's frame pointer.
367 __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
368 __ Push(Smi::FromInt(StackFrame::STUB));
369 __ leap(rbp, Operand(rsp, 2 * kPointerSize));
370 Comment(";;; Deferred code");
373 if (NeedsDeferredFrame()) {
374 __ bind(code->done());
375 Comment(";;; Destroy frame");
376 DCHECK(frame_is_built_);
377 frame_is_built_ = false;
381 __ jmp(code->exit());
385 // Deferred code is the last part of the instruction sequence. Mark
386 // the generated code as done unless we bailed out.
387 if (!is_aborted()) status_ = DONE;
388 return !is_aborted();
392 bool LCodeGen::GenerateSafepointTable() {
394 safepoints_.Emit(masm(), GetStackSlotCount());
395 return !is_aborted();
399 Register LCodeGen::ToRegister(int index) const {
400 return Register::FromAllocationIndex(index);
404 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
405 return XMMRegister::FromAllocationIndex(index);
409 Register LCodeGen::ToRegister(LOperand* op) const {
410 DCHECK(op->IsRegister());
411 return ToRegister(op->index());
415 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
416 DCHECK(op->IsDoubleRegister());
417 return ToDoubleRegister(op->index());
421 bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
422 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
426 bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
427 return op->IsConstantOperand() &&
428 chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
432 bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
433 return chunk_->LookupLiteralRepresentation(op).IsSmi();
437 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
438 return ToRepresentation(op, Representation::Integer32());
442 int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
443 const Representation& r) const {
444 HConstant* constant = chunk_->LookupConstant(op);
445 int32_t value = constant->Integer32Value();
446 if (r.IsInteger32()) return value;
447 DCHECK(SmiValuesAre31Bits() && r.IsSmiOrTagged());
448 return static_cast<int32_t>(reinterpret_cast<intptr_t>(Smi::FromInt(value)));
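// In the SmiValuesAre31Bits() case asserted above, a Smi is the integer
// value shifted left by one bit in the low 32 bits, so for example a
// constant 5 with a Smi representation comes back as 10 (0xA).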
452 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
453 HConstant* constant = chunk_->LookupConstant(op);
454 return Smi::FromInt(constant->Integer32Value());
458 double LCodeGen::ToDouble(LConstantOperand* op) const {
459 HConstant* constant = chunk_->LookupConstant(op);
460 DCHECK(constant->HasDoubleValue());
461 return constant->DoubleValue();
465 ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
466 HConstant* constant = chunk_->LookupConstant(op);
467 DCHECK(constant->HasExternalReferenceValue());
468 return constant->ExternalReferenceValue();
472 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
473 HConstant* constant = chunk_->LookupConstant(op);
474 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
475 return constant->handle(isolate());
479 static int ArgumentsOffsetWithoutFrame(int index) {
481 return -(index + 1) * kPointerSize + kPCOnStackSize;
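// Without an eager frame, parameter slots are addressed relative to rsp
// instead of rbp: index is the (negative) stack slot index of the parameter
// and the kPCOnStackSize term skips the return address pushed by the caller.
// For example, index -1 maps to Operand(rsp, kPCOnStackSize).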
485 Operand LCodeGen::ToOperand(LOperand* op) const {
486 // Does not handle registers. In X64 assembler, plain registers are not
487 // representable as an Operand.
488 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
489 if (NeedsEagerFrame()) {
490 return Operand(rbp, StackSlotOffset(op->index()));
492 // Retrieve parameter without an eager stack frame, relative to the
// stack pointer.
494 return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
499 void LCodeGen::WriteTranslation(LEnvironment* environment,
500 Translation* translation) {
501 if (environment == NULL) return;
503 // The translation includes one command per value in the environment.
504 int translation_size = environment->translation_size();
505 // The output frame height does not include the parameters.
506 int height = translation_size - environment->parameter_count();
508 WriteTranslation(environment->outer(), translation);
509 bool has_closure_id = !info()->closure().is_null() &&
510 !info()->closure().is_identical_to(environment->closure());
511 int closure_id = has_closure_id
512 ? DefineDeoptimizationLiteral(environment->closure())
513 : Translation::kSelfLiteralId;
515 switch (environment->frame_type()) {
517 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
520 translation->BeginConstructStubFrame(closure_id, translation_size);
523 DCHECK(translation_size == 1);
525 translation->BeginGetterStubFrame(closure_id);
528 DCHECK(translation_size == 2);
530 translation->BeginSetterStubFrame(closure_id);
532 case ARGUMENTS_ADAPTOR:
533 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
536 translation->BeginCompiledStubFrame();
540 int object_index = 0;
541 int dematerialized_index = 0;
542 for (int i = 0; i < translation_size; ++i) {
543 LOperand* value = environment->values()->at(i);
544 AddToTranslation(environment,
547 environment->HasTaggedValueAt(i),
548 environment->HasUint32ValueAt(i),
550 &dematerialized_index);
555 void LCodeGen::AddToTranslation(LEnvironment* environment,
556 Translation* translation,
560 int* object_index_pointer,
561 int* dematerialized_index_pointer) {
562 if (op == LEnvironment::materialization_marker()) {
563 int object_index = (*object_index_pointer)++;
564 if (environment->ObjectIsDuplicateAt(object_index)) {
565 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
566 translation->DuplicateObject(dupe_of);
569 int object_length = environment->ObjectLengthAt(object_index);
570 if (environment->ObjectIsArgumentsAt(object_index)) {
571 translation->BeginArgumentsObject(object_length);
573 translation->BeginCapturedObject(object_length);
575 int dematerialized_index = *dematerialized_index_pointer;
576 int env_offset = environment->translation_size() + dematerialized_index;
577 *dematerialized_index_pointer += object_length;
578 for (int i = 0; i < object_length; ++i) {
579 LOperand* value = environment->values()->at(env_offset + i);
580 AddToTranslation(environment,
583 environment->HasTaggedValueAt(env_offset + i),
584 environment->HasUint32ValueAt(env_offset + i),
585 object_index_pointer,
586 dematerialized_index_pointer);
591 if (op->IsStackSlot()) {
593 translation->StoreStackSlot(op->index());
594 } else if (is_uint32) {
595 translation->StoreUint32StackSlot(op->index());
597 translation->StoreInt32StackSlot(op->index());
599 } else if (op->IsDoubleStackSlot()) {
600 translation->StoreDoubleStackSlot(op->index());
601 } else if (op->IsRegister()) {
602 Register reg = ToRegister(op);
604 translation->StoreRegister(reg);
605 } else if (is_uint32) {
606 translation->StoreUint32Register(reg);
608 translation->StoreInt32Register(reg);
610 } else if (op->IsDoubleRegister()) {
611 XMMRegister reg = ToDoubleRegister(op);
612 translation->StoreDoubleRegister(reg);
613 } else if (op->IsConstantOperand()) {
614 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
615 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
616 translation->StoreLiteral(src_index);
623 void LCodeGen::CallCodeGeneric(Handle<Code> code,
624 RelocInfo::Mode mode,
626 SafepointMode safepoint_mode,
628 DCHECK(instr != NULL);
630 RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
632 // Signal that we don't inline smi code before these stubs in the
633 // optimizing code generator.
634 if (code->kind() == Code::BINARY_OP_IC ||
635 code->kind() == Code::COMPARE_IC) {
641 void LCodeGen::CallCode(Handle<Code> code,
642 RelocInfo::Mode mode,
643 LInstruction* instr) {
644 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
648 void LCodeGen::CallRuntime(const Runtime::Function* function,
651 SaveFPRegsMode save_doubles) {
652 DCHECK(instr != NULL);
653 DCHECK(instr->HasPointerMap());
655 __ CallRuntime(function, num_arguments, save_doubles);
657 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
661 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
662 if (context->IsRegister()) {
663 if (!ToRegister(context).is(rsi)) {
664 __ movp(rsi, ToRegister(context));
666 } else if (context->IsStackSlot()) {
667 __ movp(rsi, ToOperand(context));
668 } else if (context->IsConstantOperand()) {
669 HConstant* constant =
670 chunk_->LookupConstant(LConstantOperand::cast(context));
671 __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
679 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
683 LoadContextFromDeferred(context);
685 __ CallRuntimeSaveDoubles(id);
686 RecordSafepointWithRegisters(
687 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
691 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
692 Safepoint::DeoptMode mode) {
693 environment->set_has_been_used();
694 if (!environment->HasBeenRegistered()) {
695 // Physical stack frame layout:
696 // -x ............. -4 0 ..................................... y
697 // [incoming arguments] [spill slots] [pushed outgoing arguments]
699 // Layout of the environment:
700 // 0 ..................................................... size-1
701 // [parameters] [locals] [expression stack including arguments]
703 // Layout of the translation:
704 // 0 ........................................................ size - 1 + 4
705 // [expression stack including arguments] [locals] [4 words] [parameters]
706 // |>------------ translation_size ------------<|
709 int jsframe_count = 0;
710 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
712 if (e->frame_type() == JS_FUNCTION) {
716 Translation translation(&translations_, frame_count, jsframe_count, zone());
717 WriteTranslation(environment, &translation);
718 int deoptimization_index = deoptimizations_.length();
719 int pc_offset = masm()->pc_offset();
720 environment->Register(deoptimization_index,
722 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
723 deoptimizations_.Add(environment, environment->zone());
728 void LCodeGen::DeoptimizeIf(Condition cc,
729 LEnvironment* environment,
730 Deoptimizer::BailoutType bailout_type) {
731 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
732 DCHECK(environment->HasBeenRegistered());
733 int id = environment->deoptimization_index();
734 DCHECK(info()->IsOptimizing() || info()->IsStub());
736 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
738 Abort(kBailoutWasNotPrepared);
742 if (DeoptEveryNTimes()) {
743 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
747 Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
748 __ movl(rax, count_operand);
749 __ subl(rax, Immediate(1));
750 __ j(not_zero, &no_deopt, Label::kNear);
751 if (FLAG_trap_on_deopt) __ int3();
752 __ movl(rax, Immediate(FLAG_deopt_every_n_times));
753 __ movl(count_operand, rax);
756 DCHECK(frame_is_built_);
757 __ call(entry, RelocInfo::RUNTIME_ENTRY);
759 __ movl(count_operand, rax);
764 if (info()->ShouldTrapOnDeopt()) {
766 if (cc != no_condition) {
767 __ j(NegateCondition(cc), &done, Label::kNear);
773 DCHECK(info()->IsStub() || frame_is_built_);
774 // Go through jump table if we need to handle condition, build frame, or
775 // restore caller doubles.
776 if (cc == no_condition && frame_is_built_ &&
777 !info()->saves_caller_doubles()) {
778 __ call(entry, RelocInfo::RUNTIME_ENTRY);
780 // We often have several deopts to the same entry; reuse the last
781 // jump entry if this is the case.
782 if (jump_table_.is_empty() ||
783 jump_table_.last().address != entry ||
784 jump_table_.last().needs_frame != !frame_is_built_ ||
785 jump_table_.last().bailout_type != bailout_type) {
786 Deoptimizer::JumpTableEntry table_entry(entry,
789 jump_table_.Add(table_entry, zone());
791 if (cc == no_condition) {
792 __ jmp(&jump_table_.last().label);
794 __ j(cc, &jump_table_.last().label);
800 void LCodeGen::DeoptimizeIf(Condition cc,
801 LEnvironment* environment) {
802 Deoptimizer::BailoutType bailout_type = info()->IsStub()
804 : Deoptimizer::EAGER;
805 DeoptimizeIf(cc, environment, bailout_type);
809 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
810 int length = deoptimizations_.length();
811 if (length == 0) return;
812 Handle<DeoptimizationInputData> data =
813 DeoptimizationInputData::New(isolate(), length, 0, TENURED);
815 Handle<ByteArray> translations =
816 translations_.CreateByteArray(isolate()->factory());
817 data->SetTranslationByteArray(*translations);
818 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
819 data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
820 if (info_->IsOptimizing()) {
821 // Reference to shared function info does not change between phases.
822 AllowDeferredHandleDereference allow_handle_dereference;
823 data->SetSharedFunctionInfo(*info_->shared_info());
825 data->SetSharedFunctionInfo(Smi::FromInt(0));
828 Handle<FixedArray> literals =
829 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
830 { AllowDeferredHandleDereference copy_handles;
831 for (int i = 0; i < deoptimization_literals_.length(); i++) {
832 literals->set(i, *deoptimization_literals_[i]);
834 data->SetLiteralArray(*literals);
837 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
838 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
840 // Populate the deoptimization entries.
841 for (int i = 0; i < length; i++) {
842 LEnvironment* env = deoptimizations_[i];
843 data->SetAstId(i, env->ast_id());
844 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
845 data->SetArgumentsStackHeight(i,
846 Smi::FromInt(env->arguments_stack_height()));
847 data->SetPc(i, Smi::FromInt(env->pc_offset()));
849 code->set_deoptimization_data(*data);
853 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
854 int result = deoptimization_literals_.length();
855 for (int i = 0; i < deoptimization_literals_.length(); ++i) {
856 if (deoptimization_literals_[i].is_identical_to(literal)) return i;
858 deoptimization_literals_.Add(literal, zone());
863 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
864 DCHECK(deoptimization_literals_.length() == 0);
866 const ZoneList<Handle<JSFunction> >* inlined_closures =
867 chunk()->inlined_closures();
869 for (int i = 0, length = inlined_closures->length();
872 DefineDeoptimizationLiteral(inlined_closures->at(i));
875 inlined_function_count_ = deoptimization_literals_.length();
879 void LCodeGen::RecordSafepointWithLazyDeopt(
880 LInstruction* instr, SafepointMode safepoint_mode, int argc) {
881 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
882 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
884 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
885 RecordSafepointWithRegisters(
886 instr->pointer_map(), argc, Safepoint::kLazyDeopt);
891 void LCodeGen::RecordSafepoint(
892 LPointerMap* pointers,
893 Safepoint::Kind kind,
895 Safepoint::DeoptMode deopt_mode) {
896 DCHECK(kind == expected_safepoint_kind_);
898 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
900 Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
901 kind, arguments, deopt_mode);
902 for (int i = 0; i < operands->length(); i++) {
903 LOperand* pointer = operands->at(i);
904 if (pointer->IsStackSlot()) {
905 safepoint.DefinePointerSlot(pointer->index(), zone());
906 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
907 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
913 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
914 Safepoint::DeoptMode deopt_mode) {
915 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
919 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
920 LPointerMap empty_pointers(zone());
921 RecordSafepoint(&empty_pointers, deopt_mode);
925 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
927 Safepoint::DeoptMode deopt_mode) {
928 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
932 void LCodeGen::RecordAndWritePosition(int position) {
933 if (position == RelocInfo::kNoPosition) return;
934 masm()->positions_recorder()->RecordPosition(position);
935 masm()->positions_recorder()->WriteRecordedPositions();
939 static const char* LabelType(LLabel* label) {
940 if (label->is_loop_header()) return " (loop header)";
941 if (label->is_osr_entry()) return " (OSR entry)";
946 void LCodeGen::DoLabel(LLabel* label) {
947 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
948 current_instruction_,
949 label->hydrogen_value()->id(),
952 __ bind(label->label());
953 current_block_ = label->block_id();
958 void LCodeGen::DoParallelMove(LParallelMove* move) {
959 resolver_.Resolve(move);
963 void LCodeGen::DoGap(LGap* gap) {
964 for (int i = LGap::FIRST_INNER_POSITION;
965 i <= LGap::LAST_INNER_POSITION;
967 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
968 LParallelMove* move = gap->GetParallelMove(inner_pos);
969 if (move != NULL) DoParallelMove(move);
974 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
979 void LCodeGen::DoParameter(LParameter* instr) {
984 void LCodeGen::DoCallStub(LCallStub* instr) {
985 DCHECK(ToRegister(instr->context()).is(rsi));
986 DCHECK(ToRegister(instr->result()).is(rax));
987 switch (instr->hydrogen()->major_key()) {
988 case CodeStub::RegExpExec: {
989 RegExpExecStub stub(isolate());
990 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
993 case CodeStub::SubString: {
994 SubStringStub stub(isolate());
995 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
998 case CodeStub::StringCompare: {
999 StringCompareStub stub(isolate());
1000 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1009 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1010 GenerateOsrPrologue();
1014 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
1015 Register dividend = ToRegister(instr->dividend());
1016 int32_t divisor = instr->divisor();
1017 DCHECK(dividend.is(ToRegister(instr->result())));
1019 // Theoretically, a variation of the branch-free code for integer division by
1020 // a power of 2 (calculating the remainder via an additional multiplication
1021 // (which gets simplified to an 'and') and subtraction) should be faster, and
1022 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1023 // indicate that positive dividends are heavily favored, so the branching
1024 // version performs better.
1025 HMod* hmod = instr->hydrogen();
1026 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1027 Label dividend_is_not_negative, done;
1028 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1029 __ testl(dividend, dividend);
1030 __ j(not_sign, &dividend_is_not_negative, Label::kNear);
1031 // Note that this is correct even for kMinInt operands.
1033 __ andl(dividend, Immediate(mask));
1035 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1036 DeoptimizeIf(zero, instr->environment());
1038 __ jmp(&done, Label::kNear);
1041 __ bind(&dividend_is_not_negative);
1042 __ andl(dividend, Immediate(mask));
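// On the negative path the dividend is negated, masked with |divisor| - 1
// (the first andl in this function) and negated back, so the remainder keeps
// the sign of the dividend as JavaScript requires. For example, -13 % 4:
// 13 & 3 == 1, negated back to -1. A zero result on that path would be -0,
// which is why kBailoutOnMinusZero deoptimizes on the zero flag.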
1047 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1048 Register dividend = ToRegister(instr->dividend());
1049 int32_t divisor = instr->divisor();
1050 DCHECK(ToRegister(instr->result()).is(rax));
1053 DeoptimizeIf(no_condition, instr->environment());
1057 __ TruncatingDiv(dividend, Abs(divisor));
1058 __ imull(rdx, rdx, Immediate(Abs(divisor)));
1059 __ movl(rax, dividend);
1062 // Check for negative zero.
1063 HMod* hmod = instr->hydrogen();
1064 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1065 Label remainder_not_zero;
1066 __ j(not_zero, &remainder_not_zero, Label::kNear);
1067 __ cmpl(dividend, Immediate(0));
1068 DeoptimizeIf(less, instr->environment());
1069 __ bind(&remainder_not_zero);
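// TruncatingDiv leaves dividend / |divisor| in rdx (via the usual
// magic-number multiplication), so the sequence above boils down to
// rax = dividend - |divisor| * (dividend / |divisor|). For example, 7 % -3:
// the truncating quotient of 7 / 3 is 2, 2 * 3 == 6 and 7 - 6 == 1, which
// matches JavaScript. The minus-zero check only deoptimizes when the
// remainder is zero and the dividend was negative.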
1074 void LCodeGen::DoModI(LModI* instr) {
1075 HMod* hmod = instr->hydrogen();
1077 Register left_reg = ToRegister(instr->left());
1078 DCHECK(left_reg.is(rax));
1079 Register right_reg = ToRegister(instr->right());
1080 DCHECK(!right_reg.is(rax));
1081 DCHECK(!right_reg.is(rdx));
1082 Register result_reg = ToRegister(instr->result());
1083 DCHECK(result_reg.is(rdx));
1086 // Check for x % 0, idiv would signal a divide error. We have to
1087 // deopt in this case because we can't return a NaN.
1088 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1089 __ testl(right_reg, right_reg);
1090 DeoptimizeIf(zero, instr->environment());
1093 // Check for kMinInt % -1, idiv would signal a divide error. We
1094 // have to deopt if we care about -0, because we can't return that.
1095 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1096 Label no_overflow_possible;
1097 __ cmpl(left_reg, Immediate(kMinInt));
1098 __ j(not_zero, &no_overflow_possible, Label::kNear);
1099 __ cmpl(right_reg, Immediate(-1));
1100 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1101 DeoptimizeIf(equal, instr->environment());
1103 __ j(not_equal, &no_overflow_possible, Label::kNear);
1104 __ Set(result_reg, 0);
1105 __ jmp(&done, Label::kNear);
1107 __ bind(&no_overflow_possible);
1110 // Sign extend dividend in eax into edx:eax, since we are using only the low
1111 // 32 bits of the values.
1114 // If we care about -0, test if the dividend is <0 and the result is 0.
1115 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1116 Label positive_left;
1117 __ testl(left_reg, left_reg);
1118 __ j(not_sign, &positive_left, Label::kNear);
1119 __ idivl(right_reg);
1120 __ testl(result_reg, result_reg);
1121 DeoptimizeIf(zero, instr->environment());
1122 __ jmp(&done, Label::kNear);
1123 __ bind(&positive_left);
1125 __ idivl(right_reg);
1130 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1131 Register dividend = ToRegister(instr->dividend());
1132 int32_t divisor = instr->divisor();
1133 DCHECK(dividend.is(ToRegister(instr->result())));
1135 // If the divisor is positive, things are easy: There can be no deopts and we
1136 // can simply do an arithmetic right shift.
1137 if (divisor == 1) return;
1138 int32_t shift = WhichPowerOf2Abs(divisor);
1140 __ sarl(dividend, Immediate(shift));
1144 // If the divisor is negative, we have to negate and handle edge cases.
1146 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1147 DeoptimizeIf(zero, instr->environment());
1150 // Dividing by -1 is basically negation, unless we overflow.
1151 if (divisor == -1) {
1152 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1153 DeoptimizeIf(overflow, instr->environment());
1158 // If the negation could not overflow, simply shifting is OK.
1159 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1160 __ sarl(dividend, Immediate(shift));
1164 Label not_kmin_int, done;
1165 __ j(no_overflow, &not_kmin_int, Label::kNear);
1166 __ movl(dividend, Immediate(kMinInt / divisor));
1167 __ jmp(&done, Label::kNear);
1168 __ bind(&not_kmin_int);
1169 __ sarl(dividend, Immediate(shift));
1174 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1175 Register dividend = ToRegister(instr->dividend());
1176 int32_t divisor = instr->divisor();
1177 DCHECK(ToRegister(instr->result()).is(rdx));
1180 DeoptimizeIf(no_condition, instr->environment());
1184 // Check for (0 / -x) that will produce negative zero.
1185 HMathFloorOfDiv* hdiv = instr->hydrogen();
1186 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1187 __ testl(dividend, dividend);
1188 DeoptimizeIf(zero, instr->environment());
1191 // Easy case: We need no dynamic check for the dividend and the flooring
1192 // division is the same as the truncating division.
1193 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1194 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1195 __ TruncatingDiv(dividend, Abs(divisor));
1196 if (divisor < 0) __ negl(rdx);
1200 // In the general case we may need to adjust before and after the truncating
1201 // division to get a flooring division.
1202 Register temp = ToRegister(instr->temp3());
1203 DCHECK(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
1204 Label needs_adjustment, done;
1205 __ cmpl(dividend, Immediate(0));
1206 __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
1207 __ TruncatingDiv(dividend, Abs(divisor));
1208 if (divisor < 0) __ negl(rdx);
1209 __ jmp(&done, Label::kNear);
1210 __ bind(&needs_adjustment);
1211 __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
1212 __ TruncatingDiv(temp, Abs(divisor));
1213 if (divisor < 0) __ negl(rdx);
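// The adjustment turns the truncating TruncatingDiv into a flooring
// division: when the dividend's sign means truncation would round towards
// zero instead of down, the dividend is biased by +/-1 first and the
// quotient in rdx is decremented afterwards. For example, -7 floor-div 2:
// temp = -6, truncating quotient -3, decremented to -4.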
1219 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1220 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1221 HBinaryOperation* hdiv = instr->hydrogen();
1222 Register dividend = ToRegister(instr->dividend());
1223 Register divisor = ToRegister(instr->divisor());
1224 Register remainder = ToRegister(instr->temp());
1225 Register result = ToRegister(instr->result());
1226 DCHECK(dividend.is(rax));
1227 DCHECK(remainder.is(rdx));
1228 DCHECK(result.is(rax));
1229 DCHECK(!divisor.is(rax));
1230 DCHECK(!divisor.is(rdx));
1233 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1234 __ testl(divisor, divisor);
1235 DeoptimizeIf(zero, instr->environment());
1238 // Check for (0 / -x) that will produce negative zero.
1239 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1240 Label dividend_not_zero;
1241 __ testl(dividend, dividend);
1242 __ j(not_zero, &dividend_not_zero, Label::kNear);
1243 __ testl(divisor, divisor);
1244 DeoptimizeIf(sign, instr->environment());
1245 __ bind(&dividend_not_zero);
1248 // Check for (kMinInt / -1).
1249 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1250 Label dividend_not_min_int;
1251 __ cmpl(dividend, Immediate(kMinInt));
1252 __ j(not_zero, &dividend_not_min_int, Label::kNear);
1253 __ cmpl(divisor, Immediate(-1));
1254 DeoptimizeIf(zero, instr->environment());
1255 __ bind(&dividend_not_min_int);
1258 // Sign extend to rdx (= remainder).
1263 __ testl(remainder, remainder);
1264 __ j(zero, &done, Label::kNear);
1265 __ xorl(remainder, divisor);
1266 __ sarl(remainder, Immediate(31));
1267 __ addl(result, remainder);
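// A zero remainder branches straight to done above. Otherwise xorl followed
// by sarl(31) yields -1 exactly when the nonzero remainder and the divisor
// have opposite signs (i.e. truncation rounded the wrong way) and 0
// otherwise, so adding it to the quotient converts truncation into flooring.
// For example, -7 / 2: idivl gives quotient -3 and remainder -1; -1 ^ 2 is
// negative, the shift gives -1, and -3 + -1 == -4.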
1272 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1273 Register dividend = ToRegister(instr->dividend());
1274 int32_t divisor = instr->divisor();
1275 Register result = ToRegister(instr->result());
1276 DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
1277 DCHECK(!result.is(dividend));
1279 // Check for (0 / -x) that will produce negative zero.
1280 HDiv* hdiv = instr->hydrogen();
1281 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1282 __ testl(dividend, dividend);
1283 DeoptimizeIf(zero, instr->environment());
1285 // Check for (kMinInt / -1).
1286 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1287 __ cmpl(dividend, Immediate(kMinInt));
1288 DeoptimizeIf(zero, instr->environment());
1290 // Deoptimize if remainder will not be 0.
1291 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1292 divisor != 1 && divisor != -1) {
1293 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1294 __ testl(dividend, Immediate(mask));
1295 DeoptimizeIf(not_zero, instr->environment());
1297 __ Move(result, dividend);
1298 int32_t shift = WhichPowerOf2Abs(divisor);
1300 // The arithmetic shift is always OK; the 'if' is an optimization only.
1301 if (shift > 1) __ sarl(result, Immediate(31));
1302 __ shrl(result, Immediate(32 - shift));
1303 __ addl(result, dividend);
1304 __ sarl(result, Immediate(shift));
1306 if (divisor < 0) __ negl(result);
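// The shift sequence rounds towards zero: sarl(31) produces all ones only
// for a negative dividend, shrl(32 - shift) turns that into |divisor| - 1,
// and adding it to the dividend before the final arithmetic shift gives a
// truncating quotient. For example, -7 / 4 (shift == 2): 0xFFFFFFFF >> 30
// == 3, -7 + 3 == -4, -4 >> 2 == -1; negl then flips the sign for negative
// divisors.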
1310 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1311 Register dividend = ToRegister(instr->dividend());
1312 int32_t divisor = instr->divisor();
1313 DCHECK(ToRegister(instr->result()).is(rdx));
1316 DeoptimizeIf(no_condition, instr->environment());
1320 // Check for (0 / -x) that will produce negative zero.
1321 HDiv* hdiv = instr->hydrogen();
1322 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1323 __ testl(dividend, dividend);
1324 DeoptimizeIf(zero, instr->environment());
1327 __ TruncatingDiv(dividend, Abs(divisor));
1328 if (divisor < 0) __ negl(rdx);
1330 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1332 __ imull(rax, rax, Immediate(divisor));
1333 __ subl(rax, dividend);
1334 DeoptimizeIf(not_equal, instr->environment());
1339 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1340 void LCodeGen::DoDivI(LDivI* instr) {
1341 HBinaryOperation* hdiv = instr->hydrogen();
1342 Register dividend = ToRegister(instr->dividend());
1343 Register divisor = ToRegister(instr->divisor());
1344 Register remainder = ToRegister(instr->temp());
1345 DCHECK(dividend.is(rax));
1346 DCHECK(remainder.is(rdx));
1347 DCHECK(ToRegister(instr->result()).is(rax));
1348 DCHECK(!divisor.is(rax));
1349 DCHECK(!divisor.is(rdx));
1352 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1353 __ testl(divisor, divisor);
1354 DeoptimizeIf(zero, instr->environment());
1357 // Check for (0 / -x) that will produce negative zero.
1358 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1359 Label dividend_not_zero;
1360 __ testl(dividend, dividend);
1361 __ j(not_zero, &dividend_not_zero, Label::kNear);
1362 __ testl(divisor, divisor);
1363 DeoptimizeIf(sign, instr->environment());
1364 __ bind(&dividend_not_zero);
1367 // Check for (kMinInt / -1).
1368 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1369 Label dividend_not_min_int;
1370 __ cmpl(dividend, Immediate(kMinInt));
1371 __ j(not_zero, &dividend_not_min_int, Label::kNear);
1372 __ cmpl(divisor, Immediate(-1));
1373 DeoptimizeIf(zero, instr->environment());
1374 __ bind(&dividend_not_min_int);
1377 // Sign extend to rdx (= remainder).
1381 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1382 // Deoptimize if remainder is not 0.
1383 __ testl(remainder, remainder);
1384 DeoptimizeIf(not_zero, instr->environment());
1389 void LCodeGen::DoMulI(LMulI* instr) {
1390 Register left = ToRegister(instr->left());
1391 LOperand* right = instr->right();
1393 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1394 if (instr->hydrogen_value()->representation().IsSmi()) {
1395 __ movp(kScratchRegister, left);
1397 __ movl(kScratchRegister, left);
1402 instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1403 if (right->IsConstantOperand()) {
1404 int32_t right_value = ToInteger32(LConstantOperand::cast(right));
1405 if (right_value == -1) {
1407 } else if (right_value == 0) {
1408 __ xorl(left, left);
1409 } else if (right_value == 2) {
1410 __ addl(left, left);
1411 } else if (!can_overflow) {
1412 // If the multiplication is known not to overflow, we can use
1413 // operations such as lea and shl that don't set the overflow flag.
1415 switch (right_value) {
1420 __ leal(left, Operand(left, left, times_2, 0));
1423 __ shll(left, Immediate(2));
1426 __ leal(left, Operand(left, left, times_4, 0));
1429 __ shll(left, Immediate(3));
1432 __ leal(left, Operand(left, left, times_8, 0));
1435 __ shll(left, Immediate(4));
1438 __ imull(left, left, Immediate(right_value));
1442 __ imull(left, left, Immediate(right_value));
1444 } else if (right->IsStackSlot()) {
1445 if (instr->hydrogen_value()->representation().IsSmi()) {
1446 __ SmiToInteger64(left, left);
1447 __ imulp(left, ToOperand(right));
1449 __ imull(left, ToOperand(right));
1452 if (instr->hydrogen_value()->representation().IsSmi()) {
1453 __ SmiToInteger64(left, left);
1454 __ imulp(left, ToRegister(right));
1456 __ imull(left, ToRegister(right));
1461 DeoptimizeIf(overflow, instr->environment());
1464 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1465 // Bail out if the result is supposed to be negative zero.
1467 if (instr->hydrogen_value()->representation().IsSmi()) {
1468 __ testp(left, left);
1470 __ testl(left, left);
1472 __ j(not_zero, &done, Label::kNear);
1473 if (right->IsConstantOperand()) {
1474 // Constant can't be represented as a 32-bit Smi due to immediate size
// limitations.
1476 DCHECK(SmiValuesAre32Bits()
1477 ? !instr->hydrogen_value()->representation().IsSmi()
1478 : SmiValuesAre31Bits());
1479 if (ToInteger32(LConstantOperand::cast(right)) < 0) {
1480 DeoptimizeIf(no_condition, instr->environment());
1481 } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
1482 __ cmpl(kScratchRegister, Immediate(0));
1483 DeoptimizeIf(less, instr->environment());
1485 } else if (right->IsStackSlot()) {
1486 if (instr->hydrogen_value()->representation().IsSmi()) {
1487 __ orp(kScratchRegister, ToOperand(right));
1489 __ orl(kScratchRegister, ToOperand(right));
1491 DeoptimizeIf(sign, instr->environment());
1493 // Test the non-zero operand for negative sign.
1494 if (instr->hydrogen_value()->representation().IsSmi()) {
1495 __ orp(kScratchRegister, ToRegister(right));
1497 __ orl(kScratchRegister, ToRegister(right));
1499 DeoptimizeIf(sign, instr->environment());
1506 void LCodeGen::DoBitI(LBitI* instr) {
1507 LOperand* left = instr->left();
1508 LOperand* right = instr->right();
1509 DCHECK(left->Equals(instr->result()));
1510 DCHECK(left->IsRegister());
1512 if (right->IsConstantOperand()) {
1513 int32_t right_operand =
1514 ToRepresentation(LConstantOperand::cast(right),
1515 instr->hydrogen()->right()->representation());
1516 switch (instr->op()) {
1517 case Token::BIT_AND:
1518 __ andl(ToRegister(left), Immediate(right_operand));
1521 __ orl(ToRegister(left), Immediate(right_operand));
1523 case Token::BIT_XOR:
1524 if (right_operand == int32_t(~0)) {
1525 __ notl(ToRegister(left));
1527 __ xorl(ToRegister(left), Immediate(right_operand));
1534 } else if (right->IsStackSlot()) {
1535 switch (instr->op()) {
1536 case Token::BIT_AND:
1537 if (instr->IsInteger32()) {
1538 __ andl(ToRegister(left), ToOperand(right));
1540 __ andp(ToRegister(left), ToOperand(right));
1544 if (instr->IsInteger32()) {
1545 __ orl(ToRegister(left), ToOperand(right));
1547 __ orp(ToRegister(left), ToOperand(right));
1550 case Token::BIT_XOR:
1551 if (instr->IsInteger32()) {
1552 __ xorl(ToRegister(left), ToOperand(right));
1554 __ xorp(ToRegister(left), ToOperand(right));
1562 DCHECK(right->IsRegister());
1563 switch (instr->op()) {
1564 case Token::BIT_AND:
1565 if (instr->IsInteger32()) {
1566 __ andl(ToRegister(left), ToRegister(right));
1568 __ andp(ToRegister(left), ToRegister(right));
1572 if (instr->IsInteger32()) {
1573 __ orl(ToRegister(left), ToRegister(right));
1575 __ orp(ToRegister(left), ToRegister(right));
1578 case Token::BIT_XOR:
1579 if (instr->IsInteger32()) {
1580 __ xorl(ToRegister(left), ToRegister(right));
1582 __ xorp(ToRegister(left), ToRegister(right));
1593 void LCodeGen::DoShiftI(LShiftI* instr) {
1594 LOperand* left = instr->left();
1595 LOperand* right = instr->right();
1596 DCHECK(left->Equals(instr->result()));
1597 DCHECK(left->IsRegister());
1598 if (right->IsRegister()) {
1599 DCHECK(ToRegister(right).is(rcx));
1601 switch (instr->op()) {
1603 __ rorl_cl(ToRegister(left));
1606 __ sarl_cl(ToRegister(left));
1609 __ shrl_cl(ToRegister(left));
1610 if (instr->can_deopt()) {
1611 __ testl(ToRegister(left), ToRegister(left));
1612 DeoptimizeIf(negative, instr->environment());
1616 __ shll_cl(ToRegister(left));
1623 int32_t value = ToInteger32(LConstantOperand::cast(right));
1624 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1625 switch (instr->op()) {
1627 if (shift_count != 0) {
1628 __ rorl(ToRegister(left), Immediate(shift_count));
1632 if (shift_count != 0) {
1633 __ sarl(ToRegister(left), Immediate(shift_count));
1637 if (shift_count != 0) {
1638 __ shrl(ToRegister(left), Immediate(shift_count));
1639 } else if (instr->can_deopt()) {
1640 __ testl(ToRegister(left), ToRegister(left));
1641 DeoptimizeIf(negative, instr->environment());
1645 if (shift_count != 0) {
1646 if (instr->hydrogen_value()->representation().IsSmi()) {
1647 if (SmiValuesAre32Bits()) {
1648 __ shlp(ToRegister(left), Immediate(shift_count));
1650 DCHECK(SmiValuesAre31Bits());
1651 if (instr->can_deopt()) {
1652 if (shift_count != 1) {
1653 __ shll(ToRegister(left), Immediate(shift_count - 1));
1655 __ Integer32ToSmi(ToRegister(left), ToRegister(left));
1656 DeoptimizeIf(overflow, instr->environment());
1658 __ shll(ToRegister(left), Immediate(shift_count));
1662 __ shll(ToRegister(left), Immediate(shift_count));
1674 void LCodeGen::DoSubI(LSubI* instr) {
1675 LOperand* left = instr->left();
1676 LOperand* right = instr->right();
1677 DCHECK(left->Equals(instr->result()));
1679 if (right->IsConstantOperand()) {
1680 int32_t right_operand =
1681 ToRepresentation(LConstantOperand::cast(right),
1682 instr->hydrogen()->right()->representation());
1683 __ subl(ToRegister(left), Immediate(right_operand));
1684 } else if (right->IsRegister()) {
1685 if (instr->hydrogen_value()->representation().IsSmi()) {
1686 __ subp(ToRegister(left), ToRegister(right));
1688 __ subl(ToRegister(left), ToRegister(right));
1691 if (instr->hydrogen_value()->representation().IsSmi()) {
1692 __ subp(ToRegister(left), ToOperand(right));
1694 __ subl(ToRegister(left), ToOperand(right));
1698 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1699 DeoptimizeIf(overflow, instr->environment());
1704 void LCodeGen::DoConstantI(LConstantI* instr) {
1705 Register dst = ToRegister(instr->result());
1706 if (instr->value() == 0) {
1709 __ movl(dst, Immediate(instr->value()));
1714 void LCodeGen::DoConstantS(LConstantS* instr) {
1715 __ Move(ToRegister(instr->result()), instr->value());
1719 void LCodeGen::DoConstantD(LConstantD* instr) {
1720 DCHECK(instr->result()->IsDoubleRegister());
1721 XMMRegister res = ToDoubleRegister(instr->result());
1722 double v = instr->value();
1723 uint64_t int_val = BitCast<uint64_t, double>(v);
1724 // Use xor to produce +0.0 in a fast and compact way, but avoid doing
1725 // so if the constant is -0.0.
1729 Register tmp = ToRegister(instr->temp());
1730 __ Set(tmp, int_val);
1736 void LCodeGen::DoConstantE(LConstantE* instr) {
1737 __ LoadAddress(ToRegister(instr->result()), instr->value());
1741 void LCodeGen::DoConstantT(LConstantT* instr) {
1742 Handle<Object> object = instr->value(isolate());
1743 AllowDeferredHandleDereference smi_check;
1744 __ Move(ToRegister(instr->result()), object);
1748 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1749 Register result = ToRegister(instr->result());
1750 Register map = ToRegister(instr->value());
1751 __ EnumLength(result, map);
1755 void LCodeGen::DoDateField(LDateField* instr) {
1756 Register object = ToRegister(instr->date());
1757 Register result = ToRegister(instr->result());
1758 Smi* index = instr->index();
1759 Label runtime, done, not_date_object;
1760 DCHECK(object.is(result));
1761 DCHECK(object.is(rax));
1763 Condition cc = masm()->CheckSmi(object);
1764 DeoptimizeIf(cc, instr->environment());
1765 __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
1766 DeoptimizeIf(not_equal, instr->environment());
1768 if (index->value() == 0) {
1769 __ movp(result, FieldOperand(object, JSDate::kValueOffset));
1771 if (index->value() < JSDate::kFirstUncachedField) {
1772 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1773 Operand stamp_operand = __ ExternalOperand(stamp);
1774 __ movp(kScratchRegister, stamp_operand);
1775 __ cmpp(kScratchRegister, FieldOperand(object,
1776 JSDate::kCacheStampOffset));
1777 __ j(not_equal, &runtime, Label::kNear);
1778 __ movp(result, FieldOperand(object, JSDate::kValueOffset +
1779 kPointerSize * index->value()));
1780 __ jmp(&done, Label::kNear);
1783 __ PrepareCallCFunction(2);
1784 __ movp(arg_reg_1, object);
1785 __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
1786 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1792 Operand LCodeGen::BuildSeqStringOperand(Register string,
1794 String::Encoding encoding) {
1795 if (index->IsConstantOperand()) {
1796 int offset = ToInteger32(LConstantOperand::cast(index));
1797 if (encoding == String::TWO_BYTE_ENCODING) {
1798 offset *= kUC16Size;
1800 STATIC_ASSERT(kCharSize == 1);
1801 return FieldOperand(string, SeqString::kHeaderSize + offset);
1803 return FieldOperand(
1804 string, ToRegister(index),
1805 encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
1806 SeqString::kHeaderSize);
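// For a constant index the character address is simply the string plus
// SeqString::kHeaderSize plus the offset (doubled for two-byte strings),
// with FieldOperand folding in the -kHeapObjectTag adjustment; for a
// variable index the same address is formed with scaled-index addressing
// (times_1 or times_2).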
1810 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1811 String::Encoding encoding = instr->hydrogen()->encoding();
1812 Register result = ToRegister(instr->result());
1813 Register string = ToRegister(instr->string());
1815 if (FLAG_debug_code) {
1817 __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
1818 __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));
1820 __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
1821 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1822 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1823 __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
1824 ? one_byte_seq_type : two_byte_seq_type));
1825 __ Check(equal, kUnexpectedStringType);
1829 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1830 if (encoding == String::ONE_BYTE_ENCODING) {
1831 __ movzxbl(result, operand);
1833 __ movzxwl(result, operand);
1838 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1839 String::Encoding encoding = instr->hydrogen()->encoding();
1840 Register string = ToRegister(instr->string());
1842 if (FLAG_debug_code) {
1843 Register value = ToRegister(instr->value());
1844 Register index = ToRegister(instr->index());
1845 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1846 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1848 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1849 ? one_byte_seq_type : two_byte_seq_type;
1850 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1853 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1854 if (instr->value()->IsConstantOperand()) {
1855 int value = ToInteger32(LConstantOperand::cast(instr->value()));
1856 DCHECK_LE(0, value);
1857 if (encoding == String::ONE_BYTE_ENCODING) {
1858 DCHECK_LE(value, String::kMaxOneByteCharCode);
1859 __ movb(operand, Immediate(value));
1861 DCHECK_LE(value, String::kMaxUtf16CodeUnit);
1862 __ movw(operand, Immediate(value));
1865 Register value = ToRegister(instr->value());
1866 if (encoding == String::ONE_BYTE_ENCODING) {
1867 __ movb(operand, value);
1869 __ movw(operand, value);
1875 void LCodeGen::DoAddI(LAddI* instr) {
1876 LOperand* left = instr->left();
1877 LOperand* right = instr->right();
1879 Representation target_rep = instr->hydrogen()->representation();
1880 bool is_p = target_rep.IsSmi() || target_rep.IsExternal();
1882 if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
1883 if (right->IsConstantOperand()) {
1884 // No support for smi-immediates for 32-bit SMI.
1885 DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
1887 ToRepresentation(LConstantOperand::cast(right),
1888 instr->hydrogen()->right()->representation());
1890 __ leap(ToRegister(instr->result()),
1891 MemOperand(ToRegister(left), offset));
1893 __ leal(ToRegister(instr->result()),
1894 MemOperand(ToRegister(left), offset));
1897 Operand address(ToRegister(left), ToRegister(right), times_1, 0);
1899 __ leap(ToRegister(instr->result()), address);
1901 __ leal(ToRegister(instr->result()), address);
1905 if (right->IsConstantOperand()) {
1906 // No support for smi-immediates for 32-bit SMI.
1907 DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
1908 int32_t right_operand =
1909 ToRepresentation(LConstantOperand::cast(right),
1910 instr->hydrogen()->right()->representation());
1912 __ addp(ToRegister(left), Immediate(right_operand));
1914 __ addl(ToRegister(left), Immediate(right_operand));
1916 } else if (right->IsRegister()) {
1918 __ addp(ToRegister(left), ToRegister(right));
1920 __ addl(ToRegister(left), ToRegister(right));
1924 __ addp(ToRegister(left), ToOperand(right));
1926 __ addl(ToRegister(left), ToOperand(right));
1929 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1930 DeoptimizeIf(overflow, instr->environment());
1936 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1937 LOperand* left = instr->left();
1938 LOperand* right = instr->right();
1939 DCHECK(left->Equals(instr->result()));
1940 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1941 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1943 Condition condition = (operation == HMathMinMax::kMathMin)
1946 Register left_reg = ToRegister(left);
1947 if (right->IsConstantOperand()) {
1948 Immediate right_imm = Immediate(
1949 ToRepresentation(LConstantOperand::cast(right),
1950 instr->hydrogen()->right()->representation()));
1951 DCHECK(SmiValuesAre32Bits()
1952 ? !instr->hydrogen()->representation().IsSmi()
1953 : SmiValuesAre31Bits());
1954 __ cmpl(left_reg, right_imm);
1955 __ j(condition, &return_left, Label::kNear);
1956 __ movp(left_reg, right_imm);
1957 } else if (right->IsRegister()) {
1958 Register right_reg = ToRegister(right);
1959 if (instr->hydrogen_value()->representation().IsSmi()) {
1960 __ cmpp(left_reg, right_reg);
1962 __ cmpl(left_reg, right_reg);
1964 __ j(condition, &return_left, Label::kNear);
1965 __ movp(left_reg, right_reg);
1967 Operand right_op = ToOperand(right);
1968 if (instr->hydrogen_value()->representation().IsSmi()) {
1969 __ cmpp(left_reg, right_op);
1971 __ cmpl(left_reg, right_op);
1973 __ j(condition, &return_left, Label::kNear);
1974 __ movp(left_reg, right_op);
1976 __ bind(&return_left);
1978 DCHECK(instr->hydrogen()->representation().IsDouble());
1979 Label check_nan_left, check_zero, return_left, return_right;
1980 Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
1981 XMMRegister left_reg = ToDoubleRegister(left);
1982 XMMRegister right_reg = ToDoubleRegister(right);
1983 __ ucomisd(left_reg, right_reg);
1984 __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
1985 __ j(equal, &check_zero, Label::kNear); // left == right.
1986 __ j(condition, &return_left, Label::kNear);
1987 __ jmp(&return_right, Label::kNear);
1989 __ bind(&check_zero);
1990 XMMRegister xmm_scratch = double_scratch0();
1991 __ xorps(xmm_scratch, xmm_scratch);
1992 __ ucomisd(left_reg, xmm_scratch);
1993 __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
1994 // At this point, both left and right are either 0 or -0.
1995 if (operation == HMathMinMax::kMathMin) {
1996 __ orps(left_reg, right_reg);
1998 // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
1999 __ addsd(left_reg, right_reg);
2001 __ jmp(&return_left, Label::kNear);
2003 __ bind(&check_nan_left);
2004 __ ucomisd(left_reg, left_reg); // NaN check.
2005 __ j(parity_even, &return_left, Label::kNear);
2006 __ bind(&return_right);
2007 __ movaps(left_reg, right_reg);
2009 __ bind(&return_left);
2014 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2015 XMMRegister left = ToDoubleRegister(instr->left());
2016 XMMRegister right = ToDoubleRegister(instr->right());
2017 XMMRegister result = ToDoubleRegister(instr->result());
2018 // All operations except MOD are computed in-place.
2019 DCHECK(instr->op() == Token::MOD || left.is(result));
2020 switch (instr->op()) {
2022 __ addsd(left, right);
2025 __ subsd(left, right);
2028 __ mulsd(left, right);
2031 __ divsd(left, right);
2032 // Don't delete this mov. It may improve performance on some CPUs
2033 // when there is a mulsd depending on the result.
2034 __ movaps(left, left);
2037 XMMRegister xmm_scratch = double_scratch0();
2038 __ PrepareCallCFunction(2);
2039 __ movaps(xmm_scratch, left);
2040 DCHECK(right.is(xmm1));
2042 ExternalReference::mod_two_doubles_operation(isolate()), 2);
2043 __ movaps(result, xmm_scratch);
2053 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2054 DCHECK(ToRegister(instr->context()).is(rsi));
2055 DCHECK(ToRegister(instr->left()).is(rdx));
2056 DCHECK(ToRegister(instr->right()).is(rax));
2057 DCHECK(ToRegister(instr->result()).is(rax));
2059 BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
2060 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2064 template<class InstrType>
2065 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2066 int left_block = instr->TrueDestination(chunk_);
2067 int right_block = instr->FalseDestination(chunk_);
2069 int next_block = GetNextEmittedBlock();
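// Fall through to whichever target is the next emitted block so that at
// most one jump is emitted; if neither target falls through, emit a
// conditional jump to the true block followed by an unconditional jump
// to the false block.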
2071 if (right_block == left_block || cc == no_condition) {
2072 EmitGoto(left_block);
2073 } else if (left_block == next_block) {
2074 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
2075 } else if (right_block == next_block) {
2076 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2078 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2080 __ jmp(chunk_->GetAssemblyLabel(right_block));
2086 template<class InstrType>
2087 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2088 int false_block = instr->FalseDestination(chunk_);
2089 __ j(cc, chunk_->GetAssemblyLabel(false_block));
2093 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2098 void LCodeGen::DoBranch(LBranch* instr) {
2099 Representation r = instr->hydrogen()->value()->representation();
2100 if (r.IsInteger32()) {
2101 DCHECK(!info()->IsStub());
2102 Register reg = ToRegister(instr->value());
2104 EmitBranch(instr, not_zero);
2105 } else if (r.IsSmi()) {
2106 DCHECK(!info()->IsStub());
2107 Register reg = ToRegister(instr->value());
2109 EmitBranch(instr, not_zero);
2110 } else if (r.IsDouble()) {
2111 DCHECK(!info()->IsStub());
2112 XMMRegister reg = ToDoubleRegister(instr->value());
2113 XMMRegister xmm_scratch = double_scratch0();
2114 __ xorps(xmm_scratch, xmm_scratch);
2115 __ ucomisd(reg, xmm_scratch);
2116 EmitBranch(instr, not_equal);
2118 DCHECK(r.IsTagged());
2119 Register reg = ToRegister(instr->value());
2120 HType type = instr->hydrogen()->value()->type();
2121 if (type.IsBoolean()) {
2122 DCHECK(!info()->IsStub());
2123 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2124 EmitBranch(instr, equal);
2125 } else if (type.IsSmi()) {
2126 DCHECK(!info()->IsStub());
2127 __ SmiCompare(reg, Smi::FromInt(0));
2128 EmitBranch(instr, not_equal);
2129 } else if (type.IsJSArray()) {
2130 DCHECK(!info()->IsStub());
2131 EmitBranch(instr, no_condition);
2132 } else if (type.IsHeapNumber()) {
2133 DCHECK(!info()->IsStub());
2134 XMMRegister xmm_scratch = double_scratch0();
2135 __ xorps(xmm_scratch, xmm_scratch);
2136 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2137 EmitBranch(instr, not_equal);
2138 } else if (type.IsString()) {
2139 DCHECK(!info()->IsStub());
2140 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2141 EmitBranch(instr, not_equal);
2143 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2144 // Avoid deopts in the case where we've never executed this path before.
2145 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
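// Emit an inline check for each type recorded in the ToBoolean type
// feedback; a value whose type was never observed falls through to the
// deoptimization at the end (unless the feedback is already generic) so
// that richer feedback can be collected on the next run.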
2147 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2148 // undefined -> false.
2149 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2150 __ j(equal, instr->FalseLabel(chunk_));
2152 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2154 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2155 __ j(equal, instr->TrueLabel(chunk_));
2157 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2158 __ j(equal, instr->FalseLabel(chunk_));
2160 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2162 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2163 __ j(equal, instr->FalseLabel(chunk_));
2166 if (expected.Contains(ToBooleanStub::SMI)) {
2167 // Smis: 0 -> false, all others -> true.
2168 __ Cmp(reg, Smi::FromInt(0));
2169 __ j(equal, instr->FalseLabel(chunk_));
2170 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2171 } else if (expected.NeedsMap()) {
2172 // If we need a map later and have a Smi -> deopt.
2173 __ testb(reg, Immediate(kSmiTagMask));
2174 DeoptimizeIf(zero, instr->environment());
2177 const Register map = kScratchRegister;
2178 if (expected.NeedsMap()) {
2179 __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
2181 if (expected.CanBeUndetectable()) {
2182 // Undetectable -> false.
2183 __ testb(FieldOperand(map, Map::kBitFieldOffset),
2184 Immediate(1 << Map::kIsUndetectable));
2185 __ j(not_zero, instr->FalseLabel(chunk_));
2189 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2190 // spec object -> true.
2191 __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2192 __ j(above_equal, instr->TrueLabel(chunk_));
2195 if (expected.Contains(ToBooleanStub::STRING)) {
2196 // String value -> false iff empty.
2198 __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2199 __ j(above_equal, &not_string, Label::kNear);
2200 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2201 __ j(not_zero, instr->TrueLabel(chunk_));
2202 __ jmp(instr->FalseLabel(chunk_));
2203 __ bind(&not_string);
2206 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2207 // Symbol value -> true.
2208 __ CmpInstanceType(map, SYMBOL_TYPE);
2209 __ j(equal, instr->TrueLabel(chunk_));
2212 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2213 // heap number -> false iff +0, -0, or NaN.
2214 Label not_heap_number;
2215 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2216 __ j(not_equal, &not_heap_number, Label::kNear);
2217 XMMRegister xmm_scratch = double_scratch0();
2218 __ xorps(xmm_scratch, xmm_scratch);
2219 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2220 __ j(zero, instr->FalseLabel(chunk_));
2221 __ jmp(instr->TrueLabel(chunk_));
2222 __ bind(&not_heap_number);
2225 if (!expected.IsGeneric()) {
2226 // We've seen something for the first time -> deopt.
2227 // This can only happen if we are not generic already.
2228 DeoptimizeIf(no_condition, instr->environment());
2235 void LCodeGen::EmitGoto(int block) {
2236 if (!IsNextEmittedBlock(block)) {
2237 __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
2242 void LCodeGen::DoGoto(LGoto* instr) {
2243 EmitGoto(instr->block_id());
2247 inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2248 Condition cond = no_condition;
2251 case Token::EQ_STRICT:
2255 case Token::NE_STRICT:
2259 cond = is_unsigned ? below : less;
2262 cond = is_unsigned ? above : greater;
2265 cond = is_unsigned ? below_equal : less_equal;
2268 cond = is_unsigned ? above_equal : greater_equal;
2271 case Token::INSTANCEOF:
2279 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2280 LOperand* left = instr->left();
2281 LOperand* right = instr->right();
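// Unsigned condition codes are used both for uint32 inputs and for
// doubles, since ucomisd sets the flags much like an unsigned integer
// comparison (above/below rather than greater/less).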
2283 instr->is_double() ||
2284 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2285 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2286 Condition cc = TokenToCondition(instr->op(), is_unsigned);
2288 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2289 // We can statically evaluate the comparison.
2290 double left_val = ToDouble(LConstantOperand::cast(left));
2291 double right_val = ToDouble(LConstantOperand::cast(right));
2292 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2293 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2294 EmitGoto(next_block);
2296 if (instr->is_double()) {
2297 // Don't base result on EFLAGS when a NaN is involved. Instead
2298 // jump to the false block.
2299 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
2300 __ j(parity_even, instr->FalseLabel(chunk_));
2303 if (right->IsConstantOperand()) {
2304 value = ToInteger32(LConstantOperand::cast(right));
2305 if (instr->hydrogen_value()->representation().IsSmi()) {
2306 __ Cmp(ToRegister(left), Smi::FromInt(value));
2308 __ cmpl(ToRegister(left), Immediate(value));
2310 } else if (left->IsConstantOperand()) {
2311 value = ToInteger32(LConstantOperand::cast(left));
2312 if (instr->hydrogen_value()->representation().IsSmi()) {
2313 if (right->IsRegister()) {
2314 __ Cmp(ToRegister(right), Smi::FromInt(value));
2316 __ Cmp(ToOperand(right), Smi::FromInt(value));
2318 } else if (right->IsRegister()) {
2319 __ cmpl(ToRegister(right), Immediate(value));
2321 __ cmpl(ToOperand(right), Immediate(value));
2323 // We commuted the operands, so commute the condition.
2324 cc = CommuteCondition(cc);
2325 } else if (instr->hydrogen_value()->representation().IsSmi()) {
2326 if (right->IsRegister()) {
2327 __ cmpp(ToRegister(left), ToRegister(right));
2329 __ cmpp(ToRegister(left), ToOperand(right));
2332 if (right->IsRegister()) {
2333 __ cmpl(ToRegister(left), ToRegister(right));
2335 __ cmpl(ToRegister(left), ToOperand(right));
2339 EmitBranch(instr, cc);
2344 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2345 Register left = ToRegister(instr->left());
2347 if (instr->right()->IsConstantOperand()) {
2348 Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2349 __ Cmp(left, right);
2351 Register right = ToRegister(instr->right());
2352 __ cmpp(left, right);
2354 EmitBranch(instr, equal);
2358 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2359 if (instr->hydrogen()->representation().IsTagged()) {
2360 Register input_reg = ToRegister(instr->object());
2361 __ Cmp(input_reg, factory()->the_hole_value());
2362 EmitBranch(instr, equal);
2366 XMMRegister input_reg = ToDoubleRegister(instr->object());
2367 __ ucomisd(input_reg, input_reg);
2368 EmitFalseBranch(instr, parity_odd);
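// Only a NaN can be the hole. The hole is a NaN with a distinguished
// upper 32 bits (kHoleNanUpper32), so spill the value just below rsp
// and compare that half of the bit pattern directly.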
2370 __ subp(rsp, Immediate(kDoubleSize));
2371 __ movsd(MemOperand(rsp, 0), input_reg);
2372 __ addp(rsp, Immediate(kDoubleSize));
2374 int offset = sizeof(kHoleNanUpper32);
2375 __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
2376 EmitBranch(instr, equal);
2380 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2381 Representation rep = instr->hydrogen()->value()->representation();
2382 DCHECK(!rep.IsInteger32());
2384 if (rep.IsDouble()) {
2385 XMMRegister value = ToDoubleRegister(instr->value());
2386 XMMRegister xmm_scratch = double_scratch0();
2387 __ xorps(xmm_scratch, xmm_scratch);
2388 __ ucomisd(xmm_scratch, value);
2389 EmitFalseBranch(instr, not_equal);
2390 __ movmskpd(kScratchRegister, value);
2391 __ testl(kScratchRegister, Immediate(1));
2392 EmitBranch(instr, not_zero);
2394 Register value = ToRegister(instr->value());
2395 Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2396 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
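// A heap number holds -0 exactly when its upper word is 0x80000000 and
// its lower (mantissa) word is zero; check both halves.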
2397 __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
2399 EmitFalseBranch(instr, no_overflow);
2400 __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
2401 Immediate(0x00000000));
2402 EmitBranch(instr, equal);
2407 Condition LCodeGen::EmitIsObject(Register input,
2408 Label* is_not_object,
2410 DCHECK(!input.is(kScratchRegister));
2412 __ JumpIfSmi(input, is_not_object);
2414 __ CompareRoot(input, Heap::kNullValueRootIndex);
2415 __ j(equal, is_object);
2417 __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
2418 // Undetectable objects behave like undefined.
2419 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
2420 Immediate(1 << Map::kIsUndetectable));
2421 __ j(not_zero, is_not_object);
2423 __ movzxbl(kScratchRegister,
2424 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
2425 __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2426 __ j(below, is_not_object);
2427 __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2432 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2433 Register reg = ToRegister(instr->value());
2435 Condition true_cond = EmitIsObject(
2436 reg, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2438 EmitBranch(instr, true_cond);
2442 Condition LCodeGen::EmitIsString(Register input,
2444 Label* is_not_string,
2445 SmiCheck check_needed = INLINE_SMI_CHECK) {
2446 if (check_needed == INLINE_SMI_CHECK) {
2447 __ JumpIfSmi(input, is_not_string);
2450 Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2456 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2457 Register reg = ToRegister(instr->value());
2458 Register temp = ToRegister(instr->temp());
2460 SmiCheck check_needed =
2461 instr->hydrogen()->value()->type().IsHeapObject()
2462 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2464 Condition true_cond = EmitIsString(
2465 reg, temp, instr->FalseLabel(chunk_), check_needed);
2467 EmitBranch(instr, true_cond);
2471 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2473 if (instr->value()->IsRegister()) {
2474 Register input = ToRegister(instr->value());
2475 is_smi = masm()->CheckSmi(input);
2477 Operand input = ToOperand(instr->value());
2478 is_smi = masm()->CheckSmi(input);
2480 EmitBranch(instr, is_smi);
2484 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2485 Register input = ToRegister(instr->value());
2486 Register temp = ToRegister(instr->temp());
2488 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2489 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2491 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2492 __ testb(FieldOperand(temp, Map::kBitFieldOffset),
2493 Immediate(1 << Map::kIsUndetectable));
2494 EmitBranch(instr, not_zero);
2498 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2499 DCHECK(ToRegister(instr->context()).is(rsi));
2500 Token::Value op = instr->op();
2502 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2503 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2505 Condition condition = TokenToCondition(op, false);
2508 EmitBranch(instr, condition);
2512 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2513 InstanceType from = instr->from();
2514 InstanceType to = instr->to();
2515 if (from == FIRST_TYPE) return to;
2516 DCHECK(from == to || to == LAST_TYPE);
2521 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2522 InstanceType from = instr->from();
2523 InstanceType to = instr->to();
2524 if (from == to) return equal;
2525 if (to == LAST_TYPE) return above_equal;
2526 if (from == FIRST_TYPE) return below_equal;
2532 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2533 Register input = ToRegister(instr->value());
2535 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2536 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2539 __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
2540 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2544 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2545 Register input = ToRegister(instr->value());
2546 Register result = ToRegister(instr->result());
2548 __ AssertString(input);
2550 __ movl(result, FieldOperand(input, String::kHashFieldOffset));
2551 DCHECK(String::kHashShift >= kSmiTagSize);
2552 __ IndexFromHash(result, result);
2556 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2557 LHasCachedArrayIndexAndBranch* instr) {
2558 Register input = ToRegister(instr->value());
2560 __ testl(FieldOperand(input, String::kHashFieldOffset),
2561 Immediate(String::kContainsCachedArrayIndexMask));
2562 EmitBranch(instr, equal);
2566 // Branches to a label or falls through with the answer in the z flag.
2567 // Trashes the temp register.
2568 void LCodeGen::EmitClassOfTest(Label* is_true,
2570 Handle<String> class_name,
2574 DCHECK(!input.is(temp));
2575 DCHECK(!input.is(temp2));
2576 DCHECK(!temp.is(temp2));
2578 __ JumpIfSmi(input, is_false);
2580 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2581 // Assuming the following assertions, we can use the same compares to test
2582 // for both being a function type and being in the object type range.
2583 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2584 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2585 FIRST_SPEC_OBJECT_TYPE + 1);
2586 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2587 LAST_SPEC_OBJECT_TYPE - 1);
2588 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2589 __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2590 __ j(below, is_false);
2591 __ j(equal, is_true);
2592 __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2593 __ j(equal, is_true);
2595 // Faster code path to avoid two compares: subtract lower bound from the
2596 // actual type and do a signed compare with the width of the type range.
2597 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2598 __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2599 __ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2600 __ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2601 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2602 __ j(above, is_false);
2605 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2606 // Check if the constructor in the map is a function.
2607 __ movp(temp, FieldOperand(temp, Map::kConstructorOffset));
2609 // Objects with a non-function constructor have class 'Object'.
2610 __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
2611 if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2612 __ j(not_equal, is_true);
2614 __ j(not_equal, is_false);
2617 // temp now contains the constructor function. Grab the
2618 // instance class name from there.
2619 __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2620 __ movp(temp, FieldOperand(temp,
2621 SharedFunctionInfo::kInstanceClassNameOffset));
2622 // The class name we are testing against is internalized since it's a literal.
2623 // The name in the constructor is internalized because of the way the context
2624 // is booted. This routine isn't expected to work for random API-created
2625 // classes and it doesn't have to because you can't access it with natives
2626 // syntax. Since both sides are internalized it is sufficient to use an
2627 // identity comparison.
2628 DCHECK(class_name->IsInternalizedString());
2629 __ Cmp(temp, class_name);
2630 // End with the answer in the z flag.
2634 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2635 Register input = ToRegister(instr->value());
2636 Register temp = ToRegister(instr->temp());
2637 Register temp2 = ToRegister(instr->temp2());
2638 Handle<String> class_name = instr->hydrogen()->class_name();
2640 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2641 class_name, input, temp, temp2);
2643 EmitBranch(instr, equal);
2647 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2648 Register reg = ToRegister(instr->value());
2650 __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2651 EmitBranch(instr, equal);
2655 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2656 DCHECK(ToRegister(instr->context()).is(rsi));
2657 InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
2658 __ Push(ToRegister(instr->left()));
2659 __ Push(ToRegister(instr->right()));
2660 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2661 Label true_value, done;
2663 __ j(zero, &true_value, Label::kNear);
2664 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2665 __ jmp(&done, Label::kNear);
2666 __ bind(&true_value);
2667 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2672 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2673 class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
2675 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2676 LInstanceOfKnownGlobal* instr)
2677 : LDeferredCode(codegen), instr_(instr) { }
2678 virtual void Generate() V8_OVERRIDE {
2679 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2681 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
2682 Label* map_check() { return &map_check_; }
2684 LInstanceOfKnownGlobal* instr_;
2688 DCHECK(ToRegister(instr->context()).is(rsi));
2689 DeferredInstanceOfKnownGlobal* deferred;
2690 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2692 Label done, false_result;
2693 Register object = ToRegister(instr->value());
2695 // A Smi is not an instance of anything.
2696 __ JumpIfSmi(object, &false_result, Label::kNear);
2698 // This is the inlined call site instanceof cache. The two occurrences of the
2699 // hole value will be patched to the last map/result pair generated by the instanceof stub.
2702 // Use a temp register to avoid memory operands with variable lengths.
2703 Register map = ToRegister(instr->temp());
2704 __ movp(map, FieldOperand(object, HeapObject::kMapOffset));
2705 __ bind(deferred->map_check()); // Label for calculating code patching.
2706 Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
2707 __ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
2708 __ cmpp(map, Operand(kScratchRegister, 0));
2709 __ j(not_equal, &cache_miss, Label::kNear);
2710 // Patched to load either true or false.
2711 __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
2713 // Check that the code size between patch label and patch sites is invariant.
2714 Label end_of_patched_code;
2715 __ bind(&end_of_patched_code);
2718 __ jmp(&done, Label::kNear);
2720 // The inlined call site cache did not match. Check for null and string
2721 // before calling the deferred code.
2722 __ bind(&cache_miss); // Null is not an instance of anything.
2723 __ CompareRoot(object, Heap::kNullValueRootIndex);
2724 __ j(equal, &false_result, Label::kNear);
2726 // String values are not instances of anything.
2727 __ JumpIfNotString(object, kScratchRegister, deferred->entry());
2729 __ bind(&false_result);
2730 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2732 __ bind(deferred->exit());
2737 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2740 PushSafepointRegistersScope scope(this);
2741 InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
2742 InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
2743 InstanceofStub stub(isolate(), flags);
2745 __ Push(ToRegister(instr->value()));
2746 __ Push(instr->function());
2748 static const int kAdditionalDelta = kPointerSize == kInt64Size ? 10 : 16;
2750 masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
2752 __ PushImm32(delta);
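// The pushed delta is the code offset from the map-check label to the
// return address of the stub call; the stub uses it to locate and patch
// the inlined map check and boolean result emitted above.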
2754 // We are pushing three values on the stack but recording a
2755 // safepoint with two arguments because the stub is going to
2756 // remove the third argument from the stack before jumping
2757 // to the instanceof builtin on the slow path.
2758 CallCodeGeneric(stub.GetCode(),
2759 RelocInfo::CODE_TARGET,
2761 RECORD_SAFEPOINT_WITH_REGISTERS,
2763 DCHECK(delta == masm_->SizeOfCodeGeneratedSince(map_check));
2764 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2765 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2766 // Move result to a register that survives the end of the
2767 // PushSafepointRegisterScope.
2768 __ movp(kScratchRegister, rax);
2770 __ testp(kScratchRegister, kScratchRegister);
2773 __ j(not_zero, &load_false, Label::kNear);
2774 __ LoadRoot(rax, Heap::kTrueValueRootIndex);
2775 __ jmp(&done, Label::kNear);
2776 __ bind(&load_false);
2777 __ LoadRoot(rax, Heap::kFalseValueRootIndex);
2782 void LCodeGen::DoCmpT(LCmpT* instr) {
2783 DCHECK(ToRegister(instr->context()).is(rsi));
2784 Token::Value op = instr->op();
2786 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2787 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2789 Condition condition = TokenToCondition(op, false);
2790 Label true_value, done;
2792 __ j(condition, &true_value, Label::kNear);
2793 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2794 __ jmp(&done, Label::kNear);
2795 __ bind(&true_value);
2796 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2801 void LCodeGen::DoReturn(LReturn* instr) {
2802 if (FLAG_trace && info()->IsOptimizing()) {
2803 // Preserve the return value on the stack and rely on the runtime call
2804 // to return the value in the same register. We're leaving the code
2805 // managed by the register allocator and tearing down the frame, it's
2806 // safe to write to the context register.
2808 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
2809 __ CallRuntime(Runtime::kTraceExit, 1);
2811 if (info()->saves_caller_doubles()) {
2812 RestoreCallerDoubles();
2814 int no_frame_start = -1;
2815 if (NeedsEagerFrame()) {
2818 no_frame_start = masm_->pc_offset();
2820 if (instr->has_constant_parameter_count()) {
2821 __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
2824 Register reg = ToRegister(instr->parameter_count());
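// Dynamic argument count: untag the count, pop the return address into
// a scratch register, drop the arguments from the stack, and return by
// jumping through the scratch register.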
2825 // The argument count parameter is a smi.
2826 __ SmiToInteger32(reg, reg);
2827 Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
2828 __ PopReturnAddressTo(return_addr_reg);
2829 __ shlp(reg, Immediate(kPointerSizeLog2));
2831 __ jmp(return_addr_reg);
2833 if (no_frame_start != -1) {
2834 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2839 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2840 Register result = ToRegister(instr->result());
2841 __ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
2842 if (instr->hydrogen()->RequiresHoleCheck()) {
2843 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2844 DeoptimizeIf(equal, instr->environment());
2849 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2850 DCHECK(ToRegister(instr->context()).is(rsi));
2851 DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister()));
2852 DCHECK(ToRegister(instr->result()).is(rax));
2854 __ Move(LoadIC::NameRegister(), instr->name());
2855 if (FLAG_vector_ics) {
2856 Register vector = ToRegister(instr->temp_vector());
2857 DCHECK(vector.is(LoadIC::VectorRegister()));
2858 __ Move(vector, instr->hydrogen()->feedback_vector());
2859 // No need to allocate this register.
2860 DCHECK(LoadIC::SlotRegister().is(rax));
2861 __ Move(LoadIC::SlotRegister(), Smi::FromInt(instr->hydrogen()->slot()));
2863 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
2864 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
2865 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2869 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2870 Register value = ToRegister(instr->value());
2871 Handle<Cell> cell_handle = instr->hydrogen()->cell().handle();
2873 // If the cell we are storing to contains the hole it could have
2874 // been deleted from the property dictionary. In that case, we need
2875 // to update the property details in the property dictionary to mark
2876 // it as no longer deleted. We deoptimize in that case.
2877 if (instr->hydrogen()->RequiresHoleCheck()) {
2878 // We have a temp because CompareRoot might clobber kScratchRegister.
2879 Register cell = ToRegister(instr->temp());
2880 DCHECK(!value.is(cell));
2881 __ Move(cell, cell_handle, RelocInfo::CELL);
2882 __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
2883 DeoptimizeIf(equal, instr->environment());
2885 __ movp(Operand(cell, 0), value);
2888 __ Move(kScratchRegister, cell_handle, RelocInfo::CELL);
2889 __ movp(Operand(kScratchRegister, 0), value);
2891 // Cells are always rescanned, so no write barrier here.
2895 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2896 Register context = ToRegister(instr->context());
2897 Register result = ToRegister(instr->result());
2898 __ movp(result, ContextOperand(context, instr->slot_index()));
2899 if (instr->hydrogen()->RequiresHoleCheck()) {
2900 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2901 if (instr->hydrogen()->DeoptimizesOnHole()) {
2902 DeoptimizeIf(equal, instr->environment());
2905 __ j(not_equal, &is_not_hole, Label::kNear);
2906 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2907 __ bind(&is_not_hole);
2913 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2914 Register context = ToRegister(instr->context());
2915 Register value = ToRegister(instr->value());
2917 Operand target = ContextOperand(context, instr->slot_index());
2919 Label skip_assignment;
2920 if (instr->hydrogen()->RequiresHoleCheck()) {
2921 __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
2922 if (instr->hydrogen()->DeoptimizesOnHole()) {
2923 DeoptimizeIf(equal, instr->environment());
2925 __ j(not_equal, &skip_assignment);
2928 __ movp(target, value);
2930 if (instr->hydrogen()->NeedsWriteBarrier()) {
2931 SmiCheck check_needed =
2932 instr->hydrogen()->value()->type().IsHeapObject()
2933 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
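// The write barrier may skip the smi check when the stored value is
// statically known to be a heap object.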
2934 int offset = Context::SlotOffset(instr->slot_index());
2935 Register scratch = ToRegister(instr->temp());
2936 __ RecordWriteContextSlot(context,
2941 EMIT_REMEMBERED_SET,
2945 __ bind(&skip_assignment);
2949 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2950 HObjectAccess access = instr->hydrogen()->access();
2951 int offset = access.offset();
2953 if (access.IsExternalMemory()) {
2954 Register result = ToRegister(instr->result());
2955 if (instr->object()->IsConstantOperand()) {
2956 DCHECK(result.is(rax));
2957 __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
2959 Register object = ToRegister(instr->object());
2960 __ Load(result, MemOperand(object, offset), access.representation());
2965 Register object = ToRegister(instr->object());
2966 if (instr->hydrogen()->representation().IsDouble()) {
2967 XMMRegister result = ToDoubleRegister(instr->result());
2968 __ movsd(result, FieldOperand(object, offset));
2972 Register result = ToRegister(instr->result());
2973 if (!access.IsInobject()) {
2974 __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
2978 Representation representation = access.representation();
2979 if (representation.IsSmi() && SmiValuesAre32Bits() &&
2980 instr->hydrogen()->representation().IsInteger32()) {
2981 if (FLAG_debug_code) {
2982 Register scratch = kScratchRegister;
2983 __ Load(scratch, FieldOperand(object, offset), representation);
2984 __ AssertSmi(scratch);
2987 // Read int value directly from upper half of the smi.
2988 STATIC_ASSERT(kSmiTag == 0);
2989 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
2990 offset += kPointerSize / 2;
2991 representation = Representation::Integer32();
2993 __ Load(result, FieldOperand(object, offset), representation);
2997 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2998 DCHECK(ToRegister(instr->context()).is(rsi));
2999 DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
3000 DCHECK(ToRegister(instr->result()).is(rax));
3002 __ Move(LoadIC::NameRegister(), instr->name());
3003 if (FLAG_vector_ics) {
3004 Register vector = ToRegister(instr->temp_vector());
3005 DCHECK(vector.is(LoadIC::VectorRegister()));
3006 __ Move(vector, instr->hydrogen()->feedback_vector());
3007 // No need to allocate this register.
3008 DCHECK(LoadIC::SlotRegister().is(rax));
3009 __ Move(LoadIC::SlotRegister(), Smi::FromInt(instr->hydrogen()->slot()));
3011 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
3012 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3016 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3017 Register function = ToRegister(instr->function());
3018 Register result = ToRegister(instr->result());
3020 // Get the prototype or initial map from the function.
3022 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3024 // Check that the function has a prototype or an initial map.
3025 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3026 DeoptimizeIf(equal, instr->environment());
3028 // If the function does not have an initial map, we're done.
3030 __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
3031 __ j(not_equal, &done, Label::kNear);
3033 // Get the prototype from the initial map.
3034 __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
3041 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3042 Register result = ToRegister(instr->result());
3043 __ LoadRoot(result, instr->index());
3047 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3048 Register arguments = ToRegister(instr->arguments());
3049 Register result = ToRegister(instr->result());
3051 if (instr->length()->IsConstantOperand() &&
3052 instr->index()->IsConstantOperand()) {
3053 int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3054 int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3055 if (const_index >= 0 && const_index < const_length) {
3056 StackArgumentsAccessor args(arguments, const_length,
3057 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3058 __ movp(result, args.GetArgumentOperand(const_index));
3059 } else if (FLAG_debug_code) {
3063 Register length = ToRegister(instr->length());
3064 // There are two words between the frame pointer and the last argument.
3065 // Subtracting from length accounts for one of them; add one more.
3066 if (instr->index()->IsRegister()) {
3067 __ subl(length, ToRegister(instr->index()));
3069 __ subl(length, ToOperand(instr->index()));
3071 StackArgumentsAccessor args(arguments, length,
3072 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3073 __ movp(result, args.GetArgumentOperand(0));
3078 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3079 ElementsKind elements_kind = instr->elements_kind();
3080 LOperand* key = instr->key();
3081 if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
3082 Register key_reg = ToRegister(key);
3083 Representation key_representation =
3084 instr->hydrogen()->key()->representation();
3085 if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
3086 __ SmiToInteger64(key_reg, key_reg);
3087 } else if (instr->hydrogen()->IsDehoisted()) {
3088 // Sign extend key because it could be a 32-bit negative value
3089 // and the dehoisted address computation happens in 64 bits.
3090 __ movsxlq(key_reg, key_reg);
3093 Operand operand(BuildFastArrayOperand(
3096 instr->hydrogen()->key()->representation(),
3098 instr->base_offset()));
3100 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3101 elements_kind == FLOAT32_ELEMENTS) {
3102 XMMRegister result(ToDoubleRegister(instr->result()));
3103 __ movss(result, operand);
3104 __ cvtss2sd(result, result);
3105 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3106 elements_kind == FLOAT64_ELEMENTS) {
3107 __ movsd(ToDoubleRegister(instr->result()), operand);
3109 Register result(ToRegister(instr->result()));
3110 switch (elements_kind) {
3111 case EXTERNAL_INT8_ELEMENTS:
3113 __ movsxbl(result, operand);
3115 case EXTERNAL_UINT8_ELEMENTS:
3116 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3117 case UINT8_ELEMENTS:
3118 case UINT8_CLAMPED_ELEMENTS:
3119 __ movzxbl(result, operand);
3121 case EXTERNAL_INT16_ELEMENTS:
3122 case INT16_ELEMENTS:
3123 __ movsxwl(result, operand);
3125 case EXTERNAL_UINT16_ELEMENTS:
3126 case UINT16_ELEMENTS:
3127 __ movzxwl(result, operand);
3129 case EXTERNAL_INT32_ELEMENTS:
3130 case INT32_ELEMENTS:
3131 __ movl(result, operand);
3133 case EXTERNAL_UINT32_ELEMENTS:
3134 case UINT32_ELEMENTS:
3135 __ movl(result, operand);
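// A loaded uint32 value with the top bit set cannot be represented as
// an int32; deopt unless the consumer is known to treat the value as
// uint32.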
3136 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3137 __ testl(result, result);
3138 DeoptimizeIf(negative, instr->environment());
3141 case EXTERNAL_FLOAT32_ELEMENTS:
3142 case EXTERNAL_FLOAT64_ELEMENTS:
3143 case FLOAT32_ELEMENTS:
3144 case FLOAT64_ELEMENTS:
3146 case FAST_SMI_ELEMENTS:
3147 case FAST_DOUBLE_ELEMENTS:
3148 case FAST_HOLEY_ELEMENTS:
3149 case FAST_HOLEY_SMI_ELEMENTS:
3150 case FAST_HOLEY_DOUBLE_ELEMENTS:
3151 case DICTIONARY_ELEMENTS:
3152 case SLOPPY_ARGUMENTS_ELEMENTS:
3160 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3161 XMMRegister result(ToDoubleRegister(instr->result()));
3162 LOperand* key = instr->key();
3163 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
3164 instr->hydrogen()->IsDehoisted()) {
3165 // Sign extend key because it could be a 32-bit negative value
3166 // and the dehoisted address computation happens in 64 bits.
3167 __ movsxlq(ToRegister(key), ToRegister(key));
3169 if (instr->hydrogen()->RequiresHoleCheck()) {
3170 Operand hole_check_operand = BuildFastArrayOperand(
3173 instr->hydrogen()->key()->representation(),
3174 FAST_DOUBLE_ELEMENTS,
3175 instr->base_offset() + sizeof(kHoleNanLower32));
3176 __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
3177 DeoptimizeIf(equal, instr->environment());
3180 Operand double_load_operand = BuildFastArrayOperand(
3183 instr->hydrogen()->key()->representation(),
3184 FAST_DOUBLE_ELEMENTS,
3185 instr->base_offset());
3186 __ movsd(result, double_load_operand);
3190 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3191 HLoadKeyed* hinstr = instr->hydrogen();
3192 Register result = ToRegister(instr->result());
3193 LOperand* key = instr->key();
3194 bool requires_hole_check = hinstr->RequiresHoleCheck();
3195 Representation representation = hinstr->representation();
3196 int offset = instr->base_offset();
3198 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
3199 instr->hydrogen()->IsDehoisted()) {
3200 // Sign extend key because it could be a 32-bit negative value
3201 // and the dehoisted address computation happens in 64 bits.
3202 __ movsxlq(ToRegister(key), ToRegister(key));
3204 if (representation.IsInteger32() && SmiValuesAre32Bits() &&
3205 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3206 DCHECK(!requires_hole_check);
3207 if (FLAG_debug_code) {
3208 Register scratch = kScratchRegister;
3210 BuildFastArrayOperand(instr->elements(),
3212 instr->hydrogen()->key()->representation(),
3215 Representation::Smi());
3216 __ AssertSmi(scratch);
3218 // Read int value directly from upper half of the smi.
3219 STATIC_ASSERT(kSmiTag == 0);
3220 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
3221 offset += kPointerSize / 2;
3225 BuildFastArrayOperand(instr->elements(),
3227 instr->hydrogen()->key()->representation(),
3232 // Check for the hole value.
3233 if (requires_hole_check) {
3234 if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3235 Condition smi = __ CheckSmi(result);
3236 DeoptimizeIf(NegateCondition(smi), instr->environment());
3238 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3239 DeoptimizeIf(equal, instr->environment());
3245 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3246 if (instr->is_typed_elements()) {
3247 DoLoadKeyedExternalArray(instr);
3248 } else if (instr->hydrogen()->representation().IsDouble()) {
3249 DoLoadKeyedFixedDoubleArray(instr);
3251 DoLoadKeyedFixedArray(instr);
3256 Operand LCodeGen::BuildFastArrayOperand(
3257 LOperand* elements_pointer,
3259 Representation key_representation,
3260 ElementsKind elements_kind,
3262 Register elements_pointer_reg = ToRegister(elements_pointer);
3263 int shift_size = ElementsKindToShiftSize(elements_kind);
3264 if (key->IsConstantOperand()) {
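// Constant keys are folded directly into the operand displacement; keys
// with any of the top four bits set could overflow the 32-bit
// displacement once shifted, so compilation aborts.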
3265 int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
3266 if (constant_value & 0xF0000000) {
3267 Abort(kArrayIndexConstantValueTooBig);
3269 return Operand(elements_pointer_reg,
3270 (constant_value << shift_size) + offset);
3272 // Take the tag bit into account while computing the shift size.
3273 if (key_representation.IsSmi() && (shift_size >= 1)) {
3274 DCHECK(SmiValuesAre31Bits());
3275 shift_size -= kSmiTagSize;
3277 ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3278 return Operand(elements_pointer_reg,
3286 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3287 DCHECK(ToRegister(instr->context()).is(rsi));
3288 DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
3289 DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister()));
3291 if (FLAG_vector_ics) {
3292 Register vector = ToRegister(instr->temp_vector());
3293 DCHECK(vector.is(LoadIC::VectorRegister()));
3294 __ Move(vector, instr->hydrogen()->feedback_vector());
3295 // No need to allocate this register.
3296 DCHECK(LoadIC::SlotRegister().is(rax));
3297 __ Move(LoadIC::SlotRegister(), Smi::FromInt(instr->hydrogen()->slot()));
3300 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3301 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3305 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3306 Register result = ToRegister(instr->result());
3308 if (instr->hydrogen()->from_inlined()) {
3309 __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
3311 // Check for arguments adapter frame.
3312 Label done, adapted;
3313 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3314 __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
3315 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3316 __ j(equal, &adapted, Label::kNear);
3318 // No arguments adaptor frame.
3319 __ movp(result, rbp);
3320 __ jmp(&done, Label::kNear);
3322 // Arguments adaptor frame present.
3324 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3326 // Result is the frame pointer for the frame if not adapted and for the real
3327 // frame below the adaptor frame if adapted.
3333 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3334 Register result = ToRegister(instr->result());
3338 // If there is no arguments adaptor frame, the number of arguments is fixed.
3339 if (instr->elements()->IsRegister()) {
3340 __ cmpp(rbp, ToRegister(instr->elements()));
3342 __ cmpp(rbp, ToOperand(instr->elements()));
3344 __ movl(result, Immediate(scope()->num_parameters()));
3345 __ j(equal, &done, Label::kNear);
3347 // Arguments adaptor frame present. Get argument length from there.
3348 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3349 __ SmiToInteger32(result,
3351 ArgumentsAdaptorFrameConstants::kLengthOffset));
3353 // Argument length is in result register.
3358 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3359 Register receiver = ToRegister(instr->receiver());
3360 Register function = ToRegister(instr->function());
3362 // If the receiver is null or undefined, we have to pass the global
3363 // object as a receiver to normal functions. Values have to be
3364 // passed unchanged to builtins and strict-mode functions.
3365 Label global_object, receiver_ok;
3366 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3368 if (!instr->hydrogen()->known_function()) {
3369 // Do not transform the receiver to object for strict mode functions.
3371 __ movp(kScratchRegister,
3372 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3373 __ testb(FieldOperand(kScratchRegister,
3374 SharedFunctionInfo::kStrictModeByteOffset),
3375 Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
3376 __ j(not_equal, &receiver_ok, dist);
3378 // Do not transform the receiver to object for builtins.
3379 __ testb(FieldOperand(kScratchRegister,
3380 SharedFunctionInfo::kNativeByteOffset),
3381 Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
3382 __ j(not_equal, &receiver_ok, dist);
3385 // Normal function. Replace undefined or null with global receiver.
3386 __ CompareRoot(receiver, Heap::kNullValueRootIndex);
3387 __ j(equal, &global_object, Label::kNear);
3388 __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
3389 __ j(equal, &global_object, Label::kNear);
3391 // The receiver should be a JS object.
3392 Condition is_smi = __ CheckSmi(receiver);
3393 DeoptimizeIf(is_smi, instr->environment());
3394 __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
3395 DeoptimizeIf(below, instr->environment());
3397 __ jmp(&receiver_ok, Label::kNear);
3398 __ bind(&global_object);
3399 __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
3402 Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
3403 __ movp(receiver, FieldOperand(receiver, GlobalObject::kGlobalProxyOffset));
3405 __ bind(&receiver_ok);
3409 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3410 Register receiver = ToRegister(instr->receiver());
3411 Register function = ToRegister(instr->function());
3412 Register length = ToRegister(instr->length());
3413 Register elements = ToRegister(instr->elements());
3414 DCHECK(receiver.is(rax)); // Used for parameter count.
3415 DCHECK(function.is(rdi)); // Required by InvokeFunction.
3416 DCHECK(ToRegister(instr->result()).is(rax));
3418 // Copy the arguments to this function possibly from the
3419 // adaptor frame below it.
3420 const uint32_t kArgumentsLimit = 1 * KB;
3421 __ cmpp(length, Immediate(kArgumentsLimit));
3422 DeoptimizeIf(above, instr->environment());
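// receiver is rax (asserted above), so moving the length into it sets
// up the actual argument count that InvokeFunction expects.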
3425 __ movp(receiver, length);
3427 // Loop through the arguments pushing them onto the execution
3430 // length is a small non-negative integer, due to the test above.
3431 __ testl(length, length);
3432 __ j(zero, &invoke, Label::kNear);
3434 StackArgumentsAccessor args(elements, length,
3435 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3436 __ Push(args.GetArgumentOperand(0));
3438 __ j(not_zero, &loop);
3440 // Invoke the function.
3442 DCHECK(instr->HasPointerMap());
3443 LPointerMap* pointers = instr->pointer_map();
3444 SafepointGenerator safepoint_generator(
3445 this, pointers, Safepoint::kLazyDeopt);
3446 ParameterCount actual(rax);
3447 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3451 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3452 LOperand* argument = instr->value();
3453 EmitPushTaggedOperand(argument);
3457 void LCodeGen::DoDrop(LDrop* instr) {
3458 __ Drop(instr->count());
3462 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3463 Register result = ToRegister(instr->result());
3464 __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
3468 void LCodeGen::DoContext(LContext* instr) {
3469 Register result = ToRegister(instr->result());
3470 if (info()->IsOptimizing()) {
3471 __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
3473 // If there is no frame, the context must be in rsi.
3474 DCHECK(result.is(rsi));
3479 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3480 DCHECK(ToRegister(instr->context()).is(rsi));
3481 __ Push(rsi); // The context is the first argument.
3482 __ Push(instr->hydrogen()->pairs());
3483 __ Push(Smi::FromInt(instr->hydrogen()->flags()));
3484 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3488 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3489 int formal_parameter_count,
3491 LInstruction* instr,
3492 RDIState rdi_state) {
3493 bool dont_adapt_arguments =
3494 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3495 bool can_invoke_directly =
3496 dont_adapt_arguments || formal_parameter_count == arity;
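// The arguments adaptor frame can be skipped only when the callee does
// not care about its formal parameter count or the call site passes
// exactly that many arguments.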
3498 LPointerMap* pointers = instr->pointer_map();
3500 if (can_invoke_directly) {
3501 if (rdi_state == RDI_UNINITIALIZED) {
3502 __ Move(rdi, function);
3506 __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3508 // Set rax to arguments count if adaptation is not needed. Assumes that rax
3509 // is available to write to at this point.
3510 if (dont_adapt_arguments) {
3515 if (function.is_identical_to(info()->closure())) {
3518 __ Call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3521 // Set up deoptimization.
3522 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
3524 // We need to adapt arguments.
3525 SafepointGenerator generator(
3526 this, pointers, Safepoint::kLazyDeopt);
3527 ParameterCount count(arity);
3528 ParameterCount expected(formal_parameter_count);
3529 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3534 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3535 DCHECK(ToRegister(instr->result()).is(rax));
3537 LPointerMap* pointers = instr->pointer_map();
3538 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
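// The target is either a constant code object or a register holding a
// code object; in the register case the code header is skipped to reach
// the first instruction.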
3540 if (instr->target()->IsConstantOperand()) {
3541 LConstantOperand* target = LConstantOperand::cast(instr->target());
3542 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3543 generator.BeforeCall(__ CallSize(code));
3544 __ call(code, RelocInfo::CODE_TARGET);
3546 DCHECK(instr->target()->IsRegister());
3547 Register target = ToRegister(instr->target());
3548 generator.BeforeCall(__ CallSize(target));
3549 __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3552 generator.AfterCall();
3556 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3557 DCHECK(ToRegister(instr->function()).is(rdi));
3558 DCHECK(ToRegister(instr->result()).is(rax));
3560 if (instr->hydrogen()->pass_argument_count()) {
3561 __ Set(rax, instr->arity());
3565 __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3567 LPointerMap* pointers = instr->pointer_map();
3568 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3570 bool is_self_call = false;
3571 if (instr->hydrogen()->function()->IsConstant()) {
3572 Handle<JSFunction> jsfun = Handle<JSFunction>::null();
3573 HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3574 jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
3575 is_self_call = jsfun.is_identical_to(info()->closure());
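// A call to the function being compiled can be emitted as a direct
// self-call; other targets are called through the code entry field of
// the JSFunction.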
3581 Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
3582 generator.BeforeCall(__ CallSize(target));
3585 generator.AfterCall();
3589 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3590 Register input_reg = ToRegister(instr->value());
3591 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3592 Heap::kHeapNumberMapRootIndex);
3593 DeoptimizeIf(not_equal, instr->environment());
3595 Label slow, allocated, done;
3596 Register tmp = input_reg.is(rax) ? rcx : rax;
3597 Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
3599 // Preserve the value of all registers.
3600 PushSafepointRegistersScope scope(this);
3602 __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3603 // Check the sign of the argument. If the argument is positive, just
3604 // return it. We do not need to patch the stack since |input| and
3605 // |result| are the same register and |input| will be restored
3606 // unchanged by popping safepoint registers.
3607 __ testl(tmp, Immediate(HeapNumber::kSignMask));
3610 __ AllocateHeapNumber(tmp, tmp2, &slow);
3611 __ jmp(&allocated, Label::kNear);
3613 // Slow case: Call the runtime system to do the number allocation.
3615 CallRuntimeFromDeferred(
3616 Runtime::kAllocateHeapNumber, 0, instr, instr->context());
3617 // Set the pointer to the new heap number in tmp.
3618 if (!tmp.is(rax)) __ movp(tmp, rax);
3619 // Restore input_reg after call to runtime.
3620 __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3622 __ bind(&allocated);
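// Copy the double payload and clear its sign bit by shifting it out and
// back in, then store the result into the freshly allocated heap number.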
3623 __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
3624 __ shlq(tmp2, Immediate(1));
3625 __ shrq(tmp2, Immediate(1));
3626 __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
3627 __ StoreToSafepointRegisterSlot(input_reg, tmp);
3633 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3634 Register input_reg = ToRegister(instr->value());
3635 __ testl(input_reg, input_reg);
3637 __ j(not_sign, &is_positive, Label::kNear);
3638 __ negl(input_reg); // Sets flags.
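// Negating kMinInt leaves the sign flag set because the result is not
// representable; deoptimize in that case.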
3639 DeoptimizeIf(negative, instr->environment());
3640 __ bind(&is_positive);
3644 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
3645 Register input_reg = ToRegister(instr->value());
3646 __ testp(input_reg, input_reg);
3648 __ j(not_sign, &is_positive, Label::kNear);
3649 __ negp(input_reg); // Sets flags.
3650 DeoptimizeIf(negative, instr->environment());
3651 __ bind(&is_positive);
3655 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3656 // Class for deferred case.
3657 class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
3659 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3660 : LDeferredCode(codegen), instr_(instr) { }
3661 virtual void Generate() V8_OVERRIDE {
3662 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3664 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3669 DCHECK(instr->value()->Equals(instr->result()));
3670 Representation r = instr->hydrogen()->value()->representation();
3673 XMMRegister scratch = double_scratch0();
3674 XMMRegister input_reg = ToDoubleRegister(instr->value());
3675 __ xorps(scratch, scratch);
3676 __ subsd(scratch, input_reg);
3677 __ andps(input_reg, scratch);
3678 } else if (r.IsInteger32()) {
3679 EmitIntegerMathAbs(instr);
3680 } else if (r.IsSmi()) {
3681 EmitSmiMathAbs(instr);
3682 } else { // Tagged case.
3683 DeferredMathAbsTaggedHeapNumber* deferred =
3684 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3685 Register input_reg = ToRegister(instr->value());
3687 __ JumpIfNotSmi(input_reg, deferred->entry());
3688 EmitSmiMathAbs(instr);
3689 __ bind(deferred->exit());
3694 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3695 XMMRegister xmm_scratch = double_scratch0();
3696 Register output_reg = ToRegister(instr->result());
3697 XMMRegister input_reg = ToDoubleRegister(instr->value());
3699 if (CpuFeatures::IsSupported(SSE4_1)) {
3700 CpuFeatureScope scope(masm(), SSE4_1);
3701 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3702 // Deoptimize if minus zero.
3703 __ movq(output_reg, input_reg);
3704 __ subq(output_reg, Immediate(1));
3705 DeoptimizeIf(overflow, instr->environment());
3707 __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
3708 __ cvttsd2si(output_reg, xmm_scratch);
3709 __ cmpl(output_reg, Immediate(0x1));
3710 DeoptimizeIf(overflow, instr->environment());
3712 Label negative_sign, done;
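// Without SSE4.1 roundsd, floor is computed with cvttsd2si (truncation
// towards zero): for non-negative inputs truncation already equals the
// floor; for negative inputs, truncate and subtract one if truncation
// changed the value.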
3713 // Deoptimize on unordered.
3714 __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3715 __ ucomisd(input_reg, xmm_scratch);
3716 DeoptimizeIf(parity_even, instr->environment());
3717 __ j(below, &negative_sign, Label::kNear);
3719 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3720 // Check for negative zero.
3721 Label positive_sign;
3722 __ j(above, &positive_sign, Label::kNear);
3723 __ movmskpd(output_reg, input_reg);
3724 __ testq(output_reg, Immediate(1));
3725 DeoptimizeIf(not_zero, instr->environment());
3726 __ Set(output_reg, 0);
3728 __ bind(&positive_sign);
3731 // Use truncating instruction (OK because input is positive).
3732 __ cvttsd2si(output_reg, input_reg);
3733 // Overflow is signalled with minint.
3734 __ cmpl(output_reg, Immediate(0x1));
3735 DeoptimizeIf(overflow, instr->environment());
3736 __ jmp(&done, Label::kNear);
3738 // A non-zero negative value reaches here.
3739 __ bind(&negative_sign);
3740 // Truncate, then compare and compensate.
3741 __ cvttsd2si(output_reg, input_reg);
3742 __ Cvtlsi2sd(xmm_scratch, output_reg);
3743 __ ucomisd(input_reg, xmm_scratch);
3744 __ j(equal, &done, Label::kNear);
3745 __ subl(output_reg, Immediate(1));
3746 DeoptimizeIf(overflow, instr->environment());
3753 void LCodeGen::DoMathRound(LMathRound* instr) {
3754 const XMMRegister xmm_scratch = double_scratch0();
3755 Register output_reg = ToRegister(instr->result());
3756 XMMRegister input_reg = ToDoubleRegister(instr->value());
3757 XMMRegister input_temp = ToDoubleRegister(instr->temp());
3758 static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
3759 static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
3761 Label done, round_to_zero, below_one_half;
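// Round-half-up is implemented as floor(x + 0.5): for x >= 0.5 the
// truncation of x + 0.5 already equals the floor; for x < -0.5 the
// truncation of x + 0.5 is compensated by subtracting one when it was
// not exact; values in between round to zero (with a -0 check if
// required).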
3762 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3763 __ movq(kScratchRegister, one_half);
3764 __ movq(xmm_scratch, kScratchRegister);
3765 __ ucomisd(xmm_scratch, input_reg);
3766 __ j(above, &below_one_half, Label::kNear);
3768 // CVTTSD2SI rounds towards zero; since 0.5 <= x, we use floor(0.5 + x).
3769 __ addsd(xmm_scratch, input_reg);
3770 __ cvttsd2si(output_reg, xmm_scratch);
3771 // Overflow is signalled with minint.
3772 __ cmpl(output_reg, Immediate(0x1));
3773 __ RecordComment("D2I conversion overflow");
3774 DeoptimizeIf(overflow, instr->environment());
3775 __ jmp(&done, dist);
3777 __ bind(&below_one_half);
3778 __ movq(kScratchRegister, minus_one_half);
3779 __ movq(xmm_scratch, kScratchRegister);
3780 __ ucomisd(xmm_scratch, input_reg);
3781 __ j(below_equal, &round_to_zero, Label::kNear);
3783 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
3784 // compare and compensate.
3785 __ movq(input_temp, input_reg); // Do not alter input_reg.
3786 __ subsd(input_temp, xmm_scratch);
3787 __ cvttsd2si(output_reg, input_temp);
3788 // Catch minint due to overflow, and to prevent overflow when compensating.
3789 __ cmpl(output_reg, Immediate(0x1));
3790 __ RecordComment("D2I conversion overflow");
3791 DeoptimizeIf(overflow, instr->environment());
3793 __ Cvtlsi2sd(xmm_scratch, output_reg);
3794 __ ucomisd(xmm_scratch, input_temp);
3795 __ j(equal, &done, dist);
3796 __ subl(output_reg, Immediate(1));
3797 // No overflow because we already ruled out minint.
3798 __ jmp(&done, dist);
3800 __ bind(&round_to_zero);
3801 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
3802 // we can ignore the difference between a result of -0 and +0.
3803 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3804 __ movq(output_reg, input_reg);
3805 __ testq(output_reg, output_reg);
3806 __ RecordComment("Minus zero");
3807 DeoptimizeIf(negative, instr->environment());
3808 }
3809 __ Set(output_reg, 0);
3810 __ bind(&done);
3811 }
3814 void LCodeGen::DoMathFround(LMathFround* instr) {
3815 XMMRegister input_reg = ToDoubleRegister(instr->value());
3816 XMMRegister output_reg = ToDoubleRegister(instr->result());
3817 __ cvtsd2ss(output_reg, input_reg);
3818 __ cvtss2sd(output_reg, output_reg);
3822 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3823 XMMRegister output = ToDoubleRegister(instr->result());
3824 if (instr->value()->IsDoubleRegister()) {
3825 XMMRegister input = ToDoubleRegister(instr->value());
3826 __ sqrtsd(output, input);
3827 } else {
3828 Operand input = ToOperand(instr->value());
3829 __ sqrtsd(output, input);
3830 }
3831 }
3834 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3835 XMMRegister xmm_scratch = double_scratch0();
3836 XMMRegister input_reg = ToDoubleRegister(instr->value());
3837 DCHECK(ToDoubleRegister(instr->result()).is(input_reg));
3839 // Note that according to ECMA-262 15.8.2.13:
3840 // Math.pow(-Infinity, 0.5) == Infinity
3841 // Math.sqrt(-Infinity) == NaN
3842 Label done, sqrt;
3843 // Check base for -Infinity. According to IEEE-754, double-precision
3844 // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
3845 __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
3846 __ movq(xmm_scratch, kScratchRegister);
3847 __ ucomisd(xmm_scratch, input_reg);
3848 // Comparing -Infinity with NaN results in "unordered", which sets the
3849 // zero flag as if both were equal. However, it also sets the carry flag.
3850 __ j(not_equal, &sqrt, Label::kNear);
3851 __ j(carry, &sqrt, Label::kNear);
3852 // If input is -Infinity, return Infinity.
3853 __ xorps(input_reg, input_reg);
3854 __ subsd(input_reg, xmm_scratch);
3855 __ jmp(&done, Label::kNear);
3857 // Square root.
3858 __ bind(&sqrt);
3859 __ xorps(xmm_scratch, xmm_scratch);
3860 __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
3861 __ sqrtsd(input_reg, input_reg);
3862 __ bind(&done);
3863 }
3866 void LCodeGen::DoPower(LPower* instr) {
3867 Representation exponent_type = instr->hydrogen()->right()->representation();
3868 // Having marked this as a call, we can use any registers.
3869 // Just make sure that the input/output registers are the expected ones.
3871 Register exponent = rdx;
3872 DCHECK(!instr->right()->IsRegister() ||
3873 ToRegister(instr->right()).is(exponent));
3874 DCHECK(!instr->right()->IsDoubleRegister() ||
3875 ToDoubleRegister(instr->right()).is(xmm1));
3876 DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
3877 DCHECK(ToDoubleRegister(instr->result()).is(xmm3));
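3878 // MathPowStub expects the base in xmm2, the exponent in rdx (tagged/smi) or xmm1 (double), and returns the result in xmm3.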
3879 if (exponent_type.IsSmi()) {
3880 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3881 __ CallStub(&stub);
3882 } else if (exponent_type.IsTagged()) {
3883 Label no_deopt;
3884 __ JumpIfSmi(exponent, &no_deopt, Label::kNear);
3885 __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
3886 DeoptimizeIf(not_equal, instr->environment());
3887 __ bind(&no_deopt);
3888 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3889 __ CallStub(&stub);
3890 } else if (exponent_type.IsInteger32()) {
3891 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3892 __ CallStub(&stub);
3893 } else {
3894 DCHECK(exponent_type.IsDouble());
3895 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3896 __ CallStub(&stub);
3897 }
3898 }
3901 void LCodeGen::DoMathExp(LMathExp* instr) {
3902 XMMRegister input = ToDoubleRegister(instr->value());
3903 XMMRegister result = ToDoubleRegister(instr->result());
3904 XMMRegister temp0 = double_scratch0();
3905 Register temp1 = ToRegister(instr->temp1());
3906 Register temp2 = ToRegister(instr->temp2());
3908 MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
3912 void LCodeGen::DoMathLog(LMathLog* instr) {
3913 DCHECK(instr->value()->Equals(instr->result()));
3914 XMMRegister input_reg = ToDoubleRegister(instr->value());
3915 XMMRegister xmm_scratch = double_scratch0();
3916 Label positive, done, zero;
3917 __ xorps(xmm_scratch, xmm_scratch);
3918 __ ucomisd(input_reg, xmm_scratch);
3919 __ j(above, &positive, Label::kNear);
3920 __ j(not_carry, &zero, Label::kNear);
3921 ExternalReference nan =
3922 ExternalReference::address_of_canonical_non_hole_nan();
3923 Operand nan_operand = masm()->ExternalOperand(nan);
3924 __ movsd(input_reg, nan_operand);
3925 __ jmp(&done, Label::kNear);
3926 __ bind(&zero);
3927 ExternalReference ninf =
3928 ExternalReference::address_of_negative_infinity();
3929 Operand ninf_operand = masm()->ExternalOperand(ninf);
3930 __ movsd(input_reg, ninf_operand);
3931 __ jmp(&done, Label::kNear);
3932 __ bind(&positive);
3933 __ fldln2();
3934 __ subp(rsp, Immediate(kDoubleSize));
3935 __ movsd(Operand(rsp, 0), input_reg);
3936 __ fld_d(Operand(rsp, 0));
3937 __ fyl2x();
3938 __ fstp_d(Operand(rsp, 0));
3939 __ movsd(input_reg, Operand(rsp, 0));
3940 __ addp(rsp, Immediate(kDoubleSize));
3941 __ bind(&done);
3942 }
3945 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3946 Register input = ToRegister(instr->value());
3947 Register result = ToRegister(instr->result());
3948 Label not_zero_input;
3949 __ bsrl(result, input);
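3950 // bsrl yields the index of the most significant set bit but leaves the destination undefined for a zero input, so that case is handled explicitly below.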
3951 __ j(not_zero, &not_zero_input);
3952 __ Set(result, 63); // 63^31 == 32
3954 __ bind(&not_zero_input);
3955 __ xorl(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
3959 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3960 DCHECK(ToRegister(instr->context()).is(rsi));
3961 DCHECK(ToRegister(instr->function()).is(rdi));
3962 DCHECK(instr->HasPointerMap());
3964 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3965 if (known_function.is_null()) {
3966 LPointerMap* pointers = instr->pointer_map();
3967 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3968 ParameterCount count(instr->arity());
3969 __ InvokeFunction(rdi, count, CALL_FUNCTION, generator);
3970 } else {
3971 CallKnownFunction(known_function,
3972 instr->hydrogen()->formal_parameter_count(),
3973 instr->arity(),
3974 instr,
3975 RDI_CONTAINS_TARGET);
3976 }
3977 }
3980 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3981 DCHECK(ToRegister(instr->context()).is(rsi));
3982 DCHECK(ToRegister(instr->function()).is(rdi));
3983 DCHECK(ToRegister(instr->result()).is(rax));
3985 int arity = instr->arity();
3986 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
3987 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3991 void LCodeGen::DoCallNew(LCallNew* instr) {
3992 DCHECK(ToRegister(instr->context()).is(rsi));
3993 DCHECK(ToRegister(instr->constructor()).is(rdi));
3994 DCHECK(ToRegister(instr->result()).is(rax));
3996 __ Set(rax, instr->arity());
3997 // No cell in ebx for construct type feedback in optimized code
3998 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
3999 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
4000 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4004 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4005 DCHECK(ToRegister(instr->context()).is(rsi));
4006 DCHECK(ToRegister(instr->constructor()).is(rdi));
4007 DCHECK(ToRegister(instr->result()).is(rax));
4009 __ Set(rax, instr->arity());
4010 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
4011 ElementsKind kind = instr->hydrogen()->elements_kind();
4012 AllocationSiteOverrideMode override_mode =
4013 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4014 ? DISABLE_ALLOCATION_SITES
4015 : DONT_OVERRIDE;
4017 if (instr->arity() == 0) {
4018 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
4019 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4020 } else if (instr->arity() == 1) {
4021 Label done;
4022 if (IsFastPackedElementsKind(kind)) {
4023 Label packed_case;
4024 // We might need a change here
4025 // look at the first argument
4026 __ movp(rcx, Operand(rsp, 0));
4027 __ testp(rcx, rcx);
4028 __ j(zero, &packed_case, Label::kNear);
4030 ElementsKind holey_kind = GetHoleyElementsKind(kind);
4031 ArraySingleArgumentConstructorStub stub(isolate(),
4032 holey_kind,
4033 override_mode);
4034 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4035 __ jmp(&done, Label::kNear);
4036 __ bind(&packed_case);
4037 }
4039 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
4040 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4041 __ bind(&done);
4042 } else {
4043 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
4044 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4045 }
4046 }
4049 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4050 DCHECK(ToRegister(instr->context()).is(rsi));
4051 CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
4055 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4056 Register function = ToRegister(instr->function());
4057 Register code_object = ToRegister(instr->code_object());
4058 __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
4059 __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
4063 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4064 Register result = ToRegister(instr->result());
4065 Register base = ToRegister(instr->base_object());
4066 if (instr->offset()->IsConstantOperand()) {
4067 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4068 __ leap(result, Operand(base, ToInteger32(offset)));
4069 } else {
4070 Register offset = ToRegister(instr->offset());
4071 __ leap(result, Operand(base, offset, times_1, 0));
4072 }
4073 }
4076 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4077 HStoreNamedField* hinstr = instr->hydrogen();
4078 Representation representation = instr->representation();
4080 HObjectAccess access = hinstr->access();
4081 int offset = access.offset();
4083 if (access.IsExternalMemory()) {
4084 DCHECK(!hinstr->NeedsWriteBarrier());
4085 Register value = ToRegister(instr->value());
4086 if (instr->object()->IsConstantOperand()) {
4087 DCHECK(value.is(rax));
4088 LConstantOperand* object = LConstantOperand::cast(instr->object());
4089 __ store_rax(ToExternalReference(object));
4090 } else {
4091 Register object = ToRegister(instr->object());
4092 __ Store(MemOperand(object, offset), value, representation);
4093 }
4094 return;
4095 }
4097 Register object = ToRegister(instr->object());
4098 __ AssertNotSmi(object);
4100 DCHECK(!representation.IsSmi() ||
4101 !instr->value()->IsConstantOperand() ||
4102 IsInteger32Constant(LConstantOperand::cast(instr->value())));
4103 if (representation.IsDouble()) {
4104 DCHECK(access.IsInobject());
4105 DCHECK(!hinstr->has_transition());
4106 DCHECK(!hinstr->NeedsWriteBarrier());
4107 XMMRegister value = ToDoubleRegister(instr->value());
4108 __ movsd(FieldOperand(object, offset), value);
4109 return;
4110 }
4112 if (hinstr->has_transition()) {
4113 Handle<Map> transition = hinstr->transition_map();
4114 AddDeprecationDependency(transition);
4115 if (!hinstr->NeedsWriteBarrierForMap()) {
4116 __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
4117 } else {
4118 Register temp = ToRegister(instr->temp());
4119 __ Move(kScratchRegister, transition);
4120 __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
4121 // Update the write barrier for the map field.
4122 __ RecordWriteForMap(object,
4123 kScratchRegister,
4124 temp,
4125 kSaveFPRegs);
4126 }
4127 }
4129 // Do the store.
4130 Register write_register = object;
4131 if (!access.IsInobject()) {
4132 write_register = ToRegister(instr->temp());
4133 __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
4136 if (representation.IsSmi() && SmiValuesAre32Bits() &&
4137 hinstr->value()->representation().IsInteger32()) {
4138 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4139 if (FLAG_debug_code) {
4140 Register scratch = kScratchRegister;
4141 __ Load(scratch, FieldOperand(write_register, offset), representation);
4142 __ AssertSmi(scratch);
4144 // Store int value directly to upper half of the smi.
4145 STATIC_ASSERT(kSmiTag == 0);
4146 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
4147 offset += kPointerSize / 2;
4148 representation = Representation::Integer32();
4151 Operand operand = FieldOperand(write_register, offset);
4153 if (instr->value()->IsRegister()) {
4154 Register value = ToRegister(instr->value());
4155 __ Store(operand, value, representation);
4156 } else {
4157 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4158 if (IsInteger32Constant(operand_value)) {
4159 DCHECK(!hinstr->NeedsWriteBarrier());
4160 int32_t value = ToInteger32(operand_value);
4161 if (representation.IsSmi()) {
4162 __ Move(operand, Smi::FromInt(value));
4164 } else {
4165 __ movl(operand, Immediate(value));
4166 }
4168 } else {
4169 Handle<Object> handle_value = ToHandle(operand_value);
4170 DCHECK(!hinstr->NeedsWriteBarrier());
4171 __ Move(operand, handle_value);
4175 if (hinstr->NeedsWriteBarrier()) {
4176 Register value = ToRegister(instr->value());
4177 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
4178 // Update the write barrier for the object for in-object properties.
4179 __ RecordWriteField(write_register,
4180 offset,
4181 value,
4182 temp,
4183 kSaveFPRegs,
4184 EMIT_REMEMBERED_SET,
4185 hinstr->SmiCheckForWriteBarrier(),
4186 hinstr->PointersToHereCheckForValue());
4191 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4192 DCHECK(ToRegister(instr->context()).is(rsi));
4193 DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister()));
4194 DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister()));
4196 __ Move(StoreIC::NameRegister(), instr->hydrogen()->name());
4197 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4198 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4202 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4203 Representation representation = instr->hydrogen()->length()->representation();
4204 DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
4205 DCHECK(representation.IsSmiOrInteger32());
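4206 // The generated comparison is length <cc> index; when the length is a constant the operands are swapped, so the condition is commuted below.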
4207 Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal;
4208 if (instr->length()->IsConstantOperand()) {
4209 int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
4210 Register index = ToRegister(instr->index());
4211 if (representation.IsSmi()) {
4212 __ Cmp(index, Smi::FromInt(length));
4213 } else {
4214 __ cmpl(index, Immediate(length));
4215 }
4216 cc = CommuteCondition(cc);
4217 } else if (instr->index()->IsConstantOperand()) {
4218 int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
4219 if (instr->length()->IsRegister()) {
4220 Register length = ToRegister(instr->length());
4221 if (representation.IsSmi()) {
4222 __ Cmp(length, Smi::FromInt(index));
4224 __ cmpl(length, Immediate(index));
4227 Operand length = ToOperand(instr->length());
4228 if (representation.IsSmi()) {
4229 __ Cmp(length, Smi::FromInt(index));
4231 __ cmpl(length, Immediate(index));
4235 Register index = ToRegister(instr->index());
4236 if (instr->length()->IsRegister()) {
4237 Register length = ToRegister(instr->length());
4238 if (representation.IsSmi()) {
4239 __ cmpp(length, index);
4241 __ cmpl(length, index);
4244 Operand length = ToOperand(instr->length());
4245 if (representation.IsSmi()) {
4246 __ cmpp(length, index);
4248 __ cmpl(length, index);
4252 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4253 Label done;
4254 __ j(NegateCondition(cc), &done, Label::kNear);
4255 __ int3();
4256 __ bind(&done);
4257 } else {
4258 DeoptimizeIf(cc, instr->environment());
4259 }
4260 }
4263 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4264 ElementsKind elements_kind = instr->elements_kind();
4265 LOperand* key = instr->key();
4266 if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
4267 Register key_reg = ToRegister(key);
4268 Representation key_representation =
4269 instr->hydrogen()->key()->representation();
4270 if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
4271 __ SmiToInteger64(key_reg, key_reg);
4272 } else if (instr->hydrogen()->IsDehoisted()) {
4273 // Sign extend key because it could be a 32 bit negative value
4274 // and the dehoisted address computation happens in 64 bits
4275 __ movsxlq(key_reg, key_reg);
4276 }
4277 }
4278 Operand operand(BuildFastArrayOperand(
4279 instr->elements(),
4280 key,
4281 instr->hydrogen()->key()->representation(),
4282 elements_kind,
4283 instr->base_offset()));
4285 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4286 elements_kind == FLOAT32_ELEMENTS) {
4287 XMMRegister value(ToDoubleRegister(instr->value()));
4288 __ cvtsd2ss(value, value);
4289 __ movss(operand, value);
4290 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4291 elements_kind == FLOAT64_ELEMENTS) {
4292 __ movsd(operand, ToDoubleRegister(instr->value()));
4293 } else {
4294 Register value(ToRegister(instr->value()));
4295 switch (elements_kind) {
4296 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4297 case EXTERNAL_INT8_ELEMENTS:
4298 case EXTERNAL_UINT8_ELEMENTS:
4299 case INT8_ELEMENTS:
4300 case UINT8_ELEMENTS:
4301 case UINT8_CLAMPED_ELEMENTS:
4302 __ movb(operand, value);
4303 break;
4304 case EXTERNAL_INT16_ELEMENTS:
4305 case EXTERNAL_UINT16_ELEMENTS:
4306 case INT16_ELEMENTS:
4307 case UINT16_ELEMENTS:
4308 __ movw(operand, value);
4309 break;
4310 case EXTERNAL_INT32_ELEMENTS:
4311 case EXTERNAL_UINT32_ELEMENTS:
4312 case INT32_ELEMENTS:
4313 case UINT32_ELEMENTS:
4314 __ movl(operand, value);
4315 break;
4316 case EXTERNAL_FLOAT32_ELEMENTS:
4317 case EXTERNAL_FLOAT64_ELEMENTS:
4318 case FLOAT32_ELEMENTS:
4319 case FLOAT64_ELEMENTS:
4320 case FAST_ELEMENTS:
4321 case FAST_SMI_ELEMENTS:
4322 case FAST_DOUBLE_ELEMENTS:
4323 case FAST_HOLEY_ELEMENTS:
4324 case FAST_HOLEY_SMI_ELEMENTS:
4325 case FAST_HOLEY_DOUBLE_ELEMENTS:
4326 case DICTIONARY_ELEMENTS:
4327 case SLOPPY_ARGUMENTS_ELEMENTS:
4328 UNREACHABLE();
4329 break;
4330 }
4331 }
4332 }
4335 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4336 XMMRegister value = ToDoubleRegister(instr->value());
4337 LOperand* key = instr->key();
4338 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
4339 instr->hydrogen()->IsDehoisted()) {
4340 // Sign extend key because it could be a 32 bit negative value
4341 // and the dehoisted address computation happens in 64 bits
4342 __ movsxlq(ToRegister(key), ToRegister(key));
4343 }
4344 if (instr->NeedsCanonicalization()) {
4345 Label have_value;
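4346 // Canonicalize NaNs before the store so the hole NaN bit pattern never ends up in a FixedDoubleArray.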
4347 __ ucomisd(value, value);
4348 __ j(parity_odd, &have_value, Label::kNear); // NaN.
4350 __ Set(kScratchRegister, BitCast<uint64_t>(
4351 FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
4352 __ movq(value, kScratchRegister);
4354 __ bind(&have_value);
4357 Operand double_store_operand = BuildFastArrayOperand(
4358 instr->elements(),
4359 key,
4360 instr->hydrogen()->key()->representation(),
4361 FAST_DOUBLE_ELEMENTS,
4362 instr->base_offset());
4364 __ movsd(double_store_operand, value);
4368 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4369 HStoreKeyed* hinstr = instr->hydrogen();
4370 LOperand* key = instr->key();
4371 int offset = instr->base_offset();
4372 Representation representation = hinstr->value()->representation();
4374 if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
4375 instr->hydrogen()->IsDehoisted()) {
4376 // Sign extend key because it could be a 32 bit negative value
4377 // and the dehoisted address computation happens in 64 bits
4378 __ movsxlq(ToRegister(key), ToRegister(key));
4380 if (representation.IsInteger32() && SmiValuesAre32Bits()) {
4381 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4382 DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
4383 if (FLAG_debug_code) {
4384 Register scratch = kScratchRegister;
4385 __ Load(scratch,
4386 BuildFastArrayOperand(instr->elements(),
4387 key,
4388 instr->hydrogen()->key()->representation(),
4389 FAST_ELEMENTS,
4390 offset),
4391 Representation::Smi());
4392 __ AssertSmi(scratch);
4393 }
4394 // Store int value directly to upper half of the smi.
4395 STATIC_ASSERT(kSmiTag == 0);
4396 DCHECK(kSmiTagSize + kSmiShiftSize == 32);
4397 offset += kPointerSize / 2;
4398 }
4400 Operand operand =
4401 BuildFastArrayOperand(instr->elements(),
4402 key,
4403 instr->hydrogen()->key()->representation(),
4404 FAST_ELEMENTS,
4405 offset);
4406 if (instr->value()->IsRegister()) {
4407 __ Store(operand, ToRegister(instr->value()), representation);
4408 } else {
4409 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4410 if (IsInteger32Constant(operand_value)) {
4411 int32_t value = ToInteger32(operand_value);
4412 if (representation.IsSmi()) {
4413 __ Move(operand, Smi::FromInt(value));
4415 } else {
4416 __ movl(operand, Immediate(value));
4417 }
4418 } else {
4419 Handle<Object> handle_value = ToHandle(operand_value);
4420 __ Move(operand, handle_value);
4421 }
4422 }
4424 if (hinstr->NeedsWriteBarrier()) {
4425 Register elements = ToRegister(instr->elements());
4426 DCHECK(instr->value()->IsRegister());
4427 Register value = ToRegister(instr->value());
4428 DCHECK(!key->IsConstantOperand());
4429 SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
4430 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4431 // Compute address of modified element and store it into key register.
4432 Register key_reg(ToRegister(key));
4433 __ leap(key_reg, operand);
4434 __ RecordWrite(elements,
4435 key_reg,
4436 value,
4437 kSaveFPRegs,
4438 EMIT_REMEMBERED_SET,
4439 check_needed,
4440 hinstr->PointersToHereCheckForValue());
4441 }
4442 }
4445 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4446 if (instr->is_typed_elements()) {
4447 DoStoreKeyedExternalArray(instr);
4448 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4449 DoStoreKeyedFixedDoubleArray(instr);
4450 } else {
4451 DoStoreKeyedFixedArray(instr);
4452 }
4453 }
4456 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4457 DCHECK(ToRegister(instr->context()).is(rsi));
4458 DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister()));
4459 DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister()));
4460 DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister()));
4462 Handle<Code> ic = instr->strict_mode() == STRICT
4463 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4464 : isolate()->builtins()->KeyedStoreIC_Initialize();
4465 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4469 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4470 Register object_reg = ToRegister(instr->object());
4472 Handle<Map> from_map = instr->original_map();
4473 Handle<Map> to_map = instr->transitioned_map();
4474 ElementsKind from_kind = instr->from_kind();
4475 ElementsKind to_kind = instr->to_kind();
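4476 // A simple map change only installs the new map (with a map write barrier); all other transitions go through the TransitionElementsKindStub.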
4477 Label not_applicable;
4478 __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
4479 __ j(not_equal, &not_applicable);
4480 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4481 Register new_map_reg = ToRegister(instr->new_map_temp());
4482 __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
4483 __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
4485 __ RecordWriteForMap(object_reg, new_map_reg, ToRegister(instr->temp()),
4486 kDontSaveFPRegs);
4487 } else {
4488 DCHECK(object_reg.is(rax));
4489 DCHECK(ToRegister(instr->context()).is(rsi));
4490 PushSafepointRegistersScope scope(this);
4491 __ Move(rbx, to_map);
4492 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4493 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4494 __ CallStub(&stub);
4495 RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
4496 }
4497 __ bind(&not_applicable);
4498 }
4501 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4502 Register object = ToRegister(instr->object());
4503 Register temp = ToRegister(instr->temp());
4504 Label no_memento_found;
4505 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4506 DeoptimizeIf(equal, instr->environment());
4507 __ bind(&no_memento_found);
4511 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4512 DCHECK(ToRegister(instr->context()).is(rsi));
4513 DCHECK(ToRegister(instr->left()).is(rdx));
4514 DCHECK(ToRegister(instr->right()).is(rax));
4515 StringAddStub stub(isolate(),
4516 instr->hydrogen()->flags(),
4517 instr->hydrogen()->pretenure_flag());
4518 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4522 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4523 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
4525 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4526 : LDeferredCode(codegen), instr_(instr) { }
4527 virtual void Generate() V8_OVERRIDE {
4528 codegen()->DoDeferredStringCharCodeAt(instr_);
4530 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4532 LStringCharCodeAt* instr_;
4535 DeferredStringCharCodeAt* deferred =
4536 new(zone()) DeferredStringCharCodeAt(this, instr);
4538 StringCharLoadGenerator::Generate(masm(),
4539 ToRegister(instr->string()),
4540 ToRegister(instr->index()),
4541 ToRegister(instr->result()),
4542 deferred->entry());
4543 __ bind(deferred->exit());
4547 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4548 Register string = ToRegister(instr->string());
4549 Register result = ToRegister(instr->result());
4551 // TODO(3095996): Get rid of this. For now, we need to make the
4552 // result register contain a valid pointer because it is already
4553 // contained in the register pointer map.
4554 __ Set(result, 0);
4556 PushSafepointRegistersScope scope(this);
4557 __ Push(string);
4558 // Push the index as a smi. This is safe because of the checks in
4559 // DoStringCharCodeAt above.
4560 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
4561 if (instr->index()->IsConstantOperand()) {
4562 int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4563 __ Push(Smi::FromInt(const_index));
4564 } else {
4565 Register index = ToRegister(instr->index());
4566 __ Integer32ToSmi(index, index);
4567 __ Push(index);
4568 }
4569 CallRuntimeFromDeferred(
4570 Runtime::kStringCharCodeAtRT, 2, instr, instr->context());
4572 __ SmiToInteger32(rax, rax);
4573 __ StoreToSafepointRegisterSlot(result, rax);
4577 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4578 class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
4580 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4581 : LDeferredCode(codegen), instr_(instr) { }
4582 virtual void Generate() V8_OVERRIDE {
4583 codegen()->DoDeferredStringCharFromCode(instr_);
4585 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4587 LStringCharFromCode* instr_;
4590 DeferredStringCharFromCode* deferred =
4591 new(zone()) DeferredStringCharFromCode(this, instr);
4593 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4594 Register char_code = ToRegister(instr->char_code());
4595 Register result = ToRegister(instr->result());
4596 DCHECK(!char_code.is(result));
4598 __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
4599 __ j(above, deferred->entry());
4600 __ movsxlq(char_code, char_code);
4601 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4602 __ movp(result, FieldOperand(result,
4603 char_code, times_pointer_size,
4604 FixedArray::kHeaderSize));
4605 __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
4606 __ j(equal, deferred->entry());
4607 __ bind(deferred->exit());
4611 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4612 Register char_code = ToRegister(instr->char_code());
4613 Register result = ToRegister(instr->result());
4615 // TODO(3095996): Get rid of this. For now, we need to make the
4616 // result register contain a valid pointer because it is already
4617 // contained in the register pointer map.
4618 __ Set(result, 0);
4620 PushSafepointRegistersScope scope(this);
4621 __ Integer32ToSmi(char_code, char_code);
4622 __ Push(char_code);
4623 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4624 __ StoreToSafepointRegisterSlot(result, rax);
4628 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4629 LOperand* input = instr->value();
4630 DCHECK(input->IsRegister() || input->IsStackSlot());
4631 LOperand* output = instr->result();
4632 DCHECK(output->IsDoubleRegister());
4633 if (input->IsRegister()) {
4634 __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
4635 } else {
4636 __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
4637 }
4638 }
4641 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4642 LOperand* input = instr->value();
4643 LOperand* output = instr->result();
4645 __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
4649 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4650 class DeferredNumberTagI V8_FINAL : public LDeferredCode {
4652 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4653 : LDeferredCode(codegen), instr_(instr) { }
4654 virtual void Generate() V8_OVERRIDE {
4655 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4656 instr_->temp2(), SIGNED_INT32);
4658 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4660 LNumberTagI* instr_;
4663 LOperand* input = instr->value();
4664 DCHECK(input->IsRegister() && input->Equals(instr->result()));
4665 Register reg = ToRegister(input);
4667 if (SmiValuesAre32Bits()) {
4668 __ Integer32ToSmi(reg, reg);
4669 } else {
4670 DCHECK(SmiValuesAre31Bits());
4671 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4672 __ Integer32ToSmi(reg, reg);
4673 __ j(overflow, deferred->entry());
4674 __ bind(deferred->exit());
4679 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4680 class DeferredNumberTagU V8_FINAL : public LDeferredCode {
4682 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4683 : LDeferredCode(codegen), instr_(instr) { }
4684 virtual void Generate() V8_OVERRIDE {
4685 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4686 instr_->temp2(), UNSIGNED_INT32);
4688 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4690 LNumberTagU* instr_;
4693 LOperand* input = instr->value();
4694 DCHECK(input->IsRegister() && input->Equals(instr->result()));
4695 Register reg = ToRegister(input);
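4696 // Unsigned values above Smi::kMaxValue cannot be represented as a smi and are boxed into a heap number by the deferred code.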
4697 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4698 __ cmpl(reg, Immediate(Smi::kMaxValue));
4699 __ j(above, deferred->entry());
4700 __ Integer32ToSmi(reg, reg);
4701 __ bind(deferred->exit());
4705 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4706 LOperand* value,
4707 LOperand* temp1,
4708 LOperand* temp2,
4709 IntegerSignedness signedness) {
4710 Label done, slow;
4711 Register reg = ToRegister(value);
4712 Register tmp = ToRegister(temp1);
4713 XMMRegister temp_xmm = ToDoubleRegister(temp2);
4715 // Load value into temp_xmm which will be preserved across potential call to
4716 // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
4717 // XMM registers on x64).
4718 if (signedness == SIGNED_INT32) {
4719 DCHECK(SmiValuesAre31Bits());
4720 // There was overflow, so bits 30 and 31 of the original integer
4721 // disagree. Try to allocate a heap number in new space and store
4722 // the value in there. If that fails, call the runtime system.
4723 __ SmiToInteger32(reg, reg);
4724 __ xorl(reg, Immediate(0x80000000));
4725 __ cvtlsi2sd(temp_xmm, reg);
4726 } else {
4727 DCHECK(signedness == UNSIGNED_INT32);
4728 __ LoadUint32(temp_xmm, reg);
4731 if (FLAG_inline_new) {
4732 __ AllocateHeapNumber(reg, tmp, &slow);
4733 __ jmp(&done, kPointerSize == kInt64Size ? Label::kNear : Label::kFar);
4736 // Slow case: Call the runtime system to do the number allocation.
4737 __ bind(&slow);
4738 {
4739 // Put a valid pointer value in the stack slot where the result
4740 // register is stored, as this register is in the pointer map, but contains
4741 // an integer value.
4742 __ Set(reg, 0);
4744 // Preserve the value of all registers.
4745 PushSafepointRegistersScope scope(this);
4747 // NumberTagIU uses the context from the frame, rather than
4748 // the environment's HContext or HInlinedContext value.
4749 // They only call Runtime::kAllocateHeapNumber.
4750 // The corresponding HChange instructions are added in a phase that does
4751 // not have easy access to the local context.
4752 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
4753 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4754 RecordSafepointWithRegisters(
4755 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4756 __ StoreToSafepointRegisterSlot(reg, rax);
4757 }
4759 // Done. Put the value in temp_xmm into the value of the allocated heap
4760 // number.
4761 __ bind(&done);
4762 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
4763 }
4766 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4767 class DeferredNumberTagD V8_FINAL : public LDeferredCode {
4769 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4770 : LDeferredCode(codegen), instr_(instr) { }
4771 virtual void Generate() V8_OVERRIDE {
4772 codegen()->DoDeferredNumberTagD(instr_);
4774 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4776 LNumberTagD* instr_;
4779 XMMRegister input_reg = ToDoubleRegister(instr->value());
4780 Register reg = ToRegister(instr->result());
4781 Register tmp = ToRegister(instr->temp());
4783 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4784 if (FLAG_inline_new) {
4785 __ AllocateHeapNumber(reg, tmp, deferred->entry());
4786 } else {
4787 __ jmp(deferred->entry());
4788 }
4789 __ bind(deferred->exit());
4790 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
4794 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4795 // TODO(3095996): Get rid of this. For now, we need to make the
4796 // result register contain a valid pointer because it is already
4797 // contained in the register pointer map.
4798 Register reg = ToRegister(instr->result());
4799 __ Move(reg, Smi::FromInt(0));
4802 PushSafepointRegistersScope scope(this);
4803 // NumberTagD uses the context from the frame, rather than
4804 // the environment's HContext or HInlinedContext value.
4805 // They only call Runtime::kAllocateHeapNumber.
4806 // The corresponding HChange instructions are added in a phase that does
4807 // not have easy access to the local context.
4808 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
4809 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4810 RecordSafepointWithRegisters(
4811 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4812 __ movp(kScratchRegister, rax);
4814 __ movp(reg, kScratchRegister);
4818 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4819 HChange* hchange = instr->hydrogen();
4820 Register input = ToRegister(instr->value());
4821 Register output = ToRegister(instr->result());
4822 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4823 hchange->value()->CheckFlag(HValue::kUint32)) {
4824 Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
4825 DeoptimizeIf(NegateCondition(is_smi), instr->environment());
4827 __ Integer32ToSmi(output, input);
4828 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4829 !hchange->value()->CheckFlag(HValue::kUint32)) {
4830 DeoptimizeIf(overflow, instr->environment());
4835 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4836 DCHECK(instr->value()->Equals(instr->result()));
4837 Register input = ToRegister(instr->value());
4838 if (instr->needs_check()) {
4839 Condition is_smi = __ CheckSmi(input);
4840 DeoptimizeIf(NegateCondition(is_smi), instr->environment());
4841 } else {
4842 __ AssertSmi(input);
4843 }
4844 __ SmiToInteger32(input, input);
4848 void LCodeGen::EmitNumberUntagD(Register input_reg,
4849 XMMRegister result_reg,
4850 bool can_convert_undefined_to_nan,
4851 bool deoptimize_on_minus_zero,
4852 LEnvironment* env,
4853 NumberUntagDMode mode) {
4854 Label convert, load_smi, done;
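4855 // Smis are converted via the load_smi path below, heap numbers load their double value directly, and undefined may optionally become NaN.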
4856 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4858 __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
4860 // Heap number map check.
4861 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
4862 Heap::kHeapNumberMapRootIndex);
4864 // On x64 it is safe to load at heap number offset before evaluating the map
4865 // check, since all heap objects are at least two words long.
4866 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
4868 if (can_convert_undefined_to_nan) {
4869 __ j(not_equal, &convert, Label::kNear);
4870 } else {
4871 DeoptimizeIf(not_equal, env);
4872 }
4874 if (deoptimize_on_minus_zero) {
4875 XMMRegister xmm_scratch = double_scratch0();
4876 __ xorps(xmm_scratch, xmm_scratch);
4877 __ ucomisd(xmm_scratch, result_reg);
4878 __ j(not_equal, &done, Label::kNear);
4879 __ movmskpd(kScratchRegister, result_reg);
4880 __ testq(kScratchRegister, Immediate(1));
4881 DeoptimizeIf(not_zero, env);
4882 }
4883 __ jmp(&done, Label::kNear);
4885 if (can_convert_undefined_to_nan) {
4886 __ bind(&convert);
4888 // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
4889 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
4890 DeoptimizeIf(not_equal, env);
4892 __ xorps(result_reg, result_reg);
4893 __ divsd(result_reg, result_reg);
4894 __ jmp(&done, Label::kNear);
4895 }
4896 } else {
4897 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4898 }
4900 // Smi to XMM conversion
4901 __ bind(&load_smi);
4902 __ SmiToInteger32(kScratchRegister, input_reg);
4903 __ Cvtlsi2sd(result_reg, kScratchRegister);
4904 __ bind(&done);
4905 }
4908 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
4909 Register input_reg = ToRegister(instr->value());
4911 if (instr->truncating()) {
4912 Label no_heap_number, check_bools, check_false;
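4913 // Truncating conversions also accept the oddballs undefined, true and false; any other non-heap-number input deoptimizes.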
4914 // Heap number map check.
4915 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
4916 Heap::kHeapNumberMapRootIndex);
4917 __ j(not_equal, &no_heap_number, Label::kNear);
4918 __ TruncateHeapNumberToI(input_reg, input_reg);
4919 __ jmp(done);
4921 __ bind(&no_heap_number);
4922 // Check for Oddballs. Undefined/False is converted to zero and True to one
4923 // for truncating conversions.
4924 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
4925 __ j(not_equal, &check_bools, Label::kNear);
4926 __ Set(input_reg, 0);
4927 __ jmp(done);
4929 __ bind(&check_bools);
4930 __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
4931 __ j(not_equal, &check_false, Label::kNear);
4932 __ Set(input_reg, 1);
4933 __ jmp(done);
4935 __ bind(&check_false);
4936 __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
4937 __ RecordComment("Deferred TaggedToI: cannot truncate");
4938 DeoptimizeIf(not_equal, instr->environment());
4939 __ Set(input_reg, 0);
4941 } else {
4942 Label bailout;
4943 XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
4944 __ TaggedToI(input_reg, input_reg, xmm_temp,
4945 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
4947 __ jmp(done, Label::kNear);
4948 __ bind(&bailout);
4949 DeoptimizeIf(no_condition, instr->environment());
4950 }
4954 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4955 class DeferredTaggedToI V8_FINAL : public LDeferredCode {
4957 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4958 : LDeferredCode(codegen), instr_(instr) { }
4959 virtual void Generate() V8_OVERRIDE {
4960 codegen()->DoDeferredTaggedToI(instr_, done());
4962 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4967 LOperand* input = instr->value();
4968 DCHECK(input->IsRegister());
4969 DCHECK(input->Equals(instr->result()));
4970 Register input_reg = ToRegister(input);
4972 if (instr->hydrogen()->value()->representation().IsSmi()) {
4973 __ SmiToInteger32(input_reg, input_reg);
4974 } else {
4975 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4976 __ JumpIfNotSmi(input_reg, deferred->entry());
4977 __ SmiToInteger32(input_reg, input_reg);
4978 __ bind(deferred->exit());
4983 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4984 LOperand* input = instr->value();
4985 DCHECK(input->IsRegister());
4986 LOperand* result = instr->result();
4987 DCHECK(result->IsDoubleRegister());
4989 Register input_reg = ToRegister(input);
4990 XMMRegister result_reg = ToDoubleRegister(result);
4992 HValue* value = instr->hydrogen()->value();
4993 NumberUntagDMode mode = value->representation().IsSmi()
4994 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4996 EmitNumberUntagD(input_reg, result_reg,
4997 instr->hydrogen()->can_convert_undefined_to_nan(),
4998 instr->hydrogen()->deoptimize_on_minus_zero(),
4999 instr->environment(),
5000 mode);
5001 }
5004 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5005 LOperand* input = instr->value();
5006 DCHECK(input->IsDoubleRegister());
5007 LOperand* result = instr->result();
5008 DCHECK(result->IsRegister());
5010 XMMRegister input_reg = ToDoubleRegister(input);
5011 Register result_reg = ToRegister(result);
5013 if (instr->truncating()) {
5014 __ TruncateDoubleToI(result_reg, input_reg);
5015 } else {
5016 Label bailout, done;
5017 XMMRegister xmm_scratch = double_scratch0();
5018 __ DoubleToI(result_reg, input_reg, xmm_scratch,
5019 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
5021 __ jmp(&done, Label::kNear);
5022 __ bind(&bailout);
5023 DeoptimizeIf(no_condition, instr->environment());
5024 __ bind(&done);
5025 }
5026 }
5029 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5030 LOperand* input = instr->value();
5031 DCHECK(input->IsDoubleRegister());
5032 LOperand* result = instr->result();
5033 DCHECK(result->IsRegister());
5035 XMMRegister input_reg = ToDoubleRegister(input);
5036 Register result_reg = ToRegister(result);
5038 Label bailout, done;
5039 XMMRegister xmm_scratch = double_scratch0();
5040 __ DoubleToI(result_reg, input_reg, xmm_scratch,
5041 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
5043 __ jmp(&done, Label::kNear);
5044 __ bind(&bailout);
5045 DeoptimizeIf(no_condition, instr->environment());
5046 __ bind(&done);
5048 __ Integer32ToSmi(result_reg, result_reg);
5049 DeoptimizeIf(overflow, instr->environment());
5053 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5054 LOperand* input = instr->value();
5055 Condition cc = masm()->CheckSmi(ToRegister(input));
5056 DeoptimizeIf(NegateCondition(cc), instr->environment());
5060 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5061 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5062 LOperand* input = instr->value();
5063 Condition cc = masm()->CheckSmi(ToRegister(input));
5064 DeoptimizeIf(cc, instr->environment());
5069 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5070 Register input = ToRegister(instr->value());
5072 __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
5074 if (instr->hydrogen()->is_interval_check()) {
5075 InstanceType first;
5076 InstanceType last;
5077 instr->hydrogen()->GetCheckInterval(&first, &last);
5079 __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
5080 Immediate(static_cast<int8_t>(first)));
5082 // If there is only one type in the interval check for equality.
5083 if (first == last) {
5084 DeoptimizeIf(not_equal, instr->environment());
5085 } else {
5086 DeoptimizeIf(below, instr->environment());
5087 // Omit check for the last type.
5088 if (last != LAST_TYPE) {
5089 __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
5090 Immediate(static_cast<int8_t>(last)));
5091 DeoptimizeIf(above, instr->environment());
5092 }
5093 }
5094 } else {
5095 uint8_t mask;
5096 uint8_t tag;
5097 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5099 if (IsPowerOf2(mask)) {
5100 DCHECK(tag == 0 || IsPowerOf2(tag));
5101 __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
5102 Immediate(mask));
5103 DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
5104 } else {
5105 __ movzxbl(kScratchRegister,
5106 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
5107 __ andb(kScratchRegister, Immediate(mask));
5108 __ cmpb(kScratchRegister, Immediate(tag));
5109 DeoptimizeIf(not_equal, instr->environment());
5110 }
5111 }
5112 }
5115 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5116 Register reg = ToRegister(instr->value());
5117 __ Cmp(reg, instr->hydrogen()->object().handle());
5118 DeoptimizeIf(not_equal, instr->environment());
5122 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5123 {
5124 PushSafepointRegistersScope scope(this);
5125 __ Push(object);
5126 __ Set(rsi, 0);
5127 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5128 RecordSafepointWithRegisters(
5129 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5131 __ testp(rax, Immediate(kSmiTagMask));
5132 }
5133 DeoptimizeIf(zero, instr->environment());
5134 }
5137 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5138 class DeferredCheckMaps V8_FINAL : public LDeferredCode {
5140 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5141 : LDeferredCode(codegen), instr_(instr), object_(object) {
5142 SetExit(check_maps());
5144 virtual void Generate() V8_OVERRIDE {
5145 codegen()->DoDeferredInstanceMigration(instr_, object_);
5147 Label* check_maps() { return &check_maps_; }
5148 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5155 if (instr->hydrogen()->IsStabilityCheck()) {
5156 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5157 for (int i = 0; i < maps->size(); ++i) {
5158 AddStabilityDependency(maps->at(i).handle());
5159 }
5160 return;
5161 }
5163 LOperand* input = instr->value();
5164 DCHECK(input->IsRegister());
5165 Register reg = ToRegister(input);
5167 DeferredCheckMaps* deferred = NULL;
5168 if (instr->hydrogen()->HasMigrationTarget()) {
5169 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5170 __ bind(deferred->check_maps());
5171 }
5173 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5174 Label success;
5175 for (int i = 0; i < maps->size() - 1; i++) {
5176 Handle<Map> map = maps->at(i).handle();
5177 __ CompareMap(reg, map);
5178 __ j(equal, &success, Label::kNear);
5179 }
5181 Handle<Map> map = maps->at(maps->size() - 1).handle();
5182 __ CompareMap(reg, map);
5183 if (instr->hydrogen()->HasMigrationTarget()) {
5184 __ j(not_equal, deferred->entry());
5185 } else {
5186 DeoptimizeIf(not_equal, instr->environment());
5187 }
5189 __ bind(&success);
5190 }
5193 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5194 XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
5195 XMMRegister xmm_scratch = double_scratch0();
5196 Register result_reg = ToRegister(instr->result());
5197 __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
5201 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5202 DCHECK(instr->unclamped()->Equals(instr->result()));
5203 Register value_reg = ToRegister(instr->result());
5204 __ ClampUint8(value_reg);
5208 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5209 DCHECK(instr->unclamped()->Equals(instr->result()));
5210 Register input_reg = ToRegister(instr->unclamped());
5211 XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
5212 XMMRegister xmm_scratch = double_scratch0();
5213 Label is_smi, done, heap_number;
5214 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5215 __ JumpIfSmi(input_reg, &is_smi, dist);
5217 // Check for heap number
5218 __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5219 factory()->heap_number_map());
5220 __ j(equal, &heap_number, Label::kNear);
5222 // Check for undefined. Undefined is converted to zero for clamping
5223 // conversions.
5224 __ Cmp(input_reg, factory()->undefined_value());
5225 DeoptimizeIf(not_equal, instr->environment());
5226 __ xorl(input_reg, input_reg);
5227 __ jmp(&done, Label::kNear);
5230 __ bind(&heap_number);
5231 __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
5232 __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
5233 __ jmp(&done, Label::kNear);
5235 // smi
5236 __ bind(&is_smi);
5237 __ SmiToInteger32(input_reg, input_reg);
5238 __ ClampUint8(input_reg);
5240 __ bind(&done);
5241 }
5244 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5245 XMMRegister value_reg = ToDoubleRegister(instr->value());
5246 Register result_reg = ToRegister(instr->result());
5247 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5248 __ movq(result_reg, value_reg);
5249 __ shrq(result_reg, Immediate(32));
5250 } else {
5251 __ movd(result_reg, value_reg);
5252 }
5253 }
5256 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5257 Register hi_reg = ToRegister(instr->hi());
5258 Register lo_reg = ToRegister(instr->lo());
5259 XMMRegister result_reg = ToDoubleRegister(instr->result());
5260 XMMRegister xmm_scratch = double_scratch0();
5261 __ movd(result_reg, hi_reg);
5262 __ psllq(result_reg, 32);
5263 __ movd(xmm_scratch, lo_reg);
5264 __ orps(result_reg, xmm_scratch);
5268 void LCodeGen::DoAllocate(LAllocate* instr) {
5269 class DeferredAllocate V8_FINAL : public LDeferredCode {
5271 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5272 : LDeferredCode(codegen), instr_(instr) { }
5273 virtual void Generate() V8_OVERRIDE {
5274 codegen()->DoDeferredAllocate(instr_);
5276 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5281 DeferredAllocate* deferred =
5282 new(zone()) DeferredAllocate(this, instr);
5284 Register result = ToRegister(instr->result());
5285 Register temp = ToRegister(instr->temp());
5287 // Allocate memory for the object.
5288 AllocationFlags flags = TAG_OBJECT;
5289 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5290 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5292 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5293 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5294 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5295 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5296 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5297 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5298 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5301 if (instr->size()->IsConstantOperand()) {
5302 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5303 if (size <= Page::kMaxRegularHeapObjectSize) {
5304 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5305 } else {
5306 __ jmp(deferred->entry());
5307 }
5308 } else {
5309 Register size = ToRegister(instr->size());
5310 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5311 }
5313 __ bind(deferred->exit());
5315 if (instr->hydrogen()->MustPrefillWithFiller()) {
5316 if (instr->size()->IsConstantOperand()) {
5317 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5318 __ movl(temp, Immediate((size / kPointerSize) - 1));
5319 } else {
5320 temp = ToRegister(instr->size());
5321 __ sarp(temp, Immediate(kPointerSizeLog2));
5322 __ decl(temp);
5323 }
5324 Label loop;
5325 __ bind(&loop);
5326 __ Move(FieldOperand(result, temp, times_pointer_size, 0),
5327 isolate()->factory()->one_pointer_filler_map());
5328 __ decl(temp);
5329 __ j(not_zero, &loop);
5330 }
5331 }
5334 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5335 Register result = ToRegister(instr->result());
5337 // TODO(3095996): Get rid of this. For now, we need to make the
5338 // result register contain a valid pointer because it is already
5339 // contained in the register pointer map.
5340 __ Move(result, Smi::FromInt(0));
5342 PushSafepointRegistersScope scope(this);
5343 if (instr->size()->IsRegister()) {
5344 Register size = ToRegister(instr->size());
5345 DCHECK(!size.is(result));
5346 __ Integer32ToSmi(size, size);
5347 __ Push(size);
5348 } else {
5349 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5350 __ Push(Smi::FromInt(size));
5351 }
5353 int flags = 0;
5354 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5355 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5356 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5357 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
5358 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5359 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5360 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
5361 } else {
5362 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5363 }
5364 __ Push(Smi::FromInt(flags));
5366 CallRuntimeFromDeferred(
5367 Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5368 __ StoreToSafepointRegisterSlot(result, rax);
5372 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5373 DCHECK(ToRegister(instr->value()).is(rax));
5375 CallRuntime(Runtime::kToFastProperties, 1, instr);
5379 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5380 DCHECK(ToRegister(instr->context()).is(rsi));
5381 Label materialized;
5382 // Registers will be used as follows:
5383 // rcx = literals array.
5384 // rbx = regexp literal.
5385 // rax = regexp literal clone.
5386 int literal_offset =
5387 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5388 __ Move(rcx, instr->hydrogen()->literals());
5389 __ movp(rbx, FieldOperand(rcx, literal_offset));
5390 __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
5391 __ j(not_equal, &materialized, Label::kNear);
5393 // Create regexp literal using runtime function
5394 // Result will be in rax.
5395 __ Push(rcx);
5396 __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
5397 __ Push(instr->hydrogen()->pattern());
5398 __ Push(instr->hydrogen()->flags());
5399 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5400 __ movp(rbx, rax);
5402 __ bind(&materialized);
5403 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5404 Label allocated, runtime_allocate;
5405 __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
5406 __ jmp(&allocated, Label::kNear);
5408 __ bind(&runtime_allocate);
5409 __ Push(rbx);
5410 __ Push(Smi::FromInt(size));
5411 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5412 __ Pop(rbx);
5414 __ bind(&allocated);
5415 // Copy the content into the newly allocated memory.
5416 // (Unroll copy loop once for better throughput).
5417 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5418 __ movp(rdx, FieldOperand(rbx, i));
5419 __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
5420 __ movp(FieldOperand(rax, i), rdx);
5421 __ movp(FieldOperand(rax, i + kPointerSize), rcx);
5423 if ((size % (2 * kPointerSize)) != 0) {
5424 __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
5425 __ movp(FieldOperand(rax, size - kPointerSize), rdx);
5430 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5431 DCHECK(ToRegister(instr->context()).is(rsi));
5432 // Use the fast case closure allocation code that allocates in new
5433 // space for nested functions that don't need literals cloning.
5434 bool pretenure = instr->hydrogen()->pretenure();
5435 if (!pretenure && instr->hydrogen()->has_no_literals()) {
5436 FastNewClosureStub stub(isolate(),
5437 instr->hydrogen()->strict_mode(),
5438 instr->hydrogen()->is_generator());
5439 __ Move(rbx, instr->hydrogen()->shared_info());
5440 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5441 } else {
5442 __ Push(rsi);
5443 __ Push(instr->hydrogen()->shared_info());
5444 __ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
5445 Heap::kFalseValueRootIndex);
5446 CallRuntime(Runtime::kNewClosure, 3, instr);
5451 void LCodeGen::DoTypeof(LTypeof* instr) {
5452 DCHECK(ToRegister(instr->context()).is(rsi));
5453 LOperand* input = instr->value();
5454 EmitPushTaggedOperand(input);
5455 CallRuntime(Runtime::kTypeof, 1, instr);
5459 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
5460 DCHECK(!operand->IsDoubleRegister());
5461 if (operand->IsConstantOperand()) {
5462 __ Push(ToHandle(LConstantOperand::cast(operand)));
5463 } else if (operand->IsRegister()) {
5464 __ Push(ToRegister(operand));
5465 } else {
5466 __ Push(ToOperand(operand));
5467 }
5468 }
5471 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5472 Register input = ToRegister(instr->value());
5473 Condition final_branch_condition = EmitTypeofIs(instr, input);
5474 if (final_branch_condition != no_condition) {
5475 EmitBranch(instr, final_branch_condition);
5480 Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
5481 Label* true_label = instr->TrueLabel(chunk_);
5482 Label* false_label = instr->FalseLabel(chunk_);
5483 Handle<String> type_name = instr->type_literal();
5484 int left_block = instr->TrueDestination(chunk_);
5485 int right_block = instr->FalseDestination(chunk_);
5486 int next_block = GetNextEmittedBlock();
5488 Label::Distance true_distance = left_block == next_block ? Label::kNear
5489 : Label::kFar;
5490 Label::Distance false_distance = right_block == next_block ? Label::kNear
5491 : Label::kFar;
5492 Condition final_branch_condition = no_condition;
5493 Factory* factory = isolate()->factory();
5494 if (String::Equals(type_name, factory->number_string())) {
5495 __ JumpIfSmi(input, true_label, true_distance);
5496 __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
5497 Heap::kHeapNumberMapRootIndex);
5499 final_branch_condition = equal;
5501 } else if (String::Equals(type_name, factory->string_string())) {
5502 __ JumpIfSmi(input, false_label, false_distance);
5503 __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
5504 __ j(above_equal, false_label, false_distance);
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for undetectable objects => true.
    __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = not_zero;

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label, true_distance);
    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
    __ j(below, false_label, false_distance);
    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ j(above, false_label, false_distance);
    // Check for undetectable objects => false.
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else {
    __ jmp(false_label, false_distance);
  }

  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->temp());

  EmitIsConstructCall(temp);
  EmitBranch(instr, equal);
}


void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ movp(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(not_equal, &check_frame_marker, Label::kNear);
  __ movp(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Smi::FromInt(StackFrame::CONSTRUCT));
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(no_condition, instr->environment(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(above_equal, &done, Label::kNear);

    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(rsi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  DCHECK(ToRegister(instr->context()).is(rsi));
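  // The enumerable object is in rax.  Deoptimize if it is undefined, null,
  // a smi or a JSProxy, since the enum cache fast path below does not handle
  // those cases.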
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(equal, instr->environment());

  Register null_value = rdi;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmpp(rax, null_value);
  DeoptimizeIf(equal, instr->environment());

  Condition cc = masm()->CheckSmi(rax);
  DeoptimizeIf(cc, instr->environment());

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
  DeoptimizeIf(below_equal, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);

  __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ Push(rax);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kMetaMapRootIndex);
  DeoptimizeIf(not_equal, instr->environment());
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
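  // If the map's enum length is zero there is nothing to enumerate; use the
  // empty fixed array instead of loading the enum cache.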
  __ EnumLength(result, map);
  __ Cmp(result, Smi::FromInt(0));
  __ j(not_equal, &load_cache, Label::kNear);
  __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ movp(result,
          FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ movp(result,
          FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  Condition cc = masm()->CheckSmi(result);
  DeoptimizeIf(cc, instr->environment());
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmpp(ToRegister(instr->map()),
          FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
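  // The object and the (still smi-tagged) field index are pushed as the two
  // arguments to the runtime call.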
  __ Push(object);
  __ Push(index);
  __ xorp(rsi, rsi);  // Clear the context register.
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(object, rax);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr), object_(object), index_(index) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LLoadFieldByIndex* instr_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);

  Label out_of_object, done;
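  // If the low bit of the smi-encoded index is set, the field holds a mutable
  // double and is loaded through the deferred runtime call.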
  __ Move(kScratchRegister, Smi::FromInt(1));
  __ testp(index, kScratchRegister);
  __ j(not_zero, deferred->entry());

  __ sarp(index, Immediate(1));

  __ SmiToInteger32(index, index);
  __ cmpl(index, Immediate(0));
  __ j(less, &out_of_object, Label::kNear);
  __ movp(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ negl(index);
  // Index is now equal to out of object property index plus 1.
  __ movp(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), context);
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ Push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64