// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_X64

#include "x64/lithium-codegen-x64.h"
#include "code-stubs.h"
#include "stub-cache.h"
#include "hydrogen-osr.h"

namespace v8 {
namespace internal {


// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}

  virtual void AfterCall() const V8_OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateJumpTable() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  RegisterDependentCodeForEmbeddedMaps(code);
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);
}


void LChunkBuilder::Abort(BailoutReason reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}


#ifdef _MSC_VER
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
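  // Touch each page in order so the OS guard page can grow the stack
  // (Windows commits stack pages lazily). For example, offset == 12 * KB
  // probes rsp+8KB and then rsp+4KB before the new slots are used.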
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ movp(Operand(rsp, offset), rax);
  }
}
#endif


void LCodeGen::SaveCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ movsd(MemOperand(rsp, count * kDoubleSize),
             XMMRegister::FromAllocationIndex(save_iterator.Current()));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
             MemOperand(rsp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ int3();
    }
#endif

    // Classic mode functions need to replace the receiver with the global
    // proxy when called as functions (without an explicit receiver object).
    if (info_->this_has_uses() &&
        info_->is_classic_mode() &&
        !info_->is_native()) {
      Label ok;
      StackArgumentsAccessor args(rsp, scope()->num_parameters());
      __ movp(rcx, args.GetReceiverOperand());

      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
      __ j(not_equal, &ok, Label::kNear);

      __ movp(rcx, GlobalObjectOperand());
      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));

      __ movp(args.GetReceiverOperand(), rcx);

      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    ASSERT(!frame_is_built_);
    frame_is_built_ = true;
    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
    info()->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ subq(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      MakeSureStackPagesMapped(slots * kPointerSize);
#endif
      __ push(rax);
      __ Set(rax, slots);
      __ movq(kScratchRegister, kSlotsZapValue);
      Label loop;
      __ bind(&loop);
      __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
              kScratchRegister);
      __ decl(rax);
      __ j(not_zero, &loop);
      __ pop(rax);
    } else {
      __ subq(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      MakeSureStackPagesMapped(slots * kPointerSize);
#endif
    }

    if (info()->saves_caller_doubles()) {
      SaveCallerDoubles();
    }
  }

  // Possibly allocate a local context.
  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is still in rdi.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ push(rdi);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in rax. It replaces the context passed to us.
    // It's saved in the stack and kept live in rsi.
    __ movp(rsi, rax);
    __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);

    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ movp(rax, Operand(rbp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ movp(Operand(rsi, context_offset), rax);
        // Update the write barrier. This clobbers rax and rbx.
        __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 0);
  __ subq(rsp, Immediate(slots * kPointerSize));
}


bool LCodeGen::GenerateJumpTable() {
  Label needs_frame;
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  for (int i = 0; i < jump_table_.length(); i++) {
    __ bind(&jump_table_[i].label);
    Address entry = jump_table_[i].address;
    Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (jump_table_[i].needs_frame) {
      ASSERT(!info()->saves_caller_doubles());
      __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ movp(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
        __ push(rbp);
        __ movp(rbp, rsp);
        __ push(rsi);
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ Move(rsi, Smi::FromInt(StackFrame::STUB));
        __ push(rsi);
        __ movp(rsi, MemOperand(rsp, kPointerSize));
        __ call(kScratchRegister);
      }
    } else {
      if (info()->saves_caller_doubles()) {
        ASSERT(info()->IsStub());
        RestoreCallerDoubles();
      }
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(value->position());

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that rsi isn't trashed.
        __ push(rbp);  // Caller's frame pointer.
        __ push(Operand(rbp, StandardFrameConstants::kContextOffset));
        __ Push(Smi::FromInt(StackFrame::STUB));
        __ lea(rbp, Operand(rsp, 2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        frame_is_built_ = false;
        __ movp(rsp, rbp);
        __ pop(rbp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToSIMD128Register(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


XMMRegister LCodeGen::ToFloat32x4Register(LOperand* op) const {
  ASSERT(op->IsFloat32x4Register());
  return ToSIMD128Register(op->index());
}


XMMRegister LCodeGen::ToInt32x4Register(LOperand* op) const {
  ASSERT(op->IsInt32x4Register());
  return ToSIMD128Register(op->index());
}


XMMRegister LCodeGen::ToSIMD128Register(LOperand* op) const {
  ASSERT(op->IsFloat32x4Register() || op->IsInt32x4Register());
  return ToSIMD128Register(op->index());
}


bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
      chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
      chunk_->LookupLiteralRepresentation(op).IsSmi();
}


bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
      chunk_->LookupLiteralRepresentation(op).IsTagged();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


static int ArgumentsOffsetWithoutFrame(int index) {
  ASSERT(index < 0);
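  // Example: with no eager frame the return address sits at rsp+0, so slot
  // index -1 maps to rsp + kPCOnStackSize and index -2 maps to
  // rsp + kPointerSize + kPCOnStackSize.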
  return -(index + 1) * kPointerSize + kPCOnStackSize;
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  // Does not handle registers. In X64 assembler, plain registers are not
  // representable as an Operand.
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot() ||
         op->IsFloat32x4StackSlot() || op->IsInt32x4StackSlot());
  if (NeedsEagerFrame()) {
    return Operand(rbp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack pointer.
    return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsFloat32x4StackSlot()) {
    translation->StoreSIMD128StackSlot(op->index(),
                                       Translation::FLOAT32x4_STACK_SLOT);
  } else if (op->IsInt32x4StackSlot()) {
    translation->StoreSIMD128StackSlot(op->index(),
                                       Translation::INT32x4_STACK_SLOT);
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsFloat32x4Register()) {
    XMMRegister reg = ToFloat32x4Register(op);
    translation->StoreSIMD128Register(reg, Translation::FLOAT32x4_REGISTER);
  } else if (op->IsInt32x4Register()) {
    XMMRegister reg = ToInt32x4Register(op);
    translation->StoreSIMD128Register(reg, Translation::INT32x4_REGISTER);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               int argc) {
  ASSERT(instr != NULL);
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);
  ASSERT(instr->HasPointerMap());

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(rsi)) {
      __ movp(rsi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ movp(rsi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, environment->zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ pushfq();
    __ push(rax);
    Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
    __ movl(rax, count_operand);
    __ subl(rax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ movl(rax, Immediate(FLAG_deopt_every_n_times));
    __ movl(count_operand, rax);
    __ pop(rax);
    __ popfq();
    ASSERT(frame_is_built_);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ movl(count_operand, rax);
    __ pop(rax);
    __ popfq();
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) {
      __ j(NegateCondition(cc), &done, Label::kNear);
    }
    __ int3();
    __ bind(&done);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (cc == no_condition && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        jump_table_.last().address != entry ||
        jump_table_.last().needs_frame != !frame_is_built_ ||
        jump_table_.last().bailout_type != bailout_type) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}


void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(cc, environment, bailout_type);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode, int argc) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), argc, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(kind == expected_safepoint_kind_);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();

  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->result()).is(rax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  HValue* left = hmod->left();
  HValue* right = hmod->right();
  if (hmod->RightIsPowerOf2()) {
    // TODO(svenpanne) We should really do the strength reduction on the
    // Hydrogen level.
    Register left_reg = ToRegister(instr->left());
    ASSERT(left_reg.is(ToRegister(instr->result())));

    // Note: The code below even works when right contains kMinInt.
    int32_t divisor = Abs(right->GetInteger32Constant());

    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      __ testl(left_reg, left_reg);
      __ j(not_sign, &left_is_not_negative, Label::kNear);
      __ negl(left_reg);
      __ andl(left_reg, Immediate(divisor - 1));
      __ negl(left_reg);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(zero, instr->environment());
      }
      __ jmp(&done, Label::kNear);
    }

    __ bind(&left_is_not_negative);
    __ andl(left_reg, Immediate(divisor - 1));
    __ bind(&done);
  } else {
    Register left_reg = ToRegister(instr->left());
    ASSERT(left_reg.is(rax));
    Register right_reg = ToRegister(instr->right());
    ASSERT(!right_reg.is(rax));
    ASSERT(!right_reg.is(rdx));
    Register result_reg = ToRegister(instr->result());
    ASSERT(result_reg.is(rdx));

    Label done;
    // Check for x % 0, idiv would signal a divide error. We have to
    // deopt in this case because we can't return a NaN.
    if (right->CanBeZero()) {
      __ testl(right_reg, right_reg);
      DeoptimizeIf(zero, instr->environment());
    }

    // Check for kMinInt % -1, idiv would signal a divide error. We
    // have to deopt if we care about -0, because we can't return that.
    if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
      Label no_overflow_possible;
      __ cmpl(left_reg, Immediate(kMinInt));
      __ j(not_zero, &no_overflow_possible, Label::kNear);
      __ cmpl(right_reg, Immediate(-1));
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(equal, instr->environment());
      } else {
        __ j(not_equal, &no_overflow_possible, Label::kNear);
        __ Set(result_reg, 0);
        __ jmp(&done, Label::kNear);
      }
      __ bind(&no_overflow_possible);
    }

    // Sign extend dividend in eax into edx:eax, since we are using only the
    // low 32 bits of the values.
    __ cdq();

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (left->CanBeNegative() &&
        hmod->CanBeZero() &&
        hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label positive_left;
      __ testl(left_reg, left_reg);
      __ j(not_sign, &positive_left, Label::kNear);
      __ idivl(right_reg);
      __ testl(result_reg, result_reg);
      DeoptimizeIf(zero, instr->environment());
      __ jmp(&done, Label::kNear);
      __ bind(&positive_left);
    }
    __ idivl(right_reg);
    __ bind(&done);
  }
}


void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
  ASSERT(instr->right()->IsConstantOperand());

  const Register dividend = ToRegister(instr->left());
  int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
  const Register result = ToRegister(instr->result());

  switch (divisor) {
  case 0:
    DeoptimizeIf(no_condition, instr->environment());
    return;

  case 1:
    if (!result.is(dividend)) {
      __ movl(result, dividend);
    }
    return;

  case -1:
    if (!result.is(dividend)) {
      __ movl(result, dividend);
    }
    __ negl(result);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(zero, instr->environment());
    }
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      DeoptimizeIf(overflow, instr->environment());
    }
    return;
  }

  uint32_t divisor_abs = abs(divisor);
  if (IsPowerOf2(divisor_abs)) {
    int32_t power = WhichPowerOf2(divisor_abs);
    if (divisor < 0) {
      __ movsxlq(result, dividend);
      __ neg(result);
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(zero, instr->environment());
      }
      __ sar(result, Immediate(power));
    } else {
      if (!result.is(dividend)) {
        __ movl(result, dividend);
      }
      __ sarl(result, Immediate(power));
    }
  } else {
    Register reg1 = ToRegister(instr->temp());
    Register reg2 = ToRegister(instr->result());

    // Find b such that 2^b < divisor_abs < 2^(b+1).
    unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
    unsigned shift = 32 + b;  // Precision +1bit (effectively).
    double multiplier_f =
        static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
    int64_t multiplier;
    if (multiplier_f - std::floor(multiplier_f) < 0.5) {
      multiplier = static_cast<int64_t>(std::floor(multiplier_f));
    } else {
      multiplier = static_cast<int64_t>(std::floor(multiplier_f)) + 1;
    }
    // The multiplier is a uint32.
    ASSERT(multiplier > 0 &&
           multiplier < (static_cast<int64_t>(1) << 32));
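    // Worked example (illustrative): divisor_abs == 3 gives b == 1,
    // shift == 33 and multiplier == round(2^33 / 3) == 2863311531. The
    // sequence below then computes e.g. (7 * 2863311531 + 2^30) >> 33 == 2
    // == floor(7 / 3), and (-7 * 2863311531 + 2^30) >> 33 == -3
    // == floor(-7 / 3), matching the flooring semantics.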
    // The multiply is int64, so sign-extend to r64.
    __ movsxlq(reg1, dividend);
    if (divisor < 0 &&
        instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ neg(reg1);
      DeoptimizeIf(zero, instr->environment());
    }
    __ Set(reg2, multiplier);
    // Result just fits in r64, because it's int32 * uint32.
    __ imul(reg2, reg1);

    __ addq(reg2, Immediate(1 << 30));
    __ sar(reg2, Immediate(shift));
  }
}


void LCodeGen::DoDivI(LDivI* instr) {
  if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
    Register dividend = ToRegister(instr->left());
    int32_t divisor =
        HConstant::cast(instr->hydrogen()->right())->Integer32Value();
    int32_t test_value = 0;
    int32_t power = 0;

    if (divisor > 0) {
      test_value = divisor - 1;
      power = WhichPowerOf2(divisor);
    } else {
      // Check for (0 / -x) that will produce negative zero.
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        __ testl(dividend, dividend);
        DeoptimizeIf(zero, instr->environment());
      }
      // Check for (kMinInt / -1).
      if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
        __ cmpl(dividend, Immediate(kMinInt));
        DeoptimizeIf(zero, instr->environment());
      }
      test_value = - divisor - 1;
      power = WhichPowerOf2(-divisor);
    }

    if (test_value != 0) {
      if (instr->hydrogen()->CheckFlag(
          HInstruction::kAllUsesTruncatingToInt32)) {
        Label done, negative;
        __ cmpl(dividend, Immediate(0));
        __ j(less, &negative, Label::kNear);
        __ sarl(dividend, Immediate(power));
        if (divisor < 0) __ negl(dividend);
        __ jmp(&done, Label::kNear);

        __ bind(&negative);
        __ negl(dividend);
        __ sarl(dividend, Immediate(power));
        if (divisor > 0) __ negl(dividend);
        __ bind(&done);
        return;  // Don't fall through to the "__ negl" below.
      } else {
        // Deoptimize if remainder is not 0.
        __ testl(dividend, Immediate(test_value));
        DeoptimizeIf(not_zero, instr->environment());
        __ sarl(dividend, Immediate(power));
      }
    }

    if (divisor < 0) __ negl(dividend);

    return;
  }

  LOperand* right = instr->right();
  ASSERT(ToRegister(instr->result()).is(rax));
  ASSERT(ToRegister(instr->left()).is(rax));
  ASSERT(!ToRegister(instr->right()).is(rax));
  ASSERT(!ToRegister(instr->right()).is(rdx));

  Register left_reg = rax;

  // Check for x / 0.
  Register right_reg = ToRegister(right);
  if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(right_reg, right_reg);
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ testl(left_reg, left_reg);
    __ j(not_zero, &left_not_zero, Label::kNear);
    __ testl(right_reg, right_reg);
    DeoptimizeIf(sign, instr->environment());
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ cmpl(left_reg, Immediate(kMinInt));
    __ j(not_zero, &left_not_min_int, Label::kNear);
    __ cmpl(right_reg, Immediate(-1));
    DeoptimizeIf(zero, instr->environment());
    __ bind(&left_not_min_int);
  }

  // Sign extend to rdx.
  __ cdq();
  __ idivl(right_reg);

  if (instr->is_flooring()) {
    Label done;
    __ testl(rdx, rdx);
    __ j(zero, &done, Label::kNear);
    __ xorl(rdx, right_reg);
    __ sarl(rdx, Immediate(31));
    __ addl(rax, rdx);
    __ bind(&done);
  } else if (!instr->hydrogen()->CheckFlag(
      HInstruction::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ testl(rdx, rdx);
    DeoptimizeIf(not_zero, instr->environment());
  }
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ movp(kScratchRegister, left);
    } else {
      __ movl(kScratchRegister, left);
    }
  }

  bool can_overflow =
      instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  if (right->IsConstantOperand()) {
    int32_t right_value = ToInteger32(LConstantOperand::cast(right));
    if (right_value == -1) {
      __ negl(left);
    } else if (right_value == 0) {
      __ xorl(left, left);
    } else if (right_value == 2) {
      __ addl(left, left);
    } else if (!can_overflow) {
      // If the multiplication is known to not overflow, we
      // can use operations that don't set the overflow flag
      // correctly.
      switch (right_value) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ leal(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shll(left, Immediate(2));
          break;
        case 5:
          __ leal(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shll(left, Immediate(3));
          break;
        case 9:
          __ leal(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shll(left, Immediate(4));
          break;
        default:
          __ imull(left, left, Immediate(right_value));
          break;
      }
    } else {
      __ imull(left, left, Immediate(right_value));
    }
  } else if (right->IsStackSlot()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger64(left, left);
      __ imul(left, ToOperand(right));
    } else {
      __ imull(left, ToOperand(right));
    }
  } else {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger64(left, left);
      __ imul(left, ToRegister(right));
    } else {
      __ imull(left, ToRegister(right));
    }
  }

  if (can_overflow) {
    DeoptimizeIf(overflow, instr->environment());
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ testq(left, left);
    } else {
      __ testl(left, left);
    }
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      // Constant can't be represented as Smi due to immediate size limit.
      ASSERT(!instr->hydrogen_value()->representation().IsSmi());
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        DeoptimizeIf(no_condition, instr->environment());
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        __ cmpl(kScratchRegister, Immediate(0));
        DeoptimizeIf(less, instr->environment());
      }
    } else if (right->IsStackSlot()) {
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ or_(kScratchRegister, ToOperand(right));
      } else {
        __ orl(kScratchRegister, ToOperand(right));
      }
      DeoptimizeIf(sign, instr->environment());
    } else {
      // Test the non-zero operand for negative sign.
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ or_(kScratchRegister, ToRegister(right));
      } else {
        __ orl(kScratchRegister, ToRegister(right));
      }
      DeoptimizeIf(sign, instr->environment());
    }
    __ bind(&done);
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    int32_t right_operand = ToInteger32(LConstantOperand::cast(right));
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_OR:
        __ orl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_XOR:
        if (right_operand == int32_t(~0)) {
          __ notl(ToRegister(left));
        } else {
          __ xorl(ToRegister(left), Immediate(right_operand));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else if (right->IsStackSlot()) {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    ASSERT(right->IsRegister());
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToRegister(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToRegister(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToRegister(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    ASSERT(ToRegister(right).is(rcx));

    switch (instr->op()) {
      case Token::ROR:
        __ rorl_cl(ToRegister(left));
        break;
      case Token::SAR:
        __ sarl_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shrl_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr->environment());
        }
        break;
      case Token::SHL:
        __ shll_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int32_t value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
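    // JS shift semantics: only the low five bits of the count matter, so a
    // constant right operand of 33 yields shift_count == 1 (x << 33 behaves
    // as x << 1); shift_count == 0 is special-cased per operator below.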
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ rorl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sarl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr->environment());
        } else {
          __ shrl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi()) {
            __ shl(ToRegister(left), Immediate(shift_count));
          } else {
            __ shll(ToRegister(left), Immediate(shift_count));
          }
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ subl(ToRegister(left),
            Immediate(ToInteger32(LConstantOperand::cast(right))));
  } else if (right->IsRegister()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ subq(ToRegister(left), ToRegister(right));
    } else {
      __ subl(ToRegister(left), ToRegister(right));
    }
  } else {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ subq(ToRegister(left), ToOperand(right));
    } else {
      __ subl(ToRegister(left), ToOperand(right));
    }
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ Set(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Move(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  XMMRegister res = ToDoubleRegister(instr->result());
  double v = instr->value();
  uint64_t int_val = BitCast<uint64_t, double>(v);
  // Use xor to produce +0.0 in a fast and compact way, but avoid doing so
  // if the constant is -0.0.
  if (int_val == 0) {
    __ xorps(res, res);
  } else {
    Register tmp = ToRegister(instr->temp());
    __ Set(tmp, int_val);
    __ movq(res, tmp);
  }
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ LoadAddress(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value(isolate());
  __ Move(ToRegister(instr->result()), value);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Smi* index = instr->index();
  Label runtime, done, not_date_object;
  ASSERT(object.is(result));
  ASSERT(object.is(rax));

  Condition cc = masm()->CheckSmi(object);
  DeoptimizeIf(cc, instr->environment());
  __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
  DeoptimizeIf(not_equal, instr->environment());

  if (index->value() == 0) {
    __ movp(result, FieldOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      Operand stamp_operand = __ ExternalOperand(stamp);
      __ movp(kScratchRegister, stamp_operand);
      __ cmpq(kScratchRegister, FieldOperand(object,
                                             JSDate::kCacheStampOffset));
      __ j(not_equal, &runtime, Label::kNear);
      __ movp(result, FieldOperand(object, JSDate::kValueOffset +
                                           kPointerSize * index->value()));
      __ jmp(&done, Label::kNear);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2);
    __ movp(arg_reg_1, object);
    __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}


Operand LCodeGen::BuildSeqStringOperand(Register string,
                                        LOperand* index,
                                        String::Encoding encoding) {
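  // Example: a one-byte string with constant index 3 yields
  // FieldOperand(string, SeqString::kHeaderSize + 3); with a register index
  // the operand scales by times_2 for two-byte strings, since each code
  // unit occupies two bytes.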
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldOperand(string, SeqString::kHeaderSize + offset);
  }
  return FieldOperand(
      string, ToRegister(index),
      encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
      SeqString::kHeaderSize);
}


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register result = ToRegister(instr->result());
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    __ push(string);
    __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
    __ movzxbq(string, FieldOperand(string, Map::kInstanceTypeOffset));

    __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmpq(string, Immediate(encoding == String::ONE_BYTE_ENCODING
                              ? one_byte_seq_type : two_byte_seq_type));
    __ Check(equal, kUnexpectedStringType);
    __ pop(string);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ movzxbl(result, operand);
  } else {
    __ movzxwl(result, operand);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    Register value = ToRegister(instr->value());
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (instr->value()->IsConstantOperand()) {
    int value = ToInteger32(LConstantOperand::cast(instr->value()));
    ASSERT_LE(0, value);
    if (encoding == String::ONE_BYTE_ENCODING) {
      ASSERT_LE(value, String::kMaxOneByteCharCode);
      __ movb(operand, Immediate(value));
    } else {
      ASSERT_LE(value, String::kMaxUtf16CodeUnit);
      __ movw(operand, Immediate(value));
    }
  } else {
    Register value = ToRegister(instr->value());
    if (encoding == String::ONE_BYTE_ENCODING) {
      __ movb(operand, value);
    } else {
      __ movw(operand, value);
    }
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();

  Representation target_rep = instr->hydrogen()->representation();
  bool is_q = target_rep.IsSmi() || target_rep.IsExternal();
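  // When the result register differs from the left operand, lea/leal folds
  // the add into one non-destructive instruction. Note that lea does not set
  // flags, which is presumably why LAddI::UseLea only applies when no
  // overflow deopt is needed (the kCanOverflow check below sits in the
  // non-lea path).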
  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
    if (right->IsConstantOperand()) {
      int32_t offset = ToInteger32(LConstantOperand::cast(right));
      if (is_q) {
        __ lea(ToRegister(instr->result()),
               MemOperand(ToRegister(left), offset));
      } else {
        __ leal(ToRegister(instr->result()),
                MemOperand(ToRegister(left), offset));
      }
    } else {
      Operand address(ToRegister(left), ToRegister(right), times_1, 0);
      if (is_q) {
        __ lea(ToRegister(instr->result()), address);
      } else {
        __ leal(ToRegister(instr->result()), address);
      }
    }
  } else {
    if (right->IsConstantOperand()) {
      if (is_q) {
        __ addq(ToRegister(left),
                Immediate(ToInteger32(LConstantOperand::cast(right))));
      } else {
        __ addl(ToRegister(left),
                Immediate(ToInteger32(LConstantOperand::cast(right))));
      }
    } else if (right->IsRegister()) {
      if (is_q) {
        __ addq(ToRegister(left), ToRegister(right));
      } else {
        __ addl(ToRegister(left), ToRegister(right));
      }
    } else {
      if (is_q) {
        __ addq(ToRegister(left), ToOperand(right));
      } else {
        __ addl(ToRegister(left), ToOperand(right));
      }
    }
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      DeoptimizeIf(overflow, instr->environment());
    }
  }
}


void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Label return_left;
    Condition condition = (operation == HMathMinMax::kMathMin)
        ? less_equal
        : greater_equal;
    Register left_reg = ToRegister(left);
    if (right->IsConstantOperand()) {
      Immediate right_imm =
          Immediate(ToInteger32(LConstantOperand::cast(right)));
      ASSERT(!instr->hydrogen_value()->representation().IsSmi());
      __ cmpl(left_reg, right_imm);
      __ j(condition, &return_left, Label::kNear);
      __ movp(left_reg, right_imm);
    } else if (right->IsRegister()) {
      Register right_reg = ToRegister(right);
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ cmpq(left_reg, right_reg);
      } else {
        __ cmpl(left_reg, right_reg);
      }
      __ j(condition, &return_left, Label::kNear);
      __ movp(left_reg, right_reg);
    } else {
      Operand right_op = ToOperand(right);
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ cmpq(left_reg, right_op);
      } else {
        __ cmpl(left_reg, right_op);
      }
      __ j(condition, &return_left, Label::kNear);
      __ movp(left_reg, right_op);
    }
    __ bind(&return_left);
  } else {
    ASSERT(instr->hydrogen()->representation().IsDouble());
    Label check_nan_left, check_zero, return_left, return_right;
    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
    XMMRegister left_reg = ToDoubleRegister(left);
    XMMRegister right_reg = ToDoubleRegister(right);
    __ ucomisd(left_reg, right_reg);
    __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
    __ j(equal, &check_zero, Label::kNear);  // left == right.
    __ j(condition, &return_left, Label::kNear);
    __ jmp(&return_right, Label::kNear);

    __ bind(&check_zero);
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(left_reg, xmm_scratch);
    __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
    // At this point, both left and right are either 0 or -0.
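    // Example: min(+0, -0) must be -0, and orps ORs the sign bits, so
    // +0 | -0 == -0; max(+0, -0) must be +0, and addsd gives
    // (+0) + (-0) == +0 under IEEE-754 round-to-nearest.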
    if (operation == HMathMinMax::kMathMin) {
      __ orps(left_reg, right_reg);
    } else {
      // Since we operate on +0 and/or -0, addsd and andsd have the same
      // effect.
      __ addsd(left_reg, right_reg);
    }
    __ jmp(&return_left, Label::kNear);

    __ bind(&check_nan_left);
    __ ucomisd(left_reg, left_reg);  // NaN check.
    __ j(parity_even, &return_left, Label::kNear);
    __ bind(&return_right);
    __ movaps(left_reg, right_reg);

    __ bind(&return_left);
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  XMMRegister left = ToDoubleRegister(instr->left());
  XMMRegister right = ToDoubleRegister(instr->right());
  XMMRegister result = ToDoubleRegister(instr->result());
  // All operations except MOD are computed in-place.
  ASSERT(instr->op() == Token::MOD || left.is(result));
  switch (instr->op()) {
    case Token::ADD:
      __ addsd(left, right);
      break;
    case Token::SUB:
      __ subsd(left, right);
      break;
    case Token::MUL:
      __ mulsd(left, right);
      break;
    case Token::DIV:
      __ divsd(left, right);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a mulsd depending on the result.
      __ movaps(left, left);
      break;
    case Token::MOD: {
      XMMRegister xmm_scratch = double_scratch0();
      __ PrepareCallCFunction(2);
      __ movaps(xmm_scratch, left);
      ASSERT(right.is(xmm1));
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()), 2);
      __ movaps(result, xmm_scratch);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->left()).is(rdx));
  ASSERT(ToRegister(instr->right()).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  __ nop();  // Signals no inlined code.
}


template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block || cc == no_condition) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    __ jmp(chunk_->GetAssemblyLabel(right_block));
  }
}


template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
  int false_block = instr->FalseDestination(chunk_);
  __ j(cc, chunk_->GetAssemblyLabel(false_block));
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ int3();
}


void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32()) {
    ASSERT(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    __ testl(reg, reg);
    EmitBranch(instr, not_zero);
  } else if (r.IsSmi()) {
    ASSERT(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    __ testq(reg, reg);
    EmitBranch(instr, not_zero);
  } else if (r.IsDouble()) {
    ASSERT(!info()->IsStub());
    XMMRegister reg = ToDoubleRegister(instr->value());
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(reg, xmm_scratch);
    EmitBranch(instr, not_equal);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      ASSERT(!info()->IsStub());
      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
      EmitBranch(instr, equal);
    } else if (type.IsSmi()) {
      ASSERT(!info()->IsStub());
      __ SmiCompare(reg, Smi::FromInt(0));
      EmitBranch(instr, not_equal);
    } else if (type.IsJSArray()) {
      ASSERT(!info()->IsStub());
      EmitBranch(instr, no_condition);
    } else if (type.IsHeapNumber()) {
      ASSERT(!info()->IsStub());
      XMMRegister xmm_scratch = double_scratch0();
      __ xorps(xmm_scratch, xmm_scratch);
      __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
      EmitBranch(instr, not_equal);
    } else if (type.IsString()) {
      ASSERT(!info()->IsStub());
      __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
      EmitBranch(instr, not_equal);
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
        __ j(equal, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // true -> true.
        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
        __ j(equal, instr->TrueLabel(chunk_));
        // false -> false.
        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
        __ j(equal, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ CompareRoot(reg, Heap::kNullValueRootIndex);
        __ j(equal, instr->FalseLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ Cmp(reg, Smi::FromInt(0));
        __ j(equal, instr->FalseLabel(chunk_));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ testb(reg, Immediate(kSmiTagMask));
        DeoptimizeIf(zero, instr->environment());
      }

      const Register map = kScratchRegister;
      if (expected.NeedsMap()) {
        __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ testb(FieldOperand(map, Map::kBitFieldOffset),
                   Immediate(1 << Map::kIsUndetectable));
          __ j(not_zero, instr->FalseLabel(chunk_));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
        __ j(above_equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
        __ j(above_equal, &not_string, Label::kNear);
        __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
        __ j(not_zero, instr->TrueLabel(chunk_));
        __ jmp(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        __ CmpInstanceType(map, SYMBOL_TYPE);
        __ j(equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        Label not_heap_number;
        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
        __ j(not_equal, &not_heap_number, Label::kNear);
        XMMRegister xmm_scratch = double_scratch0();
        __ xorps(xmm_scratch, xmm_scratch);
        __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
        __ j(zero, instr->FalseLabel(chunk_));
        __ jmp(instr->TrueLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(no_condition, instr->environment());
      }
    }
  }
}
void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = no_condition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = equal;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = not_equal;
      break;
    case Token::LT:
      cond = is_unsigned ? below : less;
      break;
    case Token::GT:
      cond = is_unsigned ? above : greater;
      break;
    case Token::LTE:
      cond = is_unsigned ? below_equal : less_equal;
      break;
    case Token::GTE:
      cond = is_unsigned ? above_equal : greater_equal;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  Condition cc = TokenToCondition(instr->op(), instr->is_double());

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Don't base result on EFLAGS when a NaN is involved. Instead
      // jump to the false block.
      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
      __ j(parity_even, instr->FalseLabel(chunk_));
    } else {
      int32_t value;
      if (right->IsConstantOperand()) {
        value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          __ Cmp(ToRegister(left), Smi::FromInt(value));
        } else {
          __ cmpl(ToRegister(left), Immediate(value));
        }
      } else if (left->IsConstantOperand()) {
        value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          if (right->IsRegister()) {
            __ Cmp(ToRegister(right), Smi::FromInt(value));
          } else {
            __ Cmp(ToOperand(right), Smi::FromInt(value));
          }
        } else if (right->IsRegister()) {
          __ cmpl(ToRegister(right), Immediate(value));
        } else {
          __ cmpl(ToOperand(right), Immediate(value));
        }
        // We transposed the operands. Reverse the condition.
        cc = ReverseCondition(cc);
      } else if (instr->hydrogen_value()->representation().IsSmi()) {
        if (right->IsRegister()) {
          __ cmpq(ToRegister(left), ToRegister(right));
        } else {
          __ cmpq(ToRegister(left), ToOperand(right));
        }
      } else {
        if (right->IsRegister()) {
          __ cmpl(ToRegister(left), ToRegister(right));
        } else {
          __ cmpl(ToRegister(left), ToOperand(right));
        }
      }
    }
    EmitBranch(instr, cc);
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());

  if (instr->right()->IsConstantOperand()) {
    Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
    __ Cmp(left, right);
  } else {
    Register right = ToRegister(instr->right());
    __ cmpq(left, right);
  }
  EmitBranch(instr, equal);
}


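// The hole is encoded as a particular NaN bit pattern. For doubles the code
// first filters out ordinary values: ucomisd of the value against itself is
// unordered (parity flag set) only for NaNs, so anything ordered is
// immediately not the hole. The surviving NaNs are then distinguished by
// comparing their upper 32 bits against kHoleNanUpper32.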
void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ Cmp(input_reg, factory()->the_hole_value());
    EmitBranch(instr, equal);
    return;
  }

  XMMRegister input_reg = ToDoubleRegister(instr->object());
  __ ucomisd(input_reg, input_reg);
  EmitFalseBranch(instr, parity_odd);

  __ subq(rsp, Immediate(kDoubleSize));
  __ movsd(MemOperand(rsp, 0), input_reg);
  __ addq(rsp, Immediate(kDoubleSize));

  int offset = sizeof(kHoleNanUpper32);
  __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
  EmitBranch(instr, equal);
}


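// -0.0 compares equal to +0.0, so an equality check against zero is not
// enough: the sign must be inspected separately. For doubles, movmskpd
// extracts the sign bit into a general-purpose register; for heap numbers,
// the raw words are compared against the -0.0 bit pattern
// (exponent word 0x80000000, mantissa word 0x00000000).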
void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
  Representation rep = instr->hydrogen()->value()->representation();
  ASSERT(!rep.IsInteger32());

  if (rep.IsDouble()) {
    XMMRegister value = ToDoubleRegister(instr->value());
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(xmm_scratch, value);
    EmitFalseBranch(instr, not_equal);
    __ movmskpd(kScratchRegister, value);
    __ testl(kScratchRegister, Immediate(1));
    EmitBranch(instr, not_zero);
  } else {
    Register value = ToRegister(instr->value());
    Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
    __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
    __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
            Immediate(0x80000000));
    EmitFalseBranch(instr, not_equal);
    __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
            Immediate(0x00000000));
    EmitBranch(instr, equal);
  }
}


Condition LCodeGen::EmitIsObject(Register input,
                                 Label* is_not_object,
                                 Label* is_object) {
  ASSERT(!input.is(kScratchRegister));

  __ JumpIfSmi(input, is_not_object);

  __ CompareRoot(input, Heap::kNullValueRootIndex);
  __ j(equal, is_object);

  __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
           Immediate(1 << Map::kIsUndetectable));
  __ j(not_zero, is_not_object);

  __ movzxbl(kScratchRegister,
             FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
  __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  __ j(below, is_not_object);
  __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
  return below_equal;
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->value());

  Condition true_cond = EmitIsObject(
      reg, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));

  EmitBranch(instr, true_cond);
}


Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }

  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);

  return cond;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;

  Condition true_cond = EmitIsString(
      reg, temp, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Condition is_smi;
  if (instr->value()->IsRegister()) {
    Register input = ToRegister(instr->value());
    is_smi = masm()->CheckSmi(input);
  } else {
    Operand input = ToOperand(instr->value());
    is_smi = masm()->CheckSmi(input);
  }
  EmitBranch(instr, is_smi);
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
  __ testb(FieldOperand(temp, Map::kBitFieldOffset),
           Immediate(1 << Map::kIsUndetectable));
  EmitBranch(instr, not_zero);
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = TokenToCondition(op, false);
  __ testq(rax, rax);

  EmitBranch(instr, condition);
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return equal;
  if (to == LAST_TYPE) return above_equal;
  if (from == FIRST_TYPE) return below_equal;
  UNREACHABLE();
  return equal;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->value());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ movl(result, FieldOperand(input, String::kHashFieldOffset));
  ASSERT(String::kHashShift >= kSmiTagSize);
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());

  __ testl(FieldOperand(input, String::kHashFieldOffset),
           Immediate(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, equal);
}


// Branches to a label or falls through with the answer in the z flag.
// Trashes the temp register.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
    __ j(below, is_false);
    __ j(equal, is_true);
    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
    __ j(equal, is_true);
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
    __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
    __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                             FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ j(above, is_false);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ movp(temp, FieldOperand(temp, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
    __ j(not_equal, is_true);
  } else {
    __ j(not_equal, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ movp(temp, FieldOperand(temp,
                             SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted. This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax. Since both sides are internalized it is sufficient to use an
  // identity comparison.
  ASSERT(class_name->IsInternalizedString());
  __ Cmp(temp, class_name);
  // End with the answer in the z flag.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  EmitBranch(instr, equal);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());

  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
  EmitBranch(instr, equal);
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  InstanceofStub stub(InstanceofStub::kNoFlags);
  __ push(ToRegister(instr->left()));
  __ push(ToRegister(instr->right()));
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  Label true_value, done;
  __ testq(rax, rax);
  __ j(zero, &true_value, Label::kNear);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  __ bind(&done);
}


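// instanceof against a known global function uses an inlined one-entry
// cache: the hole values emitted below are patched by the InstanceofStub
// with the last map/result pair, so repeated checks against the same map
// never have to leave the optimized code.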
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
    Label* map_check() { return &map_check_; }
   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  ASSERT(ToRegister(instr->context()).is(rsi));
  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());

  // A Smi is not an instance of anything.
  __ JumpIfSmi(object, &false_result, Label::kNear);

  // This is the inlined call site instanceof cache. The two occurrences of the
  // hole value will be patched to the last map/result pair generated by the
  // instanceof stub.
  Label cache_miss;
  // Use a temp register to avoid memory operands with variable lengths.
  Register map = ToRegister(instr->temp());
  __ movp(map, FieldOperand(object, HeapObject::kMapOffset));
  __ bind(deferred->map_check());  // Label for calculating code patching.
  Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
  __ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
  __ cmpq(map, Operand(kScratchRegister, 0));
  __ j(not_equal, &cache_miss, Label::kNear);
  // Patched to load either true or false.
  __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
#ifdef DEBUG
  // Check that the code size between patch label and patch sites is invariant.
  Label end_of_patched_code;
  __ bind(&end_of_patched_code);
  ASSERT(true);
#endif
  __ jmp(&done, Label::kNear);

  // The inlined call site cache did not match. Check for null and string
  // before calling the deferred code.
  __ bind(&cache_miss);  // Null is not an instance of anything.
  __ CompareRoot(object, Heap::kNullValueRootIndex);
  __ j(equal, &false_result, Label::kNear);

  // String values are not instances of anything.
  __ JumpIfNotString(object, kScratchRegister, deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);

  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  {
    PushSafepointRegistersScope scope(this);
    InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
        InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
    InstanceofStub stub(flags);

    __ push(ToRegister(instr->value()));
    __ Push(instr->function());

    static const int kAdditionalDelta = 10;
    int delta =
        masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
    __ push_imm32(delta);

    // We are pushing three values on the stack but recording a
    // safepoint with two arguments because the stub is going to
    // remove the third argument from the stack before jumping
    // to the instanceof builtin on the slow path.
    CallCodeGeneric(stub.GetCode(isolate()),
                    RelocInfo::CODE_TARGET,
                    instr,
                    RECORD_SAFEPOINT_WITH_REGISTERS,
                    2);
    ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
    LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
    // Move result to a register that survives the end of the
    // PushSafepointRegisterScope.
    __ movp(kScratchRegister, rax);
  }
  __ testq(kScratchRegister, kScratchRegister);
  Label load_false;
  Label done;
  __ j(not_zero, &load_false, Label::kNear);
  __ LoadRoot(rax, Heap::kTrueValueRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&load_false);
  __ LoadRoot(rax, Heap::kFalseValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoCmpT(LCmpT* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = TokenToCondition(op, false);
  Label true_value, done;
  __ testq(rax, rax);
  __ j(condition, &true_value, Label::kNear);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  __ bind(&done);
}


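// DoReturn tears down the frame and returns to the caller. The optional
// trace runtime call must run before the frame is destroyed; the pc offset
// recorded right after the teardown marks the start of the range in which
// this code runs without a frame (AddNoFrameRange below).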
void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Preserve the return value on the stack and rely on the runtime call
    // to return the value in the same register. We're leaving the code
    // managed by the register allocator and tearing down the frame, it's
    // safe to write to the context register.
    __ push(rax);
    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }
  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    __ movp(rsp, rbp);
    __ pop(rbp);
    no_frame_start = masm_->pc_offset();
  }
  if (instr->has_constant_parameter_count()) {
    __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
           rcx);
  } else {
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi.
    __ SmiToInteger32(reg, reg);
    Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
    __ PopReturnAddressTo(return_addr_reg);
    __ shl(reg, Immediate(kPointerSizeLog2));
    __ addq(rsp, reg);
    __ jmp(return_addr_reg);
  }
  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}


void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(equal, instr->environment());
  }
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->global_object()).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  __ Move(rcx, instr->name());
  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
  Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Handle<Cell> cell_handle = instr->hydrogen()->cell().handle();

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted. We deoptimize in that case.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // We have a temp because CompareRoot might clobber kScratchRegister.
    Register cell = ToRegister(instr->temp());
    ASSERT(!value.is(cell));
    __ Move(cell, cell_handle, RelocInfo::CELL);
    __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(equal, instr->environment());
    // Store the value.
    __ movp(Operand(cell, 0), value);
  } else {
    // Store the value.
    __ Move(kScratchRegister, cell_handle, RelocInfo::CELL);
    __ movp(Operand(kScratchRegister, 0), value);
  }
  // Cells are always rescanned, so no write barrier here.
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ movp(result, ContextOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      Label is_not_hole;
      __ j(not_equal, &is_not_hole, Label::kNear);
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());

  Operand target = ContextOperand(context, instr->slot_index());

  Label skip_assignment;
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      __ j(not_equal, &skip_assignment);
    }
  }
  __ movp(target, value);

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    int offset = Context::SlotOffset(instr->slot_index());
    Register scratch = ToRegister(instr->temp());
    __ RecordWriteContextSlot(context,
                              offset,
                              value,
                              scratch,
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}


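// Loads a named field either from the object itself or, for out-of-object
// properties, from its properties array. Note the x64 smi layout: the
// 32-bit payload lives in the upper half of the 64-bit word, so an
// Integer32-typed smi field can be read directly from
// offset + kPointerSize / 2 without untagging.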
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    if (instr->object()->IsConstantOperand()) {
      ASSERT(result.is(rax));
      __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
    } else {
      Register object = ToRegister(instr->object());
      __ Load(result, MemOperand(object, offset), access.representation());
    }
    return;
  }

  Register object = ToRegister(instr->object());
  if (instr->hydrogen()->representation().IsDouble()) {
    XMMRegister result = ToDoubleRegister(instr->result());
    __ movsd(result, FieldOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }

  Representation representation = access.representation();
  if (representation.IsSmi() &&
      instr->hydrogen()->representation().IsInteger32()) {
    // Read int value directly from upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
    offset += kPointerSize / 2;
    representation = Representation::Integer32();
  }
  __ Load(result, FieldOperand(object, offset), representation);
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->object()).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  __ Move(rcx, instr->name());
  Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function.
  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
  DeoptimizeIf(not_equal, instr->environment());

  // Check whether the function has an instance prototype.
  Label non_instance;
  __ testb(FieldOperand(result, Map::kBitFieldOffset),
           Immediate(1 << Map::kHasNonInstancePrototype));
  __ j(not_zero, &non_instance, Label::kNear);

  // Get the prototype or initial map from the function.
  __ movp(result,
          FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(equal, instr->environment());

  // If the function does not have an initial map, we're done.
  Label done;
  __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
  __ j(not_equal, &done, Label::kNear);

  // Get the prototype from the initial map.
  __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
  __ jmp(&done, Label::kNear);

  // Non-instance prototype: Fetch prototype from constructor field
  // in the function's map.
  __ bind(&non_instance);
  __ movp(result, FieldOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());

  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    StackArgumentsAccessor args(arguments, const_length,
                                ARGUMENTS_DONT_CONTAIN_RECEIVER);
    __ movp(result, args.GetArgumentOperand(const_index));
  } else {
    Register length = ToRegister(instr->length());
    // There are two words between the frame pointer and the last argument.
    // Subtracting from length accounts for one of them; add one more.
    if (instr->index()->IsRegister()) {
      __ subl(length, ToRegister(instr->index()));
    } else {
      __ subl(length, ToOperand(instr->index()));
    }
    StackArgumentsAccessor args(arguments, length,
                                ARGUMENTS_DONT_CONTAIN_RECEIVER);
    __ movp(result, args.GetArgumentOperand(0));
  }
}


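// x64 scaled-index addressing can multiply an index by at most 8 (shift
// size 3), but the 16-byte SIMD element kinds need a shift size of 4. The
// key register is therefore pre-shifted by the difference here, and
// BuildFastArrayOperand later scales it only by maximal_scale_factor.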
void LCodeGen::HandleExternalArrayOpRequiresPreScale(
    LOperand* key,
    ElementsKind elements_kind) {
  if (ExternalArrayOpRequiresPreScale(elements_kind)) {
    int pre_shift_size = ElementsKindToShiftSize(elements_kind) -
        static_cast<int>(maximal_scale_factor);
    ASSERT(pre_shift_size > 0);
    __ shl(ToRegister(key), Immediate(pre_shift_size));
  }
}


void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand()) {
    Register key_reg = ToRegister(key);
    // Even though the HLoad/StoreKeyed (in this case) instructions force
    // the input representation for the key to be an integer, the input
    // gets replaced during bound check elimination with the index argument
    // to the bounds check, which can be tagged, so that case must be
    // handled here, too.
    if (instr->hydrogen()->IsDehoisted()) {
      // Sign extend key because it could be a 32 bit negative value
      // and the dehoisted address computation happens in 64 bits.
      __ movsxlq(key_reg, key_reg);
    }

    HandleExternalArrayOpRequiresPreScale(key, elements_kind);
  }
  int base_offset = instr->is_fixed_typed_array()
      ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
      : 0;
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      elements_kind,
      base_offset,
      instr->additional_index()));

  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == FLOAT32_ELEMENTS) {
    XMMRegister result(ToDoubleRegister(instr->result()));
    __ movss(result, operand);
    __ cvtss2sd(result, result);
  } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
             elements_kind == FLOAT64_ELEMENTS) {
    __ movsd(ToDoubleRegister(instr->result()), operand);
  } else if (IsSIMD128ElementsKind(elements_kind)) {
    __ movups(ToSIMD128Register(instr->result()), operand);
  } else {
    Register result(ToRegister(instr->result()));
    switch (elements_kind) {
      case EXTERNAL_INT8_ELEMENTS:
      case INT8_ELEMENTS:
        __ movsxbq(result, operand);
        break;
      case EXTERNAL_UINT8_ELEMENTS:
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ movzxbq(result, operand);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case INT16_ELEMENTS:
        __ movsxwq(result, operand);
        break;
      case EXTERNAL_UINT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ movzxwq(result, operand);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case INT32_ELEMENTS:
        __ movsxlq(result, operand);
        break;
      case EXTERNAL_UINT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ movl(result, operand);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          __ testl(result, result);
          DeoptimizeIf(negative, instr->environment());
        }
        break;
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32x4_ELEMENTS:
      case EXTERNAL_INT32x4_ELEMENTS:
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FLOAT32x4_ELEMENTS:
      case INT32x4_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  XMMRegister result(ToDoubleRegister(instr->result()));
  LOperand* key = instr->key();
  if (!key->IsConstantOperand()) {
    Register key_reg = ToRegister(key);
    // Even though the HLoad/StoreKeyed instructions force the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->IsDehoisted()) {
      // Sign extend key because it could be a 32 bit negative value
      // and the dehoisted address computation happens in 64 bits.
      __ movsxlq(key_reg, key_reg);
    }
  }

  if (instr->hydrogen()->RequiresHoleCheck()) {
    int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
        sizeof(kHoleNanLower32);
    Operand hole_check_operand = BuildFastArrayOperand(
        instr->elements(),
        key,
        FAST_DOUBLE_ELEMENTS,
        offset,
        instr->additional_index());
    __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
    DeoptimizeIf(equal, instr->environment());
  }

  Operand double_load_operand = BuildFastArrayOperand(
      instr->elements(),
      key,
      FAST_DOUBLE_ELEMENTS,
      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
      instr->additional_index());
  __ movsd(result, double_load_operand);
}


void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  HLoadKeyed* hinstr = instr->hydrogen();
  Register result = ToRegister(instr->result());
  LOperand* key = instr->key();
  if (!key->IsConstantOperand()) {
    Register key_reg = ToRegister(key);
    // Even though the HLoad/StoreKeyedFastElement instructions force
    // the input representation for the key to be an integer, the input
    // gets replaced during bound check elimination with the index
    // argument to the bounds check, which can be tagged, so that
    // case must be handled here, too.
    if (hinstr->IsDehoisted()) {
      // Sign extend key because it could be a 32 bit negative value
      // and the dehoisted address computation happens in 64 bits.
      __ movsxlq(key_reg, key_reg);
    }
  }

  bool requires_hole_check = hinstr->RequiresHoleCheck();
  int offset = FixedArray::kHeaderSize - kHeapObjectTag;
  Representation representation = hinstr->representation();

  if (representation.IsInteger32() &&
      hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
    ASSERT(!requires_hole_check);
    // Read int value directly from upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
    offset += kPointerSize / 2;
  }

  __ Load(result,
          BuildFastArrayOperand(instr->elements(),
                                key,
                                FAST_ELEMENTS,
                                offset,
                                instr->additional_index()),
          representation);

  // Check for the hole value.
  if (requires_hole_check) {
    if (IsFastSmiElementsKind(hinstr->elements_kind())) {
      Condition smi = __ CheckSmi(result);
      DeoptimizeIf(NegateCondition(smi), instr->environment());
    } else {
      __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
      DeoptimizeIf(equal, instr->environment());
    }
  }
}


void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_typed_elements()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}


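// Builds the x64 memory operand for an element access. A constant key is
// folded into the displacement (base + ((key + additional_index) << shift)
// + offset); a variable key uses a scaled-index addressing mode
// (base + key * scale + displacement) instead.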
Operand LCodeGen::BuildFastArrayOperand(
    LOperand* elements_pointer,
    LOperand* key,
    ElementsKind elements_kind,
    uint32_t offset,
    uint32_t additional_index) {
  Register elements_pointer_reg = ToRegister(elements_pointer);
  int shift_size = ElementsKindToShiftSize(elements_kind);
  if (key->IsConstantOperand()) {
    int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
    if (constant_value & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }

    return Operand(elements_pointer_reg,
                   ((constant_value + additional_index) << shift_size)
                       + offset);
  } else {
    if (ExternalArrayOpRequiresPreScale(elements_kind)) {
      // Make sure the key is pre-scaled against maximal_scale_factor.
      shift_size = static_cast<int>(maximal_scale_factor);
    }
    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
    return Operand(elements_pointer_reg,
                   ToRegister(key),
                   scale_factor,
                   offset + (additional_index << shift_size));
  }
}


void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->object()).is(rdx));
  ASSERT(ToRegister(instr->key()).is(rax));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ lea(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
  } else {
    // Check for arguments adaptor frame.
    Label done, adapted;
    __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
    __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
           Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
    __ j(equal, &adapted, Label::kNear);

    // No arguments adaptor frame.
    __ movp(result, rbp);
    __ jmp(&done, Label::kNear);

    // Arguments adaptor frame present.
    __ bind(&adapted);
    __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

    // Result is the frame pointer for the frame if not adapted and for the real
    // frame below the adaptor frame if adapted.
    __ bind(&done);
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  if (instr->elements()->IsRegister()) {
    __ cmpq(rbp, ToRegister(instr->elements()));
  } else {
    __ cmpq(rbp, ToOperand(instr->elements()));
  }
  __ movl(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done, Label::kNear);

  // Arguments adaptor frame present. Get argument length from there.
  __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ SmiToInteger32(result,
                    Operand(result,
                            ArgumentsAdaptorFrameConstants::kLengthOffset));

  // Argument length is in result register.
  __ bind(&done);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, receiver_ok;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;

  if (!instr->hydrogen()->known_function()) {
    // Do not transform the receiver to object for strict mode
    // functions.
    __ movp(kScratchRegister,
            FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
    __ testb(FieldOperand(kScratchRegister,
                          SharedFunctionInfo::kStrictModeByteOffset),
             Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
    __ j(not_equal, &receiver_ok, dist);

    // Do not transform the receiver to object for builtins.
    __ testb(FieldOperand(kScratchRegister,
                          SharedFunctionInfo::kNativeByteOffset),
             Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
    __ j(not_equal, &receiver_ok, dist);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ CompareRoot(receiver, Heap::kNullValueRootIndex);
  __ j(equal, &global_object, Label::kNear);
  __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
  __ j(equal, &global_object, Label::kNear);

  // The receiver should be a JS object.
  Condition is_smi = __ CheckSmi(receiver);
  DeoptimizeIf(is_smi, instr->environment());
  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
  DeoptimizeIf(below, instr->environment());

  __ jmp(&receiver_ok, Label::kNear);
  __ bind(&global_object);
  __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
  __ movp(receiver,
          Operand(receiver,
                  Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  __ movp(receiver,
          FieldOperand(receiver, GlobalObject::kGlobalReceiverOffset));

  __ bind(&receiver_ok);
}


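// Implements Function.prototype.apply: the arguments are copied one by one
// from the (possibly adapted) frame onto the stack, with a deopt guard
// against pathologically large argument counts (kArgumentsLimit).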
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  ASSERT(receiver.is(rax));  // Used for parameter count.
  ASSERT(function.is(rdi));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(rax));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmpq(length, Immediate(kArgumentsLimit));
  DeoptimizeIf(above, instr->environment());

  __ push(receiver);
  __ movp(receiver, length);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ testl(length, length);
  __ j(zero, &invoke, Label::kNear);
  __ bind(&loop);
  StackArgumentsAccessor args(elements, length,
                              ARGUMENTS_DONT_CONTAIN_RECEIVER);
  __ push(args.GetArgumentOperand(0));
  __ decl(length);
  __ j(not_zero, &loop);

  // Invoke the function.
  __ bind(&invoke);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  ParameterCount actual(rax);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  EmitPushTaggedOperand(argument);
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in rsi.
    ASSERT(result.is(rsi));
  }
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  __ push(rsi);  // The context is the first argument.
  __ Push(instr->hydrogen()->pairs());
  __ Push(Smi::FromInt(instr->hydrogen()->flags()));
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}


void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count,
                                 int arity,
                                 LInstruction* instr,
                                 RDIState rdi_state) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  LPointerMap* pointers = instr->pointer_map();

  if (can_invoke_directly) {
    if (rdi_state == RDI_UNINITIALIZED) {
      __ Move(rdi, function);
    }

    // Change context.
    __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));

    // Set rax to arguments count if adaptation is not needed. Assumes that rax
    // is available to write to at this point.
    if (dont_adapt_arguments) {
      __ Set(rax, arity);
    }

    // Invoke function.
    if (function.is_identical_to(info()->closure())) {
      __ CallSelf();
    } else {
      __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
    }

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
  } else {
    // We need to adapt arguments.
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
  }
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));

  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

  if (instr->target()->IsConstantOperand()) {
    LConstantOperand* target = LConstantOperand::cast(instr->target());
    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
    generator.BeforeCall(__ CallSize(code));
    __ call(code, RelocInfo::CODE_TARGET);
  } else {
    ASSERT(instr->target()->IsRegister());
    Register target = ToRegister(instr->target());
    generator.BeforeCall(__ CallSize(target));
    __ addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
    __ call(target);
  }
  generator.AfterCall();
}


void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(rdi));
  ASSERT(ToRegister(instr->result()).is(rax));

  if (instr->hydrogen()->pass_argument_count()) {
    __ Set(rax, instr->arity());
  }

  // Change context.
  __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));

  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

  bool is_self_call = false;
  if (instr->hydrogen()->function()->IsConstant()) {
    Handle<JSFunction> jsfun = Handle<JSFunction>::null();
    HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
    jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
    is_self_call = jsfun.is_identical_to(info()->closure());
  }

  if (is_self_call) {
    __ CallSelf();
  } else {
    Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
    generator.BeforeCall(__ CallSize(target));
    __ call(target);
  }
  generator.AfterCall();
}


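// Deferred code for Math.abs on a tagged heap number. Heap numbers may be
// shared, so the input is never mutated in place: a fresh heap number is
// allocated and the 64-bit payload is copied over with the sign bit cleared
// (shifted out and back in below).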
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(not_equal, instr->environment());

  Label slow, allocated, done;
  Register tmp = input_reg.is(rax) ? rcx : rax;
  Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it. We do not need to patch the stack since |input| and
  // |result| are the same register and |input| will be restored
  // unchanged by popping safepoint registers.
  __ testl(tmp, Immediate(HeapNumber::kSignMask));
  __ j(zero, &done);

  __ AllocateHeapNumber(tmp, tmp2, &slow);
  __ jmp(&allocated, Label::kNear);

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  CallRuntimeFromDeferred(
      Runtime::kAllocateHeapNumber, 0, instr, instr->context());
  // Set the pointer to the new heap number in tmp.
  if (!tmp.is(rax)) __ movp(tmp, rax);
  // Restore input_reg after call to runtime.
  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);

  __ bind(&allocated);
  __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ shl(tmp2, Immediate(1));
  __ shr(tmp2, Immediate(1));
  __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
  __ StoreToSafepointRegisterSlot(input_reg, tmp);

  __ bind(&done);
}


void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ testl(input_reg, input_reg);
  Label is_positive;
  __ j(not_sign, &is_positive, Label::kNear);
  __ negl(input_reg);  // Sets flags.
  DeoptimizeIf(negative, instr->environment());
  __ bind(&is_positive);
}


void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ testq(input_reg, input_reg);
  Label is_positive;
  __ j(not_sign, &is_positive, Label::kNear);
  __ neg(input_reg);  // Sets flags.
  DeoptimizeIf(negative, instr->environment());
  __ bind(&is_positive);
}


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LMathAbs* instr_;
  };

  ASSERT(instr->value()->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();

  if (r.IsDouble()) {
    XMMRegister scratch = double_scratch0();
    XMMRegister input_reg = ToDoubleRegister(instr->value());
    __ xorps(scratch, scratch);
    __ subsd(scratch, input_reg);
    __ andps(input_reg, scratch);
  } else if (r.IsInteger32()) {
    EmitIntegerMathAbs(instr);
  } else if (r.IsSmi()) {
    EmitSmiMathAbs(instr);
  } else {  // Tagged case.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input_reg = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input_reg, deferred->entry());
    EmitSmiMathAbs(instr);
    __ bind(deferred->exit());
  }
}


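// Math.floor: with SSE4.1 a single roundsd (round toward -infinity) does the
// job; otherwise the value is truncated with cvttsd2si and compensated by
// one for negative non-integral inputs. In both paths cvttsd2si signals an
// out-of-range double by producing 0x80000000, which triggers a deopt. The
// minus-zero check in the SSE4.1 path works on the raw bits: -0.0 is
// 0x8000000000000000 (INT64_MIN), the only value for which subtracting 1
// overflows.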
void LCodeGen::DoMathFloor(LMathFloor* instr) {
  XMMRegister xmm_scratch = double_scratch0();
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());

  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope scope(masm(), SSE4_1);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Deoptimize if minus zero.
      __ movq(output_reg, input_reg);
      __ subq(output_reg, Immediate(1));
      DeoptimizeIf(overflow, instr->environment());
    }
    __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
    __ cvttsd2si(output_reg, xmm_scratch);
    __ cmpl(output_reg, Immediate(0x80000000));
    DeoptimizeIf(equal, instr->environment());
  } else {
    Label negative_sign, done;
    // Deoptimize on unordered.
    __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
    __ ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(parity_even, instr->environment());
    __ j(below, &negative_sign, Label::kNear);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Check for negative zero.
      Label positive_sign;
      __ j(above, &positive_sign, Label::kNear);
      __ movmskpd(output_reg, input_reg);
      __ testq(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
      __ Set(output_reg, 0);
      __ jmp(&done, Label::kNear);
      __ bind(&positive_sign);
    }

    // Use truncating instruction (OK because input is positive).
    __ cvttsd2si(output_reg, input_reg);
    // Overflow is signalled with minint.
    __ cmpl(output_reg, Immediate(0x80000000));
    DeoptimizeIf(equal, instr->environment());
    __ jmp(&done, Label::kNear);

    // Non-zero negative reaches here.
    __ bind(&negative_sign);
    // Truncate, then compare and compensate.
    __ cvttsd2si(output_reg, input_reg);
    __ Cvtlsi2sd(xmm_scratch, output_reg);
    __ ucomisd(input_reg, xmm_scratch);
    __ j(equal, &done, Label::kNear);
    __ subl(output_reg, Immediate(1));
    DeoptimizeIf(overflow, instr->environment());

    __ bind(&done);
  }
}


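// Math.round is split into three ranges: for x >= 0.5 the result is
// floor(x + 0.5), computed by truncation; for x in (-0.5, 0.5) the result
// is 0 (with a minus-zero deopt check if required); for x < -0.5 the code
// truncates x + 0.5 (truncation rounds toward zero, i.e. up for negatives)
// and subtracts 1 when the truncated value is not exact.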
void LCodeGen::DoMathRound(LMathRound* instr) {
  const XMMRegister xmm_scratch = double_scratch0();
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  static int64_t one_half = V8_INT64_C(0x3FE0000000000000);  // 0.5
  static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000);  // -0.5

  Label done, round_to_zero, below_one_half, do_not_compensate, restore;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  __ movq(kScratchRegister, one_half);
  __ movq(xmm_scratch, kScratchRegister);
  __ ucomisd(xmm_scratch, input_reg);
  __ j(above, &below_one_half, Label::kNear);

  // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
  __ addsd(xmm_scratch, input_reg);
  __ cvttsd2si(output_reg, xmm_scratch);
  // Overflow is signalled with minint.
  __ cmpl(output_reg, Immediate(0x80000000));
  __ RecordComment("D2I conversion overflow");
  DeoptimizeIf(equal, instr->environment());
  __ jmp(&done, dist);

  __ bind(&below_one_half);
  __ movq(kScratchRegister, minus_one_half);
  __ movq(xmm_scratch, kScratchRegister);
  __ ucomisd(xmm_scratch, input_reg);
  __ j(below_equal, &round_to_zero, Label::kNear);

  // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
  // compare and compensate.
  __ movq(kScratchRegister, input_reg);  // Back up input_reg.
  __ subsd(input_reg, xmm_scratch);
  __ cvttsd2si(output_reg, input_reg);
  // Catch minint due to overflow, and to prevent overflow when compensating.
  __ cmpl(output_reg, Immediate(0x80000000));
  __ RecordComment("D2I conversion overflow");
  DeoptimizeIf(equal, instr->environment());

  __ Cvtlsi2sd(xmm_scratch, output_reg);
  __ ucomisd(input_reg, xmm_scratch);
  __ j(equal, &restore, Label::kNear);
  __ subl(output_reg, Immediate(1));
  // No overflow because we already ruled out minint.
  __ bind(&restore);
  __ movq(input_reg, kScratchRegister);  // Restore input_reg.
  __ jmp(&done, dist);

  __ bind(&round_to_zero);
  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
  // we can ignore the difference between a result of -0 and +0.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ movq(output_reg, input_reg);
    __ testq(output_reg, output_reg);
    __ RecordComment("Minus zero");
    DeoptimizeIf(negative, instr->environment());
  }
  __ Set(output_reg, 0);
  __ bind(&done);
}


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
  __ sqrtsd(input_reg, input_reg);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  XMMRegister xmm_scratch = double_scratch0();
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done, sqrt;
  // Check base for -Infinity. According to IEEE-754, double-precision
  // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
  __ movq(xmm_scratch, kScratchRegister);
  __ ucomisd(xmm_scratch, input_reg);
  // Comparing -Infinity with NaN results in "unordered", which sets the
  // zero flag as if both were equal. However, it also sets the carry flag.
  __ j(not_equal, &sqrt, Label::kNear);
  __ j(carry, &sqrt, Label::kNear);
  // If input is -Infinity, return Infinity.
  __ xorps(input_reg, input_reg);
  __ subsd(input_reg, xmm_scratch);
  __ jmp(&done, Label::kNear);

  // Square root.
  __ bind(&sqrt);
  __ xorps(xmm_scratch, xmm_scratch);
  __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
  __ sqrtsd(input_reg, input_reg);
  __ bind(&done);
}


void LCodeGen::DoNullarySIMDOperation(LNullarySIMDOperation* instr) {
  switch (instr->op()) {
    case kFloat32x4Zero: {
      XMMRegister result_reg = ToFloat32x4Register(instr->result());
      __ xorps(result_reg, result_reg);
      return;
    }
    case kInt32x4Zero: {
      XMMRegister result_reg = ToInt32x4Register(instr->result());
      __ xorps(result_reg, result_reg);
      return;
    }
    default:
      UNREACHABLE();
      return;
  }
}


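// Note the deliberate case fall-through for kFloat32x4GetW/Z/Y/X below:
// each case increments 'select' once on the way down, so by the time
// control reaches the shared GetX body 'select' holds the lane index
// (W=3, Z=2, Y=1, X=0) that pshufd uses to move the requested lane into
// the low slot.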
void LCodeGen::DoUnarySIMDOperation(LUnarySIMDOperation* instr) {
  uint8_t select = 0;
  switch (instr->op()) {
    case kSIMD128Change: {
      Comment(";;; deoptimize: cannot perform representation change"
              " for float32x4 or int32x4");
      DeoptimizeIf(no_condition, instr->environment());
      break;
    }
    case kFloat32x4Abs:
    case kFloat32x4Neg:
    case kFloat32x4Reciprocal:
    case kFloat32x4ReciprocalSqrt:
    case kFloat32x4Sqrt: {
      ASSERT(instr->value()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
      XMMRegister input_reg = ToFloat32x4Register(instr->value());
      switch (instr->op()) {
        case kFloat32x4Abs:
          __ absps(input_reg);
          break;
        case kFloat32x4Neg:
          __ negateps(input_reg);
          break;
        case kFloat32x4Reciprocal:
          __ rcpps(input_reg, input_reg);
          break;
        case kFloat32x4ReciprocalSqrt:
          __ rsqrtps(input_reg, input_reg);
          break;
        case kFloat32x4Sqrt:
          __ sqrtps(input_reg, input_reg);
          break;
        default:
          UNREACHABLE();
          break;
      }
      break;
    }
    case kInt32x4Not:
    case kInt32x4Neg: {
      ASSERT(instr->value()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
      XMMRegister input_reg = ToInt32x4Register(instr->value());
      switch (instr->op()) {
        case kInt32x4Not:
          __ notps(input_reg);
          break;
        case kInt32x4Neg:
          __ pnegd(input_reg);
          break;
        default:
          UNREACHABLE();
          break;
      }
      break;
    }
    case kFloat32x4BitsToInt32x4:
    case kFloat32x4ToInt32x4: {
      ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
      XMMRegister input_reg = ToFloat32x4Register(instr->value());
      XMMRegister result_reg = ToInt32x4Register(instr->result());
      if (instr->op() == kFloat32x4BitsToInt32x4) {
        if (!result_reg.is(input_reg)) {
          __ movaps(result_reg, input_reg);
        }
      } else {
        ASSERT(instr->op() == kFloat32x4ToInt32x4);
        __ cvtps2dq(result_reg, input_reg);
      }
      break;
    }
    case kInt32x4BitsToFloat32x4:
    case kInt32x4ToFloat32x4: {
      ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
      XMMRegister input_reg = ToInt32x4Register(instr->value());
      XMMRegister result_reg = ToFloat32x4Register(instr->result());
      if (instr->op() == kInt32x4BitsToFloat32x4) {
        if (!result_reg.is(input_reg)) {
          __ movaps(result_reg, input_reg);
        }
      } else {
        ASSERT(instr->op() == kInt32x4ToFloat32x4);
        __ cvtdq2ps(result_reg, input_reg);
      }
      break;
    }
    case kFloat32x4Splat: {
      ASSERT(instr->hydrogen()->value()->representation().IsDouble());
      XMMRegister input_reg = ToDoubleRegister(instr->value());
      XMMRegister result_reg = ToFloat32x4Register(instr->result());
      XMMRegister xmm_scratch = xmm0;
      __ xorps(xmm_scratch, xmm_scratch);
      __ cvtsd2ss(xmm_scratch, input_reg);
      __ shufps(xmm_scratch, xmm_scratch, 0x0);
      __ movaps(result_reg, xmm_scratch);
      break;
    }
    case kInt32x4Splat: {
      ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
      Register input_reg = ToRegister(instr->value());
      XMMRegister result_reg = ToInt32x4Register(instr->result());
      __ movd(result_reg, input_reg);
      __ shufps(result_reg, result_reg, 0x0);
      break;
    }
    case kInt32x4GetSignMask: {
      ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
      XMMRegister input_reg = ToInt32x4Register(instr->value());
      Register result = ToRegister(instr->result());
      __ movmskps(result, input_reg);
      break;
    }
    case kFloat32x4GetSignMask: {
      ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
      XMMRegister input_reg = ToFloat32x4Register(instr->value());
      Register result = ToRegister(instr->result());
      __ movmskps(result, input_reg);
      break;
    }
    case kFloat32x4GetW:
      select++;
    case kFloat32x4GetZ:
      select++;
    case kFloat32x4GetY:
      select++;
    case kFloat32x4GetX: {
      ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
      XMMRegister input_reg = ToFloat32x4Register(instr->value());
      XMMRegister result = ToDoubleRegister(instr->result());
      XMMRegister xmm_scratch = result.is(input_reg) ? xmm0 : result;

      if (select == 0x0) {
        __ xorps(xmm_scratch, xmm_scratch);
        __ cvtss2sd(xmm_scratch, input_reg);
        if (!xmm_scratch.is(result)) {
          __ movaps(result, xmm_scratch);
        }
      } else {
        __ pshufd(xmm_scratch, input_reg, select);
        if (!xmm_scratch.is(result)) {
          __ xorps(result, result);
        }
        __ cvtss2sd(result, xmm_scratch);
      }
      break;
    }
    case kInt32x4GetFlagX:
    case kInt32x4GetFlagY:
    case kInt32x4GetFlagZ:
    case kInt32x4GetFlagW: {
      ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
      switch (instr->op()) {
        case kInt32x4GetFlagX:
          select = 0x0;
          break;
        case kInt32x4GetFlagY:
          select = 0x1;
          break;
        case kInt32x4GetFlagZ:
          select = 0x2;
          break;
        case kInt32x4GetFlagW:
          select = 0x3;
          break;
        default:
          UNREACHABLE();
          break;
      }

      XMMRegister input_reg = ToInt32x4Register(instr->value());
      Register result = ToRegister(instr->result());
      if (select == 0x0) {
        __ movd(result, input_reg);
      } else {
        if (CpuFeatures::IsSupported(SSE4_1)) {
          CpuFeatureScope scope(masm(), SSE4_1);
          __ extractps(result, input_reg, select);
        } else {
          XMMRegister xmm_scratch = xmm0;
          __ pshufd(xmm_scratch, input_reg, select);
          __ movd(result, xmm_scratch);
        }
      }

      Label false_value, done;
      __ testl(result, result);
      __ j(zero, &false_value, Label::kNear);
      __ LoadRoot(result, Heap::kTrueValueRootIndex);
      __ jmp(&done, Label::kNear);
      __ bind(&false_value);
      __ LoadRoot(result, Heap::kFalseValueRootIndex);
      __ bind(&done);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}

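// A hedged intrinsics sketch (not V8 code) of the lane-extract fallback used
// above: SSE4.1 pulls a lane out directly with extractps, while the SSE2 path
// first broadcasts the wanted lane with a shuffle and then moves the low
// 32 bits to a general-purpose register. For a fixed lane (lane 2 here):
//
//   #include <emmintrin.h>  // SSE2 is enough for the fallback path.
//   static inline int extract_lane2(__m128i v) {
//     // Replicate lane 2 into every lane, then read lane 0.
//     __m128i t = _mm_shuffle_epi32(v, _MM_SHUFFLE(2, 2, 2, 2));
//     return _mm_cvtsi128_si32(t);  // Same value SSE4.1 would give directly.
//   }
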
void LCodeGen::DoBinarySIMDOperation(LBinarySIMDOperation* instr) {
  uint8_t imm8 = 0;  // Selects the lane for the "with" operations below.
  switch (instr->op()) {
    case kFloat32x4Add:
    case kFloat32x4Sub:
    case kFloat32x4Mul:
    case kFloat32x4Div:
    case kFloat32x4Min:
    case kFloat32x4Max: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsFloat32x4());
      XMMRegister left_reg = ToFloat32x4Register(instr->left());
      XMMRegister right_reg = ToFloat32x4Register(instr->right());
      switch (instr->op()) {
        case kFloat32x4Add:
          __ addps(left_reg, right_reg);
          break;
        case kFloat32x4Sub:
          __ subps(left_reg, right_reg);
          break;
        case kFloat32x4Mul:
          __ mulps(left_reg, right_reg);
          break;
        case kFloat32x4Div:
          __ divps(left_reg, right_reg);
          break;
        case kFloat32x4Min:
          __ minps(left_reg, right_reg);
          break;
        case kFloat32x4Max:
          __ maxps(left_reg, right_reg);
          break;
        default:
          UNREACHABLE();
          break;
      }
      break;
    }
    case kFloat32x4Scale: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsDouble());
      XMMRegister left_reg = ToFloat32x4Register(instr->left());
      XMMRegister right_reg = ToDoubleRegister(instr->right());
      XMMRegister scratch_reg = xmm0;
      __ xorps(scratch_reg, scratch_reg);
      __ cvtsd2ss(scratch_reg, right_reg);
      __ shufps(scratch_reg, scratch_reg, 0x0);
      __ mulps(left_reg, scratch_reg);
      break;
    }
    case kFloat32x4Shuffle: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
      if (instr->hydrogen()->right()->IsConstant() &&
          HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
        int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
        uint8_t select = static_cast<uint8_t>(value & 0xFF);
        XMMRegister left_reg = ToFloat32x4Register(instr->left());
        __ shufps(left_reg, left_reg, select);
      } else {
        Comment(";;; deoptimize: non-constant selector for shuffle");
        DeoptimizeIf(no_condition, instr->environment());
      }
      break;
    }
    case kInt32x4Shuffle: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
      if (instr->hydrogen()->right()->IsConstant() &&
          HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
        int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
        uint8_t select = static_cast<uint8_t>(value & 0xFF);
        XMMRegister left_reg = ToInt32x4Register(instr->left());
        __ pshufd(left_reg, left_reg, select);
      } else {
        Comment(";;; deoptimize: non-constant selector for shuffle");
        DeoptimizeIf(no_condition, instr->environment());
      }
      break;
    }
    case kInt32x4ShiftLeft:
    case kInt32x4ShiftRight:
    case kInt32x4ShiftRightArithmetic: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
      if (instr->hydrogen()->right()->IsConstant() &&
          HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
        int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
        uint8_t shift = static_cast<uint8_t>(value & 0xFF);
        XMMRegister left_reg = ToInt32x4Register(instr->left());
        switch (instr->op()) {
          case kInt32x4ShiftLeft:
            __ pslld(left_reg, shift);
            break;
          case kInt32x4ShiftRight:
            __ psrld(left_reg, shift);
            break;
          case kInt32x4ShiftRightArithmetic:
            __ psrad(left_reg, shift);
            break;
          default:
            UNREACHABLE();
            break;
        }
      } else {
        XMMRegister left_reg = ToInt32x4Register(instr->left());
        Register shift = ToRegister(instr->right());
        XMMRegister xmm_scratch = double_scratch0();
        __ movd(xmm_scratch, shift);
        switch (instr->op()) {
          case kInt32x4ShiftLeft:
            __ pslld(left_reg, xmm_scratch);
            break;
          case kInt32x4ShiftRight:
            __ psrld(left_reg, xmm_scratch);
            break;
          case kInt32x4ShiftRightArithmetic:
            __ psrad(left_reg, xmm_scratch);
            break;
          default:
            UNREACHABLE();
            break;
        }
      }
      break;
    }
    case kFloat32x4LessThan:
    case kFloat32x4LessThanOrEqual:
    case kFloat32x4Equal:
    case kFloat32x4NotEqual:
    case kFloat32x4GreaterThanOrEqual:
    case kFloat32x4GreaterThan: {
      ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsFloat32x4());
      XMMRegister left_reg = ToFloat32x4Register(instr->left());
      XMMRegister right_reg = ToFloat32x4Register(instr->right());
      XMMRegister result_reg = ToInt32x4Register(instr->result());
      switch (instr->op()) {
        case kFloat32x4LessThan:
          if (result_reg.is(left_reg)) {
            __ cmpltps(result_reg, right_reg);
          } else if (result_reg.is(right_reg)) {
            __ cmpnltps(result_reg, left_reg);
          } else {
            __ movaps(result_reg, left_reg);
            __ cmpltps(result_reg, right_reg);
          }
          break;
        case kFloat32x4LessThanOrEqual:
          if (result_reg.is(left_reg)) {
            __ cmpleps(result_reg, right_reg);
          } else if (result_reg.is(right_reg)) {
            __ cmpnleps(result_reg, left_reg);
          } else {
            __ movaps(result_reg, left_reg);
            __ cmpleps(result_reg, right_reg);
          }
          break;
        case kFloat32x4Equal:
          if (result_reg.is(left_reg)) {
            __ cmpeqps(result_reg, right_reg);
          } else if (result_reg.is(right_reg)) {
            __ cmpeqps(result_reg, left_reg);
          } else {
            __ movaps(result_reg, left_reg);
            __ cmpeqps(result_reg, right_reg);
          }
          break;
        case kFloat32x4NotEqual:
          if (result_reg.is(left_reg)) {
            __ cmpneqps(result_reg, right_reg);
          } else if (result_reg.is(right_reg)) {
            __ cmpneqps(result_reg, left_reg);
          } else {
            __ movaps(result_reg, left_reg);
            __ cmpneqps(result_reg, right_reg);
          }
          break;
        case kFloat32x4GreaterThanOrEqual:
          if (result_reg.is(left_reg)) {
            __ cmpnltps(result_reg, right_reg);
          } else if (result_reg.is(right_reg)) {
            __ cmpltps(result_reg, left_reg);
          } else {
            __ movaps(result_reg, left_reg);
            __ cmpnltps(result_reg, right_reg);
          }
          break;
        case kFloat32x4GreaterThan:
          if (result_reg.is(left_reg)) {
            __ cmpnleps(result_reg, right_reg);
          } else if (result_reg.is(right_reg)) {
            __ cmpleps(result_reg, left_reg);
          } else {
            __ movaps(result_reg, left_reg);
            __ cmpnleps(result_reg, right_reg);
          }
          break;
        default:
          UNREACHABLE();
          break;
      }
      break;
    }
    case kInt32x4And:
    case kInt32x4Or:
    case kInt32x4Xor:
    case kInt32x4Add:
    case kInt32x4Sub:
    case kInt32x4Mul:
    case kInt32x4GreaterThan:
    case kInt32x4Equal:
    case kInt32x4LessThan: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsInt32x4());
      XMMRegister left_reg = ToInt32x4Register(instr->left());
      XMMRegister right_reg = ToInt32x4Register(instr->right());
      switch (instr->op()) {
        case kInt32x4And:
          __ andps(left_reg, right_reg);
          break;
        case kInt32x4Or:
          __ orps(left_reg, right_reg);
          break;
        case kInt32x4Xor:
          __ xorps(left_reg, right_reg);
          break;
        case kInt32x4Add:
          __ paddd(left_reg, right_reg);
          break;
        case kInt32x4Sub:
          __ psubd(left_reg, right_reg);
          break;
        case kInt32x4Mul:
          if (CpuFeatures::IsSupported(SSE4_1)) {
            CpuFeatureScope scope(masm(), SSE4_1);
            __ pmulld(left_reg, right_reg);
          } else {
            // The algorithm is from
            // http://stackoverflow.com/questions/10500766/sse-multiplication-of-4-32-bit-integers
            XMMRegister xmm_scratch = xmm0;
            __ movaps(xmm_scratch, left_reg);
            __ pmuludq(left_reg, right_reg);
            __ psrldq(xmm_scratch, 4);
            __ psrldq(right_reg, 4);
            __ pmuludq(xmm_scratch, right_reg);
            __ pshufd(left_reg, left_reg, 8);
            __ pshufd(xmm_scratch, xmm_scratch, 8);
            __ punpackldq(left_reg, xmm_scratch);
          }
          break;
        case kInt32x4GreaterThan:
          __ pcmpgtd(left_reg, right_reg);
          break;
        case kInt32x4Equal:
          __ pcmpeqd(left_reg, right_reg);
          break;
        case kInt32x4LessThan: {
          XMMRegister xmm_scratch = xmm0;
          __ movaps(xmm_scratch, right_reg);
          __ pcmpgtd(xmm_scratch, left_reg);
          __ movaps(left_reg, xmm_scratch);
          break;
        }
        default:
          UNREACHABLE();
          break;
      }
      break;
    }
    case kFloat32x4WithW:
      imm8++;
    case kFloat32x4WithZ:
      imm8++;
    case kFloat32x4WithY:
      imm8++;
    case kFloat32x4WithX: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsDouble());
      XMMRegister left_reg = ToFloat32x4Register(instr->left());
      XMMRegister right_reg = ToDoubleRegister(instr->right());
      XMMRegister xmm_scratch = xmm0;
      __ xorps(xmm_scratch, xmm_scratch);
      __ cvtsd2ss(xmm_scratch, right_reg);
      if (CpuFeatures::IsSupported(SSE4_1)) {
        // insertps encodes the destination lane in bits [5:4] of the
        // immediate; bits [3:0] would zero lanes, so shift first.
        imm8 = imm8 << 4;
        CpuFeatureScope scope(masm(), SSE4_1);
        __ insertps(left_reg, xmm_scratch, imm8);
      } else {
        __ subq(rsp, Immediate(kFloat32x4Size));
        __ movups(Operand(rsp, 0), left_reg);
        __ movss(Operand(rsp, imm8 * kFloatSize), xmm_scratch);
        __ movups(left_reg, Operand(rsp, 0));
        __ addq(rsp, Immediate(kFloat32x4Size));
      }
      break;
    }
    case kInt32x4WithW:
      imm8++;
    case kInt32x4WithZ:
      imm8++;
    case kInt32x4WithY:
      imm8++;
    case kInt32x4WithX: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsInteger32());
      XMMRegister left_reg = ToInt32x4Register(instr->left());
      Register right_reg = ToRegister(instr->right());
      if (CpuFeatures::IsSupported(SSE4_1)) {
        CpuFeatureScope scope(masm(), SSE4_1);
        __ pinsrd(left_reg, right_reg, imm8);
      } else {
        __ subq(rsp, Immediate(kInt32x4Size));
        __ movdqu(Operand(rsp, 0), left_reg);
        __ movl(Operand(rsp, imm8 * kFloatSize), right_reg);
        __ movdqu(left_reg, Operand(rsp, 0));
        __ addq(rsp, Immediate(kInt32x4Size));
      }
      break;
    }
    case kInt32x4WithFlagW:
      imm8++;
    case kInt32x4WithFlagZ:
      imm8++;
    case kInt32x4WithFlagY:
      imm8++;
    case kInt32x4WithFlagX: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsTagged());
      HType type = instr->hydrogen()->right()->type();
      XMMRegister left_reg = ToInt32x4Register(instr->left());
      Register right_reg = ToRegister(instr->right());
      Label load_false_value, done;
      if (type.IsBoolean()) {
        __ subq(rsp, Immediate(kInt32x4Size));
        __ movups(Operand(rsp, 0), left_reg);
        __ CompareRoot(right_reg, Heap::kTrueValueRootIndex);
        __ j(not_equal, &load_false_value, Label::kNear);
      } else {
        Comment(";;; deoptimize: other types for int32x4.withFlagX/Y/Z/W.");
        DeoptimizeIf(no_condition, instr->environment());
        break;
      }
      // Load true value.
      __ movl(Operand(rsp, imm8 * kFloatSize), Immediate(0xFFFFFFFF));
      __ jmp(&done, Label::kNear);
      __ bind(&load_false_value);
      __ movl(Operand(rsp, imm8 * kFloatSize), Immediate(0x0));
      __ bind(&done);
      __ movups(left_reg, Operand(rsp, 0));
      __ addq(rsp, Immediate(kInt32x4Size));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}

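// A hedged intrinsics sketch (not V8 code) of the SSE2 fallback for packed
// 32-bit multiply emitted for kInt32x4Mul above: pmuludq only multiplies the
// even lanes (0 and 2) into 64-bit products, so the odd lanes are shifted
// down, multiplied separately, and the two halves are re-interleaved.
//
//   #include <emmintrin.h>
//   static inline __m128i mul_epi32_sse2(__m128i a, __m128i b) {
//     __m128i even = _mm_mul_epu32(a, b);                  // products of lanes 0, 2
//     __m128i odd  = _mm_mul_epu32(_mm_srli_si128(a, 4),
//                                  _mm_srli_si128(b, 4));  // products of lanes 1, 3
//     // 0x08 == _MM_SHUFFLE(0, 0, 2, 0): gather the low 32 bits of each product.
//     return _mm_unpacklo_epi32(_mm_shuffle_epi32(even, _MM_SHUFFLE(0, 0, 2, 0)),
//                               _mm_shuffle_epi32(odd,  _MM_SHUFFLE(0, 0, 2, 0)));
//   }
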
void LCodeGen::DoTernarySIMDOperation(LTernarySIMDOperation* instr) {
  switch (instr->op()) {
    case kInt32x4Select: {
      ASSERT(instr->hydrogen()->first()->representation().IsInt32x4());
      ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->third()->representation().IsFloat32x4());

      XMMRegister mask_reg = ToInt32x4Register(instr->first());
      XMMRegister left_reg = ToFloat32x4Register(instr->second());
      XMMRegister right_reg = ToFloat32x4Register(instr->third());
      XMMRegister result_reg = ToFloat32x4Register(instr->result());
      XMMRegister temp_reg = xmm0;

      // Copy mask.
      __ movaps(temp_reg, mask_reg);
      // Invert it.
      __ notps(temp_reg);
      // temp_reg = temp_reg & falseValue.
      __ andps(temp_reg, right_reg);

      if (!result_reg.is(mask_reg)) {
        if (result_reg.is(left_reg)) {
          // result_reg = result_reg & trueValue.
          __ andps(result_reg, mask_reg);
          // out = result_reg | temp_reg.
          __ orps(result_reg, temp_reg);
        } else {
          __ movaps(result_reg, mask_reg);
          // result_reg = result_reg & trueValue.
          __ andps(result_reg, left_reg);
          // out = result_reg | temp_reg.
          __ orps(result_reg, temp_reg);
        }
      } else {
        // result_reg = result_reg & trueValue.
        __ andps(result_reg, left_reg);
        // out = result_reg | temp_reg.
        __ orps(result_reg, temp_reg);
      }
      break;
    }
    case kFloat32x4ShuffleMix: {
      ASSERT(instr->first()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->first()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->third()->representation().IsInteger32());
      if (instr->hydrogen()->third()->IsConstant() &&
          HConstant::cast(instr->hydrogen()->third())->HasInteger32Value()) {
        int32_t value = ToInteger32(LConstantOperand::cast(instr->third()));
        uint8_t select = static_cast<uint8_t>(value & 0xFF);
        XMMRegister first_reg = ToFloat32x4Register(instr->first());
        XMMRegister second_reg = ToFloat32x4Register(instr->second());
        __ shufps(first_reg, second_reg, select);
      } else {
        Comment(";;; deoptimize: non-constant selector for shuffle");
        DeoptimizeIf(no_condition, instr->environment());
      }
      break;
    }
    case kFloat32x4Clamp: {
      ASSERT(instr->first()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->first()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->third()->representation().IsFloat32x4());

      XMMRegister value_reg = ToFloat32x4Register(instr->first());
      XMMRegister lower_reg = ToFloat32x4Register(instr->second());
      XMMRegister upper_reg = ToFloat32x4Register(instr->third());
      __ minps(value_reg, upper_reg);
      __ maxps(value_reg, lower_reg);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}

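// A hedged intrinsics sketch (not V8 code) of the branch-free select emitted
// for kInt32x4Select above: out = (mask & trueValue) | (~mask & falseValue).
//
//   #include <emmintrin.h>
//   static inline __m128 select_ps(__m128i mask, __m128 true_val,
//                                  __m128 false_val) {
//     __m128 m = _mm_castsi128_ps(mask);
//     // andnot computes (~m & false_val) in one instruction, like notps+andps.
//     return _mm_or_ps(_mm_and_ps(m, true_val), _mm_andnot_ps(m, false_val));
//   }
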
void LCodeGen::DoQuarternarySIMDOperation(LQuarternarySIMDOperation* instr) {
  switch (instr->op()) {
    case kFloat32x4Constructor: {
      ASSERT(instr->hydrogen()->x()->representation().IsDouble());
      ASSERT(instr->hydrogen()->y()->representation().IsDouble());
      ASSERT(instr->hydrogen()->z()->representation().IsDouble());
      ASSERT(instr->hydrogen()->w()->representation().IsDouble());
      XMMRegister x_reg = ToDoubleRegister(instr->x());
      XMMRegister y_reg = ToDoubleRegister(instr->y());
      XMMRegister z_reg = ToDoubleRegister(instr->z());
      XMMRegister w_reg = ToDoubleRegister(instr->w());
      XMMRegister result_reg = ToFloat32x4Register(instr->result());
      __ subq(rsp, Immediate(kFloat32x4Size));
      __ xorps(xmm0, xmm0);
      __ cvtsd2ss(xmm0, x_reg);
      __ movss(Operand(rsp, 0 * kFloatSize), xmm0);
      __ xorps(xmm0, xmm0);
      __ cvtsd2ss(xmm0, y_reg);
      __ movss(Operand(rsp, 1 * kFloatSize), xmm0);
      __ xorps(xmm0, xmm0);
      __ cvtsd2ss(xmm0, z_reg);
      __ movss(Operand(rsp, 2 * kFloatSize), xmm0);
      __ xorps(xmm0, xmm0);
      __ cvtsd2ss(xmm0, w_reg);
      __ movss(Operand(rsp, 3 * kFloatSize), xmm0);
      __ movups(result_reg, Operand(rsp, 0 * kFloatSize));
      __ addq(rsp, Immediate(kFloat32x4Size));
      break;
    }
    case kInt32x4Constructor: {
      ASSERT(instr->hydrogen()->x()->representation().IsInteger32());
      ASSERT(instr->hydrogen()->y()->representation().IsInteger32());
      ASSERT(instr->hydrogen()->z()->representation().IsInteger32());
      ASSERT(instr->hydrogen()->w()->representation().IsInteger32());
      Register x_reg = ToRegister(instr->x());
      Register y_reg = ToRegister(instr->y());
      Register z_reg = ToRegister(instr->z());
      Register w_reg = ToRegister(instr->w());
      XMMRegister result_reg = ToInt32x4Register(instr->result());
      __ subq(rsp, Immediate(kInt32x4Size));
      __ movl(Operand(rsp, 0 * kInt32Size), x_reg);
      __ movl(Operand(rsp, 1 * kInt32Size), y_reg);
      __ movl(Operand(rsp, 2 * kInt32Size), z_reg);
      __ movl(Operand(rsp, 3 * kInt32Size), w_reg);
      __ movups(result_reg, Operand(rsp, 0 * kInt32Size));
      __ addq(rsp, Immediate(kInt32x4Size));
      break;
    }
    case kInt32x4Bool: {
      ASSERT(instr->hydrogen()->x()->representation().IsTagged());
      ASSERT(instr->hydrogen()->y()->representation().IsTagged());
      ASSERT(instr->hydrogen()->z()->representation().IsTagged());
      ASSERT(instr->hydrogen()->w()->representation().IsTagged());
      HType x_type = instr->hydrogen()->x()->type();
      HType y_type = instr->hydrogen()->y()->type();
      HType z_type = instr->hydrogen()->z()->type();
      HType w_type = instr->hydrogen()->w()->type();
      if (!x_type.IsBoolean() || !y_type.IsBoolean() ||
          !z_type.IsBoolean() || !w_type.IsBoolean()) {
        Comment(";;; deoptimize: other types for int32x4.bool.");
        DeoptimizeIf(no_condition, instr->environment());
        break;
      }
      XMMRegister result_reg = ToInt32x4Register(instr->result());
      Register x_reg = ToRegister(instr->x());
      Register y_reg = ToRegister(instr->y());
      Register z_reg = ToRegister(instr->z());
      Register w_reg = ToRegister(instr->w());
      Label load_false_x, done_x, load_false_y, done_y,
            load_false_z, done_z, load_false_w, done_w;
      __ subq(rsp, Immediate(kInt32x4Size));

      __ CompareRoot(x_reg, Heap::kTrueValueRootIndex);
      __ j(not_equal, &load_false_x, Label::kNear);
      __ movl(Operand(rsp, 0 * kInt32Size), Immediate(-1));
      __ jmp(&done_x, Label::kNear);
      __ bind(&load_false_x);
      __ movl(Operand(rsp, 0 * kInt32Size), Immediate(0x0));
      __ bind(&done_x);

      __ CompareRoot(y_reg, Heap::kTrueValueRootIndex);
      __ j(not_equal, &load_false_y, Label::kNear);
      __ movl(Operand(rsp, 1 * kInt32Size), Immediate(-1));
      __ jmp(&done_y, Label::kNear);
      __ bind(&load_false_y);
      __ movl(Operand(rsp, 1 * kInt32Size), Immediate(0x0));
      __ bind(&done_y);

      __ CompareRoot(z_reg, Heap::kTrueValueRootIndex);
      __ j(not_equal, &load_false_z, Label::kNear);
      __ movl(Operand(rsp, 2 * kInt32Size), Immediate(-1));
      __ jmp(&done_z, Label::kNear);
      __ bind(&load_false_z);
      __ movl(Operand(rsp, 2 * kInt32Size), Immediate(0x0));
      __ bind(&done_z);

      __ CompareRoot(w_reg, Heap::kTrueValueRootIndex);
      __ j(not_equal, &load_false_w, Label::kNear);
      __ movl(Operand(rsp, 3 * kInt32Size), Immediate(-1));
      __ jmp(&done_w, Label::kNear);
      __ bind(&load_false_w);
      __ movl(Operand(rsp, 3 * kInt32Size), Immediate(0x0));
      __ bind(&done_w);

      __ movups(result_reg, Operand(rsp, 0));
      __ addq(rsp, Immediate(kInt32x4Size));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}

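// A hedged sketch (not V8 code) of the stack-based vector construction used
// by the constructors above: write the four lanes to memory in lane order,
// then load them back as a single 128-bit value.
//
//   #include <emmintrin.h>
//   static inline __m128i make_int32x4(int x, int y, int z, int w) {
//     int lanes[4] = { x, y, z, w };   // lane 0 at the lowest address, as on rsp.
//     return _mm_loadu_si128(reinterpret_cast<const __m128i*>(lanes));
//   }
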
void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  Register exponent = rdx;
  ASSERT(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(exponent));
  ASSERT(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(xmm1));
  ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(exponent, &no_deopt, Label::kNear);
    __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
    DeoptimizeIf(not_equal, instr->environment());
    __ bind(&no_deopt);
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}

void LCodeGen::DoMathExp(LMathExp* instr) {
  XMMRegister input = ToDoubleRegister(instr->value());
  XMMRegister result = ToDoubleRegister(instr->result());
  XMMRegister temp0 = double_scratch0();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
}

void LCodeGen::DoMathLog(LMathLog* instr) {
  ASSERT(instr->value()->Equals(instr->result()));
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  XMMRegister xmm_scratch = double_scratch0();
  Label positive, done, zero;
  __ xorps(xmm_scratch, xmm_scratch);
  __ ucomisd(input_reg, xmm_scratch);
  __ j(above, &positive, Label::kNear);
  __ j(not_carry, &zero, Label::kNear);
  ExternalReference nan =
      ExternalReference::address_of_canonical_non_hole_nan();
  Operand nan_operand = masm()->ExternalOperand(nan);
  __ movsd(input_reg, nan_operand);
  __ jmp(&done, Label::kNear);
  __ bind(&zero);
  ExternalReference ninf =
      ExternalReference::address_of_negative_infinity();
  Operand ninf_operand = masm()->ExternalOperand(ninf);
  __ movsd(input_reg, ninf_operand);
  __ jmp(&done, Label::kNear);
  __ bind(&positive);
  __ fldln2();
  __ subq(rsp, Immediate(kDoubleSize));
  __ movsd(Operand(rsp, 0), input_reg);
  __ fld_d(Operand(rsp, 0));
  __ fyl2x();
  __ fstp_d(Operand(rsp, 0));
  __ movsd(input_reg, Operand(rsp, 0));
  __ addq(rsp, Immediate(kDoubleSize));
  __ bind(&done);
}

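// The x87 sequence above relies on fyl2x computing st1 * log2(st0) with
// ln(2) preloaded, i.e. ln(x) = ln(2) * log2(x). A hedged scalar sketch
// (not V8 code) of the same identity:
//
//   #include <cmath>
//   double ln_via_log2(double x) {
//     return std::log(2.0) * std::log2(x);  // equals std::log(x)
//   }
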
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->function()).is(rdi));
  ASSERT(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(rdi, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(),
                      instr,
                      RDI_CONTAINS_TARGET);
  }
}

void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->function()).is(rdi));
  ASSERT(ToRegister(instr->result()).is(rax));

  int arity = instr->arity();
  CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->constructor()).is(rdi));
  ASSERT(ToRegister(instr->result()).is(rax));

  __ Set(rax, instr->arity());
  // No cell in rbx for construct type feedback in optimized code.
  Handle<Object> undefined_value(isolate()->factory()->undefined_value());
  __ Move(rbx, undefined_value);
  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}

void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->constructor()).is(rdi));
  ASSERT(ToRegister(instr->result()).is(rax));

  __ Set(rax, instr->arity());
  __ Move(rbx, factory()->undefined_value());
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // A zero length keeps the array packed; any other single argument may
      // require the holey variant, so look at the first argument to decide.
      __ movp(rcx, Operand(rsp, 0));
      __ testq(rcx, rcx);
      __ j(zero, &packed_case, Label::kNear);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
      CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
      __ jmp(&done, Label::kNear);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  }
}

void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}

void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
  __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}

void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ lea(result, Operand(base, ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ lea(result, Operand(base, offset, times_1, 0));
  }
}

void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  HStoreNamedField* hinstr = instr->hydrogen();
  Representation representation = instr->representation();

  HObjectAccess access = hinstr->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    ASSERT(!hinstr->NeedsWriteBarrier());
    Register value = ToRegister(instr->value());
    if (instr->object()->IsConstantOperand()) {
      ASSERT(value.is(rax));
      ASSERT(!access.representation().IsSpecialization());
      LConstantOperand* object = LConstantOperand::cast(instr->object());
      __ store_rax(ToExternalReference(object));
    } else {
      Register object = ToRegister(instr->object());
      __ Store(MemOperand(object, offset), value, representation);
    }
    return;
  }

  Register object = ToRegister(instr->object());
  Handle<Map> transition = instr->transition();

  if (FLAG_track_fields && representation.IsSmi()) {
    if (instr->value()->IsConstantOperand()) {
      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
      if (!IsInteger32Constant(operand_value) &&
          !IsSmiConstant(operand_value)) {
        DeoptimizeIf(no_condition, instr->environment());
      }
    }
  } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
    if (instr->value()->IsConstantOperand()) {
      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
      if (IsInteger32Constant(operand_value)) {
        DeoptimizeIf(no_condition, instr->environment());
      }
    } else {
      if (!hinstr->value()->type().IsHeapObject()) {
        Register value = ToRegister(instr->value());
        Condition cc = masm()->CheckSmi(value);
        DeoptimizeIf(cc, instr->environment());
      }
    }
  } else if (representation.IsDouble()) {
    ASSERT(transition.is_null());
    ASSERT(access.IsInobject());
    ASSERT(!hinstr->NeedsWriteBarrier());
    XMMRegister value = ToDoubleRegister(instr->value());
    __ movsd(FieldOperand(object, offset), value);
    return;
  }

  if (!transition.is_null()) {
    if (!hinstr->NeedsWriteBarrierForMap()) {
      __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
    } else {
      Register temp = ToRegister(instr->temp());
      __ Move(kScratchRegister, transition);
      __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
      // Update the write barrier for the map field.
      __ RecordWriteField(object,
                          HeapObject::kMapOffset,
                          kScratchRegister,
                          temp,
                          kSaveFPRegs,
                          OMIT_REMEMBERED_SET,
                          OMIT_SMI_CHECK);
    }
  }

  // Do the store.
  SmiCheck check_needed = hinstr->value()->IsHeapObject()
      ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;

  Register write_register = object;
  if (!access.IsInobject()) {
    write_register = ToRegister(instr->temp());
    __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
  }

  if (representation.IsSmi() &&
      hinstr->value()->representation().IsInteger32()) {
    ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    // Store int value directly to upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
    offset += kPointerSize / 2;
    representation = Representation::Integer32();
  }

  Operand operand = FieldOperand(write_register, offset);

  if (instr->value()->IsRegister()) {
    Register value = ToRegister(instr->value());
    __ Store(operand, value, representation);
  } else {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (IsInteger32Constant(operand_value)) {
      ASSERT(!hinstr->NeedsWriteBarrier());
      int32_t value = ToInteger32(operand_value);
      if (representation.IsSmi()) {
        __ Move(operand, Smi::FromInt(value));
      } else {
        __ movl(operand, Immediate(value));
      }
    } else {
      Handle<Object> handle_value = ToHandle(operand_value);
      ASSERT(!hinstr->NeedsWriteBarrier());
      __ Move(operand, handle_value);
    }
  }

  if (hinstr->NeedsWriteBarrier()) {
    Register value = ToRegister(instr->value());
    Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
    // Update the write barrier for the object for in-object properties.
    __ RecordWriteField(write_register,
                        offset,
                        value,
                        temp,
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        check_needed);
  }
}

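// A hedged sketch (not V8 code) of the smi trick used above: on x64 a smi
// keeps its 32-bit payload in the upper half of a 64-bit word, with the tag
// and shift filling the lower 32 bits with zeros (hence the STATIC_ASSERTs).
// On a little-endian machine, writing an int32 at offset + 4 therefore
// produces exactly the tagged value:
//
//   #include <assert.h>
//   #include <stdint.h>
//   #include <string.h>
//   int main() {
//     int64_t smi = 0;
//     int32_t payload = 42;
//     memcpy(reinterpret_cast<char*>(&smi) + 4, &payload, 4);  // upper half
//     assert(smi == (static_cast<int64_t>(42) << 32));  // 42 tagged as a smi
//     return 0;
//   }
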
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->object()).is(rdx));
  ASSERT(ToRegister(instr->value()).is(rax));

  __ Move(rcx, instr->hydrogen()->name());
  Handle<Code> ic = StoreIC::initialize_stub(isolate(),
                                             instr->strict_mode_flag());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
    Label done;
    __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, check->environment());
  }
}

void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  if (instr->hydrogen()->skip_check()) return;

  if (instr->length()->IsRegister()) {
    Register reg = ToRegister(instr->length());
    if (!instr->hydrogen()->length()->representation().IsSmi()) {
      __ AssertZeroExtended(reg);
    }
    if (instr->index()->IsConstantOperand()) {
      int32_t constant_index =
          ToInteger32(LConstantOperand::cast(instr->index()));
      if (instr->hydrogen()->length()->representation().IsSmi()) {
        __ Cmp(reg, Smi::FromInt(constant_index));
      } else {
        __ cmpq(reg, Immediate(constant_index));
      }
    } else {
      Register reg2 = ToRegister(instr->index());
      if (!instr->hydrogen()->index()->representation().IsSmi()) {
        __ AssertZeroExtended(reg2);
      }
      __ cmpq(reg, reg2);
    }
  } else {
    Operand length = ToOperand(instr->length());
    if (instr->index()->IsConstantOperand()) {
      int32_t constant_index =
          ToInteger32(LConstantOperand::cast(instr->index()));
      if (instr->hydrogen()->length()->representation().IsSmi()) {
        __ Cmp(length, Smi::FromInt(constant_index));
      } else {
        __ cmpq(length, Immediate(constant_index));
      }
    } else {
      __ cmpq(length, ToRegister(instr->index()));
    }
  }
  Condition condition =
      instr->hydrogen()->allow_equality() ? below : below_equal;
  ApplyCheckIf(condition, instr);
}

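// A hedged scalar sketch (not V8 code) of the predicate checked above: the
// generated code compares length against index and fails on `below`
// (length < index) when equality is allowed, or `below_equal`
// (length <= index) when it is not.
//
//   #include <stdint.h>
//   static inline bool bounds_check_fails(uint32_t length, uint32_t index,
//                                         bool allow_equality) {
//     return allow_equality ? (length < index) : (length <= index);
//   }
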
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand()) {
    Register key_reg = ToRegister(key);
    // Even though the HLoad/StoreKeyedFastElement instructions force
    // the input representation for the key to be an integer, the input
    // gets replaced during bound check elimination with the index
    // argument to the bounds check, which can be tagged, so that case
    // must be handled here, too.
    if (instr->hydrogen()->IsDehoisted()) {
      // Sign extend key because it could be a 32 bit negative value
      // and the dehoisted address computation happens in 64 bits.
      __ movsxlq(key_reg, key_reg);
    }

    HandleExternalArrayOpRequiresPreScale(key, elements_kind);
  }

  int base_offset = instr->is_fixed_typed_array()
      ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
      : 0;
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      elements_kind,
      base_offset,
      instr->additional_index()));

  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == FLOAT32_ELEMENTS) {
    XMMRegister value(ToDoubleRegister(instr->value()));
    __ cvtsd2ss(value, value);
    __ movss(operand, value);
  } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
             elements_kind == FLOAT64_ELEMENTS) {
    __ movsd(operand, ToDoubleRegister(instr->value()));
  } else if (IsSIMD128ElementsKind(elements_kind)) {
    __ movups(operand, ToSIMD128Register(instr->value()));
  } else {
    Register value(ToRegister(instr->value()));
    switch (elements_kind) {
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_INT8_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case INT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ movb(operand, value);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case EXTERNAL_UINT16_ELEMENTS:
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ movw(operand, value);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case EXTERNAL_UINT32_ELEMENTS:
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ movl(operand, value);
        break;
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT32x4_ELEMENTS:
      case EXTERNAL_INT32x4_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FLOAT32x4_ELEMENTS:
      case INT32x4_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  XMMRegister value = ToDoubleRegister(instr->value());
  LOperand* key = instr->key();
  if (!key->IsConstantOperand()) {
    Register key_reg = ToRegister(key);
    // Even though the HLoad/StoreKeyedFastElement instructions force
    // the input representation for the key to be an integer, the
    // input gets replaced during bound check elimination with the index
    // argument to the bounds check, which can be tagged, so that case
    // must be handled here, too.
    if (instr->hydrogen()->IsDehoisted()) {
      // Sign extend key because it could be a 32 bit negative value
      // and the dehoisted address computation happens in 64 bits.
      __ movsxlq(key_reg, key_reg);
    }
  }

  if (instr->NeedsCanonicalization()) {
    Label have_value;

    __ ucomisd(value, value);
    __ j(parity_odd, &have_value, Label::kNear);  // Not NaN: nothing to do.

    // The value is NaN: replace it with the canonical NaN bit pattern
    // before storing.
    __ Set(kScratchRegister, BitCast<uint64_t>(
        FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
    __ movq(value, kScratchRegister);

    __ bind(&have_value);
  }

  Operand double_store_operand = BuildFastArrayOperand(
      instr->elements(),
      key,
      FAST_DOUBLE_ELEMENTS,
      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
      instr->additional_index());

  __ movsd(double_store_operand, value);
}

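// A hedged sketch (not V8 code) of the canonicalization above: a
// FixedDoubleArray must never hold a non-canonical NaN (the hole is itself a
// NaN bit pattern), and a NaN is the only value for which x != x, which is
// what the ucomisd parity test detects.
//
//   static inline double canonicalize(double v, double canonical_nan) {
//     return v != v ? canonical_nan : v;  // v != v is true only for NaN
//   }
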
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  HStoreKeyed* hinstr = instr->hydrogen();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand()) {
    Register key_reg = ToRegister(key);
    // Even though the HLoad/StoreKeyedFastElement instructions force
    // the input representation for the key to be an integer, the
    // input gets replaced during bound check elimination with the index
    // argument to the bounds check, which can be tagged, so that case
    // must be handled here, too.
    if (hinstr->IsDehoisted()) {
      // Sign extend key because it could be a 32 bit negative value
      // and the dehoisted address computation happens in 64 bits.
      __ movsxlq(key_reg, key_reg);
    }
  }

  int offset = FixedArray::kHeaderSize - kHeapObjectTag;
  Representation representation = hinstr->value()->representation();

  if (representation.IsInteger32()) {
    ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
    // Store int value directly to upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
    offset += kPointerSize / 2;
  }

  Operand operand =
      BuildFastArrayOperand(instr->elements(),
                            key,
                            FAST_ELEMENTS,
                            offset,
                            instr->additional_index());

  if (instr->value()->IsRegister()) {
    __ Store(operand, ToRegister(instr->value()), representation);
  } else {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (IsInteger32Constant(operand_value)) {
      int32_t value = ToInteger32(operand_value);
      if (representation.IsSmi()) {
        __ Move(operand, Smi::FromInt(value));
      } else {
        __ movl(operand, Immediate(value));
      }
    } else {
      Handle<Object> handle_value = ToHandle(operand_value);
      __ Move(operand, handle_value);
    }
  }

  if (hinstr->NeedsWriteBarrier()) {
    Register elements = ToRegister(instr->elements());
    ASSERT(instr->value()->IsRegister());
    Register value = ToRegister(instr->value());
    ASSERT(!key->IsConstantOperand());
    SmiCheck check_needed = hinstr->value()->IsHeapObject()
        ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    Register key_reg(ToRegister(key));
    __ lea(key_reg, operand);
    __ RecordWrite(elements,
                   key_reg,
                   value,
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed);
  }
}

void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  if (instr->is_typed_elements()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}

void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->object()).is(rdx));
  ASSERT(ToRegister(instr->key()).is(rcx));
  ASSERT(ToRegister(instr->value()).is(rax));

  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
  __ j(not_equal, &not_applicable);
  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
    __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
    // Write barrier.
    ASSERT_NE(instr->temp(), NULL);
    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                        ToRegister(instr->temp()), kDontSaveFPRegs);
  } else {
    ASSERT(ToRegister(instr->context()).is(rsi));
    PushSafepointRegistersScope scope(this);
    if (!object_reg.is(rax)) {
      __ movp(rax, object_reg);
    }
    __ Move(rbx, to_map);
    TransitionElementsKindStub stub(from_kind, to_kind);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  }
  __ bind(&not_applicable);
}

void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(equal, instr->environment());
  __ bind(&no_memento_found);
}

void LCodeGen::DoStringAdd(LStringAdd* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->left()).is(rdx));
  ASSERT(ToRegister(instr->right()).is(rax));
  StringAddStub stub(instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharCodeAt(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
  if (instr->index()->IsConstantOperand()) {
    int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Push(Smi::FromInt(const_index));
  } else {
    Register index = ToRegister(instr->index());
    __ Integer32ToSmi(index, index);
    __ Push(index);
  }
  CallRuntimeFromDeferred(
      Runtime::kStringCharCodeAt, 2, instr, instr->context());
  __ AssertSmi(rax);
  __ SmiToInteger32(rax, rax);
  __ StoreToSafepointRegisterSlot(result, rax);
}

void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  ASSERT(!char_code.is(result));

  __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
  __ j(above, deferred->entry());
  __ movsxlq(char_code, char_code);
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ movp(result, FieldOperand(result,
                               char_code, times_pointer_size,
                               FixedArray::kHeaderSize));
  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
  __ j(equal, deferred->entry());
  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Integer32ToSmi(char_code, char_code);
  __ Push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, rax);
}

void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  if (input->IsRegister()) {
    __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
  } else {
    __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
  }
}

void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* output = instr->result();
  __ Integer32ToSmi(ToRegister(output), ToRegister(input));
  if (!instr->hydrogen()->value()->HasRange() ||
      !instr->hydrogen()->value()->range()->IsInSmiRange()) {
    DeoptimizeIf(overflow, instr->environment());
  }
}

void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  LOperand* temp = instr->temp();

  __ LoadUint32(ToDoubleRegister(output),
                ToRegister(input),
                ToDoubleRegister(temp));
}

void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* output = instr->result();
  if (!instr->hydrogen()->value()->HasRange() ||
      !instr->hydrogen()->value()->range()->IsInSmiRange() ||
      instr->hydrogen()->value()->range()->upper() == kMaxInt) {
    // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
    // interval, so we treat kMaxInt as a sentinel for this entire interval.
    __ testl(ToRegister(input), Immediate(0x80000000));
    DeoptimizeIf(not_zero, instr->environment());
  }
  __ Integer32ToSmi(ToRegister(output), ToRegister(input));
}

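// A hedged sketch (not V8 code) of the guard above: a uint32 with bit 31 set
// exceeds kMaxInt and cannot be represented as a 32-bit smi payload, so the
// generated code tests that bit and deoptimizes when it is set.
//
//   #include <stdint.h>
//   static inline bool fits_smi_payload(uint32_t v) {
//     return (v & 0x80000000u) == 0;  // i.e. v <= INT32_MAX
//   }
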
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  __ Integer32ToSmi(reg, reg);
}

void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagU(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ cmpl(reg, Immediate(Smi::kMaxValue));
  __ j(above, deferred->entry());
  __ Integer32ToSmi(reg, reg);
  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
  Label done, slow;
  Register reg = ToRegister(instr->value());
  Register tmp = reg.is(rax) ? rcx : rax;
  XMMRegister temp_xmm = ToDoubleRegister(instr->temp());

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  // Load value into temp_xmm which will be preserved across potential call to
  // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
  // XMM registers on x64).
  XMMRegister xmm_scratch = double_scratch0();
  __ LoadUint32(temp_xmm, reg, xmm_scratch);

  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, &slow);
    __ jmp(&done, Label::kNear);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  // Put a valid pointer value in the stack slot where the result
  // register is stored, as this register is in the pointer map, but contains
  // an integer value.
  __ StoreToSafepointRegisterSlot(reg, Immediate(0));

  // NumberTagU uses the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);

  if (!reg.is(rax)) __ movp(reg, rax);

  // Done. Put the value in temp_xmm into the value of the allocated heap
  // number.
  __ bind(&done);
  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
  __ StoreToSafepointRegisterSlot(reg, reg);
}

void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagD(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  XMMRegister input_reg = ToDoubleRegister(instr->value());
  Register reg = ToRegister(instr->result());
  Register tmp = ToRegister(instr->temp());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}

void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Move(reg, Smi::FromInt(0));

  {
    PushSafepointRegistersScope scope(this);
    // NumberTagD uses the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ movp(kScratchRegister, rax);
  }
  __ movp(reg, kScratchRegister);
}

void LCodeGen::DoDeferredSIMD128ToTagged(LSIMD128ToTagged* instr,
                                         Runtime::FunctionId id) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Move(reg, Smi::FromInt(0));

  {
    PushSafepointRegistersScope scope(this);
    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(id);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ movp(kScratchRegister, rax);
  }
  __ movp(reg, kScratchRegister);
}

template<class T>
void LCodeGen::HandleSIMD128ToTagged(LSIMD128ToTagged* instr) {
  class DeferredSIMD128ToTagged V8_FINAL : public LDeferredCode {
   public:
    DeferredSIMD128ToTagged(LCodeGen* codegen,
                            LSIMD128ToTagged* instr,
                            Runtime::FunctionId id)
        : LDeferredCode(codegen), instr_(instr), id_(id) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LSIMD128ToTagged* instr_;
    Runtime::FunctionId id_;
  };

  XMMRegister input_reg = ToSIMD128Register(instr->value());
  Register reg = ToRegister(instr->result());
  Register tmp = ToRegister(instr->temp());

  DeferredSIMD128ToTagged* deferred =
      new(zone()) DeferredSIMD128ToTagged(this, instr,
          static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()));
  if (FLAG_inline_new) {
    __ AllocateSIMDHeapObject(T::kSize, reg, tmp, deferred->entry(),
        static_cast<Heap::RootListIndex>(T::kMapRootIndex()));
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ movups(FieldOperand(reg, T::kValueOffset), input_reg);
}

void LCodeGen::DoSIMD128ToTagged(LSIMD128ToTagged* instr) {
  if (instr->value()->IsFloat32x4Register()) {
    HandleSIMD128ToTagged<Float32x4>(instr);
  } else {
    ASSERT(instr->value()->IsInt32x4Register());
    HandleSIMD128ToTagged<Int32x4>(instr);
  }
}

void LCodeGen::DoSmiTag(LSmiTag* instr) {
  ASSERT(instr->value()->Equals(instr->result()));
  Register input = ToRegister(instr->value());
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  __ Integer32ToSmi(input, input);
}

void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  ASSERT(instr->value()->Equals(instr->result()));
  Register input = ToRegister(instr->value());
  if (instr->needs_check()) {
    Condition is_smi = __ CheckSmi(input);
    DeoptimizeIf(NegateCondition(is_smi), instr->environment());
  } else {
    __ AssertSmi(input);
  }
  __ SmiToInteger32(input, input);
}

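// A hedged sketch (not V8 code) of the x64 smi representation the tag/untag
// helpers above operate on: the tag bit (bit 0) is clear for smis and set for
// heap object pointers, and the 32-bit payload occupies the upper half of the
// word (consistent with the STATIC_ASSERTs on kSmiTagSize + kSmiShiftSize).
//
//   #include <stdint.h>
//   static inline bool is_smi(intptr_t tagged) { return (tagged & 1) == 0; }
//   static inline int32_t smi_value(intptr_t tagged) {
//     return static_cast<int32_t>(tagged >> 32);  // payload in the upper half
//   }
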
void LCodeGen::EmitNumberUntagD(Register input_reg,
                                XMMRegister result_reg,
                                bool can_convert_undefined_to_nan,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env,
                                NumberUntagDMode mode) {
  Label convert, load_smi, done;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);

    // Heap number map check.
    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);

    // On x64 it is safe to load at the heap number offset before evaluating
    // the map check, since all heap objects are at least two words long.
    __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));

    if (can_convert_undefined_to_nan) {
      __ j(not_equal, &convert, Label::kNear);
    } else {
      DeoptimizeIf(not_equal, env);
    }

    if (deoptimize_on_minus_zero) {
      XMMRegister xmm_scratch = double_scratch0();
      __ xorps(xmm_scratch, xmm_scratch);
      __ ucomisd(xmm_scratch, result_reg);
      __ j(not_equal, &done, Label::kNear);
      __ movmskpd(kScratchRegister, result_reg);
      __ testq(kScratchRegister, Immediate(1));
      DeoptimizeIf(not_zero, env);
    }
    __ jmp(&done, Label::kNear);

    if (can_convert_undefined_to_nan) {
      __ bind(&convert);

      // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
      __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
      DeoptimizeIf(not_equal, env);

      __ xorps(result_reg, result_reg);
      __ divsd(result_reg, result_reg);
      __ jmp(&done, Label::kNear);
    }
  } else {
    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
  }

  // Smi to XMM conversion.
  __ bind(&load_smi);
  __ SmiToInteger32(kScratchRegister, input_reg);
  __ Cvtlsi2sd(result_reg, kScratchRegister);
  __ bind(&done);
}
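

// Slow path for DoTaggedToI below, reached when the input is not a smi. For
// truncating conversions, heap numbers are truncated and the oddballs
// undefined and false become 0 while true becomes 1; any other value
// deoptimizes. Non-truncating conversions go through TaggedToI and
// deoptimize if the value cannot be converted exactly.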
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
  Register input_reg = ToRegister(instr->value());

  if (instr->truncating()) {
    Label no_heap_number, check_bools, check_false;

    // Heap number map check.
    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &no_heap_number, Label::kNear);
    __ TruncateHeapNumberToI(input_reg, input_reg);
    __ jmp(done);

    __ bind(&no_heap_number);
    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
    __ j(not_equal, &check_bools, Label::kNear);
    __ Set(input_reg, 0);
    __ jmp(done);

    __ bind(&check_bools);
    __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
    __ j(not_equal, &check_false, Label::kNear);
    __ Set(input_reg, 1);
    __ jmp(done);

    __ bind(&check_false);
    __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
    __ RecordComment("Deferred TaggedToI: cannot truncate");
    DeoptimizeIf(not_equal, instr->environment());
    __ Set(input_reg, 0);
  } else {
    Label bailout;
    XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
    __ TaggedToI(input_reg, input_reg, xmm_temp,
        instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);

    __ jmp(done);
    __ bind(&bailout);
    DeoptimizeIf(no_condition, instr->environment());
  }
}
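

// Converts a tagged value to an int32 in place: smis are untagged inline,
// and all other inputs are handled by DoDeferredTaggedToI above.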
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredTaggedToI(instr_, done());
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));
  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiToInteger32(input_reg, input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
    __ JumpIfNotSmi(input_reg, deferred->entry());
    __ SmiToInteger32(input_reg, input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  XMMRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->can_convert_undefined_to_nan(),
                   instr->hydrogen()->deoptimize_on_minus_zero(),
                   instr->environment(),
                   mode);
}
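

// Unboxes a tagged SIMD128 heap object of type T into an XMM register,
// deoptimizing if the input is a smi or if its map does not match T's map.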
template<class T>
void LCodeGen::HandleTaggedToSIMD128(LTaggedToSIMD128* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsSIMD128Register());

  Register input_reg = ToRegister(input);
  XMMRegister result_reg = ToSIMD128Register(result);

  Condition cc = masm()->CheckSmi(input_reg);
  DeoptimizeIf(cc, instr->environment());
  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                 static_cast<Heap::RootListIndex>(T::kMapRootIndex()));
  DeoptimizeIf(not_equal, instr->environment());
  __ movups(result_reg, FieldOperand(input_reg, T::kValueOffset));
}


void LCodeGen::DoTaggedToSIMD128(LTaggedToSIMD128* instr) {
  if (instr->representation().IsFloat32x4()) {
    HandleTaggedToSIMD128<Float32x4>(instr);
  } else {
    ASSERT(instr->representation().IsInt32x4());
    HandleTaggedToSIMD128<Int32x4>(instr);
  }
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, input_reg);
  } else {
    Label bailout, done;
    XMMRegister xmm_scratch = double_scratch0();
    __ DoubleToI(result_reg, input_reg, xmm_scratch,
        instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);

    __ jmp(&done, Label::kNear);
    __ bind(&bailout);
    DeoptimizeIf(no_condition, instr->environment());
    __ bind(&done);
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  Label bailout, done;
  XMMRegister xmm_scratch = double_scratch0();
  __ DoubleToI(result_reg, input_reg, xmm_scratch,
      instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);

  __ jmp(&done, Label::kNear);
  __ bind(&bailout);
  DeoptimizeIf(no_condition, instr->environment());
  __ bind(&done);

  __ Integer32ToSmi(result_reg, result_reg);
  DeoptimizeIf(overflow, instr->environment());
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  Condition cc = masm()->CheckSmi(ToRegister(input));
  DeoptimizeIf(NegateCondition(cc), instr->environment());
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    LOperand* input = instr->value();
    Condition cc = masm()->CheckSmi(ToRegister(input));
    DeoptimizeIf(cc, instr->environment());
  }
}
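

// Deoptimizes unless the instance type of the input's map satisfies the
// check. Interval checks compare the type against [first, last]; otherwise
// the type is masked and compared against a tag, which needs only a single
// testb when the mask is a power of two.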
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());

  __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
            Immediate(static_cast<int8_t>(first)));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr->environment());
    } else {
      DeoptimizeIf(below, instr->environment());
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
                Immediate(static_cast<int8_t>(last)));
        DeoptimizeIf(above, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
               Immediate(mask));
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
    } else {
      __ movzxbl(kScratchRegister,
                 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
      __ andb(kScratchRegister, Immediate(mask));
      __ cmpb(kScratchRegister, Immediate(tag));
      DeoptimizeIf(not_equal, instr->environment());
    }
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  __ Cmp(reg, instr->hydrogen()->object().handle());
  DeoptimizeIf(not_equal, instr->environment());
}
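

// Slow path for DoCheckMaps below: asks the runtime to migrate the instance
// to its up-to-date map, and deoptimizes if migration fails (the runtime
// returns a smi failure sentinel in that case).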
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ Push(object);
    __ Set(rsi, 0);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);

    __ testq(rax, Immediate(kSmiTagMask));
  }
  DeoptimizeIf(zero, instr->environment());
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->CanOmitMapChecks()) return;

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->has_migration_target()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  UniqueSet<Map> map_set = instr->hydrogen()->map_set();

  Label success;
  for (int i = 0; i < map_set.size() - 1; i++) {
    Handle<Map> map = map_set.at(i).handle();
    __ CompareMap(reg, map);
    __ j(equal, &success, Label::kNear);
  }

  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
  __ CompareMap(reg, map);
  if (instr->hydrogen()->has_migration_target()) {
    __ j(not_equal, deferred->entry());
  } else {
    DeoptimizeIf(not_equal, instr->environment());
  }

  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
  XMMRegister xmm_scratch = double_scratch0();
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}
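

// Clamps a tagged value to uint8: smis are clamped directly, heap numbers
// go through the double-clamping path, undefined clamps to zero, and any
// other value deoptimizes.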
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register input_reg = ToRegister(instr->unclamped());
  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
  XMMRegister xmm_scratch = double_scratch0();
  Label is_smi, done, heap_number;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  __ JumpIfSmi(input_reg, &is_smi, dist);

  // Check for heap number.
  __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ Cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr->environment());
  __ movp(input_reg, Immediate(0));
  __ jmp(&done, Label::kNear);

  // Heap number.
  __ bind(&heap_number);
  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // Smi.
  __ bind(&is_smi);
  __ SmiToInteger32(input_reg, input_reg);
  __ ClampUint8(input_reg);

  __ bind(&done);
}
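

// Allocates a heap object of the requested size. Constant sizes up to the
// regular-object limit and register sizes are attempted inline; oversized
// or failed requests fall back to the runtime via DoDeferredAllocate below.
// Optionally the new object is prefilled with one-pointer filler maps so
// that the heap stays iterable.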
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate V8_FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredAllocate(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
    } else {
      __ jmp(deferred->entry());
    }
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ movl(temp, Immediate((size / kPointerSize) - 1));
    } else {
      temp = ToRegister(instr->size());
      __ sar(temp, Immediate(kPointerSizeLog2));
      __ decl(temp);
    }
    Label loop;
    __ bind(&loop);
    __ Move(FieldOperand(result, temp, times_pointer_size, 0),
        isolate()->factory()->one_pointer_filler_map());
    __ decl(temp);
    __ j(not_zero, &loop);
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    __ Integer32ToSmi(size, size);
    __ Push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Push(Smi::FromInt(size));
  }

  int flags = 0;
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(rax));
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}
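

// Materializes a regexp literal. The boilerplate object is created by the
// runtime on first use and cached in the literals array; afterwards each
// evaluation of the literal shallow-copies it (an allocation followed by an
// unrolled field copy).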
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  Label materialized;
  // Registers will be used as follows:
  // rcx = literals array.
  // rbx = regexp literal.
  // rax = regexp literal clone.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ Move(rcx, instr->hydrogen()->literals());
  __ movp(rbx, FieldOperand(rcx, literal_offset));
  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &materialized, Label::kNear);

  // Create regexp literal using runtime function.
  // Result will be in rax.
  __ Push(rcx);
  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
  __ Push(instr->hydrogen()->pattern());
  __ Push(instr->hydrogen()->flags());
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ movp(rbx, rax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated, Label::kNear);

  __ bind(&runtime_allocate);
  __ Push(rbx);
  __ Push(Smi::FromInt(size));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ Pop(rbx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll the copy loop once for better throughput.)
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ movp(rdx, FieldOperand(rbx, i));
    __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
    __ movp(FieldOperand(rax, i), rdx);
    __ movp(FieldOperand(rax, i + kPointerSize), rcx);
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
    __ movp(FieldOperand(rax, size - kPointerSize), rdx);
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(instr->hydrogen()->language_mode(),
                            instr->hydrogen()->is_generator());
    __ Move(rbx, instr->hydrogen()->shared_info());
    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  } else {
    __ Push(rsi);
    __ Push(instr->hydrogen()->shared_info());
    __ PushRoot(pretenure ? Heap::kTrueValueRootIndex
                          : Heap::kFalseValueRootIndex);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  LOperand* input = instr->value();
  EmitPushTaggedOperand(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  ASSERT(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    __ Push(ToHandle(LConstantOperand::cast(operand)));
  } else if (operand->IsRegister()) {
    __ push(ToRegister(operand));
  } else {
    __ push(ToOperand(operand));
  }
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Condition final_branch_condition = EmitTypeofIs(instr, input);
  if (final_branch_condition != no_condition) {
    EmitBranch(instr, final_branch_condition);
  }
}
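

// Emits the comparison for a typeof branch and returns the condition on
// which to branch, or no_condition if the type literal matches nothing, in
// which case an unconditional jump to the false target has already been
// emitted.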
Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Handle<String> type_name = instr->type_literal();
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);
  int next_block = GetNextEmittedBlock();

  Label::Distance true_distance = left_block == next_block ? Label::kNear
                                                           : Label::kFar;
  Label::Distance false_distance = right_block == next_block ? Label::kNear
                                                             : Label::kFar;
  Condition final_branch_condition = no_condition;
  if (type_name->Equals(heap()->number_string())) {
    __ JumpIfSmi(input, true_label, true_distance);
    __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);

    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->float32x4_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FLOAT32x4_TYPE, input);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->int32x4_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, INT32x4_TYPE, input);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->string_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label, false_distance);
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else if (type_name->Equals(heap()->symbol_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = equal;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for undetectable objects => true.
    __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = not_zero;

  } else if (type_name->Equals(heap()->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label, true_distance);
    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->object_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    if (!FLAG_harmony_typeof) {
      __ CompareRoot(input, Heap::kNullValueRootIndex);
      __ j(equal, true_label, true_distance);
    }
    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
    __ j(below, false_label, false_distance);
    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ j(above, false_label, false_distance);
    // Check for undetectable objects => false.
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else {
    __ jmp(false_label, false_distance);
  }

  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->temp());

  EmitIsConstructCall(temp);
  EmitBranch(instr, equal);
}


void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ movp(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(not_equal, &check_frame_marker, Label::kNear);
  __ movp(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Smi::FromInt(StackFrame::CONSTRUCT));
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons
  // (the needed return address), even though the implementation of LAZY and
  // EAGER is now identical. When LAZY is eventually completely folded into
  // EAGER, remove the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(no_condition, instr->environment(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(above_equal, &done, Label::kNear);

    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(rsi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(equal, instr->environment());

  Register null_value = rdi;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmpq(rax, null_value);
  DeoptimizeIf(equal, instr->environment());

  Condition cc = masm()->CheckSmi(rax);
  DeoptimizeIf(cc, instr->environment());

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
  DeoptimizeIf(below_equal, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);

  __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ Push(rax);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kMetaMapRootIndex);
  DeoptimizeIf(not_equal, instr->environment());
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Cmp(result, Smi::FromInt(0));
  __ j(not_equal, &load_cache, Label::kNear);
  __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ movp(result,
          FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ movp(result,
          FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  Condition cc = masm()->CheckSmi(result);
  DeoptimizeIf(cc, instr->environment());
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmpq(ToRegister(instr->map()),
          FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr->environment());
}
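

// Loads a field given its smi-encoded index: a non-negative index selects
// an in-object field, while a negative index selects a slot in the
// out-of-object properties array (offset by one, as noted below).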
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  Label out_of_object, done;
  __ SmiToInteger32(index, index);
  __ cmpl(index, Immediate(0));
  __ j(less, &out_of_object, Label::kNear);
  __ movp(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ negl(index);
  // Index is now equal to out of object property index plus 1.
  __ movp(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64