// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"

#if V8_TARGET_ARCH_X64

#include "x64/lithium-codegen-x64.h"
#include "code-stubs.h"
#include "stub-cache.h"
#include "hydrogen-osr.h"

namespace v8 {
namespace internal {

// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}

  virtual void AfterCall() const V8_OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateJumpTable() &&
      GenerateSafepointTable();
}

void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);
}

void LChunkBuilder::Abort(BailoutReason reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}

#ifdef _MSC_VER
// On Windows, large stack frames have to be committed a page at a time, so
// write one word into each page of the freshly reserved area.
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ movp(Operand(rsp, offset), rax);
  }
}
#endif

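// Worked example for MakeSureStackPagesMapped above (illustrative, not from
// the original source): with kPageSize == 4 * KB and offset == 10 * KB, the
// loop stores rax at rsp + 6KB and rsp + 2KB, one write per page of the
// newly reserved stack area, so the OS maps each page before it is used.
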
void LCodeGen::SaveCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ movsd(MemOperand(rsp, count * kDoubleSize),
             XMMRegister::FromAllocationIndex(save_iterator.Current()));
    save_iterator.Advance();
    count++;
  }
}

void LCodeGen::RestoreCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
             MemOperand(rsp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}

bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ int3();
    }
#endif

    // Sloppy mode functions need to replace the receiver with the global proxy
    // when called as functions (without an explicit receiver object).
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      Label ok;
      StackArgumentsAccessor args(rsp, scope()->num_parameters());
      __ movp(rcx, args.GetReceiverOperand());

      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
      __ j(not_equal, &ok, Label::kNear);

      __ movp(rcx, GlobalObjectOperand());
      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));

      __ movp(args.GetReceiverOperand(), rcx);

      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    ASSERT(!frame_is_built_);
    frame_is_built_ = true;
    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
    info()->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      MakeSureStackPagesMapped(slots * kPointerSize);
#endif
      __ Push(rax);
      __ Set(rax, slots);
      __ movq(kScratchRegister, kSlotsZapValue);
      Label loop;
      __ bind(&loop);
      __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
              kScratchRegister);
      __ decl(rax);
      __ j(not_zero, &loop);
      __ Pop(rax);
    } else {
      __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      MakeSureStackPagesMapped(slots * kPointerSize);
#endif
    }

    if (info()->saves_caller_doubles()) {
      SaveCallerDoubles();
    }
  }

  // Possibly allocate a local context.
  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is still in rdi.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ Push(rdi);
      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in rax.  It replaces the context passed to us.
    // It's saved in the stack and kept live in rsi.
    __ movp(rsi, rax);
    __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);

    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ movp(rax, Operand(rbp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ movp(Operand(rsi, context_offset), rax);
        // Update the write barrier. This clobbers rax and rbx.
        __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}

void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 0);
  __ subp(rsp, Immediate(slots * kPointerSize));
}

void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}

void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
  if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
    if (instr->result()->IsRegister()) {
      Register result_reg = ToRegister(instr->result());
      __ movsxlq(result_reg, result_reg);
    } else {
      // Sign extend the 32bit result in the stack slots.
      ASSERT(instr->result()->IsStackSlot());
      Operand src = ToOperand(instr->result());
      __ movsxlq(kScratchRegister, src);
      __ movq(src, kScratchRegister);
    }
  }
}

bool LCodeGen::GenerateJumpTable() {
  Label needs_frame;
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  for (int i = 0; i < jump_table_.length(); i++) {
    __ bind(&jump_table_[i].label);
    Address entry = jump_table_[i].address;
    Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (jump_table_[i].needs_frame) {
      ASSERT(!info()->saves_caller_doubles());
      __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ movp(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
        __ pushq(rbp);
        __ movp(rbp, rsp);
        __ Push(rsi);
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ Move(rsi, Smi::FromInt(StackFrame::STUB));
        __ Push(rsi);
        __ movp(rsi, MemOperand(rsp, kPointerSize));
        __ call(kScratchRegister);
      }
    } else {
      if (info()->saves_caller_doubles()) {
        ASSERT(info()->IsStub());
        RestoreCallerDoubles();
      }
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
  return !is_aborted();
}

bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that rsi isn't trashed.
        __ pushq(rbp);  // Caller's frame pointer.
        __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
        __ Push(Smi::FromInt(StackFrame::STUB));
        __ leap(rbp, Operand(rsp, 2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        frame_is_built_ = false;
        __ movp(rsp, rbp);
        __ popq(rbp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}

bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}

Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToSIMD128Register(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


XMMRegister LCodeGen::ToFloat32x4Register(LOperand* op) const {
  ASSERT(op->IsFloat32x4Register());
  return ToSIMD128Register(op->index());
}


XMMRegister LCodeGen::ToInt32x4Register(LOperand* op) const {
  ASSERT(op->IsInt32x4Register());
  return ToSIMD128Register(op->index());
}


XMMRegister LCodeGen::ToSIMD128Register(LOperand* op) const {
  ASSERT(op->IsFloat32x4Register() || op->IsInt32x4Register());
  return ToSIMD128Register(op->index());
}

bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
      chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
}


bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}

static int ArgumentsOffsetWithoutFrame(int index) {
  ASSERT(index < 0);
  return -(index + 1) * kPointerSize + kPCOnStackSize;
}

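// Worked example for ArgumentsOffsetWithoutFrame above (illustrative, not
// from the original source): the argument at index -1 maps to
// Operand(rsp, kPCOnStackSize), i.e. the slot just above the return address,
// and each more negative index adds another kPointerSize to the offset.
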
Operand LCodeGen::ToOperand(LOperand* op) const {
  // Does not handle registers. In X64 assembler, plain registers are not
  // representable as an Operand.
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot() ||
         op->IsFloat32x4StackSlot() || op->IsInt32x4StackSlot());
  if (NeedsEagerFrame()) {
    return Operand(rbp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}

void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}

void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsFloat32x4StackSlot()) {
    translation->StoreSIMD128StackSlot(op->index(),
                                       Translation::FLOAT32x4_STACK_SLOT);
  } else if (op->IsInt32x4StackSlot()) {
    translation->StoreSIMD128StackSlot(op->index(),
                                       Translation::INT32x4_STACK_SLOT);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsFloat32x4Register()) {
    XMMRegister reg = ToFloat32x4Register(op);
    translation->StoreSIMD128Register(reg, Translation::FLOAT32x4_REGISTER);
  } else if (op->IsInt32x4Register()) {
    XMMRegister reg = ToInt32x4Register(op);
    translation->StoreSIMD128Register(reg, Translation::INT32x4_REGISTER);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               int argc) {
  ASSERT(instr != NULL);
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
}

void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);
  ASSERT(instr->HasPointerMap());

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
}

void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(rsi)) {
      __ movp(rsi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ movp(rsi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}

void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, environment->zone());
  }
}

void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ pushfq();
    __ pushq(rax);
    Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
    __ movl(rax, count_operand);
    __ subl(rax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ movl(rax, Immediate(FLAG_deopt_every_n_times));
    __ movl(count_operand, rax);
    __ popq(rax);
    __ popfq();
    ASSERT(frame_is_built_);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);

    __ bind(&no_deopt);
    __ movl(count_operand, rax);
    __ popq(rax);
    __ popfq();
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) {
      __ j(NegateCondition(cc), &done, Label::kNear);
    }
    __ int3();
    __ bind(&done);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (cc == no_condition && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        jump_table_.last().address != entry ||
        jump_table_.last().needs_frame != !frame_is_built_ ||
        jump_table_.last().bailout_type != bailout_type) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}

void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(cc, environment, bailout_type);
}

void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}

int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}

void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}

void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode, int argc) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), argc, Safepoint::kLazyDeopt);
  }
}

void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(kind == expected_safepoint_kind_);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();

  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}

void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}

static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}

void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}

void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}

void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}

void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->result()).is(rax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}

void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}

void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ testl(dividend, dividend);
    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
    // Note that this is correct even for kMinInt operands.
    __ negl(dividend);
    __ andl(dividend, Immediate(mask));
    __ negl(dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(zero, instr->environment());
    }
    __ jmp(&done, Label::kNear);
  }

  __ bind(&dividend_is_not_negative);
  __ andl(dividend, Immediate(mask));
  __ bind(&done);
}

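// Worked example for DoModByPowerOf2I above (illustrative, not from the
// original source): for divisor == +8 or -8 the mask is 7, so a positive
// dividend like 13 yields 13 & 7 == 5 == 13 % 8, while a negative dividend
// like -13 is negated to 13, masked to 5, and negated back to -5, matching
// the truncated modulus semantics of JavaScript (-13 % 8 == -5).
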
void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(ToRegister(instr->result()).is(rax));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr->environment());
    return;
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  __ imull(rdx, rdx, Immediate(Abs(divisor)));
  __ movl(rax, dividend);
  __ subl(rax, rdx);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ j(not_zero, &remainder_not_zero, Label::kNear);
    __ cmpl(dividend, Immediate(0));
    DeoptimizeIf(less, instr->environment());
    __ bind(&remainder_not_zero);
  }
}

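// Worked example for DoModByConstI above (illustrative, not from the
// original source): TruncatingDiv leaves trunc(dividend / |divisor|) in rdx,
// so for dividend == -7 and divisor == 3: rdx = -2, rdx * 3 = -6, and
// rax = -7 - (-6) = -1, the truncated remainder. The result's sign follows
// the dividend, which is why only a negative dividend with a zero remainder
// can produce the -0 that the bailout check guards against.
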
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();

  Register left_reg = ToRegister(instr->left());
  ASSERT(left_reg.is(rax));
  Register right_reg = ToRegister(instr->right());
  ASSERT(!right_reg.is(rax));
  ASSERT(!right_reg.is(rdx));
  Register result_reg = ToRegister(instr->result());
  ASSERT(result_reg.is(rdx));
  Label done;

  // Check for x % 0, idiv would signal a divide error. We have to
  // deopt in this case because we can't return a NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(right_reg, right_reg);
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for kMinInt % -1, idiv would signal a divide error. We
  // have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ cmpl(left_reg, Immediate(kMinInt));
    __ j(not_zero, &no_overflow_possible, Label::kNear);
    __ cmpl(right_reg, Immediate(-1));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ Set(result_reg, 0);
      __ jmp(&done, Label::kNear);
    }
    __ bind(&no_overflow_possible);
  }

  // Sign extend dividend in eax into edx:eax, since we are using only the low
  // 32 bits of the values.
  __ cdq();

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive_left;
    __ testl(left_reg, left_reg);
    __ j(not_sign, &positive_left, Label::kNear);
    __ idivl(right_reg);
    __ testl(result_reg, result_reg);
    DeoptimizeIf(zero, instr->environment());
    __ jmp(&done, Label::kNear);
    __ bind(&positive_left);
  }
  __ idivl(right_reg);
  __ bind(&done);
}

void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(dividend.is(ToRegister(instr->result())));

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  if (divisor == 1) return;
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ sarl(dividend, Immediate(shift));
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  Label not_kmin_int, done;
  __ negl(dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(zero, instr->environment());
  }
  if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    // Note that we could emit branch-free code, but that would need one more
    // register.
    __ j(no_overflow, &not_kmin_int, Label::kNear);
    if (divisor == -1) {
      DeoptimizeIf(no_condition, instr->environment());
    } else {
      __ movl(dividend, Immediate(kMinInt / divisor));
      __ jmp(&done, Label::kNear);
    }
  }
  __ bind(&not_kmin_int);
  __ sarl(dividend, Immediate(shift));
  __ bind(&done);
}

void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(ToRegister(instr->result()).is(rdx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr->environment());
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr->environment());
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(dividend, Abs(divisor));
    if (divisor < 0) __ negl(rdx);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp3());
  ASSERT(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
  Label needs_adjustment, done;
  __ cmpl(dividend, Immediate(0));
  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ negl(rdx);
  __ jmp(&done, Label::kNear);
  __ bind(&needs_adjustment);
  __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
  __ TruncatingDiv(temp, Abs(divisor));
  if (divisor < 0) __ negl(rdx);
  __ decl(rdx);
  __ bind(&done);
}

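// Worked example for DoFlooringDivByConstI above (illustrative, not from the
// original source): floor(-7 / 3) is -3, but trunc(-7 / 3) is -2. When the
// dividend's sign is opposite to the divisor's, the adjustment path computes
// trunc((-7 + 1) / 3) = -2 and then subtracts 1, yielding the flooring
// result -3; when the signs agree, truncation and flooring already coincide.
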
void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
  ASSERT(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr->environment());
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmpl(dividend, Immediate(kMinInt));
    DeoptimizeIf(zero, instr->environment());
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ testl(dividend, Immediate(mask));
    DeoptimizeIf(not_zero, instr->environment());
  }
  __ Move(result, dividend);
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift > 0) {
    // The arithmetic shift is always OK, the 'if' is an optimization only.
    if (shift > 1) __ sarl(result, Immediate(31));
    __ shrl(result, Immediate(32 - shift));
    __ addl(result, dividend);
    __ sarl(result, Immediate(shift));
  }
  if (divisor < 0) __ negl(result);
}

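// Worked example for DoDivByPowerOf2I above (illustrative, not from the
// original source): for divisor == 4 (shift == 2) and dividend == -7, the
// sar/shr pair materializes the bias (dividend < 0 ? 3 : 0), so
// result = (-7 + 3) >> 2 = -1, which is trunc(-7 / 4) rather than the
// floor value -2 that a plain arithmetic shift would produce.
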
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(ToRegister(instr->result()).is(rdx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr->environment());
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ testl(dividend, dividend);
    DeoptimizeIf(zero, instr->environment());
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ negp(rdx);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ movl(rax, rdx);
    __ imull(rax, rax, Immediate(divisor));
    __ subl(rax, dividend);
    DeoptimizeIf(not_equal, instr->environment());
  }
}

void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->left());
  Register divisor = ToRegister(instr->right());
  Register remainder = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());
  ASSERT(dividend.is(rax));
  ASSERT(remainder.is(rdx));
  ASSERT(result.is(rax));
  ASSERT(!divisor.is(rax));
  ASSERT(!divisor.is(rdx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(divisor, divisor);
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ testl(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ testl(divisor, divisor);
    DeoptimizeIf(sign, instr->environment());
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmpl(dividend, Immediate(kMinInt));
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmpl(divisor, Immediate(-1));
    DeoptimizeIf(zero, instr->environment());
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to rdx (= remainder).
  __ cdq();
  __ idivl(divisor);

  if (hdiv->IsMathFloorOfDiv()) {
    Label done;
    __ testl(remainder, remainder);
    __ j(zero, &done, Label::kNear);
    __ xorl(remainder, divisor);
    __ sarl(remainder, Immediate(31));
    __ addl(result, remainder);
    __ bind(&done);
  } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ testl(remainder, remainder);
    DeoptimizeIf(not_zero, instr->environment());
  }
}

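// Worked example for the flooring adjustment in DoDivI above (illustrative,
// not from the original source): for Math.floor(-7 / 2), idivl leaves
// result == -3 and remainder == -1. remainder ^ divisor is negative (signs
// differ), so (remainder ^ divisor) >> 31 == -1 and the result is adjusted
// to -4 == floor(-3.5); when the signs agree the shift yields 0 and the
// truncated quotient is already the floor.
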
void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ movp(kScratchRegister, left);
    } else {
      __ movl(kScratchRegister, left);
    }
  }

  bool can_overflow =
      instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  if (right->IsConstantOperand()) {
    int32_t right_value = ToInteger32(LConstantOperand::cast(right));
    if (right_value == -1) {
      __ negl(left);
    } else if (right_value == 0) {
      __ xorl(left, left);
    } else if (right_value == 2) {
      __ addl(left, left);
    } else if (!can_overflow) {
      // If the multiplication is known to not overflow, we
      // can use operations that don't set the overflow flag
      // correctly.
      switch (right_value) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ leal(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shll(left, Immediate(2));
          break;
        case 5:
          __ leal(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shll(left, Immediate(3));
          break;
        case 9:
          __ leal(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shll(left, Immediate(4));
          break;
        default:
          __ imull(left, left, Immediate(right_value));
          break;
      }
    } else {
      __ imull(left, left, Immediate(right_value));
    }
  } else if (right->IsStackSlot()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger64(left, left);
      __ imulp(left, ToOperand(right));
    } else {
      __ imull(left, ToOperand(right));
    }
  } else {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger64(left, left);
      __ imulp(left, ToRegister(right));
    } else {
      __ imull(left, ToRegister(right));
    }
  }

  if (can_overflow) {
    DeoptimizeIf(overflow, instr->environment());
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ testp(left, left);
    } else {
      __ testl(left, left);
    }
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      // Constant can't be represented as Smi due to immediate size limit.
      ASSERT(!instr->hydrogen_value()->representation().IsSmi());
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        DeoptimizeIf(no_condition, instr->environment());
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        __ cmpl(kScratchRegister, Immediate(0));
        DeoptimizeIf(less, instr->environment());
      }
    } else if (right->IsStackSlot()) {
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ orp(kScratchRegister, ToOperand(right));
      } else {
        __ orl(kScratchRegister, ToOperand(right));
      }
      DeoptimizeIf(sign, instr->environment());
    } else {
      // Test the non-zero operand for negative sign.
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ orp(kScratchRegister, ToRegister(right));
      } else {
        __ orl(kScratchRegister, ToRegister(right));
      }
      DeoptimizeIf(sign, instr->environment());
    }
    __ bind(&done);
  }
}

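// Worked example for the strength reduction in DoMulI above (illustrative,
// not from the original source): for right_value == 5,
// leal(left, Operand(left, left, times_4, 0)) computes left + 4 * left in a
// single instruction. These lea/shift forms are only used on the
// !can_overflow path because, unlike imull, they do not set the overflow
// flag that the DeoptimizeIf(overflow, ...) check consumes.
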
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    int32_t right_operand = ToInteger32(LConstantOperand::cast(right));
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_OR:
        __ orl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_XOR:
        if (right_operand == int32_t(~0)) {
          __ notl(ToRegister(left));
        } else {
          __ xorl(ToRegister(left), Immediate(right_operand));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else if (right->IsStackSlot()) {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andp(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ orp(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xorp(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    ASSERT(right->IsRegister());
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andp(ToRegister(left), ToRegister(right));
        break;
      case Token::BIT_OR:
        __ orp(ToRegister(left), ToRegister(right));
        break;
      case Token::BIT_XOR:
        __ xorp(ToRegister(left), ToRegister(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    ASSERT(ToRegister(right).is(rcx));

    switch (instr->op()) {
      case Token::ROR:
        __ rorl_cl(ToRegister(left));
        break;
      case Token::SAR:
        __ sarl_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shrl_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr->environment());
        }
        break;
      case Token::SHL:
        __ shll_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int32_t value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ rorl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sarl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr->environment());
        } else {
          __ shrl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi()) {
            __ shl(ToRegister(left), Immediate(shift_count));
          } else {
            __ shll(ToRegister(left), Immediate(shift_count));
          }
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ subl(ToRegister(left),
            Immediate(ToInteger32(LConstantOperand::cast(right))));
  } else if (right->IsRegister()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ subp(ToRegister(left), ToRegister(right));
    } else {
      __ subl(ToRegister(left), ToRegister(right));
    }
  } else {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ subp(ToRegister(left), ToOperand(right));
    } else {
      __ subl(ToRegister(left), ToOperand(right));
    }
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}

void LCodeGen::DoConstantI(LConstantI* instr) {
  __ Set(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Move(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  XMMRegister res = ToDoubleRegister(instr->result());
  double v = instr->value();
  uint64_t int_val = BitCast<uint64_t, double>(v);
  // Use xor to produce +0.0 in a fast and compact way, but avoid to
  // do so if the constant is -0.0.
  if (int_val == 0) {
    __ xorps(res, res);
  } else {
    Register tmp = ToRegister(instr->temp());
    __ Set(tmp, int_val);
    __ movq(res, tmp);
  }
}

void LCodeGen::DoConstantE(LConstantE* instr) {
  __ LoadAddress(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value(isolate());
  __ Move(ToRegister(instr->result()), value);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
  __ Integer32ToSmi(result, result);
}

void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Smi* index = instr->index();
  Label runtime, done, not_date_object;
  ASSERT(object.is(result));
  ASSERT(object.is(rax));

  Condition cc = masm()->CheckSmi(object);
  DeoptimizeIf(cc, instr->environment());
  __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
  DeoptimizeIf(not_equal, instr->environment());

  if (index->value() == 0) {
    __ movp(result, FieldOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      Operand stamp_operand = __ ExternalOperand(stamp);
      __ movp(kScratchRegister, stamp_operand);
      __ cmpp(kScratchRegister, FieldOperand(object,
                                             JSDate::kCacheStampOffset));
      __ j(not_equal, &runtime, Label::kNear);
      __ movp(result, FieldOperand(object, JSDate::kValueOffset +
                                           kPointerSize * index->value()));
      __ jmp(&done, Label::kNear);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2);
    __ movp(arg_reg_1, object);
    __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}

Operand LCodeGen::BuildSeqStringOperand(Register string,
                                        LOperand* index,
                                        String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldOperand(string, SeqString::kHeaderSize + offset);
  }
  return FieldOperand(
      string, ToRegister(index),
      encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
      SeqString::kHeaderSize);
}

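// Worked example for BuildSeqStringOperand above (illustrative, not from the
// original source): for a two-byte string and a constant index of 3, this
// returns FieldOperand(string, SeqString::kHeaderSize + 3 * kUC16Size), the
// header-relative address of the third code unit; with a dynamic index the
// times_2 scale performs the same multiplication in the addressing mode.
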
void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register result = ToRegister(instr->result());
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    __ Push(string);
    __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
    __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));

    __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
                              ? one_byte_seq_type : two_byte_seq_type));
    __ Check(equal, kUnexpectedStringType);
    __ Pop(string);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ movzxbl(result, operand);
  } else {
    __ movzxwl(result, operand);
  }
}

void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    Register value = ToRegister(instr->value());
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (instr->value()->IsConstantOperand()) {
    int value = ToInteger32(LConstantOperand::cast(instr->value()));
    ASSERT_LE(0, value);
    if (encoding == String::ONE_BYTE_ENCODING) {
      ASSERT_LE(value, String::kMaxOneByteCharCode);
      __ movb(operand, Immediate(value));
    } else {
      ASSERT_LE(value, String::kMaxUtf16CodeUnit);
      __ movw(operand, Immediate(value));
    }
  } else {
    Register value = ToRegister(instr->value());
    if (encoding == String::ONE_BYTE_ENCODING) {
      __ movb(operand, value);
    } else {
      __ movw(operand, value);
    }
  }
}

void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();

  Representation target_rep = instr->hydrogen()->representation();
  bool is_p = target_rep.IsSmi() || target_rep.IsExternal();

  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
    if (right->IsConstantOperand()) {
      int32_t offset = ToInteger32(LConstantOperand::cast(right));
      if (is_p) {
        __ leap(ToRegister(instr->result()),
                MemOperand(ToRegister(left), offset));
      } else {
        __ leal(ToRegister(instr->result()),
                MemOperand(ToRegister(left), offset));
      }
    } else {
      Operand address(ToRegister(left), ToRegister(right), times_1, 0);
      if (is_p) {
        __ leap(ToRegister(instr->result()), address);
      } else {
        __ leal(ToRegister(instr->result()), address);
      }
    }
  } else {
    if (right->IsConstantOperand()) {
      if (is_p) {
        __ addp(ToRegister(left),
                Immediate(ToInteger32(LConstantOperand::cast(right))));
      } else {
        __ addl(ToRegister(left),
                Immediate(ToInteger32(LConstantOperand::cast(right))));
      }
    } else if (right->IsRegister()) {
      if (is_p) {
        __ addp(ToRegister(left), ToRegister(right));
      } else {
        __ addl(ToRegister(left), ToRegister(right));
      }
    } else {
      if (is_p) {
        __ addp(ToRegister(left), ToOperand(right));
      } else {
        __ addl(ToRegister(left), ToOperand(right));
      }
    }
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      DeoptimizeIf(overflow, instr->environment());
    }
  }
}

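// Design note for DoAddI above (illustrative, not from the original source):
// when the result register differs from the left operand,
// leal(result, MemOperand(left, offset)) computes left + offset without
// clobbering left and without touching the flags. That is why the overflow
// deopt only appears on the add path; lea cannot signal overflow, so the lea
// form is reserved for adds the compiler knows cannot overflow.
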
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Label return_left;
    Condition condition = (operation == HMathMinMax::kMathMin)
        ? less_equal
        : greater_equal;
    Register left_reg = ToRegister(left);
    if (right->IsConstantOperand()) {
      Immediate right_imm =
          Immediate(ToInteger32(LConstantOperand::cast(right)));
      ASSERT(!instr->hydrogen_value()->representation().IsSmi());
      __ cmpl(left_reg, right_imm);
      __ j(condition, &return_left, Label::kNear);
      __ movp(left_reg, right_imm);
    } else if (right->IsRegister()) {
      Register right_reg = ToRegister(right);
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ cmpp(left_reg, right_reg);
      } else {
        __ cmpl(left_reg, right_reg);
      }
      __ j(condition, &return_left, Label::kNear);
      __ movp(left_reg, right_reg);
    } else {
      Operand right_op = ToOperand(right);
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ cmpp(left_reg, right_op);
      } else {
        __ cmpl(left_reg, right_op);
      }
      __ j(condition, &return_left, Label::kNear);
      __ movp(left_reg, right_op);
    }
    __ bind(&return_left);
  } else {
    ASSERT(instr->hydrogen()->representation().IsDouble());
    Label check_nan_left, check_zero, return_left, return_right;
    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
    XMMRegister left_reg = ToDoubleRegister(left);
    XMMRegister right_reg = ToDoubleRegister(right);
    __ ucomisd(left_reg, right_reg);
    __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
    __ j(equal, &check_zero, Label::kNear);  // left == right.
    __ j(condition, &return_left, Label::kNear);
    __ jmp(&return_right, Label::kNear);

    __ bind(&check_zero);
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(left_reg, xmm_scratch);
    __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
    // At this point, both left and right are either 0 or -0.
    if (operation == HMathMinMax::kMathMin) {
      __ orps(left_reg, right_reg);
    } else {
      // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
      __ addsd(left_reg, right_reg);
    }
    __ jmp(&return_left, Label::kNear);

    __ bind(&check_nan_left);
    __ ucomisd(left_reg, left_reg);  // NaN check.
    __ j(parity_even, &return_left, Label::kNear);
    __ bind(&return_right);
    __ movaps(left_reg, right_reg);

    __ bind(&return_left);
  }
}

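// Worked example for the signed-zero handling in DoMathMinMax above
// (illustrative, not from the original source): min(+0, -0) must be -0 and
// max(+0, -0) must be +0, but ucomisd reports +0 == -0. The bit tricks
// resolve this: orps ORs the sign bits, so any -0 input forces a -0 result
// for min, while addsd gives (+0) + (-0) == +0, the correct max. NaN is
// detected via parity_even because ucomisd sets PF on unordered inputs.
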
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  XMMRegister left = ToDoubleRegister(instr->left());
  XMMRegister right = ToDoubleRegister(instr->right());
  XMMRegister result = ToDoubleRegister(instr->result());
  // All operations except MOD are computed in-place.
  ASSERT(instr->op() == Token::MOD || left.is(result));
  switch (instr->op()) {
    case Token::ADD:
      __ addsd(left, right);
      break;
    case Token::SUB:
      __ subsd(left, right);
      break;
    case Token::MUL:
      __ mulsd(left, right);
      break;
    case Token::DIV:
      __ divsd(left, right);
      // Don't delete this mov. It may improve performance on some CPUs
      // when there is a mulsd depending on the result.
      __ movaps(left, left);
      break;
    case Token::MOD: {
      XMMRegister xmm_scratch = double_scratch0();
      __ PrepareCallCFunction(2);
      __ movaps(xmm_scratch, left);
      ASSERT(right.is(xmm1));
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()), 2);
      __ movaps(result, xmm_scratch);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}

void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->left()).is(rdx));
  ASSERT(ToRegister(instr->right()).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}

1984 template<class InstrType>
1985 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
1986 int left_block = instr->TrueDestination(chunk_);
1987 int right_block = instr->FalseDestination(chunk_);
1989 int next_block = GetNextEmittedBlock();
1991 if (right_block == left_block || cc == no_condition) {
1992 EmitGoto(left_block);
1993 } else if (left_block == next_block) {
1994 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
1995 } else if (right_block == next_block) {
1996 __ j(cc, chunk_->GetAssemblyLabel(left_block));
1997 } else {
1998 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2000 __ jmp(chunk_->GetAssemblyLabel(right_block));
2001 }
2002 }
2006 template<class InstrType>
2007 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2008 int false_block = instr->FalseDestination(chunk_);
2009 __ j(cc, chunk_->GetAssemblyLabel(false_block));
2010 }
2013 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2014 __ int3();
2015 }
2018 void LCodeGen::DoBranch(LBranch* instr) {
2019 Representation r = instr->hydrogen()->value()->representation();
2020 if (r.IsInteger32()) {
2021 ASSERT(!info()->IsStub());
2022 Register reg = ToRegister(instr->value());
2023 __ testl(reg, reg);
2024 EmitBranch(instr, not_zero);
2025 } else if (r.IsSmi()) {
2026 ASSERT(!info()->IsStub());
2027 Register reg = ToRegister(instr->value());
2028 __ testp(reg, reg);
2029 EmitBranch(instr, not_zero);
2030 } else if (r.IsDouble()) {
2031 ASSERT(!info()->IsStub());
2032 XMMRegister reg = ToDoubleRegister(instr->value());
2033 XMMRegister xmm_scratch = double_scratch0();
2034 __ xorps(xmm_scratch, xmm_scratch);
2035 __ ucomisd(reg, xmm_scratch);
2036 EmitBranch(instr, not_equal);
2037 } else {
2038 ASSERT(r.IsTagged());
2039 Register reg = ToRegister(instr->value());
2040 HType type = instr->hydrogen()->value()->type();
2041 if (type.IsBoolean()) {
2042 ASSERT(!info()->IsStub());
2043 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2044 EmitBranch(instr, equal);
2045 } else if (type.IsSmi()) {
2046 ASSERT(!info()->IsStub());
2047 __ SmiCompare(reg, Smi::FromInt(0));
2048 EmitBranch(instr, not_equal);
2049 } else if (type.IsJSArray()) {
2050 ASSERT(!info()->IsStub());
2051 EmitBranch(instr, no_condition);
2052 } else if (type.IsHeapNumber()) {
2053 ASSERT(!info()->IsStub());
2054 XMMRegister xmm_scratch = double_scratch0();
2055 __ xorps(xmm_scratch, xmm_scratch);
2056 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2057 EmitBranch(instr, not_equal);
2058 } else if (type.IsString()) {
2059 ASSERT(!info()->IsStub());
2060 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2061 EmitBranch(instr, not_equal);
2062 } else {
2063 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2064 // Avoid deopts in the case where we've never executed this path before.
2065 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
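// Each check below is emitted only for types the ToBoolean type feedback
// has actually observed on this path; any value outside the expected set
// falls through to the deopt at the end, which widens the feedback before
// the code is optimized again.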
2067 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2068 // undefined -> false.
2069 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2070 __ j(equal, instr->FalseLabel(chunk_));
2071 }
2072 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2073 // true -> true.
2074 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2075 __ j(equal, instr->TrueLabel(chunk_));
2076 // false -> false.
2077 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2078 __ j(equal, instr->FalseLabel(chunk_));
2079 }
2080 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2081 // 'null' -> false.
2082 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2083 __ j(equal, instr->FalseLabel(chunk_));
2084 }
2086 if (expected.Contains(ToBooleanStub::SMI)) {
2087 // Smis: 0 -> false, all other -> true.
2088 __ Cmp(reg, Smi::FromInt(0));
2089 __ j(equal, instr->FalseLabel(chunk_));
2090 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2091 } else if (expected.NeedsMap()) {
2092 // If we need a map later and have a Smi -> deopt.
2093 __ testb(reg, Immediate(kSmiTagMask));
2094 DeoptimizeIf(zero, instr->environment());
2095 }
2097 const Register map = kScratchRegister;
2098 if (expected.NeedsMap()) {
2099 __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
2101 if (expected.CanBeUndetectable()) {
2102 // Undetectable -> false.
2103 __ testb(FieldOperand(map, Map::kBitFieldOffset),
2104 Immediate(1 << Map::kIsUndetectable));
2105 __ j(not_zero, instr->FalseLabel(chunk_));
2106 }
2107 }
2109 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2110 // spec object -> true.
2111 __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2112 __ j(above_equal, instr->TrueLabel(chunk_));
2113 }
2115 if (expected.Contains(ToBooleanStub::STRING)) {
2116 // String value -> false iff empty.
2117 Label not_string;
2118 __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2119 __ j(above_equal, &not_string, Label::kNear);
2120 __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2121 __ j(not_zero, instr->TrueLabel(chunk_));
2122 __ jmp(instr->FalseLabel(chunk_));
2123 __ bind(&not_string);
2124 }
2126 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2127 // Symbol value -> true.
2128 __ CmpInstanceType(map, SYMBOL_TYPE);
2129 __ j(equal, instr->TrueLabel(chunk_));
2130 }
2132 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2133 // heap number -> false iff +0, -0, or NaN.
2134 Label not_heap_number;
2135 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2136 __ j(not_equal, &not_heap_number, Label::kNear);
2137 XMMRegister xmm_scratch = double_scratch0();
2138 __ xorps(xmm_scratch, xmm_scratch);
2139 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2140 __ j(zero, instr->FalseLabel(chunk_));
2141 __ jmp(instr->TrueLabel(chunk_));
2142 __ bind(&not_heap_number);
2143 }
2145 if (!expected.IsGeneric()) {
2146 // We've seen something for the first time -> deopt.
2147 // This can only happen if we are not generic already.
2148 DeoptimizeIf(no_condition, instr->environment());
2149 }
2150 }
2151 }
2152 }
2155 void LCodeGen::EmitGoto(int block) {
2156 if (!IsNextEmittedBlock(block)) {
2157 __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
2158 }
2159 }
2162 void LCodeGen::DoGoto(LGoto* instr) {
2163 EmitGoto(instr->block_id());
2164 }
2167 inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2168 Condition cond = no_condition;
2169 switch (op) {
2170 case Token::EQ:
2171 case Token::EQ_STRICT:
2172 cond = equal;
2173 break;
2174 case Token::NE:
2175 case Token::NE_STRICT:
2176 cond = not_equal;
2177 break;
2178 case Token::LT:
2179 cond = is_unsigned ? below : less;
2180 break;
2181 case Token::GT:
2182 cond = is_unsigned ? above : greater;
2183 break;
2184 case Token::LTE:
2185 cond = is_unsigned ? below_equal : less_equal;
2186 break;
2187 case Token::GTE:
2188 cond = is_unsigned ? above_equal : greater_equal;
2189 break;
2190 case Token::IN:
2191 case Token::INSTANCEOF:
2192 default:
2193 UNREACHABLE();
2194 }
2195 return cond;
2196 }
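// DoCompareNumericAndBranch passes is_double() as is_unsigned because
// ucomisd reports its result through CF/ZF exactly like an unsigned integer
// compare, so double comparisons must use below/above, never less/greater.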
2199 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2200 LOperand* left = instr->left();
2201 LOperand* right = instr->right();
2202 Condition cc = TokenToCondition(instr->op(), instr->is_double());
2204 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2205 // We can statically evaluate the comparison.
2206 double left_val = ToDouble(LConstantOperand::cast(left));
2207 double right_val = ToDouble(LConstantOperand::cast(right));
2208 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2209 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2210 EmitGoto(next_block);
2211 } else {
2212 if (instr->is_double()) {
2213 // Don't base result on EFLAGS when a NaN is involved. Instead
2214 // jump to the false block.
2215 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
2216 __ j(parity_even, instr->FalseLabel(chunk_));
2217 } else {
2218 int32_t value;
2219 if (right->IsConstantOperand()) {
2220 value = ToInteger32(LConstantOperand::cast(right));
2221 if (instr->hydrogen_value()->representation().IsSmi()) {
2222 __ Cmp(ToRegister(left), Smi::FromInt(value));
2223 } else {
2224 __ cmpl(ToRegister(left), Immediate(value));
2225 }
2226 } else if (left->IsConstantOperand()) {
2227 value = ToInteger32(LConstantOperand::cast(left));
2228 if (instr->hydrogen_value()->representation().IsSmi()) {
2229 if (right->IsRegister()) {
2230 __ Cmp(ToRegister(right), Smi::FromInt(value));
2231 } else {
2232 __ Cmp(ToOperand(right), Smi::FromInt(value));
2233 }
2234 } else if (right->IsRegister()) {
2235 __ cmpl(ToRegister(right), Immediate(value));
2236 } else {
2237 __ cmpl(ToOperand(right), Immediate(value));
2238 }
2239 // We transposed the operands. Reverse the condition.
2240 cc = ReverseCondition(cc);
2241 } else if (instr->hydrogen_value()->representation().IsSmi()) {
2242 if (right->IsRegister()) {
2243 __ cmpp(ToRegister(left), ToRegister(right));
2244 } else {
2245 __ cmpp(ToRegister(left), ToOperand(right));
2246 }
2247 } else {
2248 if (right->IsRegister()) {
2249 __ cmpl(ToRegister(left), ToRegister(right));
2250 } else {
2251 __ cmpl(ToRegister(left), ToOperand(right));
2252 }
2253 }
2254 }
2255 EmitBranch(instr, cc);
2256 }
2257 }
2260 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2261 Register left = ToRegister(instr->left());
2263 if (instr->right()->IsConstantOperand()) {
2264 Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2265 __ Cmp(left, right);
2266 } else {
2267 Register right = ToRegister(instr->right());
2268 __ cmpp(left, right);
2269 }
2270 EmitBranch(instr, equal);
2271 }
2274 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2275 if (instr->hydrogen()->representation().IsTagged()) {
2276 Register input_reg = ToRegister(instr->object());
2277 __ Cmp(input_reg, factory()->the_hole_value());
2278 EmitBranch(instr, equal);
2279 return;
2280 }
2282 XMMRegister input_reg = ToDoubleRegister(instr->object());
2283 __ ucomisd(input_reg, input_reg);
2284 EmitFalseBranch(instr, parity_odd);
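// Only NaNs reach this point; the hole is encoded as a NaN with a
// distinguished upper half, so spill the double and compare its upper 32
// bits against kHoleNanUpper32.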
2286 __ subp(rsp, Immediate(kDoubleSize));
2287 __ movsd(MemOperand(rsp, 0), input_reg);
2288 __ addp(rsp, Immediate(kDoubleSize));
2290 int offset = sizeof(kHoleNanUpper32);
2291 __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
2292 EmitBranch(instr, equal);
2293 }
2296 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2297 Representation rep = instr->hydrogen()->value()->representation();
2298 ASSERT(!rep.IsInteger32());
2300 if (rep.IsDouble()) {
2301 XMMRegister value = ToDoubleRegister(instr->value());
2302 XMMRegister xmm_scratch = double_scratch0();
2303 __ xorps(xmm_scratch, xmm_scratch);
2304 __ ucomisd(xmm_scratch, value);
2305 EmitFalseBranch(instr, not_equal);
2306 __ movmskpd(kScratchRegister, value);
2307 __ testl(kScratchRegister, Immediate(1));
2308 EmitBranch(instr, not_zero);
2309 } else {
2310 Register value = ToRegister(instr->value());
2311 Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2312 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
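// A boxed -0.0 has the bit pattern 0x8000000000000000, so its upper word is
// 0x80000000 == INT32_MIN, the only 32-bit value for which cmpl against 1
// sets the overflow flag; no_overflow therefore rejects everything else.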
2313 __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
2314 Immediate(0x1));
2315 EmitFalseBranch(instr, no_overflow);
2316 __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
2317 Immediate(0x00000000));
2318 EmitBranch(instr, equal);
2319 }
2320 }
2323 Condition LCodeGen::EmitIsObject(Register input,
2324 Label* is_not_object,
2325 Label* is_object) {
2326 ASSERT(!input.is(kScratchRegister));
2328 __ JumpIfSmi(input, is_not_object);
2330 __ CompareRoot(input, Heap::kNullValueRootIndex);
2331 __ j(equal, is_object);
2333 __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
2334 // Undetectable objects behave like undefined.
2335 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
2336 Immediate(1 << Map::kIsUndetectable));
2337 __ j(not_zero, is_not_object);
2339 __ movzxbl(kScratchRegister,
2340 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
2341 __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2342 __ j(below, is_not_object);
2343 __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2344 return below_equal;
2345 }
2348 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2349 Register reg = ToRegister(instr->value());
2351 Condition true_cond = EmitIsObject(
2352 reg, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2354 EmitBranch(instr, true_cond);
2355 }
2358 Condition LCodeGen::EmitIsString(Register input,
2359 Register temp1,
2360 Label* is_not_string,
2361 SmiCheck check_needed = INLINE_SMI_CHECK) {
2362 if (check_needed == INLINE_SMI_CHECK) {
2363 __ JumpIfSmi(input, is_not_string);
2364 }
2366 Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2368 return cond;
2369 }
2372 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2373 Register reg = ToRegister(instr->value());
2374 Register temp = ToRegister(instr->temp());
2376 SmiCheck check_needed =
2377 instr->hydrogen()->value()->IsHeapObject()
2378 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2380 Condition true_cond = EmitIsString(
2381 reg, temp, instr->FalseLabel(chunk_), check_needed);
2383 EmitBranch(instr, true_cond);
2384 }
2387 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2388 Condition is_smi;
2389 if (instr->value()->IsRegister()) {
2390 Register input = ToRegister(instr->value());
2391 is_smi = masm()->CheckSmi(input);
2393 Operand input = ToOperand(instr->value());
2394 is_smi = masm()->CheckSmi(input);
2395 }
2396 EmitBranch(instr, is_smi);
2397 }
2400 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2401 Register input = ToRegister(instr->value());
2402 Register temp = ToRegister(instr->temp());
2404 if (!instr->hydrogen()->value()->IsHeapObject()) {
2405 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2406 }
2407 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2408 __ testb(FieldOperand(temp, Map::kBitFieldOffset),
2409 Immediate(1 << Map::kIsUndetectable));
2410 EmitBranch(instr, not_zero);
2411 }
2414 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2415 ASSERT(ToRegister(instr->context()).is(rsi));
2416 Token::Value op = instr->op();
2418 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2419 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2421 Condition condition = TokenToCondition(op, false);
2422 __ testp(rax, rax);  // The IC leaves an untagged <0, 0 or >0 in rax.
2424 EmitBranch(instr, condition);
2425 }
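// TestType/BranchCondition fold the instance-type interval [from, to] into
// one compare: a degenerate interval tests for equality, an interval open
// at LAST_TYPE tests above_equal, and one starting at FIRST_TYPE tests
// below_equal.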
2428 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2429 InstanceType from = instr->from();
2430 InstanceType to = instr->to();
2431 if (from == FIRST_TYPE) return to;
2432 ASSERT(from == to || to == LAST_TYPE);
2433 return from;
2434 }
2437 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2438 InstanceType from = instr->from();
2439 InstanceType to = instr->to();
2440 if (from == to) return equal;
2441 if (to == LAST_TYPE) return above_equal;
2442 if (from == FIRST_TYPE) return below_equal;
2443 UNREACHABLE();
2444 return equal;
2445 }
2448 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2449 Register input = ToRegister(instr->value());
2451 if (!instr->hydrogen()->value()->IsHeapObject()) {
2452 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2453 }
2455 __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
2456 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2457 }
2460 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2461 Register input = ToRegister(instr->value());
2462 Register result = ToRegister(instr->result());
2464 __ AssertString(input);
2466 __ movl(result, FieldOperand(input, String::kHashFieldOffset));
2467 ASSERT(String::kHashShift >= kSmiTagSize);
2468 __ IndexFromHash(result, result);
2469 }
2472 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2473 LHasCachedArrayIndexAndBranch* instr) {
2474 Register input = ToRegister(instr->value());
2476 __ testl(FieldOperand(input, String::kHashFieldOffset),
2477 Immediate(String::kContainsCachedArrayIndexMask));
2478 EmitBranch(instr, equal);
2479 }
2482 // Branches to a label or falls through with the answer in the z flag.
2483 // Trashes the temp register.
2484 void LCodeGen::EmitClassOfTest(Label* is_true,
2485 Label* is_false,
2486 Handle<String> class_name,
2487 Register input,
2488 Register temp,
2489 Register temp2) {
2490 ASSERT(!input.is(temp));
2491 ASSERT(!input.is(temp2));
2492 ASSERT(!temp.is(temp2));
2494 __ JumpIfSmi(input, is_false);
2496 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
2497 // Assuming the following assertions, we can use the same compares to test
2498 // for both being a function type and being in the object type range.
2499 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2500 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2501 FIRST_SPEC_OBJECT_TYPE + 1);
2502 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2503 LAST_SPEC_OBJECT_TYPE - 1);
2504 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2505 __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2506 __ j(below, is_false);
2507 __ j(equal, is_true);
2508 __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2509 __ j(equal, is_true);
2510 } else {
2511 // Faster code path to avoid two compares: subtract lower bound from the
2512 // actual type and do a signed compare with the width of the type range.
2513 __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2514 __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2515 __ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2516 __ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2517 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2518 __ j(above, is_false);
2519 }
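// (Illustrative numbers: for a range FIRST==100 .. LAST==102, a type of 99
// becomes 99 - 100 == 0xFF...FF after the subp, which the unsigned compare
// treats as far above 2, so it lands in is_false as well.)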
2521 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2522 // Check if the constructor in the map is a function.
2523 __ movp(temp, FieldOperand(temp, Map::kConstructorOffset));
2525 // Objects with a non-function constructor have class 'Object'.
2526 __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
2527 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
2528 __ j(not_equal, is_true);
2529 } else {
2530 __ j(not_equal, is_false);
2531 }
2533 // temp now contains the constructor function. Grab the
2534 // instance class name from there.
2535 __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2536 __ movp(temp, FieldOperand(temp,
2537 SharedFunctionInfo::kInstanceClassNameOffset));
2538 // The class name we are testing against is internalized since it's a literal.
2539 // The name in the constructor is internalized because of the way the context
2540 // is booted. This routine isn't expected to work for random API-created
2541 // classes and it doesn't have to because you can't access it with natives
2542 // syntax. Since both sides are internalized it is sufficient to use an
2543 // identity comparison.
2544 ASSERT(class_name->IsInternalizedString());
2545 __ Cmp(temp, class_name);
2546 // End with the answer in the z flag.
2547 }
2550 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2551 Register input = ToRegister(instr->value());
2552 Register temp = ToRegister(instr->temp());
2553 Register temp2 = ToRegister(instr->temp2());
2554 Handle<String> class_name = instr->hydrogen()->class_name();
2556 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2557 class_name, input, temp, temp2);
2559 EmitBranch(instr, equal);
2560 }
2563 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2564 Register reg = ToRegister(instr->value());
2566 __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2567 EmitBranch(instr, equal);
2568 }
2571 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2572 ASSERT(ToRegister(instr->context()).is(rsi));
2573 InstanceofStub stub(InstanceofStub::kNoFlags);
2574 __ Push(ToRegister(instr->left()));
2575 __ Push(ToRegister(instr->right()));
2576 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2577 Label true_value, done;
2578 __ testp(rax, rax);
2579 __ j(zero, &true_value, Label::kNear);
2580 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2581 __ jmp(&done, Label::kNear);
2582 __ bind(&true_value);
2583 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2584 __ bind(&done);
2585 }
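// The known-global variant below inlines a one-entry (map, result) cache at
// the call site: both hole constants emitted here are patched at runtime
// with the last map seen and the corresponding boolean result.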
2588 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2589 class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
2590 public:
2591 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2592 LInstanceOfKnownGlobal* instr)
2593 : LDeferredCode(codegen), instr_(instr) { }
2594 virtual void Generate() V8_OVERRIDE {
2595 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2596 }
2597 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
2598 Label* map_check() { return &map_check_; }
2599 private:
2600 LInstanceOfKnownGlobal* instr_;
2601 Label map_check_;
2602 };
2604 ASSERT(ToRegister(instr->context()).is(rsi));
2605 DeferredInstanceOfKnownGlobal* deferred;
2606 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2608 Label done, false_result;
2609 Register object = ToRegister(instr->value());
2611 // A Smi is not an instance of anything.
2612 __ JumpIfSmi(object, &false_result, Label::kNear);
2614 // This is the inlined call site instanceof cache. The two occurrences of the
2615 // hole value will be patched to the last map/result pair generated by the
2616 // instanceof stub.
2617 Label cache_miss;
2618 // Use a temp register to avoid memory operands with variable lengths.
2619 Register map = ToRegister(instr->temp());
2620 __ movp(map, FieldOperand(object, HeapObject::kMapOffset));
2621 __ bind(deferred->map_check()); // Label for calculating code patching.
2622 Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
2623 __ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
2624 __ cmpp(map, Operand(kScratchRegister, 0));
2625 __ j(not_equal, &cache_miss, Label::kNear);
2626 // Patched to load either true or false.
2627 __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
2629 // Check that the code size between patch label and patch sites is invariant.
2630 Label end_of_patched_code;
2631 __ bind(&end_of_patched_code);
2634 __ jmp(&done, Label::kNear);
2636 // The inlined call site cache did not match. Check for null and string
2637 // before calling the deferred code.
2638 __ bind(&cache_miss); // Null is not an instance of anything.
2639 __ CompareRoot(object, Heap::kNullValueRootIndex);
2640 __ j(equal, &false_result, Label::kNear);
2642 // String values are not instances of anything.
2643 __ JumpIfNotString(object, kScratchRegister, deferred->entry());
2645 __ bind(&false_result);
2646 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2648 __ bind(deferred->exit());
2649 __ bind(&done);
2650 }
2653 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2654 Label* map_check) {
2655 {
2656 PushSafepointRegistersScope scope(this);
2657 InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
2658 InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
2659 InstanceofStub stub(flags);
2661 __ Push(ToRegister(instr->value()));
2662 __ Push(instr->function());
2664 static const int kAdditionalDelta = 10;
2665 int delta =
2666 masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
2668 __ PushImm32(delta);
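// The pushed delta is the code-size distance from the map-check label to
// the point just past the stub call (the ASSERT below re-checks it); the
// stub uses it to locate the inlined cache site from its return address
// when patching.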
2670 // We are pushing three values on the stack but recording a
2671 // safepoint with two arguments because stub is going to
2672 // remove the third argument from the stack before jumping
2673 // to instanceof builtin on the slow path.
2674 CallCodeGeneric(stub.GetCode(isolate()),
2675 RelocInfo::CODE_TARGET,
2676 instr,
2677 RECORD_SAFEPOINT_WITH_REGISTERS,
2678 2);
2679 ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
2680 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2681 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2682 // Move result to a register that survives the end of the
2683 // PushSafepointRegisterScope.
2684 __ movp(kScratchRegister, rax);
2685 }
2686 __ testp(kScratchRegister, kScratchRegister);
2687 Label load_false;
2688 Label done;
2689 __ j(not_zero, &load_false, Label::kNear);
2690 __ LoadRoot(rax, Heap::kTrueValueRootIndex);
2691 __ jmp(&done, Label::kNear);
2692 __ bind(&load_false);
2693 __ LoadRoot(rax, Heap::kFalseValueRootIndex);
2694 __ bind(&done);
2695 }
2698 void LCodeGen::DoCmpT(LCmpT* instr) {
2699 ASSERT(ToRegister(instr->context()).is(rsi));
2700 Token::Value op = instr->op();
2702 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2703 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2705 Condition condition = TokenToCondition(op, false);
2706 Label true_value, done;
2707 __ testp(rax, rax);
2708 __ j(condition, &true_value, Label::kNear);
2709 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2710 __ jmp(&done, Label::kNear);
2711 __ bind(&true_value);
2712 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2713 __ bind(&done);
2714 }
2717 void LCodeGen::DoReturn(LReturn* instr) {
2718 if (FLAG_trace && info()->IsOptimizing()) {
2719 // Preserve the return value on the stack and rely on the runtime call
2720 // to return the value in the same register. We're leaving the code
2721 // managed by the register allocator and tearing down the frame, it's
2722 // safe to write to the context register.
2723 __ Push(rax);
2724 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
2725 __ CallRuntime(Runtime::kTraceExit, 1);
2726 }
2727 if (info()->saves_caller_doubles()) {
2728 RestoreCallerDoubles();
2729 }
2730 int no_frame_start = -1;
2731 if (NeedsEagerFrame()) {
2732 __ movp(rsp, rbp);
2733 __ popq(rbp);
2734 no_frame_start = masm_->pc_offset();
2735 }
2736 if (instr->has_constant_parameter_count()) {
2737 __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
2738 rcx);
2739 } else {
2740 Register reg = ToRegister(instr->parameter_count());
2741 // The argument count parameter is a smi.
2742 __ SmiToInteger32(reg, reg);
2743 Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
2744 __ PopReturnAddressTo(return_addr_reg);
2745 __ shl(reg, Immediate(kPointerSizeLog2));
2746 __ addp(rsp, reg);
2747 __ jmp(return_addr_reg);
2748 }
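// With a dynamic argument count "ret n" is unavailable, so the return
// address is parked in a scratch register, rsp is bumped past the
// smi-untagged, pointer-scaled argument area, and we jump back through the
// saved address instead.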
2749 if (no_frame_start != -1) {
2750 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2751 }
2752 }
2755 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2756 Register result = ToRegister(instr->result());
2757 __ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
2758 if (instr->hydrogen()->RequiresHoleCheck()) {
2759 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2760 DeoptimizeIf(equal, instr->environment());
2761 }
2762 }
2765 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2766 ASSERT(ToRegister(instr->context()).is(rsi));
2767 ASSERT(ToRegister(instr->global_object()).is(rax));
2768 ASSERT(ToRegister(instr->result()).is(rax));
2770 __ Move(rcx, instr->name());
2771 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
2772 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
2773 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2774 }
2777 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2778 Register value = ToRegister(instr->value());
2779 Handle<Cell> cell_handle = instr->hydrogen()->cell().handle();
2781 // If the cell we are storing to contains the hole it could have
2782 // been deleted from the property dictionary. In that case, we need
2783 // to update the property details in the property dictionary to mark
2784 // it as no longer deleted. We deoptimize in that case.
2785 if (instr->hydrogen()->RequiresHoleCheck()) {
2786 // We have a temp because CompareRoot might clobber kScratchRegister.
2787 Register cell = ToRegister(instr->temp());
2788 ASSERT(!value.is(cell));
2789 __ Move(cell, cell_handle, RelocInfo::CELL);
2790 __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
2791 DeoptimizeIf(equal, instr->environment());
2792 // Store the value.
2793 __ movp(Operand(cell, 0), value);
2794 } else {
2795 // Store the value.
2796 __ Move(kScratchRegister, cell_handle, RelocInfo::CELL);
2797 __ movp(Operand(kScratchRegister, 0), value);
2798 }
2799 // Cells are always rescanned, so no write barrier here.
2800 }
2803 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2804 Register context = ToRegister(instr->context());
2805 Register result = ToRegister(instr->result());
2806 __ movp(result, ContextOperand(context, instr->slot_index()));
2807 if (instr->hydrogen()->RequiresHoleCheck()) {
2808 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2809 if (instr->hydrogen()->DeoptimizesOnHole()) {
2810 DeoptimizeIf(equal, instr->environment());
2811 } else {
2812 Label is_not_hole;
2813 __ j(not_equal, &is_not_hole, Label::kNear);
2814 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2815 __ bind(&is_not_hole);
2816 }
2817 }
2818 }
2821 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2822 Register context = ToRegister(instr->context());
2823 Register value = ToRegister(instr->value());
2825 Operand target = ContextOperand(context, instr->slot_index());
2827 Label skip_assignment;
2828 if (instr->hydrogen()->RequiresHoleCheck()) {
2829 __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
2830 if (instr->hydrogen()->DeoptimizesOnHole()) {
2831 DeoptimizeIf(equal, instr->environment());
2832 } else {
2833 __ j(not_equal, &skip_assignment);
2834 }
2835 }
2836 __ movp(target, value);
2838 if (instr->hydrogen()->NeedsWriteBarrier()) {
2839 SmiCheck check_needed =
2840 instr->hydrogen()->value()->IsHeapObject()
2841 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2842 int offset = Context::SlotOffset(instr->slot_index());
2843 Register scratch = ToRegister(instr->temp());
2844 __ RecordWriteContextSlot(context,
2845 offset,
2846 value,
2847 scratch,
2848 kSaveFPRegs,
2849 EMIT_REMEMBERED_SET,
2850 check_needed);
2851 }
2853 __ bind(&skip_assignment);
2854 }
2857 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2858 HObjectAccess access = instr->hydrogen()->access();
2859 int offset = access.offset();
2861 if (access.IsExternalMemory()) {
2862 Register result = ToRegister(instr->result());
2863 if (instr->object()->IsConstantOperand()) {
2864 ASSERT(result.is(rax));
2865 __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
2866 } else {
2867 Register object = ToRegister(instr->object());
2868 __ Load(result, MemOperand(object, offset), access.representation());
2869 }
2870 return;
2871 }
2873 Register object = ToRegister(instr->object());
2874 if (instr->hydrogen()->representation().IsDouble()) {
2875 XMMRegister result = ToDoubleRegister(instr->result());
2876 __ movsd(result, FieldOperand(object, offset));
2877 return;
2878 }
2880 Register result = ToRegister(instr->result());
2881 if (!access.IsInobject()) {
2882 __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
2883 object = result;
2884 }
2886 Representation representation = access.representation();
2887 if (representation.IsSmi() &&
2888 instr->hydrogen()->representation().IsInteger32()) {
2889 if (FLAG_debug_code) {
2890 Register scratch = kScratchRegister;
2891 __ Load(scratch, FieldOperand(object, offset), representation);
2892 __ AssertSmi(scratch);
2893 }
2895 // Read int value directly from upper half of the smi.
2896 STATIC_ASSERT(kSmiTag == 0);
2897 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
2898 offset += kPointerSize / 2;
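// On x64 a smi keeps its 32-bit payload in the upper half of the tagged
// word (the STATIC_ASSERTs above pin the total shift to 32), so a plain
// 32-bit load at offset + kPointerSize / 2 reads the integer value
// directly, with no untagging shift.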
2899 representation = Representation::Integer32();
2900 }
2901 __ Load(result, FieldOperand(object, offset), representation);
2902 }
2905 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2906 ASSERT(ToRegister(instr->context()).is(rsi));
2907 ASSERT(ToRegister(instr->object()).is(rax));
2908 ASSERT(ToRegister(instr->result()).is(rax));
2910 __ Move(rcx, instr->name());
2911 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
2912 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2913 }
2916 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2917 Register function = ToRegister(instr->function());
2918 Register result = ToRegister(instr->result());
2920 // Check that the function really is a function.
2921 __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
2922 DeoptimizeIf(not_equal, instr->environment());
2924 // Check whether the function has an instance prototype.
2925 Label non_instance;
2926 __ testb(FieldOperand(result, Map::kBitFieldOffset),
2927 Immediate(1 << Map::kHasNonInstancePrototype));
2928 __ j(not_zero, &non_instance, Label::kNear);
2930 // Get the prototype or initial map from the function.
2931 __ movp(result,
2932 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2934 // Check that the function has a prototype or an initial map.
2935 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2936 DeoptimizeIf(equal, instr->environment());
2938 // If the function does not have an initial map, we're done.
2939 Label done;
2940 __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
2941 __ j(not_equal, &done, Label::kNear);
2943 // Get the prototype from the initial map.
2944 __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
2945 __ jmp(&done, Label::kNear);
2947 // Non-instance prototype: Fetch prototype from constructor field
2948 // in the function's map.
2949 __ bind(&non_instance);
2950 __ movp(result, FieldOperand(result, Map::kConstructorOffset));
2952 // All done.
2953 __ bind(&done);
2954 }
2957 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2958 Register result = ToRegister(instr->result());
2959 __ LoadRoot(result, instr->index());
2963 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2964 Register arguments = ToRegister(instr->arguments());
2965 Register result = ToRegister(instr->result());
2967 if (instr->length()->IsConstantOperand() &&
2968 instr->index()->IsConstantOperand()) {
2969 int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2970 int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
2971 if (const_index >= 0 && const_index < const_length) {
2972 StackArgumentsAccessor args(arguments, const_length,
2973 ARGUMENTS_DONT_CONTAIN_RECEIVER);
2974 __ movp(result, args.GetArgumentOperand(const_index));
2975 } else if (FLAG_debug_code) {
2976 __ int3();
2977 }
2978 } else {
2979 Register length = ToRegister(instr->length());
2980 // There are two words between the frame pointer and the last argument.
2981 // Subtracting the index from length accounts for one of them; add one more.
2982 if (instr->index()->IsRegister()) {
2983 __ subl(length, ToRegister(instr->index()));
2984 } else {
2985 __ subl(length, ToOperand(instr->index()));
2986 }
2987 StackArgumentsAccessor args(arguments, length,
2988 ARGUMENTS_DONT_CONTAIN_RECEIVER);
2989 __ movp(result, args.GetArgumentOperand(0));
2990 }
2991 }
2994 void LCodeGen::HandleExternalArrayOpRequiresPreScale(
2995 LOperand* key,
2996 ElementsKind elements_kind) {
2997 if (ExternalArrayOpRequiresPreScale(elements_kind)) {
2998 int pre_shift_size = ElementsKindToShiftSize(elements_kind) -
2999 static_cast<int>(maximal_scale_factor);
3000 ASSERT(pre_shift_size > 0);
3001 __ shl(ToRegister(key), Immediate(pre_shift_size));
3002 }
3003 }
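// x64 addressing modes scale an index register by at most 8, so element
// kinds wider than that (the 16-byte SIMD kinds) pre-shift the key here and
// are then addressed with the maximal scale factor.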
3006 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3007 ElementsKind elements_kind = instr->elements_kind();
3008 LOperand* key = instr->key();
3009 if (!key->IsConstantOperand()) {
3010 HandleExternalArrayOpRequiresPreScale(key, elements_kind);
3011 }
3012 int base_offset = instr->is_fixed_typed_array()
3013 ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
3014 : 0;
3015 Operand operand(BuildFastArrayOperand(
3016 instr->elements(),
3017 key,
3018 elements_kind,
3019 base_offset,
3020 instr->additional_index()));
3022 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3023 elements_kind == FLOAT32_ELEMENTS) {
3024 XMMRegister result(ToDoubleRegister(instr->result()));
3025 __ movss(result, operand);
3026 __ cvtss2sd(result, result);
3027 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3028 elements_kind == FLOAT64_ELEMENTS) {
3029 __ movsd(ToDoubleRegister(instr->result()), operand);
3030 } else if (IsSIMD128ElementsKind(elements_kind)) {
3031 __ movups(ToSIMD128Register(instr->result()), operand);
3033 Register result(ToRegister(instr->result()));
3034 switch (elements_kind) {
3035 case EXTERNAL_INT8_ELEMENTS:
3036 case INT8_ELEMENTS:
3037 __ movsxbq(result, operand);
3038 break;
3039 case EXTERNAL_UINT8_ELEMENTS:
3040 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3041 case UINT8_ELEMENTS:
3042 case UINT8_CLAMPED_ELEMENTS:
3043 __ movzxbp(result, operand);
3044 break;
3045 case EXTERNAL_INT16_ELEMENTS:
3046 case INT16_ELEMENTS:
3047 __ movsxwq(result, operand);
3048 break;
3049 case EXTERNAL_UINT16_ELEMENTS:
3050 case UINT16_ELEMENTS:
3051 __ movzxwp(result, operand);
3052 break;
3053 case EXTERNAL_INT32_ELEMENTS:
3054 case INT32_ELEMENTS:
3055 __ movsxlq(result, operand);
3056 break;
3057 case EXTERNAL_UINT32_ELEMENTS:
3058 case UINT32_ELEMENTS:
3059 __ movl(result, operand);
3060 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3061 __ testl(result, result);
3062 DeoptimizeIf(negative, instr->environment());
3063 }
3064 break;
3065 case EXTERNAL_FLOAT32_ELEMENTS:
3066 case EXTERNAL_FLOAT64_ELEMENTS:
3067 case EXTERNAL_FLOAT32x4_ELEMENTS:
3068 case EXTERNAL_INT32x4_ELEMENTS:
3069 case FLOAT32_ELEMENTS:
3070 case FLOAT64_ELEMENTS:
3071 case FLOAT32x4_ELEMENTS:
3072 case INT32x4_ELEMENTS:
3073 case FAST_ELEMENTS:
3074 case FAST_SMI_ELEMENTS:
3075 case FAST_DOUBLE_ELEMENTS:
3076 case FAST_HOLEY_ELEMENTS:
3077 case FAST_HOLEY_SMI_ELEMENTS:
3078 case FAST_HOLEY_DOUBLE_ELEMENTS:
3079 case DICTIONARY_ELEMENTS:
3080 case SLOPPY_ARGUMENTS_ELEMENTS:
3081 UNREACHABLE();
3082 break;
3083 }
3084 }
3085 }
3088 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3089 XMMRegister result(ToDoubleRegister(instr->result()));
3090 LOperand* key = instr->key();
3091 if (instr->hydrogen()->RequiresHoleCheck()) {
3092 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
3093 sizeof(kHoleNanLower32);
3094 Operand hole_check_operand = BuildFastArrayOperand(
3095 instr->elements(),
3096 key,
3097 FAST_DOUBLE_ELEMENTS,
3098 offset,
3099 instr->additional_index());
3100 __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
3101 DeoptimizeIf(equal, instr->environment());
3102 }
3104 Operand double_load_operand = BuildFastArrayOperand(
3105 instr->elements(),
3106 key,
3107 FAST_DOUBLE_ELEMENTS,
3108 FixedDoubleArray::kHeaderSize - kHeapObjectTag,
3109 instr->additional_index());
3110 __ movsd(result, double_load_operand);
3111 }
3114 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3115 HLoadKeyed* hinstr = instr->hydrogen();
3116 Register result = ToRegister(instr->result());
3117 LOperand* key = instr->key();
3118 bool requires_hole_check = hinstr->RequiresHoleCheck();
3119 int offset = FixedArray::kHeaderSize - kHeapObjectTag;
3120 Representation representation = hinstr->representation();
3122 if (representation.IsInteger32() &&
3123 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3124 ASSERT(!requires_hole_check);
3125 if (FLAG_debug_code) {
3126 Register scratch = kScratchRegister;
3127 __ Load(scratch,
3128 BuildFastArrayOperand(instr->elements(),
3129 key,
3130 FAST_ELEMENTS,
3131 offset,
3132 instr->additional_index()),
3133 Representation::Smi());
3134 __ AssertSmi(scratch);
3135 }
3136 // Read int value directly from upper half of the smi.
3137 STATIC_ASSERT(kSmiTag == 0);
3138 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3139 offset += kPointerSize / 2;
3140 representation = Representation::Integer32();
3141 }
3142 __ Load(result,
3143 BuildFastArrayOperand(instr->elements(),
3144 key,
3145 FAST_ELEMENTS,
3146 offset,
3147 instr->additional_index()),
3148 representation);
3150 // Check for the hole value.
3151 if (requires_hole_check) {
3152 if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3153 Condition smi = __ CheckSmi(result);
3154 DeoptimizeIf(NegateCondition(smi), instr->environment());
3155 } else {
3156 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3157 DeoptimizeIf(equal, instr->environment());
3158 }
3159 }
3160 }
3163 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3164 if (instr->is_typed_elements()) {
3165 DoLoadKeyedExternalArray(instr);
3166 } else if (instr->hydrogen()->representation().IsDouble()) {
3167 DoLoadKeyedFixedDoubleArray(instr);
3168 } else {
3169 DoLoadKeyedFixedArray(instr);
3170 }
3171 }
3174 Operand LCodeGen::BuildFastArrayOperand(
3175 LOperand* elements_pointer,
3176 LOperand* key,
3177 ElementsKind elements_kind,
3178 uint32_t offset,
3179 uint32_t additional_index) {
3180 Register elements_pointer_reg = ToRegister(elements_pointer);
3181 int shift_size = ElementsKindToShiftSize(elements_kind);
3182 if (key->IsConstantOperand()) {
3183 int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
3184 if (constant_value & 0xF0000000) {
3185 Abort(kArrayIndexConstantValueTooBig);
3186 }
3188 return Operand(elements_pointer_reg,
3189 ((constant_value + additional_index) << shift_size)
3190 + offset);
3191 } else {
3192 if (ExternalArrayOpRequiresPreScale(elements_kind)) {
3193 // Make sure the key is pre-scaled against maximal_scale_factor.
3194 shift_size = static_cast<int>(maximal_scale_factor);
3195 }
3196 ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3197 return Operand(elements_pointer_reg,
3198 ToRegister(key),
3199 scale_factor,
3200 offset + (additional_index << shift_size));
3201 }
3202 }
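// In both arms the effective address is
//   elements + (key << shift_size) + offset + (additional_index << shift_size),
// with the constant-key form folding everything into the displacement.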
3205 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3206 ASSERT(ToRegister(instr->context()).is(rsi));
3207 ASSERT(ToRegister(instr->object()).is(rdx));
3208 ASSERT(ToRegister(instr->key()).is(rax));
3210 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3211 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3212 }
3215 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3216 Register result = ToRegister(instr->result());
3218 if (instr->hydrogen()->from_inlined()) {
3219 __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
3220 } else {
3221 // Check for arguments adapter frame.
3222 Label done, adapted;
3223 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3224 __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
3225 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3226 __ j(equal, &adapted, Label::kNear);
3228 // No arguments adaptor frame.
3229 __ movp(result, rbp);
3230 __ jmp(&done, Label::kNear);
3232 // Arguments adaptor frame present.
3233 __ bind(&adapted);
3234 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3236 // Result is the frame pointer for the frame if not adapted and for the real
3237 // frame below the adaptor frame if adapted.
3238 __ bind(&done);
3239 }
3240 }
3243 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3244 Register result = ToRegister(instr->result());
3246 Label done;
3248 // If no arguments adaptor frame the number of arguments is fixed.
3249 if (instr->elements()->IsRegister()) {
3250 __ cmpp(rbp, ToRegister(instr->elements()));
3251 } else {
3252 __ cmpp(rbp, ToOperand(instr->elements()));
3253 }
3254 __ movl(result, Immediate(scope()->num_parameters()));
3255 __ j(equal, &done, Label::kNear);
3257 // Arguments adaptor frame present. Get argument length from there.
3258 __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3259 __ SmiToInteger32(result,
3260 Operand(result,
3261 ArgumentsAdaptorFrameConstants::kLengthOffset));
3263 // Argument length is in result register.
3264 __ bind(&done);
3265 }
3268 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3269 Register receiver = ToRegister(instr->receiver());
3270 Register function = ToRegister(instr->function());
3272 // If the receiver is null or undefined, we have to pass the global
3273 // object as a receiver to normal functions. Values have to be
3274 // passed unchanged to builtins and strict-mode functions.
3275 Label global_object, receiver_ok;
3276 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3278 if (!instr->hydrogen()->known_function()) {
3279 // Do not transform the receiver to object for strict mode
3280 // functions.
3281 __ movp(kScratchRegister,
3282 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3283 __ testb(FieldOperand(kScratchRegister,
3284 SharedFunctionInfo::kStrictModeByteOffset),
3285 Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
3286 __ j(not_equal, &receiver_ok, dist);
3288 // Do not transform the receiver to object for builtins.
3289 __ testb(FieldOperand(kScratchRegister,
3290 SharedFunctionInfo::kNativeByteOffset),
3291 Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
3292 __ j(not_equal, &receiver_ok, dist);
3295 // Normal function. Replace undefined or null with global receiver.
3296 __ CompareRoot(receiver, Heap::kNullValueRootIndex);
3297 __ j(equal, &global_object, Label::kNear);
3298 __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
3299 __ j(equal, &global_object, Label::kNear);
3301 // The receiver should be a JS object.
3302 Condition is_smi = __ CheckSmi(receiver);
3303 DeoptimizeIf(is_smi, instr->environment());
3304 __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
3305 DeoptimizeIf(below, instr->environment());
3307 __ jmp(&receiver_ok, Label::kNear);
3308 __ bind(&global_object);
3309 __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
3310 __ movp(receiver,
3311 Operand(receiver,
3312 Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
3313 __ movp(receiver,
3314 FieldOperand(receiver, GlobalObject::kGlobalReceiverOffset));
3316 __ bind(&receiver_ok);
3317 }
3320 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3321 Register receiver = ToRegister(instr->receiver());
3322 Register function = ToRegister(instr->function());
3323 Register length = ToRegister(instr->length());
3324 Register elements = ToRegister(instr->elements());
3325 ASSERT(receiver.is(rax)); // Used for parameter count.
3326 ASSERT(function.is(rdi)); // Required by InvokeFunction.
3327 ASSERT(ToRegister(instr->result()).is(rax));
3329 // Copy the arguments to this function possibly from the
3330 // adaptor frame below it.
3331 const uint32_t kArgumentsLimit = 1 * KB;
3332 __ cmpp(length, Immediate(kArgumentsLimit));
3333 DeoptimizeIf(above, instr->environment());
3335 __ Push(receiver);
3336 __ movp(receiver, length);
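// The 1024-argument cap above keeps apply() from copying an unbounded
// number of arguments into the optimized frame; larger argument counts
// deoptimize and fall back to unoptimized code.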
3338 // Loop through the arguments pushing them onto the execution
3339 // stack.
3340 Label invoke, loop;
3341 // length is a small non-negative integer, due to the test above.
3342 __ testl(length, length);
3343 __ j(zero, &invoke, Label::kNear);
3344 __ bind(&loop);
3345 StackArgumentsAccessor args(elements, length,
3346 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3347 __ Push(args.GetArgumentOperand(0));
3348 __ decl(length);
3349 __ j(not_zero, &loop);
3351 // Invoke the function.
3352 __ bind(&invoke);
3353 ASSERT(instr->HasPointerMap());
3354 LPointerMap* pointers = instr->pointer_map();
3355 SafepointGenerator safepoint_generator(
3356 this, pointers, Safepoint::kLazyDeopt);
3357 ParameterCount actual(rax);
3358 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3359 }
3362 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3363 LOperand* argument = instr->value();
3364 EmitPushTaggedOperand(argument);
3365 }
3368 void LCodeGen::DoDrop(LDrop* instr) {
3369 __ Drop(instr->count());
3370 }
3373 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3374 Register result = ToRegister(instr->result());
3375 __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
3376 }
3379 void LCodeGen::DoContext(LContext* instr) {
3380 Register result = ToRegister(instr->result());
3381 if (info()->IsOptimizing()) {
3382 __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
3383 } else {
3384 // If there is no frame, the context must be in rsi.
3385 ASSERT(result.is(rsi));
3386 }
3387 }
3390 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3391 ASSERT(ToRegister(instr->context()).is(rsi));
3392 __ Push(rsi); // The context is the first argument.
3393 __ Push(instr->hydrogen()->pairs());
3394 __ Push(Smi::FromInt(instr->hydrogen()->flags()));
3395 CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
3396 }
3399 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3400 int formal_parameter_count,
3401 int arity,
3402 LInstruction* instr,
3403 RDIState rdi_state) {
3404 bool dont_adapt_arguments =
3405 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3406 bool can_invoke_directly =
3407 dont_adapt_arguments || formal_parameter_count == arity;
3409 LPointerMap* pointers = instr->pointer_map();
3411 if (can_invoke_directly) {
3412 if (rdi_state == RDI_UNINITIALIZED) {
3413 __ Move(rdi, function);
3414 }
3416 // Change context.
3417 __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3419 // Set rax to arguments count if adaption is not needed. Assumes that rax
3420 // is available to write to at this point.
3421 if (dont_adapt_arguments) {
3422 __ Set(rax, arity);
3423 }
3425 // Invoke function.
3426 if (function.is_identical_to(info()->closure())) {
3427 __ CallSelf();
3428 } else {
3429 __ Call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3430 }
3432 // Set up deoptimization.
3433 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
3434 } else {
3435 // We need to adapt arguments.
3436 SafepointGenerator generator(
3437 this, pointers, Safepoint::kLazyDeopt);
3438 ParameterCount count(arity);
3439 ParameterCount expected(formal_parameter_count);
3440 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3441 }
3442 }
3445 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3446 ASSERT(ToRegister(instr->result()).is(rax));
3448 LPointerMap* pointers = instr->pointer_map();
3449 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3451 if (instr->target()->IsConstantOperand()) {
3452 LConstantOperand* target = LConstantOperand::cast(instr->target());
3453 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3454 generator.BeforeCall(__ CallSize(code));
3455 __ call(code, RelocInfo::CODE_TARGET);
3456 } else {
3457 ASSERT(instr->target()->IsRegister());
3458 Register target = ToRegister(instr->target());
3459 generator.BeforeCall(__ CallSize(target));
3460 __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3461 __ call(target);
3462 }
3463 generator.AfterCall();
3464 }
3467 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3468 ASSERT(ToRegister(instr->function()).is(rdi));
3469 ASSERT(ToRegister(instr->result()).is(rax));
3471 if (instr->hydrogen()->pass_argument_count()) {
3472 __ Set(rax, instr->arity());
3473 }
3475 // Change context.
3476 __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3478 LPointerMap* pointers = instr->pointer_map();
3479 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3481 bool is_self_call = false;
3482 if (instr->hydrogen()->function()->IsConstant()) {
3483 Handle<JSFunction> jsfun = Handle<JSFunction>::null();
3484 HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3485 jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
3486 is_self_call = jsfun.is_identical_to(info()->closure());
3487 }
3489 if (is_self_call) {
3490 __ CallSelf();
3491 } else {
3492 Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
3493 generator.BeforeCall(__ CallSize(target));
3494 __ Call(target);
3495 }
3496 generator.AfterCall();
3497 }
3500 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3501 Register input_reg = ToRegister(instr->value());
3502 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3503 Heap::kHeapNumberMapRootIndex);
3504 DeoptimizeIf(not_equal, instr->environment());
3506 Label slow, allocated, done;
3507 Register tmp = input_reg.is(rax) ? rcx : rax;
3508 Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
3510 // Preserve the value of all registers.
3511 PushSafepointRegistersScope scope(this);
3513 __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3514 // Check the sign of the argument. If the argument is positive, just
3515 // return it. We do not need to patch the stack since |input| and
3516 // |result| are the same register and |input| will be restored
3517 // unchanged by popping safepoint registers.
3518 __ testl(tmp, Immediate(HeapNumber::kSignMask));
3519 __ j(not_sign, &done);
3521 __ AllocateHeapNumber(tmp, tmp2, &slow);
3522 __ jmp(&allocated, Label::kNear);
3524 // Slow case: Call the runtime system to do the number allocation.
3525 __ bind(&slow);
3526 CallRuntimeFromDeferred(
3527 Runtime::kHiddenAllocateHeapNumber, 0, instr, instr->context());
3528 // Set the pointer to the new heap number in tmp.
3529 if (!tmp.is(rax)) __ movp(tmp, rax);
3530 // Restore input_reg after call to runtime.
3531 __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3533 __ bind(&allocated);
3534 __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
3535 __ shl(tmp2, Immediate(1));
3536 __ shr(tmp2, Immediate(1));
3537 __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
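// Shifting the raw 64-bit pattern left and then (logically) right by one
// clears the sign bit, which is abs() for any IEEE double, NaN and
// infinities included.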
3538 __ StoreToSafepointRegisterSlot(input_reg, tmp);
3540 __ bind(&done);
3541 }
3544 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3545 Register input_reg = ToRegister(instr->value());
3546 __ testl(input_reg, input_reg);
3547 Label is_positive;
3548 __ j(not_sign, &is_positive, Label::kNear);
3549 __ negl(input_reg); // Sets flags.
3550 DeoptimizeIf(negative, instr->environment());
3551 __ bind(&is_positive);
3552 }
3555 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
3556 Register input_reg = ToRegister(instr->value());
3557 __ testp(input_reg, input_reg);
3558 Label is_positive;
3559 __ j(not_sign, &is_positive, Label::kNear);
3560 __ negp(input_reg); // Sets flags.
3561 DeoptimizeIf(negative, instr->environment());
3562 __ bind(&is_positive);
3563 }
3566 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3567 // Class for deferred case.
3568 class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
3570 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3571 : LDeferredCode(codegen), instr_(instr) { }
3572 virtual void Generate() V8_OVERRIDE {
3573 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3575 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3576 private:
3577 LMathAbs* instr_;
3578 };
3580 ASSERT(instr->value()->Equals(instr->result()));
3581 Representation r = instr->hydrogen()->value()->representation();
3583 if (r.IsDouble()) {
3584 XMMRegister scratch = double_scratch0();
3585 XMMRegister input_reg = ToDoubleRegister(instr->value());
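// 0.0 - x flips only the sign bit of x (and maps both zeros to +0), so
// ANDing x with that negation clears bit 63: a branch-free abs().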
3586 __ xorps(scratch, scratch);
3587 __ subsd(scratch, input_reg);
3588 __ andps(input_reg, scratch);
3589 } else if (r.IsInteger32()) {
3590 EmitIntegerMathAbs(instr);
3591 } else if (r.IsSmi()) {
3592 EmitSmiMathAbs(instr);
3593 } else { // Tagged case.
3594 DeferredMathAbsTaggedHeapNumber* deferred =
3595 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3596 Register input_reg = ToRegister(instr->value());
3598 __ JumpIfNotSmi(input_reg, deferred->entry());
3599 EmitSmiMathAbs(instr);
3600 __ bind(deferred->exit());
3601 }
3602 }
3605 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3606 XMMRegister xmm_scratch = double_scratch0();
3607 Register output_reg = ToRegister(instr->result());
3608 XMMRegister input_reg = ToDoubleRegister(instr->value());
3610 if (CpuFeatures::IsSupported(SSE4_1)) {
3611 CpuFeatureScope scope(masm(), SSE4_1);
3612 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3613 // Deoptimize if minus zero.
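// -0.0 is the only double whose bit pattern equals 0x8000000000000000,
// i.e. INT64_MIN, so moving the raw bits into a GPR and subtracting 1
// overflows exactly when the input is -0.0.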
3614 __ movq(output_reg, input_reg);
3615 __ subq(output_reg, Immediate(1));
3616 DeoptimizeIf(overflow, instr->environment());
3617 }
3618 __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
3619 __ cvttsd2si(output_reg, xmm_scratch);
3620 __ cmpl(output_reg, Immediate(0x1));
3621 DeoptimizeIf(overflow, instr->environment());
3622 } else {
3623 Label negative_sign, done;
3624 // Deoptimize on unordered.
3625 __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3626 __ ucomisd(input_reg, xmm_scratch);
3627 DeoptimizeIf(parity_even, instr->environment());
3628 __ j(below, &negative_sign, Label::kNear);
3630 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3631 // Check for negative zero.
3632 Label positive_sign;
3633 __ j(above, &positive_sign, Label::kNear);
3634 __ movmskpd(output_reg, input_reg);
3635 __ testq(output_reg, Immediate(1));
3636 DeoptimizeIf(not_zero, instr->environment());
3637 __ Set(output_reg, 0);
3638 __ jmp(&done, Label::kNear);
3639 __ bind(&positive_sign);
3640 }
3642 // Use truncating instruction (OK because input is positive).
3643 __ cvttsd2si(output_reg, input_reg);
3644 // Overflow is signalled with minint.
3645 __ cmpl(output_reg, Immediate(0x1));
3646 DeoptimizeIf(overflow, instr->environment());
3647 __ jmp(&done, Label::kNear);
3649 // Non-zero negative reaches here.
3650 __ bind(&negative_sign);
3651 // Truncate, then compare and compensate.
3652 __ cvttsd2si(output_reg, input_reg);
3653 __ Cvtlsi2sd(xmm_scratch, output_reg);
3654 __ ucomisd(input_reg, xmm_scratch);
3655 __ j(equal, &done, Label::kNear);
3656 __ subl(output_reg, Immediate(1));
3657 DeoptimizeIf(overflow, instr->environment());
3659 __ bind(&done);
3660 }
3661 }
3664 void LCodeGen::DoMathRound(LMathRound* instr) {
3665 const XMMRegister xmm_scratch = double_scratch0();
3666 Register output_reg = ToRegister(instr->result());
3667 XMMRegister input_reg = ToDoubleRegister(instr->value());
3668 XMMRegister input_temp = ToDoubleRegister(instr->temp());
3669 static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
3670 static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
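// There is no 64-bit immediate-to-XMM move, so the 0.5 / -0.5 bit patterns
// are materialized through kScratchRegister.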
3672 Label done, round_to_zero, below_one_half;
3673 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3674 __ movq(kScratchRegister, one_half);
3675 __ movq(xmm_scratch, kScratchRegister);
3676 __ ucomisd(xmm_scratch, input_reg);
3677 __ j(above, &below_one_half, Label::kNear);
3679 // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
3680 __ addsd(xmm_scratch, input_reg);
3681 __ cvttsd2si(output_reg, xmm_scratch);
3682 // Overflow is signalled with minint.
3683 __ cmpl(output_reg, Immediate(0x1));
3684 __ RecordComment("D2I conversion overflow");
3685 DeoptimizeIf(overflow, instr->environment());
3686 __ jmp(&done, dist);
3688 __ bind(&below_one_half);
3689 __ movq(kScratchRegister, minus_one_half);
3690 __ movq(xmm_scratch, kScratchRegister);
3691 __ ucomisd(xmm_scratch, input_reg);
3692 __ j(below_equal, &round_to_zero, Label::kNear);
3694 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
3695 // compare and compensate.
3696 __ movq(input_temp, input_reg); // Do not alter input_reg.
3697 __ subsd(input_temp, xmm_scratch);
3698 __ cvttsd2si(output_reg, input_temp);
3699 // Catch minint due to overflow, and to prevent overflow when compensating.
3700 __ cmpl(output_reg, Immediate(0x1));
3701 __ RecordComment("D2I conversion overflow");
3702 DeoptimizeIf(overflow, instr->environment());
3704 __ Cvtlsi2sd(xmm_scratch, output_reg);
3705 __ ucomisd(xmm_scratch, input_temp);
3706 __ j(equal, &done, dist);
3707 __ subl(output_reg, Immediate(1));
3708 // No overflow because we already ruled out minint.
3709 __ jmp(&done, dist);
3711 __ bind(&round_to_zero);
3712 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
3713 // we can ignore the difference between a result of -0 and +0.
3714 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3715 __ movq(output_reg, input_reg);
3716 __ testq(output_reg, output_reg);
3717 __ RecordComment("Minus zero");
3718 DeoptimizeIf(negative, instr->environment());
3719 }
3720 __ Set(output_reg, 0);
3721 __ bind(&done);
3722 }
3725 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3726 XMMRegister input_reg = ToDoubleRegister(instr->value());
3727 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3728 __ sqrtsd(input_reg, input_reg);
3729 }
3732 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3733 XMMRegister xmm_scratch = double_scratch0();
3734 XMMRegister input_reg = ToDoubleRegister(instr->value());
3735 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3737 // Note that according to ECMA-262 15.8.2.13:
3738 // Math.pow(-Infinity, 0.5) == Infinity
3739 // Math.sqrt(-Infinity) == NaN
3740 Label done, sqrt;
3741 // Check base for -Infinity. According to IEEE-754, double-precision
3742 // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
3743 __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
3744 __ movq(xmm_scratch, kScratchRegister);
3745 __ ucomisd(xmm_scratch, input_reg);
3746 // Comparing -Infinity with NaN results in "unordered", which sets the
3747 // zero flag as if both were equal. However, it also sets the carry flag.
3748 __ j(not_equal, &sqrt, Label::kNear);
3749 __ j(carry, &sqrt, Label::kNear);
3750 // If input is -Infinity, return Infinity.
3751 __ xorps(input_reg, input_reg);
3752 __ subsd(input_reg, xmm_scratch);
3753 __ jmp(&done, Label::kNear);
3757 __ xorps(xmm_scratch, xmm_scratch);
3758 __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
3759 __ sqrtsd(input_reg, input_reg);
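// Note on the special case above: 0 - (-Infinity) evaluates to +Infinity, so
// the xorps/subsd pair materializes the Math.pow(-Infinity, 0.5) result
// without loading a constant. The addsd of +0 before sqrtsd folds -0 into
// +0, because ECMA requires Math.pow(-0, 0.5) == +0 while IEEE-754
// sqrt(-0) == -0.

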
void LCodeGen::DoNullarySIMDOperation(LNullarySIMDOperation* instr) {
  switch (instr->op()) {
    case kFloat32x4Zero: {
      XMMRegister result_reg = ToFloat32x4Register(instr->result());
      __ xorps(result_reg, result_reg);
      break;
    }
    case kInt32x4Zero: {
      XMMRegister result_reg = ToInt32x4Register(instr->result());
      __ xorps(result_reg, result_reg);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnarySIMDOperation(LUnarySIMDOperation* instr) {
  uint8_t select = 0;
  switch (instr->op()) {
    case kSIMD128Change: {
      Comment(";;; deoptimize: can not perform representation change"
              " for float32x4 or int32x4");
      DeoptimizeIf(no_condition, instr->environment());
      break;
    }
    case kFloat32x4Abs:
    case kFloat32x4Neg:
    case kFloat32x4Reciprocal:
    case kFloat32x4ReciprocalSqrt:
    case kFloat32x4Sqrt: {
      ASSERT(instr->value()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
      XMMRegister input_reg = ToFloat32x4Register(instr->value());
      switch (instr->op()) {
        case kFloat32x4Abs:
          __ absps(input_reg);
          break;
        case kFloat32x4Neg:
          __ negateps(input_reg);
          break;
        case kFloat32x4Reciprocal:
          __ rcpps(input_reg, input_reg);
          break;
        case kFloat32x4ReciprocalSqrt:
          __ rsqrtps(input_reg, input_reg);
          break;
        case kFloat32x4Sqrt:
          __ sqrtps(input_reg, input_reg);
          break;
        default:
          UNREACHABLE();
      }
      break;
    }
    case kInt32x4Not:
    case kInt32x4Neg: {
      ASSERT(instr->value()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
      XMMRegister input_reg = ToInt32x4Register(instr->value());
      switch (instr->op()) {
        case kInt32x4Not:
          __ notps(input_reg);
          break;
        case kInt32x4Neg:
          __ pnegd(input_reg);
          break;
        default:
          UNREACHABLE();
      }
      break;
    }
    case kFloat32x4BitsToInt32x4:
    case kFloat32x4ToInt32x4: {
      ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
      XMMRegister input_reg = ToFloat32x4Register(instr->value());
      XMMRegister result_reg = ToInt32x4Register(instr->result());
      if (instr->op() == kFloat32x4BitsToInt32x4) {
        if (!result_reg.is(input_reg)) {
          __ movaps(result_reg, input_reg);
        }
      } else {
        ASSERT(instr->op() == kFloat32x4ToInt32x4);
        __ cvtps2dq(result_reg, input_reg);
      }
      break;
    }
    case kInt32x4BitsToFloat32x4:
    case kInt32x4ToFloat32x4: {
      ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
      XMMRegister input_reg = ToInt32x4Register(instr->value());
      XMMRegister result_reg = ToFloat32x4Register(instr->result());
      if (instr->op() == kInt32x4BitsToFloat32x4) {
        if (!result_reg.is(input_reg)) {
          __ movaps(result_reg, input_reg);
        }
      } else {
        ASSERT(instr->op() == kInt32x4ToFloat32x4);
        __ cvtdq2ps(result_reg, input_reg);
      }
      break;
    }
    case kFloat32x4Splat: {
      ASSERT(instr->hydrogen()->value()->representation().IsDouble());
      XMMRegister input_reg = ToDoubleRegister(instr->value());
      XMMRegister result_reg = ToFloat32x4Register(instr->result());
      XMMRegister xmm_scratch = xmm0;
      __ xorps(xmm_scratch, xmm_scratch);
      __ cvtsd2ss(xmm_scratch, input_reg);
      __ shufps(xmm_scratch, xmm_scratch, 0x0);
      __ movaps(result_reg, xmm_scratch);
      break;
    }
    case kInt32x4Splat: {
      ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
      Register input_reg = ToRegister(instr->value());
      XMMRegister result_reg = ToInt32x4Register(instr->result());
      __ movd(result_reg, input_reg);
      __ shufps(result_reg, result_reg, 0x0);
      break;
    }
    case kInt32x4GetSignMask: {
      ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
      XMMRegister input_reg = ToInt32x4Register(instr->value());
      Register result = ToRegister(instr->result());
      __ movmskps(result, input_reg);
      break;
    }
    case kFloat32x4GetSignMask: {
      ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
      XMMRegister input_reg = ToFloat32x4Register(instr->value());
      Register result = ToRegister(instr->result());
      __ movmskps(result, input_reg);
      break;
    }
    case kFloat32x4GetW:
      select++;
    case kFloat32x4GetZ:
      select++;
    case kFloat32x4GetY:
      select++;
    case kFloat32x4GetX: {
      ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
      XMMRegister input_reg = ToFloat32x4Register(instr->value());
      XMMRegister result = ToDoubleRegister(instr->result());
      XMMRegister xmm_scratch = result.is(input_reg) ? xmm0 : result;

      if (select == 0x0) {
        __ xorps(xmm_scratch, xmm_scratch);
        __ cvtss2sd(xmm_scratch, input_reg);
        if (!xmm_scratch.is(result)) {
          __ movaps(result, xmm_scratch);
        }
      } else {
        __ pshufd(xmm_scratch, input_reg, select);
        if (!xmm_scratch.is(result)) {
          __ xorps(result, result);
        }
        __ cvtss2sd(result, xmm_scratch);
      }
      break;
    }
    case kInt32x4GetFlagX:
    case kInt32x4GetFlagY:
    case kInt32x4GetFlagZ:
    case kInt32x4GetFlagW: {
      ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());

      switch (instr->op()) {
        case kInt32x4GetFlagX:
          select = 0x0;
          break;
        case kInt32x4GetFlagY:
          select = 0x1;
          break;
        case kInt32x4GetFlagZ:
          select = 0x2;
          break;
        case kInt32x4GetFlagW:
          select = 0x3;
          break;
        default:
          UNREACHABLE();
      }

      XMMRegister input_reg = ToInt32x4Register(instr->value());
      Register result = ToRegister(instr->result());
      if (select == 0x0) {
        __ movd(result, input_reg);
      } else {
        if (CpuFeatures::IsSupported(SSE4_1)) {
          CpuFeatureScope scope(masm(), SSE4_1);
          __ extractps(result, input_reg, select);
        } else {
          XMMRegister xmm_scratch = xmm0;
          __ pshufd(xmm_scratch, input_reg, select);
          __ movd(result, xmm_scratch);
        }
      }

      Label false_value, done;
      __ testl(result, result);
      __ j(zero, &false_value, Label::kNear);
      __ LoadRoot(result, Heap::kTrueValueRootIndex);
      __ jmp(&done, Label::kNear);
      __ bind(&false_value);
      __ LoadRoot(result, Heap::kFalseValueRootIndex);
      __ bind(&done);
      break;
    }
    default:
      UNREACHABLE();
  }
}


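// Reading aid (not part of the generated code): the GetX/GetY/GetZ/GetW
// cases rely on deliberate switch fall-through, so 'select' ends up holding
// the lane index (X = 0, Y = 1, Z = 2, W = 3). Lane 0 can be widened
// directly with CVTSS2SD; any other lane is first rotated into position 0
// with PSHUFD. The xorps of the destination exists to break the false
// dependency CVTSS2SD would otherwise carry on the register's old upper
// bits.

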
void LCodeGen::DoBinarySIMDOperation(LBinarySIMDOperation* instr) {
  uint8_t imm8 = 0;  // for with operation
  switch (instr->op()) {
    case kFloat32x4Add:
    case kFloat32x4Sub:
    case kFloat32x4Mul:
    case kFloat32x4Div:
    case kFloat32x4Min:
    case kFloat32x4Max: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsFloat32x4());
      XMMRegister left_reg = ToFloat32x4Register(instr->left());
      XMMRegister right_reg = ToFloat32x4Register(instr->right());
      switch (instr->op()) {
        case kFloat32x4Add:
          __ addps(left_reg, right_reg);
          break;
        case kFloat32x4Sub:
          __ subps(left_reg, right_reg);
          break;
        case kFloat32x4Mul:
          __ mulps(left_reg, right_reg);
          break;
        case kFloat32x4Div:
          __ divps(left_reg, right_reg);
          break;
        case kFloat32x4Min:
          __ minps(left_reg, right_reg);
          break;
        case kFloat32x4Max:
          __ maxps(left_reg, right_reg);
          break;
        default:
          UNREACHABLE();
      }
      break;
    }
    case kFloat32x4Scale: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsDouble());
      XMMRegister left_reg = ToFloat32x4Register(instr->left());
      XMMRegister right_reg = ToDoubleRegister(instr->right());
      XMMRegister scratch_reg = xmm0;
      __ xorps(scratch_reg, scratch_reg);
      __ cvtsd2ss(scratch_reg, right_reg);
      __ shufps(scratch_reg, scratch_reg, 0x0);
      __ mulps(left_reg, scratch_reg);
      break;
    }
    case kFloat32x4Shuffle: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
      if (instr->hydrogen()->right()->IsConstant() &&
          HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
        int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
        uint8_t select = static_cast<uint8_t>(value & 0xFF);
        XMMRegister left_reg = ToFloat32x4Register(instr->left());
        __ shufps(left_reg, left_reg, select);
      } else {
        Comment(";;; deoptimize: non-constant selector for shuffle");
        DeoptimizeIf(no_condition, instr->environment());
      }
      break;
    }
    case kInt32x4Shuffle: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
      if (instr->hydrogen()->right()->IsConstant() &&
          HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
        int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
        uint8_t select = static_cast<uint8_t>(value & 0xFF);
        XMMRegister left_reg = ToInt32x4Register(instr->left());
        __ pshufd(left_reg, left_reg, select);
      } else {
        Comment(";;; deoptimize: non-constant selector for shuffle");
        DeoptimizeIf(no_condition, instr->environment());
      }
      break;
    }
    case kInt32x4ShiftLeft:
    case kInt32x4ShiftRight:
    case kInt32x4ShiftRightArithmetic: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
      if (instr->hydrogen()->right()->IsConstant() &&
          HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
        int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
        uint8_t shift = static_cast<uint8_t>(value & 0xFF);
        XMMRegister left_reg = ToInt32x4Register(instr->left());
        switch (instr->op()) {
          case kInt32x4ShiftLeft:
            __ pslld(left_reg, shift);
            break;
          case kInt32x4ShiftRight:
            __ psrld(left_reg, shift);
            break;
          case kInt32x4ShiftRightArithmetic:
            __ psrad(left_reg, shift);
            break;
          default:
            UNREACHABLE();
        }
      } else {
        XMMRegister left_reg = ToInt32x4Register(instr->left());
        Register shift = ToRegister(instr->right());
        XMMRegister xmm_scratch = double_scratch0();
        __ movd(xmm_scratch, shift);
        switch (instr->op()) {
          case kInt32x4ShiftLeft:
            __ pslld(left_reg, xmm_scratch);
            break;
          case kInt32x4ShiftRight:
            __ psrld(left_reg, xmm_scratch);
            break;
          case kInt32x4ShiftRightArithmetic:
            __ psrad(left_reg, xmm_scratch);
            break;
          default:
            UNREACHABLE();
        }
      }
      break;
    }
    case kFloat32x4LessThan:
    case kFloat32x4LessThanOrEqual:
    case kFloat32x4Equal:
    case kFloat32x4NotEqual:
    case kFloat32x4GreaterThanOrEqual:
    case kFloat32x4GreaterThan: {
      ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsFloat32x4());
      XMMRegister left_reg = ToFloat32x4Register(instr->left());
      XMMRegister right_reg = ToFloat32x4Register(instr->right());
      XMMRegister result_reg = ToInt32x4Register(instr->result());
      switch (instr->op()) {
        case kFloat32x4LessThan:
          if (result_reg.is(left_reg)) {
            __ cmpltps(result_reg, right_reg);
          } else if (result_reg.is(right_reg)) {
            __ cmpnltps(result_reg, left_reg);
          } else {
            __ movaps(result_reg, left_reg);
            __ cmpltps(result_reg, right_reg);
          }
          break;
        case kFloat32x4LessThanOrEqual:
          if (result_reg.is(left_reg)) {
            __ cmpleps(result_reg, right_reg);
          } else if (result_reg.is(right_reg)) {
            __ cmpnleps(result_reg, left_reg);
          } else {
            __ movaps(result_reg, left_reg);
            __ cmpleps(result_reg, right_reg);
          }
          break;
        case kFloat32x4Equal:
          if (result_reg.is(left_reg)) {
            __ cmpeqps(result_reg, right_reg);
          } else if (result_reg.is(right_reg)) {
            __ cmpeqps(result_reg, left_reg);
          } else {
            __ movaps(result_reg, left_reg);
            __ cmpeqps(result_reg, right_reg);
          }
          break;
        case kFloat32x4NotEqual:
          if (result_reg.is(left_reg)) {
            __ cmpneqps(result_reg, right_reg);
          } else if (result_reg.is(right_reg)) {
            __ cmpneqps(result_reg, left_reg);
          } else {
            __ movaps(result_reg, left_reg);
            __ cmpneqps(result_reg, right_reg);
          }
          break;
        case kFloat32x4GreaterThanOrEqual:
          if (result_reg.is(left_reg)) {
            __ cmpnltps(result_reg, right_reg);
          } else if (result_reg.is(right_reg)) {
            __ cmpltps(result_reg, left_reg);
          } else {
            __ movaps(result_reg, left_reg);
            __ cmpnltps(result_reg, right_reg);
          }
          break;
        case kFloat32x4GreaterThan:
          if (result_reg.is(left_reg)) {
            __ cmpnleps(result_reg, right_reg);
          } else if (result_reg.is(right_reg)) {
            __ cmpleps(result_reg, left_reg);
          } else {
            __ movaps(result_reg, left_reg);
            __ cmpnleps(result_reg, right_reg);
          }
          break;
        default:
          UNREACHABLE();
      }
      break;
    }
    case kInt32x4And:
    case kInt32x4Or:
    case kInt32x4Xor:
    case kInt32x4Add:
    case kInt32x4Sub:
    case kInt32x4Mul:
    case kInt32x4GreaterThan:
    case kInt32x4Equal:
    case kInt32x4LessThan: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsInt32x4());
      XMMRegister left_reg = ToInt32x4Register(instr->left());
      XMMRegister right_reg = ToInt32x4Register(instr->right());
      switch (instr->op()) {
        case kInt32x4And:
          __ andps(left_reg, right_reg);
          break;
        case kInt32x4Or:
          __ orps(left_reg, right_reg);
          break;
        case kInt32x4Xor:
          __ xorps(left_reg, right_reg);
          break;
        case kInt32x4Add:
          __ paddd(left_reg, right_reg);
          break;
        case kInt32x4Sub:
          __ psubd(left_reg, right_reg);
          break;
        case kInt32x4Mul:
          if (CpuFeatures::IsSupported(SSE4_1)) {
            CpuFeatureScope scope(masm(), SSE4_1);
            __ pmulld(left_reg, right_reg);
          } else {
            // The algorithm is from http://stackoverflow.com/questions/10500766/sse-multiplication-of-4-32-bit-integers
            XMMRegister xmm_scratch = xmm0;
            __ movaps(xmm_scratch, left_reg);
            __ pmuludq(left_reg, right_reg);
            __ psrldq(xmm_scratch, 4);
            __ psrldq(right_reg, 4);
            __ pmuludq(xmm_scratch, right_reg);
            __ pshufd(left_reg, left_reg, 8);
            __ pshufd(xmm_scratch, xmm_scratch, 8);
            __ punpackldq(left_reg, xmm_scratch);
          }
          break;
        case kInt32x4GreaterThan:
          __ pcmpgtd(left_reg, right_reg);
          break;
        case kInt32x4Equal:
          __ pcmpeqd(left_reg, right_reg);
          break;
        case kInt32x4LessThan: {
          XMMRegister xmm_scratch = xmm0;
          __ movaps(xmm_scratch, right_reg);
          __ pcmpgtd(xmm_scratch, left_reg);
          __ movaps(left_reg, xmm_scratch);
          break;
        }
        default:
          UNREACHABLE();
      }
      break;
    }
    case kFloat32x4WithW:
      imm8++;
    case kFloat32x4WithZ:
      imm8++;
    case kFloat32x4WithY:
      imm8++;
    case kFloat32x4WithX: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsDouble());
      XMMRegister left_reg = ToFloat32x4Register(instr->left());
      XMMRegister right_reg = ToDoubleRegister(instr->right());
      XMMRegister xmm_scratch = xmm0;
      __ xorps(xmm_scratch, xmm_scratch);
      __ cvtsd2ss(xmm_scratch, right_reg);
      if (CpuFeatures::IsSupported(SSE4_1)) {
        imm8 = imm8 << 4;  // INSERTPS takes the destination lane in bits 5:4.
        CpuFeatureScope scope(masm(), SSE4_1);
        __ insertps(left_reg, xmm_scratch, imm8);
      } else {
        __ subq(rsp, Immediate(kFloat32x4Size));
        __ movups(Operand(rsp, 0), left_reg);
        __ movss(Operand(rsp, imm8 * kFloatSize), xmm_scratch);
        __ movups(left_reg, Operand(rsp, 0));
        __ addq(rsp, Immediate(kFloat32x4Size));
      }
      break;
    }
    case kInt32x4WithW:
      imm8++;
    case kInt32x4WithZ:
      imm8++;
    case kInt32x4WithY:
      imm8++;
    case kInt32x4WithX: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsInteger32());
      XMMRegister left_reg = ToInt32x4Register(instr->left());
      Register right_reg = ToRegister(instr->right());
      if (CpuFeatures::IsSupported(SSE4_1)) {
        CpuFeatureScope scope(masm(), SSE4_1);
        __ pinsrd(left_reg, right_reg, imm8);
      } else {
        __ subq(rsp, Immediate(kInt32x4Size));
        __ movdqu(Operand(rsp, 0), left_reg);
        __ movl(Operand(rsp, imm8 * kFloatSize), right_reg);
        __ movdqu(left_reg, Operand(rsp, 0));
        __ addq(rsp, Immediate(kInt32x4Size));
      }
      break;
    }
    case kInt32x4WithFlagW:
      imm8++;
    case kInt32x4WithFlagZ:
      imm8++;
    case kInt32x4WithFlagY:
      imm8++;
    case kInt32x4WithFlagX: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsTagged());
      HType type = instr->hydrogen()->right()->type();
      XMMRegister left_reg = ToInt32x4Register(instr->left());
      Register right_reg = ToRegister(instr->right());
      Label load_false_value, done;
      if (type.IsBoolean()) {
        __ subq(rsp, Immediate(kInt32x4Size));
        __ movups(Operand(rsp, 0), left_reg);
        __ CompareRoot(right_reg, Heap::kTrueValueRootIndex);
        __ j(not_equal, &load_false_value, Label::kNear);
      } else {
        Comment(";;; deoptimize: other types for int32x4.withFlagX/Y/Z/W.");
        DeoptimizeIf(no_condition, instr->environment());
        break;
      }
      // Load the true value into the selected lane.
      __ movl(Operand(rsp, imm8 * kFloatSize), Immediate(0xFFFFFFFF));
      __ jmp(&done, Label::kNear);
      __ bind(&load_false_value);
      __ movl(Operand(rsp, imm8 * kFloatSize), Immediate(0x0));
      __ bind(&done);
      __ movups(left_reg, Operand(rsp, 0));
      __ addq(rsp, Immediate(kInt32x4Size));
      break;
    }
    default:
      UNREACHABLE();
  }
}


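// Illustrative model of the SSE2 fallback for kInt32x4Mul above (an
// editor's sketch, not part of the generated code). PMULUDQ only multiplies
// lanes 0 and 2 into 64-bit products, so two passes plus a repack are
// needed:
//
//   a02 = pmuludq(a, b);              // 64-bit products of lanes 0 and 2
//   a13 = pmuludq(a >> 32, b >> 32);  // lanes 1 and 3 (the psrldq by 4)
//   lo  = pshufd(a02, 8);             // pack low dwords into lanes 0 and 1
//   hi  = pshufd(a13, 8);
//   out = punpckldq(lo, hi);          // interleave back into lane order
//
// The low 32 bits of each 64-bit product are exactly the wrap-around result
// of a 32x32->32 multiply, whether the inputs are read as signed or
// unsigned.

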
void LCodeGen::DoTernarySIMDOperation(LTernarySIMDOperation* instr) {
  switch (instr->op()) {
    case kInt32x4Select: {
      ASSERT(instr->hydrogen()->first()->representation().IsInt32x4());
      ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->third()->representation().IsFloat32x4());

      XMMRegister mask_reg = ToInt32x4Register(instr->first());
      XMMRegister left_reg = ToFloat32x4Register(instr->second());
      XMMRegister right_reg = ToFloat32x4Register(instr->third());
      XMMRegister result_reg = ToFloat32x4Register(instr->result());
      XMMRegister temp_reg = xmm0;

      // Copy mask.
      __ movaps(temp_reg, mask_reg);
      // Invert it.
      __ notps(temp_reg);
      // temp_reg = temp_reg & falseValue.
      __ andps(temp_reg, right_reg);

      if (!result_reg.is(mask_reg)) {
        if (result_reg.is(left_reg)) {
          // result_reg = result_reg & trueValue.
          __ andps(result_reg, mask_reg);
          // out = result_reg | temp_reg.
          __ orps(result_reg, temp_reg);
        } else {
          __ movaps(result_reg, mask_reg);
          // result_reg = result_reg & trueValue.
          __ andps(result_reg, left_reg);
          // out = result_reg | temp_reg.
          __ orps(result_reg, temp_reg);
        }
      } else {
        // result_reg = result_reg & trueValue.
        __ andps(result_reg, left_reg);
        // out = result_reg | temp_reg.
        __ orps(result_reg, temp_reg);
      }
      break;
    }
    case kFloat32x4ShuffleMix: {
      ASSERT(instr->first()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->first()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->third()->representation().IsInteger32());
      if (instr->hydrogen()->third()->IsConstant() &&
          HConstant::cast(instr->hydrogen()->third())->HasInteger32Value()) {
        int32_t value = ToInteger32(LConstantOperand::cast(instr->third()));
        uint8_t select = static_cast<uint8_t>(value & 0xFF);
        XMMRegister first_reg = ToFloat32x4Register(instr->first());
        XMMRegister second_reg = ToFloat32x4Register(instr->second());
        __ shufps(first_reg, second_reg, select);
      } else {
        Comment(";;; deoptimize: non-constant selector for shuffle");
        DeoptimizeIf(no_condition, instr->environment());
      }
      break;
    }
    case kFloat32x4Clamp: {
      ASSERT(instr->first()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->first()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->third()->representation().IsFloat32x4());

      XMMRegister value_reg = ToFloat32x4Register(instr->first());
      XMMRegister lower_reg = ToFloat32x4Register(instr->second());
      XMMRegister upper_reg = ToFloat32x4Register(instr->third());
      __ minps(value_reg, upper_reg);
      __ maxps(value_reg, lower_reg);
      break;
    }
    default:
      UNREACHABLE();
  }
}


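// Reading aid: kInt32x4Select above is the classic branchless bitwise
// blend,
//
//   result = (mask & trueValue) | (~mask & falseValue);
//
// evaluated per bit, which is why the mask is expected to hold all-ones or
// all-zeros lanes. The register juggling only exists to avoid clobbering
// whichever of the three inputs happens to alias result_reg.

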
void LCodeGen::DoQuarternarySIMDOperation(LQuarternarySIMDOperation* instr) {
  switch (instr->op()) {
    case kFloat32x4Constructor: {
      ASSERT(instr->hydrogen()->x()->representation().IsDouble());
      ASSERT(instr->hydrogen()->y()->representation().IsDouble());
      ASSERT(instr->hydrogen()->z()->representation().IsDouble());
      ASSERT(instr->hydrogen()->w()->representation().IsDouble());
      XMMRegister x_reg = ToDoubleRegister(instr->x());
      XMMRegister y_reg = ToDoubleRegister(instr->y());
      XMMRegister z_reg = ToDoubleRegister(instr->z());
      XMMRegister w_reg = ToDoubleRegister(instr->w());
      XMMRegister result_reg = ToFloat32x4Register(instr->result());
      __ subq(rsp, Immediate(kFloat32x4Size));
      __ xorps(xmm0, xmm0);
      __ cvtsd2ss(xmm0, x_reg);
      __ movss(Operand(rsp, 0 * kFloatSize), xmm0);
      __ xorps(xmm0, xmm0);
      __ cvtsd2ss(xmm0, y_reg);
      __ movss(Operand(rsp, 1 * kFloatSize), xmm0);
      __ xorps(xmm0, xmm0);
      __ cvtsd2ss(xmm0, z_reg);
      __ movss(Operand(rsp, 2 * kFloatSize), xmm0);
      __ xorps(xmm0, xmm0);
      __ cvtsd2ss(xmm0, w_reg);
      __ movss(Operand(rsp, 3 * kFloatSize), xmm0);
      __ movups(result_reg, Operand(rsp, 0 * kFloatSize));
      __ addq(rsp, Immediate(kFloat32x4Size));
      break;
    }
    case kInt32x4Constructor: {
      ASSERT(instr->hydrogen()->x()->representation().IsInteger32());
      ASSERT(instr->hydrogen()->y()->representation().IsInteger32());
      ASSERT(instr->hydrogen()->z()->representation().IsInteger32());
      ASSERT(instr->hydrogen()->w()->representation().IsInteger32());
      Register x_reg = ToRegister(instr->x());
      Register y_reg = ToRegister(instr->y());
      Register z_reg = ToRegister(instr->z());
      Register w_reg = ToRegister(instr->w());
      XMMRegister result_reg = ToInt32x4Register(instr->result());
      __ subq(rsp, Immediate(kInt32x4Size));
      __ movl(Operand(rsp, 0 * kInt32Size), x_reg);
      __ movl(Operand(rsp, 1 * kInt32Size), y_reg);
      __ movl(Operand(rsp, 2 * kInt32Size), z_reg);
      __ movl(Operand(rsp, 3 * kInt32Size), w_reg);
      __ movups(result_reg, Operand(rsp, 0 * kInt32Size));
      __ addq(rsp, Immediate(kInt32x4Size));
      break;
    }
    case kInt32x4Bool: {
      ASSERT(instr->hydrogen()->x()->representation().IsTagged());
      ASSERT(instr->hydrogen()->y()->representation().IsTagged());
      ASSERT(instr->hydrogen()->z()->representation().IsTagged());
      ASSERT(instr->hydrogen()->w()->representation().IsTagged());
      HType x_type = instr->hydrogen()->x()->type();
      HType y_type = instr->hydrogen()->y()->type();
      HType z_type = instr->hydrogen()->z()->type();
      HType w_type = instr->hydrogen()->w()->type();
      if (!x_type.IsBoolean() || !y_type.IsBoolean() ||
          !z_type.IsBoolean() || !w_type.IsBoolean()) {
        Comment(";;; deoptimize: other types for int32x4.bool.");
        DeoptimizeIf(no_condition, instr->environment());
        break;
      }

      XMMRegister result_reg = ToInt32x4Register(instr->result());
      Register x_reg = ToRegister(instr->x());
      Register y_reg = ToRegister(instr->y());
      Register z_reg = ToRegister(instr->z());
      Register w_reg = ToRegister(instr->w());
      Label load_false_x, done_x, load_false_y, done_y,
            load_false_z, done_z, load_false_w, done_w;
      __ subq(rsp, Immediate(kInt32x4Size));

      __ CompareRoot(x_reg, Heap::kTrueValueRootIndex);
      __ j(not_equal, &load_false_x, Label::kNear);
      __ movl(Operand(rsp, 0 * kInt32Size), Immediate(-1));
      __ jmp(&done_x, Label::kNear);
      __ bind(&load_false_x);
      __ movl(Operand(rsp, 0 * kInt32Size), Immediate(0x0));
      __ bind(&done_x);

      __ CompareRoot(y_reg, Heap::kTrueValueRootIndex);
      __ j(not_equal, &load_false_y, Label::kNear);
      __ movl(Operand(rsp, 1 * kInt32Size), Immediate(-1));
      __ jmp(&done_y, Label::kNear);
      __ bind(&load_false_y);
      __ movl(Operand(rsp, 1 * kInt32Size), Immediate(0x0));
      __ bind(&done_y);

      __ CompareRoot(z_reg, Heap::kTrueValueRootIndex);
      __ j(not_equal, &load_false_z, Label::kNear);
      __ movl(Operand(rsp, 2 * kInt32Size), Immediate(-1));
      __ jmp(&done_z, Label::kNear);
      __ bind(&load_false_z);
      __ movl(Operand(rsp, 2 * kInt32Size), Immediate(0x0));
      __ bind(&done_z);

      __ CompareRoot(w_reg, Heap::kTrueValueRootIndex);
      __ j(not_equal, &load_false_w, Label::kNear);
      __ movl(Operand(rsp, 3 * kInt32Size), Immediate(-1));
      __ jmp(&done_w, Label::kNear);
      __ bind(&load_false_w);
      __ movl(Operand(rsp, 3 * kInt32Size), Immediate(0x0));
      __ bind(&done_w);

      __ movups(result_reg, Operand(rsp, 0));
      __ addq(rsp, Immediate(kInt32x4Size));
      break;
    }
    default:
      UNREACHABLE();
  }
}


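// Reading aid (an editor's note): kInt32x4Bool assembles the vector on the
// stack because, without SSE4.1's PINSRD, there is no cheap way to build an
// XMM value from four general-purpose registers. Each lane is written as -1
// (all bits set, the SIMD 'true') or 0, then the result is reloaded with a
// single movups.

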
void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.

  Register exponent = rdx;
  ASSERT(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(exponent));
  ASSERT(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(xmm1));
  ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(exponent, &no_deopt, Label::kNear);
    __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
    DeoptimizeIf(not_equal, instr->environment());
    __ bind(&no_deopt);
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoMathExp(LMathExp* instr) {
  XMMRegister input = ToDoubleRegister(instr->value());
  XMMRegister result = ToDoubleRegister(instr->result());
  XMMRegister temp0 = double_scratch0();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  ASSERT(instr->value()->Equals(instr->result()));
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  XMMRegister xmm_scratch = double_scratch0();
  Label positive, done, zero;
  __ xorps(xmm_scratch, xmm_scratch);
  __ ucomisd(input_reg, xmm_scratch);
  __ j(above, &positive, Label::kNear);
  __ j(not_carry, &zero, Label::kNear);
  ExternalReference nan =
      ExternalReference::address_of_canonical_non_hole_nan();
  Operand nan_operand = masm()->ExternalOperand(nan);
  __ movsd(input_reg, nan_operand);
  __ jmp(&done, Label::kNear);
  __ bind(&zero);
  ExternalReference ninf =
      ExternalReference::address_of_negative_infinity();
  Operand ninf_operand = masm()->ExternalOperand(ninf);
  __ movsd(input_reg, ninf_operand);
  __ jmp(&done, Label::kNear);
  __ bind(&positive);
  __ fldln2();
  __ subp(rsp, Immediate(kDoubleSize));
  __ movsd(Operand(rsp, 0), input_reg);
  __ fld_d(Operand(rsp, 0));
  __ fyl2x();
  __ fstp_d(Operand(rsp, 0));
  __ movsd(input_reg, Operand(rsp, 0));
  __ addp(rsp, Immediate(kDoubleSize));
  __ bind(&done);
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Label not_zero_input;
  __ bsrl(result, input);

  __ j(not_zero, &not_zero_input);
  __ Set(result, 63);  // 63^31 == 32

  __ bind(&not_zero_input);
  __ xorl(result, Immediate(31));  // for x in [0..31], 31^x == 31-x.
}


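// Reading aid: BSR yields the index of the highest set bit and leaves its
// destination undefined for a zero input (the zero flag is set instead).
// Since 31 ^ x == 31 - x for x in [0..31], XOR-ing the bit index with 31
// turns it into a leading-zero count, and seeding the zero-input path with
// 63 makes the same XOR produce 63 ^ 31 == 32, the required Math.clz32(0)
// result.

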
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->function()).is(rdi));
  ASSERT(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(rdi, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(),
                      instr,
                      RDI_CONTAINS_TARGET);
  }
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->function()).is(rdi));
  ASSERT(ToRegister(instr->result()).is(rax));

  int arity = instr->arity();
  CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->constructor()).is(rdi));
  ASSERT(ToRegister(instr->result()).is(rax));

  __ Set(rax, instr->arity());
  // No cell in rbx for construct type feedback in optimized code.
  __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->constructor()).is(rdi));
  ASSERT(ToRegister(instr->result()).is(rax));

  __ Set(rax, instr->arity());
  __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need a change here: look at the first argument.
      __ movp(rcx, Operand(rsp, 0));
      __ testp(rcx, rcx);
      __ j(zero, &packed_case, Label::kNear);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
      CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
      __ jmp(&done, Label::kNear);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  }
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
  __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ leap(result, Operand(base, ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ leap(result, Operand(base, offset, times_1, 0));
  }
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  HStoreNamedField* hinstr = instr->hydrogen();
  Representation representation = instr->representation();

  HObjectAccess access = hinstr->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    ASSERT(!hinstr->NeedsWriteBarrier());
    Register value = ToRegister(instr->value());
    if (instr->object()->IsConstantOperand()) {
      ASSERT(value.is(rax));
      LConstantOperand* object = LConstantOperand::cast(instr->object());
      __ store_rax(ToExternalReference(object));
    } else {
      Register object = ToRegister(instr->object());
      __ Store(MemOperand(object, offset), value, representation);
    }
    return;
  }

  Register object = ToRegister(instr->object());
  Handle<Map> transition = instr->transition();
  SmiCheck check_needed = hinstr->value()->IsHeapObject()
      ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;

  ASSERT(!(representation.IsSmi() &&
           instr->value()->IsConstantOperand() &&
           !IsInteger32Constant(LConstantOperand::cast(instr->value()))));
  if (representation.IsHeapObject()) {
    if (instr->value()->IsConstantOperand()) {
      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
      if (chunk_->LookupConstant(operand_value)->HasSmiValue()) {
        DeoptimizeIf(no_condition, instr->environment());
      }
    } else {
      if (!hinstr->value()->type().IsHeapObject()) {
        Register value = ToRegister(instr->value());
        Condition cc = masm()->CheckSmi(value);
        DeoptimizeIf(cc, instr->environment());

        // We know that value is a smi now, so we can omit the check below.
        check_needed = OMIT_SMI_CHECK;
      }
    }
  } else if (representation.IsDouble()) {
    ASSERT(transition.is_null());
    ASSERT(access.IsInobject());
    ASSERT(!hinstr->NeedsWriteBarrier());
    XMMRegister value = ToDoubleRegister(instr->value());
    __ movsd(FieldOperand(object, offset), value);
    return;
  }

  if (!transition.is_null()) {
    if (!hinstr->NeedsWriteBarrierForMap()) {
      __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
    } else {
      Register temp = ToRegister(instr->temp());
      __ Move(kScratchRegister, transition);
      __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
      // Update the write barrier for the map field.
      __ RecordWriteField(object,
                          HeapObject::kMapOffset,
                          kScratchRegister,
                          temp,
                          kSaveFPRegs,
                          OMIT_REMEMBERED_SET,
                          OMIT_SMI_CHECK);
    }
  }

  // Do the store.
  Register write_register = object;
  if (!access.IsInobject()) {
    write_register = ToRegister(instr->temp());
    __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
  }

  if (representation.IsSmi() &&
      hinstr->value()->representation().IsInteger32()) {
    ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    if (FLAG_debug_code) {
      Register scratch = kScratchRegister;
      __ Load(scratch, FieldOperand(write_register, offset), representation);
      __ AssertSmi(scratch);
    }

    // Store int value directly to upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
    offset += kPointerSize / 2;
    representation = Representation::Integer32();
  }

  Operand operand = FieldOperand(write_register, offset);

  if (instr->value()->IsRegister()) {
    Register value = ToRegister(instr->value());
    __ Store(operand, value, representation);
  } else {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (IsInteger32Constant(operand_value)) {
      ASSERT(!hinstr->NeedsWriteBarrier());
      int32_t value = ToInteger32(operand_value);
      if (representation.IsSmi()) {
        __ Move(operand, Smi::FromInt(value));
      } else {
        __ movl(operand, Immediate(value));
      }
    } else {
      Handle<Object> handle_value = ToHandle(operand_value);
      ASSERT(!hinstr->NeedsWriteBarrier());
      __ Move(operand, handle_value);
    }
  }

  if (hinstr->NeedsWriteBarrier()) {
    Register value = ToRegister(instr->value());
    Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
    // Update the write barrier for the object for in-object properties.
    __ RecordWriteField(write_register,
                        offset,
                        value,
                        temp,
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        check_needed);
  }
}


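// Reading aid (not part of the generated code): on x64 a smi keeps its
// 32-bit payload in the upper half of the word (kSmiShift == 32, tag bits
// all zero), so a field already known to contain a smi can be retagged in
// place from an untagged int32 by writing only the upper four bytes:
//
//   [field + 0] : 00 00 00 00   (little endian: the tag half, stays zero)
//   [field + 4] : payload       <- a single movl of the int32 value
//
// That is what the 'offset += kPointerSize / 2' adjustment above selects.

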
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->object()).is(rdx));
  ASSERT(ToRegister(instr->value()).is(rax));

  __ Move(rcx, instr->hydrogen()->name());
  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
    Label done;
    __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, check->environment());
  }
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  HBoundsCheck* hinstr = instr->hydrogen();
  if (hinstr->skip_check()) return;

  Representation representation = hinstr->length()->representation();
  ASSERT(representation.Equals(hinstr->index()->representation()));
  ASSERT(representation.IsSmiOrInteger32());

  if (instr->length()->IsRegister()) {
    Register reg = ToRegister(instr->length());

    if (instr->index()->IsConstantOperand()) {
      int32_t constant_index =
          ToInteger32(LConstantOperand::cast(instr->index()));
      if (representation.IsSmi()) {
        __ Cmp(reg, Smi::FromInt(constant_index));
      } else {
        __ cmpl(reg, Immediate(constant_index));
      }
    } else {
      Register reg2 = ToRegister(instr->index());
      if (representation.IsSmi()) {
        __ cmpp(reg, reg2);
      } else {
        __ cmpl(reg, reg2);
      }
    }
  } else {
    Operand length = ToOperand(instr->length());
    if (instr->index()->IsConstantOperand()) {
      int32_t constant_index =
          ToInteger32(LConstantOperand::cast(instr->index()));
      if (representation.IsSmi()) {
        __ Cmp(length, Smi::FromInt(constant_index));
      } else {
        __ cmpl(length, Immediate(constant_index));
      }
    } else {
      if (representation.IsSmi()) {
        __ cmpp(length, ToRegister(instr->index()));
      } else {
        __ cmpl(length, ToRegister(instr->index()));
      }
    }
  }
  Condition condition = hinstr->allow_equality() ? below : below_equal;
  ApplyCheckIf(condition, instr);
}


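// Reading aid: the bounds check deliberately uses unsigned conditions. With
// the length as the left operand of the compare, 'below_equal' deopts when
// length <= index (or 'below' when equality is allowed), and a negative
// index, reinterpreted as a huge unsigned value, fails the same single
// comparison, so no separate index >= 0 check is needed.

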
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand()) {
    HandleExternalArrayOpRequiresPreScale(key, elements_kind);
  }
  int base_offset = instr->is_fixed_typed_array()
      ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
      : 0;
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      elements_kind,
      base_offset,
      instr->additional_index()));

  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == FLOAT32_ELEMENTS) {
    XMMRegister value(ToDoubleRegister(instr->value()));
    __ cvtsd2ss(value, value);
    __ movss(operand, value);
  } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
             elements_kind == FLOAT64_ELEMENTS) {
    __ movsd(operand, ToDoubleRegister(instr->value()));
  } else if (IsSIMD128ElementsKind(elements_kind)) {
    __ movups(operand, ToSIMD128Register(instr->value()));
  } else {
    Register value(ToRegister(instr->value()));
    switch (elements_kind) {
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_INT8_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case INT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ movb(operand, value);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case EXTERNAL_UINT16_ELEMENTS:
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ movw(operand, value);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case EXTERNAL_UINT32_ELEMENTS:
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ movl(operand, value);
        break;
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT32x4_ELEMENTS:
      case EXTERNAL_INT32x4_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FLOAT32x4_ELEMENTS:
      case INT32x4_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  XMMRegister value = ToDoubleRegister(instr->value());
  LOperand* key = instr->key();
  if (instr->NeedsCanonicalization()) {
    Label have_value;

    __ ucomisd(value, value);
    __ j(parity_odd, &have_value, Label::kNear);  // NaN.

    __ Set(kScratchRegister, BitCast<uint64_t>(
        FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
    __ movq(value, kScratchRegister);

    __ bind(&have_value);
  }

  Operand double_store_operand = BuildFastArrayOperand(
      instr->elements(),
      key,
      FAST_DOUBLE_ELEMENTS,
      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
      instr->additional_index());

  __ movsd(double_store_operand, value);
}


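// Reading aid: 'ucomisd value, value' is the standard self-comparison NaN
// test; it raises the parity flag only for unordered operands, i.e. when
// value is NaN. The parity_odd jump (parity clear) skips canonicalization
// for ordinary numbers, and the fall-through rewrites any NaN to the one
// canonical bit pattern, so the hole marker, itself a special NaN, can
// never be stored into a fixed double array by accident.

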
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  HStoreKeyed* hinstr = instr->hydrogen();
  LOperand* key = instr->key();
  int offset = FixedArray::kHeaderSize - kHeapObjectTag;
  Representation representation = hinstr->value()->representation();

  if (representation.IsInteger32()) {
    ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
    if (FLAG_debug_code) {
      Register scratch = kScratchRegister;
      __ Load(scratch,
              BuildFastArrayOperand(instr->elements(),
                                    key,
                                    FAST_ELEMENTS,
                                    offset,
                                    instr->additional_index()),
              Representation::Smi());
      __ AssertSmi(scratch);
    }

    // Store int value directly to upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
    offset += kPointerSize / 2;
  }

  Operand operand =
      BuildFastArrayOperand(instr->elements(),
                            key,
                            FAST_ELEMENTS,
                            offset,
                            instr->additional_index());

  if (instr->value()->IsRegister()) {
    __ Store(operand, ToRegister(instr->value()), representation);
  } else {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (IsInteger32Constant(operand_value)) {
      int32_t value = ToInteger32(operand_value);
      if (representation.IsSmi()) {
        __ Move(operand, Smi::FromInt(value));
      } else {
        __ movl(operand, Immediate(value));
      }
    } else {
      Handle<Object> handle_value = ToHandle(operand_value);
      __ Move(operand, handle_value);
    }
  }

  if (hinstr->NeedsWriteBarrier()) {
    Register elements = ToRegister(instr->elements());
    ASSERT(instr->value()->IsRegister());
    Register value = ToRegister(instr->value());
    ASSERT(!key->IsConstantOperand());
    SmiCheck check_needed = hinstr->value()->IsHeapObject()
        ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    Register key_reg(ToRegister(key));
    __ leap(key_reg, operand);
    __ RecordWrite(elements,
                   key_reg,
                   value,
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed);
  }
}


void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  if (instr->is_typed_elements()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->object()).is(rdx));
  ASSERT(ToRegister(instr->key()).is(rcx));
  ASSERT(ToRegister(instr->value()).is(rax));

  Handle<Code> ic = instr->strict_mode() == STRICT
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
  __ j(not_equal, &not_applicable);
  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
    __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
    // Write barrier.
    ASSERT_NE(instr->temp(), NULL);
    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                        ToRegister(instr->temp()), kDontSaveFPRegs);
  } else {
    ASSERT(ToRegister(instr->context()).is(rsi));
    PushSafepointRegistersScope scope(this);
    if (!object_reg.is(rax)) {
      __ movp(rax, object_reg);
    }
    __ Move(rbx, to_map);
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(equal, instr->environment());
  __ bind(&no_memento_found);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  ASSERT(ToRegister(instr->left()).is(rdx));
  ASSERT(ToRegister(instr->right()).is(rax));
  StringAddStub stub(instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharCodeAt(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
  if (instr->index()->IsConstantOperand()) {
    int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Push(Smi::FromInt(const_index));
  } else {
    Register index = ToRegister(instr->index());
    __ Integer32ToSmi(index, index);
    __ Push(index);
  }
  CallRuntimeFromDeferred(
      Runtime::kHiddenStringCharCodeAt, 2, instr, instr->context());
  __ AssertSmi(rax);
  __ SmiToInteger32(rax, rax);
  __ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  ASSERT(!char_code.is(result));

  __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
  __ j(above, deferred->entry());
  __ movsxlq(char_code, char_code);
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ movp(result, FieldOperand(result,
                               char_code, times_pointer_size,
                               FixedArray::kHeaderSize));
  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
  __ j(equal, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Integer32ToSmi(char_code, char_code);
  __ Push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  if (input->IsRegister()) {
    __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
  } else {
    __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  LOperand* temp = instr->temp();

  __ LoadUint32(ToDoubleRegister(output),
                ToRegister(input),
                ToDoubleRegister(temp));
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  __ Integer32ToSmi(reg, reg);
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagU(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ cmpl(reg, Immediate(Smi::kMaxValue));
  __ j(above, deferred->entry());
  __ Integer32ToSmi(reg, reg);
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
  Label done, slow;
  Register reg = ToRegister(instr->value());
  Register tmp = ToRegister(instr->temp1());
  XMMRegister temp_xmm = ToDoubleRegister(instr->temp2());

  // Load value into temp_xmm which will be preserved across potential call to
  // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
  // XMM registers on x64).
  XMMRegister xmm_scratch = double_scratch0();
  __ LoadUint32(temp_xmm, reg, xmm_scratch);

  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, &slow);
    __ jmp(&done, Label::kNear);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // Put a valid pointer value in the stack slot where the result
    // register is stored, as this register is in the pointer map, but contains
    // an integer value.
    __ StoreToSafepointRegisterSlot(reg, Immediate(0));

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagU uses the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kHiddenAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(reg, rax);
  }

  // Done. Put the value in temp_xmm into the value of the allocated heap
  // number.
  __ bind(&done);
  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagD(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  XMMRegister input_reg = ToDoubleRegister(instr->value());
  Register reg = ToRegister(instr->result());
  Register tmp = ToRegister(instr->temp());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Move(reg, Smi::FromInt(0));

  {
    PushSafepointRegistersScope scope(this);
    // NumberTagD uses the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kHiddenAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ movp(kScratchRegister, rax);
  }
  __ movp(reg, kScratchRegister);
}


void LCodeGen::DoDeferredSIMD128ToTagged(LSIMD128ToTagged* instr,
                                         Runtime::FunctionId id) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Move(reg, Smi::FromInt(0));

  {
    PushSafepointRegistersScope scope(this);
    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(id);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ movp(kScratchRegister, rax);
  }
  __ movp(reg, kScratchRegister);
}


template<class T>
void LCodeGen::HandleSIMD128ToTagged(LSIMD128ToTagged* instr) {
  class DeferredSIMD128ToTagged V8_FINAL : public LDeferredCode {
   public:
    DeferredSIMD128ToTagged(LCodeGen* codegen,
                            LSIMD128ToTagged* instr,
                            Runtime::FunctionId id)
        : LDeferredCode(codegen), instr_(instr), id_(id) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LSIMD128ToTagged* instr_;
    Runtime::FunctionId id_;
  };

  XMMRegister input_reg = ToSIMD128Register(instr->value());
  Register reg = ToRegister(instr->result());
  Register tmp = ToRegister(instr->temp());

  DeferredSIMD128ToTagged* deferred =
      new(zone()) DeferredSIMD128ToTagged(this, instr,
          static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()));
  if (FLAG_inline_new) {
    __ AllocateSIMDHeapObject(T::kSize, reg, tmp, deferred->entry(),
        static_cast<Heap::RootListIndex>(T::kMapRootIndex()));
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ movups(FieldOperand(reg, T::kValueOffset), input_reg);
}


void LCodeGen::DoSIMD128ToTagged(LSIMD128ToTagged* instr) {
  if (instr->value()->IsFloat32x4Register()) {
    HandleSIMD128ToTagged<Float32x4>(instr);
  } else {
    ASSERT(instr->value()->IsInt32x4Register());
    HandleSIMD128ToTagged<Int32x4>(instr);
  }
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    __ testl(input, input);
    DeoptimizeIf(sign, instr->environment());
  }
  __ Integer32ToSmi(output, input);
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  ASSERT(instr->value()->Equals(instr->result()));
  Register input = ToRegister(instr->value());
  if (instr->needs_check()) {
    Condition is_smi = __ CheckSmi(input);
    DeoptimizeIf(NegateCondition(is_smi), instr->environment());
  } else {
    __ AssertSmi(input);
  }
  __ SmiToInteger32(input, input);
}


void LCodeGen::EmitNumberUntagD(Register input_reg,
                                XMMRegister result_reg,
                                bool can_convert_undefined_to_nan,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env,
                                NumberUntagDMode mode) {
  Label convert, load_smi, done;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);

    // Heap number map check.
    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);

    // On x64 it is safe to load at heap number offset before evaluating the
    // map check, since all heap objects are at least two words long.
    __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));

    if (can_convert_undefined_to_nan) {
      __ j(not_equal, &convert, Label::kNear);
    } else {
      DeoptimizeIf(not_equal, env);
    }

    if (deoptimize_on_minus_zero) {
      XMMRegister xmm_scratch = double_scratch0();
      __ xorps(xmm_scratch, xmm_scratch);
      __ ucomisd(xmm_scratch, result_reg);
      __ j(not_equal, &done, Label::kNear);
      __ movmskpd(kScratchRegister, result_reg);
      __ testq(kScratchRegister, Immediate(1));
      DeoptimizeIf(not_zero, env);
    }
    __ jmp(&done, Label::kNear);

    if (can_convert_undefined_to_nan) {
      __ bind(&convert);

      // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
      __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
      DeoptimizeIf(not_equal, env);

      __ xorps(result_reg, result_reg);
      __ divsd(result_reg, result_reg);
      __ jmp(&done, Label::kNear);
    }
  } else {
    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
  }

  // Smi to XMM conversion.
  __ bind(&load_smi);
  __ SmiToInteger32(kScratchRegister, input_reg);
  __ Cvtlsi2sd(result_reg, kScratchRegister);
  __ bind(&done);
}


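// Slow path for tagged->int32. For truncating conversions the oddballs are
// folded in directly, matching ToInt32 on those values: undefined and false
// become 0 and true becomes 1; any other non-number deopts.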
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
  Register input_reg = ToRegister(instr->value());

  if (instr->truncating()) {
    Label no_heap_number, check_bools, check_false;

    // Heap number map check.
    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &no_heap_number, Label::kNear);
    __ TruncateHeapNumberToI(input_reg, input_reg);
    __ jmp(done);

    __ bind(&no_heap_number);
    // Check for Oddballs. Undefined/False is converted to zero and True to
    // one for truncating conversions.
    __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
    __ j(not_equal, &check_bools, Label::kNear);
    __ Set(input_reg, 0);
    __ jmp(done);

    __ bind(&check_bools);
    __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
    __ j(not_equal, &check_false, Label::kNear);
    __ Set(input_reg, 1);
    __ jmp(done);

    __ bind(&check_false);
    __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
    __ RecordComment("Deferred TaggedToI: cannot truncate");
    DeoptimizeIf(not_equal, instr->environment());
    __ Set(input_reg, 0);
  } else {
    Label bailout;
    XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
    __ TaggedToI(input_reg, input_reg, xmm_temp,
        instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);

    __ jmp(done);
    __ bind(&bailout);
    DeoptimizeIf(no_condition, instr->environment());
  }
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredTaggedToI(instr_, done());
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));
  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiToInteger32(input_reg, input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
    __ JumpIfNotSmi(input_reg, deferred->entry());
    __ SmiToInteger32(input_reg, input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  XMMRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->can_convert_undefined_to_nan(),
                   instr->hydrogen()->deoptimize_on_minus_zero(),
                   instr->environment(),
                   mode);
}


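// Untagging a SIMD value is a plain map check: smis and objects whose map
// does not match the expected Float32x4/Int32x4 map deopt; otherwise the
// 128-bit payload is loaded with an unaligned movups.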
template<class T>
void LCodeGen::HandleTaggedToSIMD128(LTaggedToSIMD128* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsSIMD128Register());

  Register input_reg = ToRegister(input);
  XMMRegister result_reg = ToSIMD128Register(result);

  Condition cc = masm()->CheckSmi(input_reg);
  DeoptimizeIf(cc, instr->environment());
  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                 static_cast<Heap::RootListIndex>(T::kMapRootIndex()));
  DeoptimizeIf(not_equal, instr->environment());
  __ movups(result_reg, FieldOperand(input_reg, T::kValueOffset));
}


void LCodeGen::DoTaggedToSIMD128(LTaggedToSIMD128* instr) {
  if (instr->representation().IsFloat32x4()) {
    HandleTaggedToSIMD128<Float32x4>(instr);
  } else {
    ASSERT(instr->representation().IsInt32x4());
    HandleTaggedToSIMD128<Int32x4>(instr);
  }
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, input_reg);
  } else {
    Label bailout, done;
    XMMRegister xmm_scratch = double_scratch0();
    __ DoubleToI(result_reg, input_reg, xmm_scratch,
        instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);

    __ jmp(&done, Label::kNear);
    __ bind(&bailout);
    DeoptimizeIf(no_condition, instr->environment());
    __ bind(&done);
  }
}


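// Like DoDoubleToI, but the result is additionally smi-tagged; the tagging
// step gets its own overflow deopt.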
void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  Label bailout, done;
  XMMRegister xmm_scratch = double_scratch0();
  __ DoubleToI(result_reg, input_reg, xmm_scratch,
      instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);

  __ jmp(&done, Label::kNear);
  __ bind(&bailout);
  DeoptimizeIf(no_condition, instr->environment());
  __ bind(&done);

  __ Integer32ToSmi(result_reg, result_reg);
  DeoptimizeIf(overflow, instr->environment());
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  Condition cc = masm()->CheckSmi(ToRegister(input));
  DeoptimizeIf(NegateCondition(cc), instr->environment());
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    LOperand* input = instr->value();
    Condition cc = masm()->CheckSmi(ToRegister(input));
    DeoptimizeIf(cc, instr->environment());
  }
}


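// Instance-type checks come in two shapes: an interval check on the map's
// instance type, or a mask-and-tag check. When the mask is a power of two
// the latter reduces to a single testb.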
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());

  __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
            Immediate(static_cast<int8_t>(first)));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr->environment());
    } else {
      DeoptimizeIf(below, instr->environment());
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
                Immediate(static_cast<int8_t>(last)));
        DeoptimizeIf(above, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
               Immediate(mask));
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
    } else {
      __ movzxbl(kScratchRegister,
                 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
      __ andb(kScratchRegister, Immediate(mask));
      __ cmpb(kScratchRegister, Immediate(tag));
      DeoptimizeIf(not_equal, instr->environment());
    }
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  __ Cmp(reg, instr->hydrogen()->object().handle());
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ Push(object);
    __ Set(rsi, 0);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);

    __ testp(rax, Immediate(kSmiTagMask));
  }
  DeoptimizeIf(zero, instr->environment());
}


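// Compares the object's map against the expected map set. When the set has
// a migration target, a failed check first attempts instance migration in
// deferred code (which loops back to check_maps_) before giving up and
// deoptimizing.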
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->CanOmitMapChecks()) return;

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->has_migration_target()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  UniqueSet<Map> map_set = instr->hydrogen()->map_set();

  Label success;
  for (int i = 0; i < map_set.size() - 1; i++) {
    Handle<Map> map = map_set.at(i).handle();
    __ CompareMap(reg, map);
    __ j(equal, &success, Label::kNear);
  }

  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
  __ CompareMap(reg, map);
  if (instr->hydrogen()->has_migration_target()) {
    __ j(not_equal, deferred->entry());
  } else {
    DeoptimizeIf(not_equal, instr->environment());
  }

  __ bind(&success);
}


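// The three clamp instructions implement the saturating [0, 255] conversion
// used for Uint8ClampedArray-style stores: doubles and int32s are clamped
// directly; for a tagged input, smis, heap numbers, and undefined (treated
// as zero) are accepted and anything else deopts.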
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
  XMMRegister xmm_scratch = double_scratch0();
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register input_reg = ToRegister(instr->unclamped());
  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
  XMMRegister xmm_scratch = double_scratch0();
  Label is_smi, done, heap_number;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  __ JumpIfSmi(input_reg, &is_smi, dist);

  // Check for heap number.
  __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ Cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr->environment());
  __ xorl(input_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // Heap number.
  __ bind(&heap_number);
  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // Smi.
  __ bind(&is_smi);
  __ SmiToInteger32(input_reg, input_reg);
  __ ClampUint8(input_reg);

  __ bind(&done);
}


void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ movq(result_reg, value_reg);
    __ shr(result_reg, Immediate(32));
  } else {
    __ movd(result_reg, value_reg);
  }
}


void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  XMMRegister result_reg = ToDoubleRegister(instr->result());
  XMMRegister xmm_scratch = double_scratch0();
  __ movd(result_reg, hi_reg);
  __ psllq(result_reg, 32);
  __ movd(xmm_scratch, lo_reg);
  __ orps(result_reg, xmm_scratch);
}


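// Inline allocation: bump-pointer allocate in the selected space and fall
// back to a deferred runtime call when the limit is hit or the constant size
// is too large for a regular heap object. Optionally the new object is
// prefilled with one-pointer filler maps so the heap stays iterable when the
// caller does not initialize it immediately.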
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate V8_FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredAllocate(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
    } else {
      __ jmp(deferred->entry());
    }
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ movl(temp, Immediate((size / kPointerSize) - 1));
    } else {
      temp = ToRegister(instr->size());
      __ sar(temp, Immediate(kPointerSizeLog2));
      __ decl(temp);
    }
    Label loop;
    __ bind(&loop);
    __ Move(FieldOperand(result, temp, times_pointer_size, 0),
        isolate()->factory()->one_pointer_filler_map());
    __ decl(temp);
    __ j(not_zero, &loop);
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    __ Integer32ToSmi(size, size);
    __ Push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Push(Smi::FromInt(size));
  }

  int flags = 0;
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(rax));
  __ Push(rax);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  Label materialized;
  // Registers will be used as follows:
  // rcx = literals array.
  // rbx = regexp literal.
  // rax = regexp literal clone.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ Move(rcx, instr->hydrogen()->literals());
  __ movp(rbx, FieldOperand(rcx, literal_offset));
  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &materialized, Label::kNear);

  // Create regexp literal using runtime function.
  // Result will be in rax.
  __ Push(rcx);
  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
  __ Push(instr->hydrogen()->pattern());
  __ Push(instr->hydrogen()->flags());
  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
  __ movp(rbx, rax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated, Label::kNear);

  __ bind(&runtime_allocate);
  __ Push(rbx);
  __ Push(Smi::FromInt(size));
  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
  __ Pop(rbx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ movp(rdx, FieldOperand(rbx, i));
    __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
    __ movp(FieldOperand(rax, i), rdx);
    __ movp(FieldOperand(rax, i + kPointerSize), rcx);
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
    __ movp(FieldOperand(rax, size - kPointerSize), rdx);
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
                            instr->hydrogen()->is_generator());
    __ Move(rbx, instr->hydrogen()->shared_info());
    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  } else {
    __ Push(rsi);
    __ Push(instr->hydrogen()->shared_info());
    __ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
                            Heap::kFalseValueRootIndex);
    CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  LOperand* input = instr->value();
  EmitPushTaggedOperand(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  ASSERT(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    __ Push(ToHandle(LConstantOperand::cast(operand)));
  } else if (operand->IsRegister()) {
    __ Push(ToRegister(operand));
  } else {
    __ Push(ToOperand(operand));
  }
}


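// typeof dispatch. Each recognized type literal gets its own compare
// sequence; note the SIMD extensions ("float32x4", "int32x4") alongside the
// standard results, and that undetectable objects report "undefined" rather
// than "object".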
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Condition final_branch_condition = EmitTypeofIs(instr, input);
  if (final_branch_condition != no_condition) {
    EmitBranch(instr, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Handle<String> type_name = instr->type_literal();
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);
  int next_block = GetNextEmittedBlock();

  Label::Distance true_distance = left_block == next_block ? Label::kNear
                                                           : Label::kFar;
  Label::Distance false_distance = right_block == next_block ? Label::kNear
                                                             : Label::kFar;
  Condition final_branch_condition = no_condition;
  if (type_name->Equals(heap()->number_string())) {
    __ JumpIfSmi(input, true_label, true_distance);
    __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);

    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->float32x4_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FLOAT32x4_TYPE, input);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->int32x4_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, INT32x4_TYPE, input);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->string_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label, false_distance);
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else if (type_name->Equals(heap()->symbol_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = equal;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ j(equal, true_label, true_distance);
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for undetectable objects => true.
    __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = not_zero;

  } else if (type_name->Equals(heap()->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label, true_distance);
    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->object_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    if (!FLAG_harmony_typeof) {
      __ CompareRoot(input, Heap::kNullValueRootIndex);
      __ j(equal, true_label, true_distance);
    }
    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
    __ j(below, false_label, false_distance);
    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ j(above, false_label, false_distance);
    // Check for undetectable objects => false.
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else {
    __ jmp(false_label, false_distance);
  }

  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->temp());

  EmitIsConstructCall(temp);
  EmitBranch(instr, equal);
}


void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ movp(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(not_equal, &check_frame_marker, Label::kNear);
  __ movp(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Smi::FromInt(StackFrame::CONSTRUCT));
}


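// Lazy bailouts are patched into the generated code at the recorded pc, so
// there must be at least Deoptimizer::patch_size() bytes between consecutive
// patch sites; nops are inserted when the gap would be too small.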
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons
  // (the needed return address), even though the implementation of LAZY and
  // EAGER is now identical. When LAZY is eventually completely folded into
  // EAGER, remove the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(no_condition, instr->environment(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
  RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(above_equal, &done, Label::kNear);

    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(rsi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  ASSERT(ToRegister(instr->context()).is(rsi));
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(equal, instr->environment());

  Register null_value = rdi;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmpp(rax, null_value);
  DeoptimizeIf(equal, instr->environment());

  Condition cc = masm()->CheckSmi(rax);
  DeoptimizeIf(cc, instr->environment());

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
  DeoptimizeIf(below_equal, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);

  __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ Push(rax);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kMetaMapRootIndex);
  DeoptimizeIf(not_equal, instr->environment());
  __ bind(&use_cache);
}


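// Loads the enum cache for the for-in loop: an EnumLength of zero means the
// map has no cached enumerable own properties, so the empty fixed array is
// used; otherwise the cache is read out of the map's descriptor array.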
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Cmp(result, Smi::FromInt(0));
  __ j(not_equal, &load_cache, Label::kNear);
  __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ movp(result,
          FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ movp(result,
          FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  Condition cc = masm()->CheckSmi(result);
  DeoptimizeIf(cc, instr->environment());
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmpp(ToRegister(instr->map()),
          FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  Label out_of_object, done;
  __ SmiToInteger32(index, index);
  __ cmpl(index, Immediate(0));
  __ j(less, &out_of_object, Label::kNear);
  __ movp(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ negl(index);
  // Index is now equal to out of object property index plus 1.
  __ movp(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64