// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_IA32

#include "ia32/lithium-codegen-ia32.h"
#include "code-stubs.h"
#include "deoptimizer.h"
#include "stub-cache.h"
#include "hydrogen-osr.h"

namespace v8 {
namespace internal {

static SaveFPRegsMode GetSaveFPRegsMode() {
  // We don't need to save floating point regs when generating the snapshot.
  return CpuFeatures::IsSafeForSnapshot(SSE2) ? kSaveFPRegs : kDontSaveFPRegs;
}


// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}

  virtual void AfterCall() const V8_OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  support_aligned_spilled_doubles_ = info()->IsOptimizing();

  dynamic_frame_alignment_ = info()->IsOptimizing() &&
      ((chunk()->num_double_slots() > 2 &&
        !chunk()->graph()->is_recursive()) ||
       !info()->osr_ast_id().IsNone());

  return GeneratePrologue() &&
         GenerateBody() &&
         GenerateDeferredCode() &&
         GenerateJumpTable() &&
         GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  RegisterDependentCodeForEmbeddedMaps(code);
  PopulateDeoptimizationData(code);
  if (!info()->IsStub()) {
    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
  }
  info()->CommitDependencies(code);
}


void LCodeGen::Abort(BailoutReason reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}


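// On Windows the OS commits stack memory one guard page at a time, so a frame
// allocated with a single large "sub esp, N" could skip past the guard page.
// Touching one word per 4 KB page below the new esp, as the helper below
// does, keeps every page mapped before it is used.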
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ mov(Operand(esp, offset), eax);
  }
}


void LCodeGen::SaveCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  CpuFeatureScope scope(masm(), SSE2);
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ movsd(MemOperand(esp, count * kDoubleSize),
             XMMRegister::FromAllocationIndex(save_iterator.Current()));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  CpuFeatureScope scope(masm(), SSE2);
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
             MemOperand(esp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ int3();
    }
#endif

    // Classic mode functions and builtins need to replace the receiver with
    // the global proxy when called as functions (without an explicit receiver
    // object).
    if (info_->this_has_uses() &&
        info_->is_classic_mode() &&
        !info_->is_native()) {
      Label ok;
      // +1 for return address.
      int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
      __ mov(ecx, Operand(esp, receiver_offset));

      __ cmp(ecx, isolate()->factory()->undefined_value());
      __ j(not_equal, &ok, Label::kNear);

      __ mov(ecx, GlobalObjectOperand());
      __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));

      __ mov(Operand(esp, receiver_offset), ecx);

      __ bind(&ok);
    }

    if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
      // Move state of dynamic frame alignment into edx.
      __ Set(edx, Immediate(kNoAlignmentPadding));

      Label do_not_pad, align_loop;
      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
      // Align esp + 4 to a multiple of 2 * kPointerSize.
      __ test(esp, Immediate(kPointerSize));
      __ j(not_zero, &do_not_pad, Label::kNear);
      __ push(Immediate(0));
      __ mov(ebx, esp);
      __ mov(edx, Immediate(kAlignmentPaddingPushed));
      // Copy arguments, receiver, and return address.
      __ mov(ecx, Immediate(scope()->num_parameters() + 2));

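      // The loop below walks ebx (== esp after the push) upward, copying each
      // of the ecx words from one slot above into the current slot; this
      // shifts the return address, receiver and arguments down one word, and
      // the vacated top word is then zapped with kAlignmentZapValue.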
      __ bind(&align_loop);
      __ mov(eax, Operand(ebx, 1 * kPointerSize));
      __ mov(Operand(ebx, 0), eax);
      __ add(Operand(ebx), Immediate(kPointerSize));
      __ dec(ecx);
      __ j(not_zero, &align_loop, Label::kNear);
      __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
      __ bind(&do_not_pad);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    ASSERT(!frame_is_built_);
    frame_is_built_ = true;
    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
    info()->AddNoFrameRange(0, masm_->pc_offset());
  }

  if (info()->IsOptimizing() &&
      dynamic_frame_alignment_ &&
      FLAG_debug_code) {
    __ test(esp, Immediate(kPointerSize));
    __ Assert(zero, kFrameIsExpectedToBeAligned);
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  ASSERT(slots != 0 || !info()->IsOptimizing());
  if (slots > 0) {
    if (slots == 1) {
      if (dynamic_frame_alignment_) {
        __ push(edx);
      } else {
        __ push(Immediate(kNoAlignmentPadding));
      }
    } else {
      if (FLAG_debug_code) {
        __ sub(Operand(esp), Immediate(slots * kPointerSize));
        MakeSureStackPagesMapped(slots * kPointerSize);
        __ push(eax);
        __ mov(Operand(eax), Immediate(slots));
        Label loop;
        __ bind(&loop);
        __ mov(MemOperand(esp, eax, times_4, 0),
               Immediate(kSlotsZapValue));
        __ dec(eax);
        __ j(not_zero, &loop);
        __ pop(eax);
      } else {
        __ sub(Operand(esp), Immediate(slots * kPointerSize));
        MakeSureStackPagesMapped(slots * kPointerSize);
      }

      if (support_aligned_spilled_doubles_) {
        Comment(";;; Store dynamic frame alignment tag for spilled doubles");
        // Store dynamic frame alignment state in the first local.
        int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
        if (dynamic_frame_alignment_) {
          __ mov(Operand(ebp, offset), edx);
        } else {
          __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
        }
      }
    }

    if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
      SaveCallerDoubles();
    }
  }

  // Possibly allocate a local context.
  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is still in edi.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ push(edi);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in eax. It replaces the context passed to us.
    // It's saved in the stack and kept live in esi.
    __ mov(esi, eax);
    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);

    // Copy parameters into context if necessary.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ mov(eax, Operand(ebp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ mov(Operand(esi, context_offset), eax);
        // Update the write barrier. This clobbers eax and ebx.
        __ RecordWriteContextSlot(esi,
                                  context_offset,
                                  eax,
                                  ebx,
                                  kDontSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so esi still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Move state of dynamic frame alignment into edx.
  __ Set(edx, Immediate(kNoAlignmentPadding));

  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
    Label do_not_pad, align_loop;
    // Align ebp + 4 to a multiple of 2 * kPointerSize.
    __ test(ebp, Immediate(kPointerSize));
    __ j(zero, &do_not_pad, Label::kNear);
    __ push(Immediate(0));
    __ mov(ebx, esp);
    __ mov(edx, Immediate(kAlignmentPaddingPushed));

    // Move all parts of the frame over one word. The frame consists of:
    // unoptimized frame slots, alignment state, context, frame pointer, return
    // address, receiver, and the arguments.
    __ mov(ecx, Immediate(scope()->num_parameters() +
           5 + graph()->osr()->UnoptimizedFrameSlots()));

    __ bind(&align_loop);
    __ mov(eax, Operand(ebx, 1 * kPointerSize));
    __ mov(Operand(ebx, 0), eax);
    __ add(Operand(ebx), Immediate(kPointerSize));
    __ dec(ecx);
    __ j(not_zero, &align_loop, Label::kNear);
    __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
    __ sub(Operand(ebp), Immediate(kPointerSize));
    __ bind(&do_not_pad);
  }

  // Save the first local, which is overwritten by the alignment state.
  Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
  __ push(alignment_loc);

  // Set the dynamic frame alignment state.
  __ mov(alignment_loc, edx);

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
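  // The push of the first local above already grew the frame by one slot,
  // hence only (slots - 1) more words are reserved here.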
  ASSERT(slots >= 1);
  __ sub(esp, Immediate((slots - 1) * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
}


void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
  if (!CpuFeatures::IsSupported(SSE2)) {
    if (instr->IsGoto()) {
      x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
    } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
               !instr->IsGap() && !instr->IsReturn()) {
      if (instr->ClobbersDoubleRegisters()) {
        if (instr->HasDoubleRegisterResult()) {
          ASSERT_EQ(1, x87_stack_.depth());
        } else {
          ASSERT_EQ(0, x87_stack_.depth());
        }
      }
      __ VerifyX87StackDepth(x87_stack_.depth());
    }
  }
}


bool LCodeGen::GenerateJumpTable() {
  Label needs_frame;
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  for (int i = 0; i < jump_table_.length(); i++) {
    __ bind(&jump_table_[i].label);
    Address entry = jump_table_[i].address;
    Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (jump_table_[i].needs_frame) {
      ASSERT(!info()->saves_caller_doubles());
      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        // Push a PC inside the function so that the deopt code can find where
        // the deopt comes from. It doesn't have to be the precise return
        // address of a "calling" LAZY deopt, it only has to be somewhere
        // inside the code body.
        Label push_approx_pc;
        __ call(&push_approx_pc);
        __ bind(&push_approx_pc);
        // Push the continuation which was stashed where the ebp should
        // be. Replace it with the saved ebp.
        __ push(MemOperand(esp, 3 * kPointerSize));
        __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
        __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
        __ ret(0);  // Call the continuation without clobbering registers.
      }
    } else {
      if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
        RestoreCallerDoubles();
      }
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];
      X87Stack copy(code->x87_stack());
      x87_stack_ = copy;

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(value->position());

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that esi isn't trashed.
        __ push(ebp);  // Caller's frame pointer.
        __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        __ lea(ebp, Operand(esp, 2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        frame_is_built_ = false;
        __ mov(esp, ebp);
        __ pop(ebp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  if (!info()->IsStub()) {
    // For lazy deoptimization we need space to patch a call after every call.
    // Ensure there is always space for such patching, even if the code ends
    // in a call.
    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
    while (masm()->pc_offset() < target_offset) {
      masm()->nop();
    }
  }
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


X87Register LCodeGen::ToX87Register(int index) const {
  return X87Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToSIMD128Register(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


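// The x87 helpers below maintain a "virtual" FPU stack: x87_stack_ records
// which allocated X87Register currently lives in which physical st(i) slot,
// so a named register can be addressed even though the hardware only exposes
// a stack. Each helper keeps the virtual and physical stacks in sync.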
void LCodeGen::X87LoadForUsage(X87Register reg) {
  ASSERT(x87_stack_.Contains(reg));
  x87_stack_.Fxch(reg);
  __ fstp(0);
}


void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
  ASSERT(x87_stack_.Contains(reg1));
  ASSERT(x87_stack_.Contains(reg2));
  x87_stack_.Fxch(reg1, 1);
  x87_stack_.Fxch(reg2);
  __ fstp(0);
  __ fstp(0);
}


void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
  ASSERT(is_mutable_);
  ASSERT(Contains(reg) && stack_depth_ > other_slot);
  int i = ArrayIndex(reg);
  int st = st2idx(i);
  if (st != other_slot) {
    int other_i = st2idx(other_slot);
    X87Register other = stack_[other_i];
    stack_[other_i] = reg;
    stack_[i] = other;
    // Mirror the swap on the physical FPU stack.
    if (st == 0) {
      __ fxch(other_slot);
    } else if (other_slot == 0) {
      __ fxch(st);
    } else {
      __ fxch(st);
      __ fxch(other_slot);
      __ fxch(st);
    }
  }
}


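// st2idx maps a physical FPU stack position to an index into stack_: the
// array is ordered bottom-to-top, so with stack_depth_ == 3, st(0) (the top)
// is stack_[2] and st(2) (the bottom) is stack_[0].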
int LCodeGen::X87Stack::st2idx(int pos) {
  return stack_depth_ - pos - 1;
}


int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
  for (int i = 0; i < stack_depth_; i++) {
    if (stack_[i].is(reg)) return i;
  }
  UNREACHABLE();
  return -1;
}


bool LCodeGen::X87Stack::Contains(X87Register reg) {
  for (int i = 0; i < stack_depth_; i++) {
    if (stack_[i].is(reg)) return true;
  }
  return false;
}


void LCodeGen::X87Stack::Free(X87Register reg) {
  ASSERT(is_mutable_);
  ASSERT(Contains(reg));
  int i = ArrayIndex(reg);
  int st = st2idx(i);
  if (st > 0) {
    // Keep track of how fstp(i) changes the order of elements.
    int tos_i = st2idx(0);
    stack_[i] = stack_[tos_i];
  }
  pop();
  __ fstp(st);
}


void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
  if (x87_stack_.Contains(dst)) {
    x87_stack_.Fxch(dst);
    __ fstp(0);
  } else {
    x87_stack_.push(dst);
  }
  X87Fld(src, opts);
}


void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
  ASSERT(!src.is_reg_only());
  switch (opts) {
    case kX87DoubleOperand:
      __ fld_d(src);
      break;
    case kX87FloatOperand:
      __ fld_s(src);
      break;
    default:
      UNREACHABLE();
  }
}


void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
  ASSERT(!dst.is_reg_only());
  x87_stack_.Fxch(src);
  switch (opts) {
    case kX87DoubleOperand:
      __ fst_d(dst);
      break;
    default:
      UNREACHABLE();
  }
}


void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
  ASSERT(is_mutable_);
  if (Contains(reg)) {
    Free(reg);
  }
  // Mark this register as the next register to write to.
  stack_[stack_depth_] = reg;
}


void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
  ASSERT(is_mutable_);
  // Assert that reg is prepared to write, but not on the virtual stack yet.
  ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) &&
         stack_depth_ < X87Register::kNumAllocatableRegisters);
  stack_depth_++;
}


void LCodeGen::X87PrepareBinaryOp(
    X87Register left, X87Register right, X87Register result) {
  // You need to use DefineSameAsFirst for x87 instructions.
  ASSERT(result.is(left));
  x87_stack_.Fxch(right, 1);
  x87_stack_.Fxch(left);
}


void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
  if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) {
    bool double_inputs = instr->HasDoubleRegisterInput();

    // Flush the stack from tos down, since Free() will mess with tos.
    for (int i = stack_depth_ - 1; i >= 0; i--) {
      X87Register reg = stack_[i];
      // Skip registers which contain the inputs for the next instruction
      // when flushing the stack.
      if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
        continue;
      }
      Free(reg);
      if (i < stack_depth_ - 1) i++;
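      // Free() moves the previous top-of-stack element into the freed slot i,
      // so bump i back up to revisit that slot on the next iteration.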
    }
  }
  if (instr->IsReturn()) {
    while (stack_depth_ > 0) {
      __ fstp(0);
      stack_depth_--;
    }
    if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
  }
}


void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
  ASSERT(stack_depth_ <= 1);
  // If this is ever used for new stubs producing two pairs of doubles joined
  // into two phis, this assert will hit. That situation is not handled, since
  // the two stacks might have st0 and st1 swapped.
  if (current_block_id + 1 != goto_instr->block_id()) {
    // If we have a value on the x87 stack on leaving a block, it must be a
    // phi input. If the next block we compile is not the join block, we have
    // to discard the stack state.
    stack_depth_ = 0;
  }
}


void LCodeGen::EmitFlushX87ForDeopt() {
  // The deoptimizer does not support X87 registers. But as long as we only
  // deopt from a stub it's not a problem, since we will re-materialize the
  // original stub inputs, which can't be double registers.
  ASSERT(info()->IsStub());
  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
    __ pushfd();
    __ VerifyX87StackDepth(x87_stack_.depth());
    __ popfd();
  }
  for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


X87Register LCodeGen::ToX87Register(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToX87Register(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


XMMRegister LCodeGen::ToFloat32x4Register(LOperand* op) const {
  ASSERT(op->IsFloat32x4Register());
  return ToSIMD128Register(op->index());
}


XMMRegister LCodeGen::ToInt32x4Register(LOperand* op) const {
  ASSERT(op->IsInt32x4Register());
  return ToSIMD128Register(op->index());
}


XMMRegister LCodeGen::ToSIMD128Register(LOperand* op) const {
  ASSERT(op->IsFloat32x4Register() || op->IsInt32x4Register());
  return ToSIMD128Register(op->index());
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  ASSERT(r.IsSmiOrTagged());
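  // On ia32 a Smi is the 31-bit integer shifted left by one with a zero tag
  // bit, so reinterpreting the Smi pointer yields value * 2 (e.g. 5 -> 0xA).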
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


static int ArgumentsOffsetWithoutFrame(int index) {
  ASSERT(index < 0);
  return -(index + 1) * kPointerSize + kPCOnStackSize;
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
  if (op->IsFloat32x4Register()) return Operand(ToFloat32x4Register(op));
  if (op->IsInt32x4Register()) return Operand(ToInt32x4Register(op));
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot() ||
         op->IsFloat32x4StackSlot() || op->IsInt32x4StackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, StackSlotOffset(op->index()));
  } else {
    // Without an eager frame, address the slot relative to the stack pointer.
    return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


Operand LCodeGen::HighOperand(LOperand* op) {
  ASSERT(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // Without an eager frame, address the slot relative to the stack pointer.
    return Operand(
        esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;
  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    default:
      UNREACHABLE();
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsFloat32x4StackSlot()) {
    translation->StoreSIMD128StackSlot(op->index(),
                                       Translation::FLOAT32x4_STACK_SLOT);
  } else if (op->IsInt32x4StackSlot()) {
    translation->StoreSIMD128StackSlot(op->index(),
                                       Translation::INT32x4_STACK_SLOT);
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsFloat32x4Register()) {
    XMMRegister reg = ToFloat32x4Register(op);
    translation->StoreSIMD128Register(reg, Translation::FLOAT32x4_REGISTER);
  } else if (op->IsInt32x4Register()) {
    XMMRegister reg = ToInt32x4Register(op);
    translation->StoreSIMD128Register(reg, Translation::INT32x4_REGISTER);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  ASSERT(instr != NULL);
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallRuntime(const Runtime::Function* fun,
                           int argc,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);
  ASSERT(instr->HasPointerMap());

  __ CallRuntime(fun, argc, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);

  ASSERT(info()->is_calling());
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(esi)) {
      __ mov(esi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ mov(esi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);

  ASSERT(info()->is_calling());
}


void LCodeGen::RegisterEnvironmentForDeoptimization(
    LEnvironment* environment, Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
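    // Stress mode: decrement a global counter at every potential deopt point
    // and force an unconditional bailout whenever it reaches zero, then reset
    // the counter to FLAG_deopt_every_n_times. eax and the flags are saved
    // and restored around the bookkeeping.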
    Label no_deopt;
    __ pushfd();
    __ push(eax);
    __ mov(eax, Operand::StaticVariable(count));
    __ sub(eax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ mov(eax, Immediate(FLAG_deopt_every_n_times));
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
    ASSERT(frame_is_built_);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);

    __ bind(&no_deopt);
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
  }

  // Before instructions which can deopt, we normally flush the x87 stack. But
  // we can have inputs or outputs of the current instruction on the stack,
  // thus we need to flush them here from the physical stack to leave it in a
  // consistent state.
  if (x87_stack_.depth() > 0) {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    EmitFlushX87ForDeopt();
    __ bind(&done);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  if (cc == no_condition && frame_is_built_) {
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        jump_table_.last().address != entry ||
        jump_table_.last().needs_frame != !frame_is_built_ ||
        jump_table_.last().bailout_type != bailout_type) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}


void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(cc, environment, bailout_type);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(kind == expected_safepoint_kind_);
  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->result()).is(eax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  HValue* left = hmod->left();
  HValue* right = hmod->right();
  if (hmod->RightIsPowerOf2()) {
    // TODO(svenpanne) We should really do the strength reduction on the
    // Hydrogen level.
    Register left_reg = ToRegister(instr->left());
    ASSERT(left_reg.is(ToRegister(instr->result())));

    // Note: The code below even works when right contains kMinInt.
    int32_t divisor = Abs(right->GetInteger32Constant());
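    // A power-of-two modulus reduces to a mask: for non-negative dividends
    // x % d == x & (d - 1), while negative dividends must keep the sign of
    // the dividend, so -((-x) & (d - 1)) is computed instead.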

    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      __ test(left_reg, Operand(left_reg));
      __ j(not_sign, &left_is_not_negative, Label::kNear);
      __ neg(left_reg);
      __ and_(left_reg, divisor - 1);
      __ neg(left_reg);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(zero, instr->environment());
      }
      __ jmp(&done, Label::kNear);
    }

    __ bind(&left_is_not_negative);
    __ and_(left_reg, divisor - 1);
    __ bind(&done);
  } else {
    Register left_reg = ToRegister(instr->left());
    ASSERT(left_reg.is(eax));
    Register right_reg = ToRegister(instr->right());
    ASSERT(!right_reg.is(eax));
    ASSERT(!right_reg.is(edx));
    Register result_reg = ToRegister(instr->result());
    ASSERT(result_reg.is(edx));

    Label done;
    // Check for x % 0, idiv would signal a divide error. We have to
    // deopt in this case because we can't return a NaN.
    if (right->CanBeZero()) {
      __ test(right_reg, Operand(right_reg));
      DeoptimizeIf(zero, instr->environment());
    }

    // Check for kMinInt % -1, idiv would signal a divide error. We
    // have to deopt if we care about -0, because we can't return that.
    if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
      Label no_overflow_possible;
      __ cmp(left_reg, kMinInt);
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ cmp(right_reg, -1);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(equal, instr->environment());
      } else {
        __ j(not_equal, &no_overflow_possible, Label::kNear);
        __ Set(result_reg, Immediate(0));
        __ jmp(&done, Label::kNear);
      }
      __ bind(&no_overflow_possible);
    }

    // Sign extend dividend in eax into edx:eax.
    __ cdq();

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (left->CanBeNegative() &&
        hmod->CanBeZero() &&
        hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label positive_left;
      __ test(left_reg, Operand(left_reg));
      __ j(not_sign, &positive_left, Label::kNear);
      __ idiv(right_reg);
      __ test(result_reg, Operand(result_reg));
      DeoptimizeIf(zero, instr->environment());
      __ jmp(&done, Label::kNear);
      __ bind(&positive_left);
    }
    __ idiv(right_reg);
    __ bind(&done);
  }
}


void LCodeGen::DoDivI(LDivI* instr) {
  if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
    Register dividend = ToRegister(instr->left());
    int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
    int32_t test_value = 0;
    int32_t power = 0;

    if (divisor > 0) {
      test_value = divisor - 1;
      power = WhichPowerOf2(divisor);
    } else {
      // Check for (0 / -x) that will produce negative zero.
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        __ test(dividend, Operand(dividend));
        DeoptimizeIf(zero, instr->environment());
      }
      // Check for (kMinInt / -1).
      if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
        __ cmp(dividend, kMinInt);
        DeoptimizeIf(zero, instr->environment());
      }
      test_value = -divisor - 1;
      power = WhichPowerOf2(-divisor);
    }

    if (test_value != 0) {
      if (instr->hydrogen()->CheckFlag(
          HInstruction::kAllUsesTruncatingToInt32)) {
        Label done, negative;
        __ cmp(dividend, 0);
        __ j(less, &negative, Label::kNear);
        __ sar(dividend, power);
        if (divisor < 0) __ neg(dividend);
        __ jmp(&done, Label::kNear);

        __ bind(&negative);
        __ neg(dividend);
        __ sar(dividend, power);
        if (divisor > 0) __ neg(dividend);
        __ bind(&done);
        return;  // Don't fall through to the "__ neg" below.
      } else {
        // Deoptimize if remainder is not 0.
        __ test(dividend, Immediate(test_value));
        DeoptimizeIf(not_zero, instr->environment());
        __ sar(dividend, power);
      }
    }

    if (divisor < 0) __ neg(dividend);

    return;
  }

  LOperand* right = instr->right();
  ASSERT(ToRegister(instr->result()).is(eax));
  ASSERT(ToRegister(instr->left()).is(eax));
  ASSERT(!ToRegister(instr->right()).is(eax));
  ASSERT(!ToRegister(instr->right()).is(edx));

  Register left_reg = eax;

  // Check for x / 0.
  Register right_reg = ToRegister(right);
  if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ test(left_reg, Operand(left_reg));
    __ j(not_zero, &left_not_zero, Label::kNear);
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(sign, instr->environment());
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ cmp(left_reg, kMinInt);
    __ j(not_zero, &left_not_min_int, Label::kNear);
    __ cmp(right_reg, -1);
    DeoptimizeIf(zero, instr->environment());
    __ bind(&left_not_min_int);
  }

  // Sign extend eax into edx:eax.
  __ cdq();
  __ idiv(right_reg);

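  // idiv truncates the quotient toward zero, but flooring division rounds
  // toward negative infinity: when the remainder in edx is non-zero and its
  // sign differs from the divisor's, the quotient must be decremented by one.
  // The xor/sar/add sequence below does exactly that without a branch.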
  if (instr->is_flooring()) {
    Label done;
    __ test(edx, edx);
    __ j(zero, &done, Label::kNear);
    __ xor_(edx, right_reg);
    __ sar(edx, 31);
    __ add(eax, edx);
    __ bind(&done);
  } else if (!instr->hydrogen()->CheckFlag(
      HInstruction::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ test(edx, Operand(edx));
    DeoptimizeIf(not_zero, instr->environment());
  }
}


void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
  ASSERT(instr->right()->IsConstantOperand());

  Register dividend = ToRegister(instr->left());
  int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
  Register result = ToRegister(instr->result());

  switch (divisor) {
    case 0:
      DeoptimizeIf(no_condition, instr->environment());
      return;

    case 1:
      __ Move(result, dividend);
      return;

    case -1:
      __ Move(result, dividend);
      __ neg(result);
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(zero, instr->environment());
      }
      if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
        DeoptimizeIf(overflow, instr->environment());
      }
      return;
  }

  uint32_t divisor_abs = abs(divisor);
  if (IsPowerOf2(divisor_abs)) {
    int32_t power = WhichPowerOf2(divisor_abs);
    if (divisor < 0) {
      // Input[dividend] is clobbered.
      // The sequence is tedious because neg(dividend) might overflow.
      __ mov(result, dividend);
      __ sar(dividend, 31);
      __ neg(result);
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(zero, instr->environment());
      }
      __ shl(dividend, 32 - power);
      __ sar(result, power);
      __ not_(dividend);
      // Clear result.sign if dividend.sign is set.
      __ and_(result, dividend);
    } else {
      __ Move(result, dividend);
      __ sar(result, power);
    }
  } else {
    ASSERT(ToRegister(instr->left()).is(eax));
    ASSERT(ToRegister(instr->result()).is(edx));
    Register scratch = ToRegister(instr->temp());

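    // Divide by multiplying with a fixed-point reciprocal: choose a 32-bit
    // multiplier m ~= 2^(32+b) / divisor_abs, take the high half of
    // dividend * m, and shift it right by b. For divisor_abs == 10 this gives
    // b == 3, shift == 35 and m == 0xCCCCCCCD, the classic divide-by-ten
    // constant; the setcc sequence further down corrects for rounding in m.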
    // Find b such that 2^b < divisor_abs < 2^(b+1).
    unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
    unsigned shift = 32 + b;  // One extra bit of precision (effectively).
    double multiplier_f =
        static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
    int64_t multiplier;
    if (multiplier_f - std::floor(multiplier_f) < 0.5) {
      multiplier = static_cast<int64_t>(std::floor(multiplier_f));
    } else {
      multiplier = static_cast<int64_t>(std::floor(multiplier_f)) + 1;
    }
    // The multiplier is a uint32.
    ASSERT(multiplier > 0 &&
           multiplier < (static_cast<int64_t>(1) << 32));
    __ mov(scratch, dividend);
    if (divisor < 0 &&
        instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ test(dividend, dividend);
      DeoptimizeIf(zero, instr->environment());
    }
    __ mov(edx, static_cast<int32_t>(multiplier));
    __ imul(edx);
    if (static_cast<int32_t>(multiplier) < 0) {
      __ add(edx, scratch);
    }
    Register reg_lo = eax;
    Register reg_byte_scratch = scratch;
    if (!reg_byte_scratch.is_byte_register()) {
      __ xchg(reg_lo, reg_byte_scratch);
      reg_lo = scratch;
      reg_byte_scratch = eax;
    }
    if (divisor < 0) {
      __ xor_(reg_byte_scratch, reg_byte_scratch);
      __ cmp(reg_lo, 0x40000000);
      __ setcc(above, reg_byte_scratch);
      __ neg(edx);
      __ sub(edx, reg_byte_scratch);
    } else {
      __ xor_(reg_byte_scratch, reg_byte_scratch);
      __ cmp(reg_lo, 0xC0000000);
      __ setcc(above_equal, reg_byte_scratch);
      __ add(edx, reg_byte_scratch);
    }
    __ sar(edx, shift - 32);
  }
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ mov(ToRegister(instr->temp()), left);
  }

  if (right->IsConstantOperand()) {
    // Try strength reductions on the multiplication.
    // All replacement instructions are at most as long as the imul
    // and have better latency.
    int constant = ToInteger32(LConstantOperand::cast(right));
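    // x * 3, x * 5 and x * 9 each map onto a single lea (base plus scaled
    // index) and powers of two map onto shl; unlike imul, these instructions
    // do not set the overflow flag, hence the kCanOverflow guard on this
    // branch.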
    if (constant == -1) {
      __ neg(left);
    } else if (constant == 0) {
      __ xor_(left, Operand(left));
    } else if (constant == 2) {
      __ add(left, Operand(left));
    } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      // If we know that the multiplication can't overflow, it's safe to
      // use instructions that don't set the overflow flag for the
      // multiplication.
      switch (constant) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ lea(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shl(left, 2);
          break;
        case 5:
          __ lea(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shl(left, 3);
          break;
        case 9:
          __ lea(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shl(left, 4);
          break;
        default:
          __ imul(left, left, constant);
          break;
      }
    } else {
      __ imul(left, left, constant);
    }
  } else {
    if (instr->hydrogen()->representation().IsSmi()) {
      __ SmiUntag(left);
    }
    __ imul(left, ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    __ test(left, Operand(left));
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        DeoptimizeIf(no_condition, instr->environment());
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        __ cmp(ToRegister(instr->temp()), Immediate(0));
        DeoptimizeIf(less, instr->environment());
      }
    } else {
      // Test the non-zero operand for negative sign.
      __ or_(ToRegister(instr->temp()), ToOperand(right));
      DeoptimizeIf(sign, instr->environment());
    }
    __ bind(&done);
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    int32_t right_operand =
        ToRepresentation(LConstantOperand::cast(right),
                         instr->hydrogen()->representation());
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), right_operand);
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), right_operand);
        break;
      case Token::BIT_XOR:
        if (right_operand == int32_t(~0)) {
          __ not_(ToRegister(left));
        } else {
          __ xor_(ToRegister(left), right_operand);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    ASSERT(ToRegister(right).is(ecx));

    switch (instr->op()) {
      case Token::ROR:
        __ ror_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr->environment());
        }
        break;
      case Token::SAR:
        __ sar_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shr_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr->environment());
        }
        break;
      case Token::SHL:
        __ shl_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr->environment());
        } else {
          __ ror(ToRegister(left), shift_count);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sar(ToRegister(left), shift_count);
        }
        break;
      case Token::SHR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr->environment());
        } else {
          __ shr(ToRegister(left), shift_count);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            if (shift_count != 1) {
              __ shl(ToRegister(left), shift_count - 1);
            }
            __ SmiTag(ToRegister(left));
            DeoptimizeIf(overflow, instr->environment());
          } else {
            __ shl(ToRegister(left), shift_count);
          }
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ sub(ToOperand(left),
           ToImmediate(right, instr->hydrogen()->representation()));
  } else {
    __ sub(ToRegister(left), ToOperand(right));
  }
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  double v = instr->value();
  uint64_t int_val = BitCast<uint64_t, double>(v);
  int32_t lower = static_cast<int32_t>(int_val);
  int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
  ASSERT(instr->result()->IsDoubleRegister());

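  // The double is materialized from its two 32-bit halves: without SSE2 it
  // goes through memory onto the x87 stack; with SSE4.1 the halves are
  // inserted directly via movd/pinsrd; otherwise the upper half is shifted
  // into place with psllq and merged with the lower half using orps.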
  if (!CpuFeatures::IsSafeForSnapshot(SSE2)) {
    __ push(Immediate(upper));
    __ push(Immediate(lower));
    X87Register reg = ToX87Register(instr->result());
    X87Mov(reg, Operand(esp, 0));
    __ add(Operand(esp), Immediate(kDoubleSize));
  } else {
    CpuFeatureScope scope1(masm(), SSE2);
    XMMRegister res = ToDoubleRegister(instr->result());
    if (int_val == 0) {
      __ xorps(res, res);
    } else {
      Register temp = ToRegister(instr->temp());
      if (CpuFeatures::IsSupported(SSE4_1)) {
        CpuFeatureScope scope2(masm(), SSE4_1);
        if (lower != 0) {
          __ Set(temp, Immediate(lower));
          __ movd(res, Operand(temp));
          __ Set(temp, Immediate(upper));
          __ pinsrd(res, Operand(temp), 1);
        } else {
          __ xorps(res, res);
          __ Set(temp, Immediate(upper));
          __ pinsrd(res, Operand(temp), 1);
        }
      } else {
        __ Set(temp, Immediate(upper));
        __ movd(res, Operand(temp));
        __ psllq(res, 32);
        if (lower != 0) {
          XMMRegister xmm_scratch = double_scratch0();
          __ Set(temp, Immediate(lower));
          __ movd(xmm_scratch, Operand(temp));
          __ orps(res, xmm_scratch);
        }
      }
    }
  }
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Register reg = ToRegister(instr->result());
  Handle<Object> handle = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ LoadObject(reg, handle);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(result));
  ASSERT(object.is(eax));

  __ test(object, Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr->environment());
  __ CmpObjectType(object, JS_DATE_TYPE, scratch);
  DeoptimizeIf(not_equal, instr->environment());

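  // Date fields below kFirstUncachedField are cached on the JSDate object.
  // The cache is valid as long as the global date cache stamp is unchanged,
  // so compare the stamp first and fall back to the C function on a mismatch.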
  if (index->value() == 0) {
    __ mov(result, FieldOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ mov(scratch, Operand::StaticVariable(stamp));
      __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
      __ j(not_equal, &runtime, Label::kNear);
      __ mov(result, FieldOperand(object, JSDate::kValueOffset +
                                          kPointerSize * index->value()));
      __ jmp(&done, Label::kNear);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ mov(Operand(esp, 0), object);
    __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}


Operand LCodeGen::BuildSeqStringOperand(Register string,
                                        LOperand* index,
                                        String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToRepresentation(LConstantOperand::cast(index),
                                  Representation::Integer32());
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldOperand(string, SeqString::kHeaderSize + offset);
  }
  return FieldOperand(
      string, ToRegister(index),
      encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
      SeqString::kHeaderSize);
}


2064 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
2065 String::Encoding encoding = instr->hydrogen()->encoding();
2066 Register result = ToRegister(instr->result());
2067 Register string = ToRegister(instr->string());
2069 if (FLAG_debug_code) {
2070 __ push(string);
2071 __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
2072 __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
2074 __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
2075 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2076 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2077 __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
2078 ? one_byte_seq_type : two_byte_seq_type));
2079 __ Check(equal, kUnexpectedStringType);
2080 __ pop(string);
2081 }
2083 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2084 if (encoding == String::ONE_BYTE_ENCODING) {
2085 __ movzx_b(result, operand);
2086 } else {
2087 __ movzx_w(result, operand);
2088 }
2089 }
2092 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
2093 String::Encoding encoding = instr->hydrogen()->encoding();
2094 Register string = ToRegister(instr->string());
2096 if (FLAG_debug_code) {
2097 Register value = ToRegister(instr->value());
2098 Register index = ToRegister(instr->index());
2099 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2100 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2101 int encoding_mask =
2102 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
2103 ? one_byte_seq_type : two_byte_seq_type;
2104 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
2105 }
2107 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2108 if (instr->value()->IsConstantOperand()) {
2109 int value = ToRepresentation(LConstantOperand::cast(instr->value()),
2110 Representation::Integer32());
2111 ASSERT_LE(0, value);
2112 if (encoding == String::ONE_BYTE_ENCODING) {
2113 ASSERT_LE(value, String::kMaxOneByteCharCode);
2114 __ mov_b(operand, static_cast<int8_t>(value));
2115 } else {
2116 ASSERT_LE(value, String::kMaxUtf16CodeUnit);
2117 __ mov_w(operand, static_cast<int16_t>(value));
2118 }
2119 } else {
2120 Register value = ToRegister(instr->value());
2121 if (encoding == String::ONE_BYTE_ENCODING) {
2122 __ mov_b(operand, value);
2123 } else {
2124 __ mov_w(operand, value);
2125 }
2126 }
2127 }
2130 void LCodeGen::DoAddI(LAddI* instr) {
2131 LOperand* left = instr->left();
2132 LOperand* right = instr->right();
2134 if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
2135 if (right->IsConstantOperand()) {
2136 int32_t offset = ToRepresentation(LConstantOperand::cast(right),
2137 instr->hydrogen()->representation());
2138 __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
2139 } else {
2140 Operand address(ToRegister(left), ToRegister(right), times_1, 0);
2141 __ lea(ToRegister(instr->result()), address);
2142 }
2143 } else {
2144 if (right->IsConstantOperand()) {
2145 __ add(ToOperand(left),
2146 ToImmediate(right, instr->hydrogen()->representation()));
2147 } else {
2148 __ add(ToRegister(left), ToOperand(right));
2149 }
2150 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
2151 DeoptimizeIf(overflow, instr->environment());
2152 }
2153 }
2154 }
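// The lea form above is a three-operand add: the result register may differ
// from both inputs and the sum is computed by the address unit. Since lea
// does not update the CPU flags, that path cannot feed the overflow check,
// so it is reserved (via LAddI::UseLea) for adds that need no overflow
// deoptimization; the flag-setting add is emitted otherwise.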
2157 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
2158 CpuFeatureScope scope(masm(), SSE2);
2159 LOperand* left = instr->left();
2160 LOperand* right = instr->right();
2161 ASSERT(left->Equals(instr->result()));
2162 HMathMinMax::Operation operation = instr->hydrogen()->operation();
2163 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
2164 Label return_left;
2165 Condition condition = (operation == HMathMinMax::kMathMin)
2166 ? less_equal
2167 : greater_equal;
2168 if (right->IsConstantOperand()) {
2169 Operand left_op = ToOperand(left);
2170 Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
2171 instr->hydrogen()->representation());
2172 __ cmp(left_op, immediate);
2173 __ j(condition, &return_left, Label::kNear);
2174 __ mov(left_op, immediate);
2175 } else {
2176 Register left_reg = ToRegister(left);
2177 Operand right_op = ToOperand(right);
2178 __ cmp(left_reg, right_op);
2179 __ j(condition, &return_left, Label::kNear);
2180 __ mov(left_reg, right_op);
2181 }
2182 __ bind(&return_left);
2183 } else {
2184 ASSERT(instr->hydrogen()->representation().IsDouble());
2185 Label check_nan_left, check_zero, return_left, return_right;
2186 Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
2187 XMMRegister left_reg = ToDoubleRegister(left);
2188 XMMRegister right_reg = ToDoubleRegister(right);
2189 __ ucomisd(left_reg, right_reg);
2190 __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
2191 __ j(equal, &check_zero, Label::kNear); // left == right.
2192 __ j(condition, &return_left, Label::kNear);
2193 __ jmp(&return_right, Label::kNear);
2195 __ bind(&check_zero);
2196 XMMRegister xmm_scratch = double_scratch0();
2197 __ xorps(xmm_scratch, xmm_scratch);
2198 __ ucomisd(left_reg, xmm_scratch);
2199 __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
2200 // At this point, both left and right are either 0 or -0.
2201 if (operation == HMathMinMax::kMathMin) {
2202 __ orpd(left_reg, right_reg);
2203 } else {
2204 // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
2205 __ addsd(left_reg, right_reg);
2206 }
2207 __ jmp(&return_left, Label::kNear);
2209 __ bind(&check_nan_left);
2210 __ ucomisd(left_reg, left_reg); // NaN check.
2211 __ j(parity_even, &return_left, Label::kNear); // left == NaN.
2212 __ bind(&return_right);
2213 __ movaps(left_reg, right_reg);
2215 __ bind(&return_left);
2216 }
2217 }
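// Note: ucomisd alone cannot implement JavaScript Math.min/max. It raises
// the parity flag for unordered (NaN) operands and compares +0 and -0 as
// equal, so both cases get special handling above: for equal zero operands,
// min uses orpd (OR of the sign bits, so any -0 wins) and max uses addsd
// (+0 + -0 == +0); if either operand is NaN, that NaN is returned.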
2220 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2221 if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
2222 CpuFeatureScope scope(masm(), SSE2);
2223 XMMRegister left = ToDoubleRegister(instr->left());
2224 XMMRegister right = ToDoubleRegister(instr->right());
2225 XMMRegister result = ToDoubleRegister(instr->result());
2226 switch (instr->op()) {
2227 case Token::ADD:
2228 __ addsd(left, right);
2229 break;
2230 case Token::SUB:
2231 __ subsd(left, right);
2232 break;
2233 case Token::MUL:
2234 __ mulsd(left, right);
2235 break;
2236 case Token::DIV:
2237 __ divsd(left, right);
2238 // Don't delete this mov. It may improve performance on some CPUs
2239 // when there is a mulsd depending on the result.
2240 __ movaps(left, left);
2241 break;
2242 case Token::MOD: {
2243 // Pass two doubles as arguments on the stack.
2244 __ PrepareCallCFunction(4, eax);
2245 __ movsd(Operand(esp, 0 * kDoubleSize), left);
2246 __ movsd(Operand(esp, 1 * kDoubleSize), right);
2247 __ CallCFunction(
2248 ExternalReference::mod_two_doubles_operation(isolate()),
2249 4);
2251 // Return value is in st(0) on ia32.
2252 // Store it into the result register.
2253 __ sub(Operand(esp), Immediate(kDoubleSize));
2254 __ fstp_d(Operand(esp, 0));
2255 __ movsd(result, Operand(esp, 0));
2256 __ add(Operand(esp), Immediate(kDoubleSize));
2257 break;
2258 }
2259 default:
2260 UNREACHABLE();
2261 break;
2262 }
2263 } else {
2264 X87Register left = ToX87Register(instr->left());
2265 X87Register right = ToX87Register(instr->right());
2266 X87Register result = ToX87Register(instr->result());
2267 if (instr->op() != Token::MOD) {
2268 X87PrepareBinaryOp(left, right, result);
2269 }
2270 switch (instr->op()) {
2271 case Token::ADD:
2272 __ fadd_i(1);
2273 break;
2274 case Token::SUB:
2275 __ fsub_i(1);
2276 break;
2277 case Token::MUL:
2278 __ fmul_i(1);
2279 break;
2280 case Token::DIV:
2281 __ fdiv_i(1);
2282 break;
2283 case Token::MOD: {
2284 // Pass two doubles as arguments on the stack.
2285 __ PrepareCallCFunction(4, eax);
2286 X87Mov(Operand(esp, 1 * kDoubleSize), right);
2287 X87Mov(Operand(esp, 0), left);
2288 X87Free(right);
2289 ASSERT(left.is(result));
2290 X87PrepareToWrite(result);
2291 __ CallCFunction(
2292 ExternalReference::mod_two_doubles_operation(isolate()),
2293 4);
2295 // Return value is in st(0) on ia32.
2296 X87CommitWrite(result);
2297 break;
2298 }
2299 default:
2300 UNREACHABLE();
2301 break;
2302 }
2303 }
2304 }
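// Token::MOD has no SSE2 instruction, so both paths call the C function
// behind ExternalReference::mod_two_doubles_operation (fmod semantics) with
// the two doubles passed on the stack. The ia32 C ABI returns doubles in x87
// st(0), which is why the SSE2 path must round-trip the result through
// memory with fstp_d before movsd can read it into an XMM register.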
2307 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2308 ASSERT(ToRegister(instr->context()).is(esi));
2309 ASSERT(ToRegister(instr->left()).is(edx));
2310 ASSERT(ToRegister(instr->right()).is(eax));
2311 ASSERT(ToRegister(instr->result()).is(eax));
2313 BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
2314 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2315 __ nop(); // Signals no inlined code.
2319 template<class InstrType>
2320 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2321 int left_block = instr->TrueDestination(chunk_);
2322 int right_block = instr->FalseDestination(chunk_);
2324 int next_block = GetNextEmittedBlock();
2326 if (right_block == left_block || cc == no_condition) {
2327 EmitGoto(left_block);
2328 } else if (left_block == next_block) {
2329 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
2330 } else if (right_block == next_block) {
2331 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2332 } else {
2333 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2334 __ jmp(chunk_->GetAssemblyLabel(right_block));
2335 }
2336 }
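// EmitBranch exploits block layout to avoid redundant jumps: identical
// targets (or no_condition) degenerate to EmitGoto; if the true block is
// emitted next, it branches on the negated condition and falls through to
// it; if the false block is next, it branches on cc and falls through; only
// when neither target follows does it need both a conditional jump and an
// unconditional one.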
2339 template<class InstrType>
2340 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2341 int false_block = instr->FalseDestination(chunk_);
2342 if (cc == no_condition) {
2343 __ jmp(chunk_->GetAssemblyLabel(false_block));
2344 } else {
2345 __ j(cc, chunk_->GetAssemblyLabel(false_block));
2346 }
2347 }
2350 void LCodeGen::DoBranch(LBranch* instr) {
2351 Representation r = instr->hydrogen()->value()->representation();
2352 if (r.IsSmiOrInteger32()) {
2353 Register reg = ToRegister(instr->value());
2354 __ test(reg, Operand(reg));
2355 EmitBranch(instr, not_zero);
2356 } else if (r.IsDouble()) {
2357 ASSERT(!info()->IsStub());
2358 CpuFeatureScope scope(masm(), SSE2);
2359 XMMRegister reg = ToDoubleRegister(instr->value());
2360 XMMRegister xmm_scratch = double_scratch0();
2361 __ xorps(xmm_scratch, xmm_scratch);
2362 __ ucomisd(reg, xmm_scratch);
2363 EmitBranch(instr, not_equal);
2364 } else {
2365 ASSERT(r.IsTagged());
2366 Register reg = ToRegister(instr->value());
2367 HType type = instr->hydrogen()->value()->type();
2368 if (type.IsBoolean()) {
2369 ASSERT(!info()->IsStub());
2370 __ cmp(reg, factory()->true_value());
2371 EmitBranch(instr, equal);
2372 } else if (type.IsSmi()) {
2373 ASSERT(!info()->IsStub());
2374 __ test(reg, Operand(reg));
2375 EmitBranch(instr, not_equal);
2376 } else if (type.IsJSArray()) {
2377 ASSERT(!info()->IsStub());
2378 EmitBranch(instr, no_condition);
2379 } else if (type.IsHeapNumber()) {
2380 ASSERT(!info()->IsStub());
2381 CpuFeatureScope scope(masm(), SSE2);
2382 XMMRegister xmm_scratch = double_scratch0();
2383 __ xorps(xmm_scratch, xmm_scratch);
2384 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2385 EmitBranch(instr, not_equal);
2386 } else if (type.IsString()) {
2387 ASSERT(!info()->IsStub());
2388 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2389 EmitBranch(instr, not_equal);
2390 } else {
2391 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2392 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2394 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2395 // undefined -> false.
2396 __ cmp(reg, factory()->undefined_value());
2397 __ j(equal, instr->FalseLabel(chunk_));
2399 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2400 // true -> true.
2401 __ cmp(reg, factory()->true_value());
2402 __ j(equal, instr->TrueLabel(chunk_));
2403 // false -> false.
2404 __ cmp(reg, factory()->false_value());
2405 __ j(equal, instr->FalseLabel(chunk_));
2406 }
2407 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2408 // 'null' -> false.
2409 __ cmp(reg, factory()->null_value());
2410 __ j(equal, instr->FalseLabel(chunk_));
2411 }
2413 if (expected.Contains(ToBooleanStub::SMI)) {
2414 // Smis: 0 -> false, all other -> true.
2415 __ test(reg, Operand(reg));
2416 __ j(equal, instr->FalseLabel(chunk_));
2417 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2418 } else if (expected.NeedsMap()) {
2419 // If we need a map later and have a Smi -> deopt.
2420 __ test(reg, Immediate(kSmiTagMask));
2421 DeoptimizeIf(zero, instr->environment());
2422 }
2424 Register map = no_reg; // Keep the compiler happy.
2425 if (expected.NeedsMap()) {
2426 map = ToRegister(instr->temp());
2427 ASSERT(!map.is(reg));
2428 __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
2430 if (expected.CanBeUndetectable()) {
2431 // Undetectable -> false.
2432 __ test_b(FieldOperand(map, Map::kBitFieldOffset),
2433 1 << Map::kIsUndetectable);
2434 __ j(not_zero, instr->FalseLabel(chunk_));
2435 }
2436 }
2438 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2439 // spec object -> true.
2440 __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2441 __ j(above_equal, instr->TrueLabel(chunk_));
2442 }
2444 if (expected.Contains(ToBooleanStub::STRING)) {
2445 // String value -> false iff empty.
2446 Label not_string;
2447 __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2448 __ j(above_equal, &not_string, Label::kNear);
2449 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2450 __ j(not_zero, instr->TrueLabel(chunk_));
2451 __ jmp(instr->FalseLabel(chunk_));
2452 __ bind(&not_string);
2453 }
2455 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2456 // Symbol value -> true.
2457 __ CmpInstanceType(map, SYMBOL_TYPE);
2458 __ j(equal, instr->TrueLabel(chunk_));
2459 }
2461 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2462 // heap number -> false iff +0, -0, or NaN.
2463 Label not_heap_number;
2464 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2465 factory()->heap_number_map());
2466 __ j(not_equal, &not_heap_number, Label::kNear);
2467 if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
2468 CpuFeatureScope scope(masm(), SSE2);
2469 XMMRegister xmm_scratch = double_scratch0();
2470 __ xorps(xmm_scratch, xmm_scratch);
2471 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2472 } else {
2473 __ fldz();
2474 __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
2475 __ FCmp();
2476 }
2477 __ j(zero, instr->FalseLabel(chunk_));
2478 __ jmp(instr->TrueLabel(chunk_));
2479 __ bind(&not_heap_number);
2480 }
2482 if (!expected.IsGeneric()) {
2483 // We've seen something for the first time -> deopt.
2484 // This can only happen if we are not generic already.
2485 DeoptimizeIf(no_condition, instr->environment());
2486 }
2487 }
2488 }
2489 }
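// The generic tagged case only tests the value types recorded in the
// ToBoolean type feedback (expected). A value of a type never seen before
// reaches the unconditional DeoptimizeIf above, so the code is thrown away
// and recompiled with widened feedback rather than computing a wrong result.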
2492 void LCodeGen::EmitGoto(int block) {
2493 if (!IsNextEmittedBlock(block)) {
2494 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2499 void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
2503 void LCodeGen::DoGoto(LGoto* instr) {
2504 EmitGoto(instr->block_id());
2508 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2509 Condition cond = no_condition;
2510 switch (op) {
2511 case Token::EQ:
2512 case Token::EQ_STRICT:
2513 cond = equal;
2514 break;
2515 case Token::NE:
2516 case Token::NE_STRICT:
2517 cond = not_equal;
2518 break;
2519 case Token::LT:
2520 cond = is_unsigned ? below : less;
2521 break;
2522 case Token::GT:
2523 cond = is_unsigned ? above : greater;
2524 break;
2525 case Token::LTE:
2526 cond = is_unsigned ? below_equal : less_equal;
2527 break;
2528 case Token::GTE:
2529 cond = is_unsigned ? above_equal : greater_equal;
2530 break;
2531 case Token::IN:
2532 case Token::INSTANCEOF:
2533 default:
2534 UNREACHABLE();
2535 }
2536 return cond;
2537 }
2540 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2541 LOperand* left = instr->left();
2542 LOperand* right = instr->right();
2543 Condition cc = TokenToCondition(instr->op(), instr->is_double());
2545 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2546 // We can statically evaluate the comparison.
2547 double left_val = ToDouble(LConstantOperand::cast(left));
2548 double right_val = ToDouble(LConstantOperand::cast(right));
2549 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2550 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2551 EmitGoto(next_block);
2552 } else {
2553 if (instr->is_double()) {
2554 if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
2555 CpuFeatureScope scope(masm(), SSE2);
2556 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
2557 } else {
2558 X87LoadForUsage(ToX87Register(right), ToX87Register(left));
2559 __ FCmp();
2560 }
2561 // Don't base the result on EFLAGS when a NaN is involved; instead,
2562 // jump to the false block.
2563 __ j(parity_even, instr->FalseLabel(chunk_));
2564 } else {
2565 if (right->IsConstantOperand()) {
2566 __ cmp(ToOperand(left),
2567 ToImmediate(right, instr->hydrogen()->representation()));
2568 } else if (left->IsConstantOperand()) {
2569 __ cmp(ToOperand(right),
2570 ToImmediate(left, instr->hydrogen()->representation()));
2571 // We transposed the operands. Reverse the condition.
2572 cc = ReverseCondition(cc);
2573 } else {
2574 __ cmp(ToRegister(left), ToOperand(right));
2575 }
2576 }
2577 EmitBranch(instr, cc);
2578 }
2579 }
2582 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2583 Register left = ToRegister(instr->left());
2585 if (instr->right()->IsConstantOperand()) {
2586 Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2587 __ CmpObject(left, right);
2588 } else {
2589 Operand right = ToOperand(instr->right());
2590 __ cmp(left, right);
2591 }
2592 EmitBranch(instr, equal);
2596 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2597 if (instr->hydrogen()->representation().IsTagged()) {
2598 Register input_reg = ToRegister(instr->object());
2599 __ cmp(input_reg, factory()->the_hole_value());
2600 EmitBranch(instr, equal);
2601 return;
2602 }
2604 bool use_sse2 = CpuFeatures::IsSupported(SSE2);
2605 if (use_sse2) {
2606 CpuFeatureScope scope(masm(), SSE2);
2607 XMMRegister input_reg = ToDoubleRegister(instr->object());
2608 __ ucomisd(input_reg, input_reg);
2609 EmitFalseBranch(instr, parity_odd);
2610 } else {
2611 // Put the value on top of the x87 stack.
2612 X87Register src = ToX87Register(instr->object());
2613 X87LoadForUsage(src);
2614 __ fld(0);
2615 __ fld(0);
2616 __ FCmp();
2617 Label ok;
2618 __ j(parity_even, &ok, Label::kNear);
2619 __ fstp(0);
2620 EmitFalseBranch(instr, no_condition);
2621 __ bind(&ok);
2622 }
2625 __ sub(esp, Immediate(kDoubleSize));
2626 if (use_sse2) {
2627 CpuFeatureScope scope(masm(), SSE2);
2628 XMMRegister input_reg = ToDoubleRegister(instr->object());
2629 __ movsd(MemOperand(esp, 0), input_reg);
2630 } else {
2631 __ fstp_d(MemOperand(esp, 0));
2632 }
2634 __ add(esp, Immediate(kDoubleSize));
2635 int offset = sizeof(kHoleNanUpper32);
2636 __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
2637 EmitBranch(instr, equal);
2638 }
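// "The hole" in unboxed double storage is a NaN with one specific bit
// pattern, recognizable by its upper 32 bits (kHoleNanUpper32). Every
// non-NaN double compares equal to itself under ucomisd, so the self-compare
// above filters ordinary values early; only NaNs fall through to the exact
// comparison of the upper word spilled to the stack.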
2641 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2642 Representation rep = instr->hydrogen()->value()->representation();
2643 ASSERT(!rep.IsInteger32());
2644 Register scratch = ToRegister(instr->temp());
2646 if (rep.IsDouble()) {
2647 CpuFeatureScope use_sse2(masm(), SSE2);
2648 XMMRegister value = ToDoubleRegister(instr->value());
2649 XMMRegister xmm_scratch = double_scratch0();
2650 __ xorps(xmm_scratch, xmm_scratch);
2651 __ ucomisd(xmm_scratch, value);
2652 EmitFalseBranch(instr, not_equal);
2653 __ movmskpd(scratch, value);
2654 __ test(scratch, Immediate(1));
2655 EmitBranch(instr, not_zero);
2656 } else {
2657 Register value = ToRegister(instr->value());
2658 Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2659 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
2660 __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
2661 Immediate(0x80000000));
2662 EmitFalseBranch(instr, not_equal);
2663 __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
2664 Immediate(0x00000000));
2665 EmitBranch(instr, equal);
2666 }
2667 }
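// -0 cannot be distinguished from +0 by a double compare (they are equal
// under ucomisd), so the sign is read separately: movmskpd copies the sign
// bit into bit 0 of a GP register. The tagged path instead matches the heap
// number bitwise against the -0 pattern, exponent word 0x80000000 and
// mantissa word 0x00000000.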
2670 Condition LCodeGen::EmitIsObject(Register input,
2671 Register temp1,
2672 Label* is_not_object,
2673 Label* is_object) {
2674 __ JumpIfSmi(input, is_not_object);
2676 __ cmp(input, isolate()->factory()->null_value());
2677 __ j(equal, is_object);
2679 __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
2680 // Undetectable objects behave like undefined.
2681 __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
2682 1 << Map::kIsUndetectable);
2683 __ j(not_zero, is_not_object);
2685 __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
2686 __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
2687 __ j(below, is_not_object);
2688 __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
2689 return below_equal;
2690 }
2693 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2694 Register reg = ToRegister(instr->value());
2695 Register temp = ToRegister(instr->temp());
2697 Condition true_cond = EmitIsObject(
2698 reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2700 EmitBranch(instr, true_cond);
2704 Condition LCodeGen::EmitIsString(Register input,
2705 Register temp1,
2706 Label* is_not_string,
2707 SmiCheck check_needed = INLINE_SMI_CHECK) {
2708 if (check_needed == INLINE_SMI_CHECK) {
2709 __ JumpIfSmi(input, is_not_string);
2710 }
2712 Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2714 return cond;
2715 }
2718 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2719 Register reg = ToRegister(instr->value());
2720 Register temp = ToRegister(instr->temp());
2722 SmiCheck check_needed =
2723 instr->hydrogen()->value()->IsHeapObject()
2724 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2726 Condition true_cond = EmitIsString(
2727 reg, temp, instr->FalseLabel(chunk_), check_needed);
2729 EmitBranch(instr, true_cond);
2733 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2734 Operand input = ToOperand(instr->value());
2736 __ test(input, Immediate(kSmiTagMask));
2737 EmitBranch(instr, zero);
2741 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2742 Register input = ToRegister(instr->value());
2743 Register temp = ToRegister(instr->temp());
2745 if (!instr->hydrogen()->value()->IsHeapObject()) {
2746 STATIC_ASSERT(kSmiTag == 0);
2747 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2748 }
2749 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2750 __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
2751 1 << Map::kIsUndetectable);
2752 EmitBranch(instr, not_zero);
2756 static Condition ComputeCompareCondition(Token::Value op) {
2757 switch (op) {
2758 case Token::EQ_STRICT:
2759 case Token::EQ:
2760 return equal;
2761 case Token::LT:
2762 return less;
2763 case Token::GT:
2764 return greater;
2765 case Token::LTE:
2766 return less_equal;
2767 case Token::GTE:
2768 return greater_equal;
2769 default:
2770 UNREACHABLE();
2771 return no_condition;
2772 }
2773 }
2776 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2777 Token::Value op = instr->op();
2779 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2780 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2782 Condition condition = ComputeCompareCondition(op);
2783 __ test(eax, Operand(eax));
2785 EmitBranch(instr, condition);
2789 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2790 InstanceType from = instr->from();
2791 InstanceType to = instr->to();
2792 if (from == FIRST_TYPE) return to;
2793 ASSERT(from == to || to == LAST_TYPE);
2794 return from;
2795 }
2798 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2799 InstanceType from = instr->from();
2800 InstanceType to = instr->to();
2801 if (from == to) return equal;
2802 if (to == LAST_TYPE) return above_equal;
2803 if (from == FIRST_TYPE) return below_equal;
2804 UNREACHABLE();
2805 return equal;
2806 }
2809 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2810 Register input = ToRegister(instr->value());
2811 Register temp = ToRegister(instr->temp());
2813 if (!instr->hydrogen()->value()->IsHeapObject()) {
2814 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2815 }
2817 __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
2818 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2822 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2823 Register input = ToRegister(instr->value());
2824 Register result = ToRegister(instr->result());
2826 __ AssertString(input);
2828 __ mov(result, FieldOperand(input, String::kHashFieldOffset));
2829 __ IndexFromHash(result, result);
2833 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2834 LHasCachedArrayIndexAndBranch* instr) {
2835 Register input = ToRegister(instr->value());
2837 __ test(FieldOperand(input, String::kHashFieldOffset),
2838 Immediate(String::kContainsCachedArrayIndexMask));
2839 EmitBranch(instr, equal);
2843 // Branches to a label or falls through with the answer in the z flag. Trashes
2844 // the temp registers, but not the input.
2845 void LCodeGen::EmitClassOfTest(Label* is_true,
2846 Label* is_false,
2847 Handle<String> class_name,
2848 Register input,
2849 Register temp,
2850 Register temp2) {
2851 ASSERT(!input.is(temp));
2852 ASSERT(!input.is(temp2));
2853 ASSERT(!temp.is(temp2));
2854 __ JumpIfSmi(input, is_false);
2856 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
2857 // Assuming the following assertions, we can use the same compares to test
2858 // for both being a function type and being in the object type range.
2859 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2860 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2861 FIRST_SPEC_OBJECT_TYPE + 1);
2862 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2863 LAST_SPEC_OBJECT_TYPE - 1);
2864 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2865 __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2866 __ j(below, is_false);
2867 __ j(equal, is_true);
2868 __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2869 __ j(equal, is_true);
2870 } else {
2871 // Faster code path to avoid two compares: subtract lower bound from the
2872 // actual type and do a signed compare with the width of the type range.
2873 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2874 __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2875 __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2876 __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2877 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2878 __ j(above, is_false);
2879 }
2881 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2882 // Check if the constructor in the map is a function.
2883 __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
2884 // Objects with a non-function constructor have class 'Object'.
2885 __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
2886 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
2887 __ j(not_equal, is_true);
2888 } else {
2889 __ j(not_equal, is_false);
2890 }
2892 // temp now contains the constructor function. Grab the
2893 // instance class name from there.
2894 __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2895 __ mov(temp, FieldOperand(temp,
2896 SharedFunctionInfo::kInstanceClassNameOffset));
2897 // The class name we are testing against is internalized since it's a literal.
2898 // The name in the constructor is internalized because of the way the context
2899 // is booted. This routine isn't expected to work for random API-created
2900 // classes and it doesn't have to because you can't access it with natives
2901 // syntax. Since both sides are internalized it is sufficient to use an
2902 // identity comparison.
2903 __ cmp(temp, class_name);
2904 // End with the answer in the z flag.
2905 }
2908 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2909 Register input = ToRegister(instr->value());
2910 Register temp = ToRegister(instr->temp());
2911 Register temp2 = ToRegister(instr->temp2());
2913 Handle<String> class_name = instr->hydrogen()->class_name();
2915 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2916 class_name, input, temp, temp2);
2918 EmitBranch(instr, equal);
2922 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2923 Register reg = ToRegister(instr->value());
2924 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2925 EmitBranch(instr, equal);
2929 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2930 // Object and function are in fixed registers defined by the stub.
2931 ASSERT(ToRegister(instr->context()).is(esi));
2932 InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2933 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2935 Label true_value, done;
2936 __ test(eax, Operand(eax));
2937 __ j(zero, &true_value, Label::kNear);
2938 __ mov(ToRegister(instr->result()), factory()->false_value());
2939 __ jmp(&done, Label::kNear);
2940 __ bind(&true_value);
2941 __ mov(ToRegister(instr->result()), factory()->true_value());
2942 __ bind(&done);
2943 }
2946 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2947 class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
2948 public:
2949 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2950 LInstanceOfKnownGlobal* instr,
2951 const X87Stack& x87_stack)
2952 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
2953 virtual void Generate() V8_OVERRIDE {
2954 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2955 }
2956 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
2957 Label* map_check() { return &map_check_; }
2958 private:
2959 LInstanceOfKnownGlobal* instr_;
2960 Label map_check_;
2961 };
2963 DeferredInstanceOfKnownGlobal* deferred;
2964 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_);
2966 Label done, false_result;
2967 Register object = ToRegister(instr->value());
2968 Register temp = ToRegister(instr->temp());
2970 // A Smi is not an instance of anything.
2971 __ JumpIfSmi(object, &false_result, Label::kNear);
2973 // This is the inlined call site instanceof cache. The two occurrences of the
2974 // hole value will be patched to the last map/result pair generated by the
2975 // instanceof stub.
2976 Label cache_miss;
2977 Register map = ToRegister(instr->temp());
2978 __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
2979 __ bind(deferred->map_check()); // Label for calculating code patching.
2980 Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
2981 __ cmp(map, Operand::ForCell(cache_cell)); // Patched to cached map.
2982 __ j(not_equal, &cache_miss, Label::kNear);
2983 __ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
2984 __ jmp(&done, Label::kNear);
2986 // The inlined call site cache did not match. Check for null and string
2987 // before calling the deferred code.
2988 __ bind(&cache_miss);
2989 // Null is not an instance of anything.
2990 __ cmp(object, factory()->null_value());
2991 __ j(equal, &false_result, Label::kNear);
2993 // String values are not instances of anything.
2994 Condition is_string = masm_->IsObjectStringType(object, temp, temp);
2995 __ j(is_string, &false_result, Label::kNear);
2997 // Go to the deferred code.
2998 __ jmp(deferred->entry());
3000 __ bind(&false_result);
3001 __ mov(ToRegister(instr->result()), factory()->false_value());
3003 // Here result has either true or false. Deferred code also produces true or
3004 // false object.
3005 __ bind(deferred->exit());
3010 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
3011 Label* map_check) {
3012 PushSafepointRegistersScope scope(this);
3014 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
3015 flags = static_cast<InstanceofStub::Flags>(
3016 flags | InstanceofStub::kArgsInRegisters);
3017 flags = static_cast<InstanceofStub::Flags>(
3018 flags | InstanceofStub::kCallSiteInlineCheck);
3019 flags = static_cast<InstanceofStub::Flags>(
3020 flags | InstanceofStub::kReturnTrueFalseObject);
3021 InstanceofStub stub(flags);
3023 // Get the temp register reserved by the instruction. This needs to be a
3024 // register which is pushed last by PushSafepointRegisters as top of the
3025 // stack is used to pass the offset to the location of the map check to
3026 // the stub.
3027 Register temp = ToRegister(instr->temp());
3028 ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
3029 __ LoadHeapObject(InstanceofStub::right(), instr->function());
3030 static const int kAdditionalDelta = 13;
3031 int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
3032 __ mov(temp, Immediate(delta));
3033 __ StoreToSafepointRegisterSlot(temp, temp);
3034 CallCodeGeneric(stub.GetCode(isolate()),
3035 RelocInfo::CODE_TARGET,
3036 instr,
3037 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
3038 // Get the deoptimization index of the LLazyBailout-environment that
3039 // corresponds to this instruction.
3040 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
3041 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
3043 // Put the result value into the eax slot and restore all registers.
3044 __ StoreToSafepointRegisterSlot(eax, eax);
3045 }
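// This deferred path cooperates with the inline cache emitted at map_check:
// the stub receives the code offset of that site (delta, passed through
// temp's safepoint slot) so it can patch the cached map cell and the cached
// true/false result directly into the inlined sequence, making subsequent
// instanceof checks against the same map straight-line code.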
3048 void LCodeGen::DoCmpT(LCmpT* instr) {
3049 Token::Value op = instr->op();
3051 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
3052 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3054 Condition condition = ComputeCompareCondition(op);
3055 Label true_value, done;
3056 __ test(eax, Operand(eax));
3057 __ j(condition, &true_value, Label::kNear);
3058 __ mov(ToRegister(instr->result()), factory()->false_value());
3059 __ jmp(&done, Label::kNear);
3060 __ bind(&true_value);
3061 __ mov(ToRegister(instr->result()), factory()->true_value());
3062 __ bind(&done);
3063 }
3066 void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
3067 int extra_value_count = dynamic_frame_alignment ? 2 : 1;
3069 if (instr->has_constant_parameter_count()) {
3070 int parameter_count = ToInteger32(instr->constant_parameter_count());
3071 if (dynamic_frame_alignment && FLAG_debug_code) {
3072 __ cmp(Operand(esp,
3073 (parameter_count + extra_value_count) * kPointerSize),
3074 Immediate(kAlignmentZapValue));
3075 __ Assert(equal, kExpectedAlignmentMarker);
3076 }
3077 __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
3078 } else {
3079 Register reg = ToRegister(instr->parameter_count());
3080 // The argument count parameter is a smi; untag it.
3081 __ SmiUntag(reg);
3082 Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
3083 if (dynamic_frame_alignment && FLAG_debug_code) {
3084 ASSERT(extra_value_count == 2);
3085 __ cmp(Operand(esp, reg, times_pointer_size,
3086 extra_value_count * kPointerSize),
3087 Immediate(kAlignmentZapValue));
3088 __ Assert(equal, kExpectedAlignmentMarker);
3089 }
3091 // Emit code to restore the stack based on instr->parameter_count().
3092 __ pop(return_addr_reg); // Save the return address.
3093 if (dynamic_frame_alignment) {
3094 __ inc(reg); // 1 more for alignment.
3095 }
3096 __ shl(reg, kPointerSizeLog2);
3097 __ add(esp, reg);
3098 __ jmp(return_addr_reg);
3099 }
3100 }
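// With dynamic frame alignment an extra padding word may sit between the
// return address and the arguments, hence extra_value_count == 2 in that
// mode. Debug code verifies the padding slot still holds kAlignmentZapValue
// before it is popped together with the arguments; the variable-count path
// performs the same arithmetic with the untagged count in a register.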
3103 void LCodeGen::DoReturn(LReturn* instr) {
3104 if (FLAG_trace && info()->IsOptimizing()) {
3105 // Preserve the return value on the stack and rely on the runtime call
3106 // to return the value in the same register. We're leaving the code
3107 // managed by the register allocator and tearing down the frame, so it's
3108 // safe to write to the context register.
3109 __ push(eax);
3110 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
3111 __ CallRuntime(Runtime::kTraceExit, 1);
3112 }
3113 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
3114 RestoreCallerDoubles();
3115 }
3116 if (dynamic_frame_alignment_) {
3117 // Fetch the state of the dynamic frame alignment.
3118 __ mov(edx, Operand(ebp,
3119 JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
3120 }
3121 int no_frame_start = -1;
3122 if (NeedsEagerFrame()) {
3123 __ mov(esp, ebp);
3124 __ pop(ebp);
3125 no_frame_start = masm_->pc_offset();
3126 }
3127 if (dynamic_frame_alignment_) {
3128 Label no_padding;
3129 __ cmp(edx, Immediate(kNoAlignmentPadding));
3130 __ j(equal, &no_padding, Label::kNear);
3132 EmitReturn(instr, true);
3133 __ bind(&no_padding);
3134 }
3136 EmitReturn(instr, false);
3137 if (no_frame_start != -1) {
3138 info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
3139 }
3140 }
3143 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
3144 Register result = ToRegister(instr->result());
3145 __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
3146 if (instr->hydrogen()->RequiresHoleCheck()) {
3147 __ cmp(result, factory()->the_hole_value());
3148 DeoptimizeIf(equal, instr->environment());
3153 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3154 ASSERT(ToRegister(instr->context()).is(esi));
3155 ASSERT(ToRegister(instr->global_object()).is(edx));
3156 ASSERT(ToRegister(instr->result()).is(eax));
3158 __ mov(ecx, instr->name());
3159 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
3160 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
3161 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3165 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
3166 Register value = ToRegister(instr->value());
3167 Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();
3169 // If the cell we are storing to contains the hole it could have
3170 // been deleted from the property dictionary. In that case, we need
3171 // to update the property details in the property dictionary to mark
3172 // it as no longer deleted. We deoptimize in that case.
3173 if (instr->hydrogen()->RequiresHoleCheck()) {
3174 __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
3175 DeoptimizeIf(equal, instr->environment());
3176 }
3179 __ mov(Operand::ForCell(cell_handle), value);
3180 // Cells are always rescanned, so no write barrier here.
3184 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3185 Register context = ToRegister(instr->context());
3186 Register result = ToRegister(instr->result());
3187 __ mov(result, ContextOperand(context, instr->slot_index()));
3189 if (instr->hydrogen()->RequiresHoleCheck()) {
3190 __ cmp(result, factory()->the_hole_value());
3191 if (instr->hydrogen()->DeoptimizesOnHole()) {
3192 DeoptimizeIf(equal, instr->environment());
3193 } else {
3194 Label is_not_hole;
3195 __ j(not_equal, &is_not_hole, Label::kNear);
3196 __ mov(result, factory()->undefined_value());
3197 __ bind(&is_not_hole);
3203 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
3204 Register context = ToRegister(instr->context());
3205 Register value = ToRegister(instr->value());
3207 Label skip_assignment;
3209 Operand target = ContextOperand(context, instr->slot_index());
3210 if (instr->hydrogen()->RequiresHoleCheck()) {
3211 __ cmp(target, factory()->the_hole_value());
3212 if (instr->hydrogen()->DeoptimizesOnHole()) {
3213 DeoptimizeIf(equal, instr->environment());
3214 } else {
3215 __ j(not_equal, &skip_assignment, Label::kNear);
3216 }
3217 }
3219 __ mov(target, value);
3220 if (instr->hydrogen()->NeedsWriteBarrier()) {
3221 SmiCheck check_needed =
3222 instr->hydrogen()->value()->IsHeapObject()
3223 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3224 Register temp = ToRegister(instr->temp());
3225 int offset = Context::SlotOffset(instr->slot_index());
3226 __ RecordWriteContextSlot(context,
3227 offset,
3228 value,
3229 temp,
3230 GetSaveFPRegsMode(),
3231 EMIT_REMEMBERED_SET,
3232 check_needed);
3233 }
3235 __ bind(&skip_assignment);
3239 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3240 HObjectAccess access = instr->hydrogen()->access();
3241 int offset = access.offset();
3243 if (access.IsExternalMemory()) {
3244 Register result = ToRegister(instr->result());
3245 MemOperand operand = instr->object()->IsConstantOperand()
3246 ? MemOperand::StaticVariable(ToExternalReference(
3247 LConstantOperand::cast(instr->object())))
3248 : MemOperand(ToRegister(instr->object()), offset);
3249 __ Load(result, operand, access.representation());
3250 return;
3251 }
3253 Register object = ToRegister(instr->object());
3254 if (instr->hydrogen()->representation().IsDouble()) {
3255 if (CpuFeatures::IsSupported(SSE2)) {
3256 CpuFeatureScope scope(masm(), SSE2);
3257 XMMRegister result = ToDoubleRegister(instr->result());
3258 __ movsd(result, FieldOperand(object, offset));
3259 } else {
3260 X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
3261 }
3262 return;
3263 }
3265 Register result = ToRegister(instr->result());
3266 if (!access.IsInobject()) {
3267 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
3268 object = result;
3269 }
3270 __ Load(result, FieldOperand(object, offset), access.representation());
3274 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
3275 ASSERT(!operand->IsDoubleRegister());
3276 if (operand->IsConstantOperand()) {
3277 Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
3278 AllowDeferredHandleDereference smi_check;
3279 if (object->IsSmi()) {
3280 __ Push(Handle<Smi>::cast(object));
3281 } else {
3282 __ PushHeapObject(Handle<HeapObject>::cast(object));
3283 }
3284 } else if (operand->IsRegister()) {
3285 __ push(ToRegister(operand));
3286 } else {
3287 __ push(ToOperand(operand));
3288 }
3289 }
3292 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3293 ASSERT(ToRegister(instr->context()).is(esi));
3294 ASSERT(ToRegister(instr->object()).is(edx));
3295 ASSERT(ToRegister(instr->result()).is(eax));
3297 __ mov(ecx, instr->name());
3298 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
3299 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3303 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3304 Register function = ToRegister(instr->function());
3305 Register temp = ToRegister(instr->temp());
3306 Register result = ToRegister(instr->result());
3308 // Check that the function really is a function.
3309 __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
3310 DeoptimizeIf(not_equal, instr->environment());
3312 // Check whether the function has an instance prototype.
3313 Label non_instance;
3314 __ test_b(FieldOperand(result, Map::kBitFieldOffset),
3315 1 << Map::kHasNonInstancePrototype);
3316 __ j(not_zero, &non_instance, Label::kNear);
3318 // Get the prototype or initial map from the function.
3319 __ mov(result,
3320 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3322 // Check that the function has a prototype or an initial map.
3323 __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
3324 DeoptimizeIf(equal, instr->environment());
3326 // If the function does not have an initial map, we're done.
3327 Label done;
3328 __ CmpObjectType(result, MAP_TYPE, temp);
3329 __ j(not_equal, &done, Label::kNear);
3331 // Get the prototype from the initial map.
3332 __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
3333 __ jmp(&done, Label::kNear);
3335 // Non-instance prototype: Fetch prototype from constructor field
3336 // in the function's map.
3337 __ bind(&non_instance);
3338 __ mov(result, FieldOperand(result, Map::kConstructorOffset));
3340 // All done.
3341 __ bind(&done);
3342 }
3345 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3346 Register result = ToRegister(instr->result());
3347 __ LoadRoot(result, instr->index());
3351 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3352 Register arguments = ToRegister(instr->arguments());
3353 Register result = ToRegister(instr->result());
3354 if (instr->length()->IsConstantOperand() &&
3355 instr->index()->IsConstantOperand()) {
3356 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3357 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3358 int index = (const_length - const_index) + 1;
3359 __ mov(result, Operand(arguments, index * kPointerSize));
3360 } else {
3361 Register length = ToRegister(instr->length());
3362 Operand index = ToOperand(instr->index());
3363 // There are two words between the frame pointer and the last argument.
3364 // Subtracting from length accounts for one of them; add one more.
3365 __ sub(length, index);
3366 __ mov(result, Operand(arguments, length, times_4, kPointerSize));
3371 void LCodeGen::DoDeferredSIMD128ToTagged(LInstruction* instr,
3372 Runtime::FunctionId id) {
3373 // TODO(3095996): Get rid of this. For now, we need to make the
3374 // result register contain a valid pointer because it is already
3375 // contained in the register pointer map.
3376 Register reg = ToRegister(instr->result());
3377 __ Set(reg, Immediate(0));
3379 PushSafepointRegistersScope scope(this);
3380 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
3381 __ CallRuntimeSaveDoubles(id);
3382 RecordSafepointWithRegisters(
3383 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
3384 __ StoreToSafepointRegisterSlot(reg, eax);
3388 void LCodeGen::HandleExternalArrayOpRequiresTemp(
3389 LOperand* key,
3390 Representation key_representation,
3391 ElementsKind elements_kind) {
3392 if (ExternalArrayOpRequiresPreScale(key_representation, elements_kind)) {
3393 int pre_shift_size = ElementsKindToShiftSize(elements_kind) -
3394 static_cast<int>(maximal_scale_factor);
3395 if (key_representation.IsSmi()) {
3396 pre_shift_size -= kSmiTagSize;
3397 }
3398 ASSERT(pre_shift_size > 0);
3399 __ shl(ToRegister(key), pre_shift_size);
3400 } else {
3401 __ SmiUntag(ToRegister(key));
3402 }
3403 }
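// x86 addressing modes can scale an index by at most 8, but SIMD128
// elements are 16 bytes wide. For those element kinds the key is pre-shifted
// here by the difference between the element shift and maximal_scale_factor
// (less the smi tag bit when the key is a smi), so BuildFastArrayOperand can
// still encode the access with a legal scale factor.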
3406 template<class T>
3407 void LCodeGen::DoLoadKeyedSIMD128ExternalArray(LLoadKeyed* instr) {
3408 class DeferredSIMD128ToTagged V8_FINAL : public LDeferredCode {
3409 public:
3410 DeferredSIMD128ToTagged(LCodeGen* codegen,
3411 LInstruction* instr,
3412 Runtime::FunctionId id,
3413 const X87Stack& x87_stack)
3414 : LDeferredCode(codegen, x87_stack), instr_(instr), id_(id) { }
3415 virtual void Generate() V8_OVERRIDE {
3416 codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
3417 }
3418 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3419 private:
3420 LInstruction* instr_;
3421 Runtime::FunctionId id_;
3422 };
3424 LOperand* key = instr->key();
3425 ElementsKind elements_kind = instr->elements_kind();
3427 if (CpuFeatures::IsSupported(SSE2)) {
3428 CpuFeatureScope scope(masm(), SSE2);
3429 Operand operand(BuildFastArrayOperand(
3430 instr->elements(),
3431 key,
3432 instr->hydrogen()->key()->representation(),
3433 elements_kind,
3434 0,
3435 instr->additional_index()));
3436 __ movups(ToSIMD128Register(instr->result()), operand);
3437 } else {
3438 // Allocate a SIMD128 object on the heap.
3439 Register reg = ToRegister(instr->result());
3440 Register tmp = ToRegister(instr->temp());
3441 DeferredSIMD128ToTagged* deferred = new(zone()) DeferredSIMD128ToTagged(
3442 this, instr, static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()),
3443 x87_stack_);
3444 if (FLAG_inline_new) {
3445 __ AllocateSIMDHeapObject(T::kSize, reg, tmp, deferred->entry(),
3446 static_cast<Heap::RootListIndex>(T::kMapRootIndex()));
3447 } else {
3448 __ jmp(deferred->entry());
3449 }
3450 __ bind(deferred->exit());
3452 // Copy the SIMD128 value from the external array to the heap object.
3453 STATIC_ASSERT(T::kValueSize % kPointerSize == 0);
3454 for (int offset = 0; offset < T::kValueSize; offset += kPointerSize) {
3455 Operand operand(BuildFastArrayOperand(
3456 instr->elements(),
3457 key,
3458 instr->hydrogen()->key()->representation(),
3459 elements_kind,
3460 offset,
3461 instr->additional_index()));
3462 __ mov(tmp, operand);
3463 __ mov(FieldOperand(reg, T::kValueOffset + offset), tmp);
3464 }
3465 }
3466 }
3469 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3470 ElementsKind elements_kind = instr->elements_kind();
3471 LOperand* key = instr->key();
3472 if (!key->IsConstantOperand() &&
3473 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
3474 elements_kind)) {
3475 HandleExternalArrayOpRequiresTemp(key,
3476 instr->hydrogen()->key()->representation(), elements_kind);
3477 }
3479 Operand operand(BuildFastArrayOperand(
3480 instr->elements(),
3481 key,
3482 instr->hydrogen()->key()->representation(),
3483 elements_kind,
3484 0,
3485 instr->additional_index()));
3486 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3487 elements_kind == FLOAT32_ELEMENTS) {
3488 if (CpuFeatures::IsSupported(SSE2)) {
3489 CpuFeatureScope scope(masm(), SSE2);
3490 XMMRegister result(ToDoubleRegister(instr->result()));
3491 __ movss(result, operand);
3492 __ cvtss2sd(result, result);
3493 } else {
3494 X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
3495 }
3496 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3497 elements_kind == FLOAT64_ELEMENTS) {
3498 if (CpuFeatures::IsSupported(SSE2)) {
3499 CpuFeatureScope scope(masm(), SSE2);
3500 __ movsd(ToDoubleRegister(instr->result()), operand);
3501 } else {
3502 X87Mov(ToX87Register(instr->result()), operand);
3503 }
3504 } else if (IsFloat32x4ElementsKind(elements_kind)) {
3505 DoLoadKeyedSIMD128ExternalArray<Float32x4>(instr);
3506 } else if (IsInt32x4ElementsKind(elements_kind)) {
3507 DoLoadKeyedSIMD128ExternalArray<Int32x4>(instr);
3508 } else {
3509 Register result(ToRegister(instr->result()));
3510 switch (elements_kind) {
3511 case EXTERNAL_INT8_ELEMENTS:
3512 case INT8_ELEMENTS:
3513 __ movsx_b(result, operand);
3514 break;
3515 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3516 case EXTERNAL_UINT8_ELEMENTS:
3517 case UINT8_ELEMENTS:
3518 case UINT8_CLAMPED_ELEMENTS:
3519 __ movzx_b(result, operand);
3520 break;
3521 case EXTERNAL_INT16_ELEMENTS:
3522 case INT16_ELEMENTS:
3523 __ movsx_w(result, operand);
3524 break;
3525 case EXTERNAL_UINT16_ELEMENTS:
3526 case UINT16_ELEMENTS:
3527 __ movzx_w(result, operand);
3528 break;
3529 case EXTERNAL_INT32_ELEMENTS:
3530 case INT32_ELEMENTS:
3531 __ mov(result, operand);
3532 break;
3533 case EXTERNAL_UINT32_ELEMENTS:
3534 case UINT32_ELEMENTS:
3535 __ mov(result, operand);
3536 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3537 __ test(result, Operand(result));
3538 DeoptimizeIf(negative, instr->environment());
3539 }
3540 break;
3541 case EXTERNAL_FLOAT32_ELEMENTS:
3542 case EXTERNAL_FLOAT64_ELEMENTS:
3543 case EXTERNAL_FLOAT32x4_ELEMENTS:
3544 case EXTERNAL_INT32x4_ELEMENTS:
3545 case FLOAT32_ELEMENTS:
3546 case FLOAT64_ELEMENTS:
3547 case FLOAT32x4_ELEMENTS:
3548 case INT32x4_ELEMENTS:
3549 case FAST_SMI_ELEMENTS:
3550 case FAST_ELEMENTS:
3551 case FAST_DOUBLE_ELEMENTS:
3552 case FAST_HOLEY_SMI_ELEMENTS:
3553 case FAST_HOLEY_ELEMENTS:
3554 case FAST_HOLEY_DOUBLE_ELEMENTS:
3555 case DICTIONARY_ELEMENTS:
3556 case NON_STRICT_ARGUMENTS_ELEMENTS:
3557 UNREACHABLE();
3558 break;
3559 }
3560 }
3561 }
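// The integer cases pick the move that matches the element type: movsx_*
// sign-extends signed elements and movzx_* zero-extends unsigned ones. A
// uint32 load deoptimizes only when the loaded value has the top bit set and
// the instruction may not treat the result as uint32, because such a value
// is not representable as an int32.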
3564 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3565 if (instr->hydrogen()->RequiresHoleCheck()) {
3566 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
3567 sizeof(kHoleNanLower32);
3568 Operand hole_check_operand = BuildFastArrayOperand(
3569 instr->elements(), instr->key(),
3570 instr->hydrogen()->key()->representation(),
3571 FAST_DOUBLE_ELEMENTS,
3572 offset,
3573 instr->additional_index());
3574 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
3575 DeoptimizeIf(equal, instr->environment());
3576 }
3578 Operand double_load_operand = BuildFastArrayOperand(
3579 instr->elements(),
3580 instr->key(),
3581 instr->hydrogen()->key()->representation(),
3582 FAST_DOUBLE_ELEMENTS,
3583 FixedDoubleArray::kHeaderSize - kHeapObjectTag,
3584 instr->additional_index());
3585 if (CpuFeatures::IsSupported(SSE2)) {
3586 CpuFeatureScope scope(masm(), SSE2);
3587 XMMRegister result = ToDoubleRegister(instr->result());
3588 __ movsd(result, double_load_operand);
3589 } else {
3590 X87Mov(ToX87Register(instr->result()), double_load_operand);
3591 }
3592 }
3595 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3596 Register result = ToRegister(instr->result());
3598 // Load the result.
3599 __ mov(result,
3600 BuildFastArrayOperand(instr->elements(),
3601 instr->key(),
3602 instr->hydrogen()->key()->representation(),
3603 FAST_ELEMENTS,
3604 FixedArray::kHeaderSize - kHeapObjectTag,
3605 instr->additional_index()));
3607 // Check for the hole value.
3608 if (instr->hydrogen()->RequiresHoleCheck()) {
3609 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3610 __ test(result, Immediate(kSmiTagMask));
3611 DeoptimizeIf(not_equal, instr->environment());
3612 } else {
3613 __ cmp(result, factory()->the_hole_value());
3614 DeoptimizeIf(equal, instr->environment());
3620 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3621 if (instr->is_typed_elements()) {
3622 DoLoadKeyedExternalArray(instr);
3623 } else if (instr->hydrogen()->representation().IsDouble()) {
3624 DoLoadKeyedFixedDoubleArray(instr);
3625 } else {
3626 DoLoadKeyedFixedArray(instr);
3631 Operand LCodeGen::BuildFastArrayOperand(
3632 LOperand* elements_pointer,
3633 LOperand* key,
3634 Representation key_representation,
3635 ElementsKind elements_kind,
3636 uint32_t offset,
3637 uint32_t additional_index) {
3638 Register elements_pointer_reg = ToRegister(elements_pointer);
3639 int element_shift_size = ElementsKindToShiftSize(elements_kind);
3640 if (IsFixedTypedArrayElementsKind(elements_kind)) {
3641 offset += FixedTypedArrayBase::kDataOffset - kHeapObjectTag;
3643 int shift_size = element_shift_size;
3644 if (key->IsConstantOperand()) {
3645 int constant_value = ToInteger32(LConstantOperand::cast(key));
3646 if (constant_value & 0xF0000000) {
3647 Abort(kArrayIndexConstantValueTooBig);
3648 }
3649 return Operand(elements_pointer_reg,
3650 ((constant_value + additional_index) << shift_size)
3651 + offset);
3652 } else {
3653 if (ExternalArrayOpRequiresPreScale(key_representation, elements_kind)) {
3654 // Make sure the key is pre-scaled against maximal_scale_factor.
3655 shift_size = static_cast<int>(maximal_scale_factor);
3656 } else if (key_representation.IsSmi() && (shift_size >= 1)) {
3657 // Take the tag bit into account while computing the shift size.
3658 shift_size -= kSmiTagSize;
3659 }
3660 ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3661 return Operand(elements_pointer_reg,
3662 ToRegister(key),
3663 scale_factor,
3664 offset + (additional_index << element_shift_size));
3665 }
3666 }
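// Worked example (values assumed for illustration): a constant key 3 with
// additional_index 0 in a FAST_ELEMENTS array (shift size kPointerSizeLog2
// == 2) and offset FixedArray::kHeaderSize - kHeapObjectTag produces
// Operand(elements, (3 << 2) + offset), a single base+displacement access.
// A smi register key drops one shift bit since the tag already doubles the
// value.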
3669 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3670 ASSERT(ToRegister(instr->context()).is(esi));
3671 ASSERT(ToRegister(instr->object()).is(edx));
3672 ASSERT(ToRegister(instr->key()).is(ecx));
3674 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3675 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3679 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3680 Register result = ToRegister(instr->result());
3682 if (instr->hydrogen()->from_inlined()) {
3683 __ lea(result, Operand(esp, -2 * kPointerSize));
3684 } else {
3685 // Check for arguments adaptor frame.
3686 Label done, adapted;
3687 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3688 __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
3689 __ cmp(Operand(result),
3690 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3691 __ j(equal, &adapted, Label::kNear);
3693 // No arguments adaptor frame.
3694 __ mov(result, Operand(ebp));
3695 __ jmp(&done, Label::kNear);
3697 // Arguments adaptor frame present.
3698 __ bind(&adapted);
3699 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3701 // Result is the frame pointer for the frame if not adapted and for the real
3702 // frame below the adaptor frame if adapted.
3703 __ bind(&done);
3704 }
3705 }
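// An arguments adaptor frame is recognized by its context slot: the adaptor
// stores the sentinel Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR) where a
// JavaScript frame keeps its context, so two loads and one compare decide
// which frame pointer the arguments should be read through.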
3708 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3709 Operand elem = ToOperand(instr->elements());
3710 Register result = ToRegister(instr->result());
3712 Label done;
3714 // If there is no arguments adaptor frame, the number of arguments is fixed.
3715 __ cmp(ebp, elem);
3716 __ mov(result, Immediate(scope()->num_parameters()));
3717 __ j(equal, &done, Label::kNear);
3719 // Arguments adaptor frame present. Get argument length from there.
3720 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3721 __ mov(result, Operand(result,
3722 ArgumentsAdaptorFrameConstants::kLengthOffset));
3723 __ SmiUntag(result);
3725 // Argument length is in result register.
3726 __ bind(&done);
3727 }
3730 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3731 Register receiver = ToRegister(instr->receiver());
3732 Register function = ToRegister(instr->function());
3734 // If the receiver is null or undefined, we have to pass the global
3735 // object as a receiver to normal functions. Values have to be
3736 // passed unchanged to builtins and strict-mode functions.
3737 Label receiver_ok, global_object;
3738 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3739 Register scratch = ToRegister(instr->temp());
3741 if (!instr->hydrogen()->known_function()) {
3742 // Do not transform the receiver to object for strict mode
3743 // functions.
3744 __ mov(scratch,
3745 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3746 __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
3747 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
3748 __ j(not_equal, &receiver_ok, dist);
3750 // Do not transform the receiver to object for builtins.
3751 __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
3752 1 << SharedFunctionInfo::kNativeBitWithinByte);
3753 __ j(not_equal, &receiver_ok, dist);
3754 }
3756 // Normal function. Replace undefined or null with global receiver.
3757 __ cmp(receiver, factory()->null_value());
3758 __ j(equal, &global_object, Label::kNear);
3759 __ cmp(receiver, factory()->undefined_value());
3760 __ j(equal, &global_object, Label::kNear);
3762 // The receiver should be a JS object.
3763 __ test(receiver, Immediate(kSmiTagMask));
3764 DeoptimizeIf(equal, instr->environment());
3765 __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
3766 DeoptimizeIf(below, instr->environment());
3768 __ jmp(&receiver_ok, Label::kNear);
3769 __ bind(&global_object);
3770 __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
3771 const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
3772 __ mov(receiver, Operand(receiver, global_offset));
3773 const int receiver_offset = GlobalObject::kGlobalReceiverOffset;
3774 __ mov(receiver, FieldOperand(receiver, receiver_offset));
3775 __ bind(&receiver_ok);
3779 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3780 Register receiver = ToRegister(instr->receiver());
3781 Register function = ToRegister(instr->function());
3782 Register length = ToRegister(instr->length());
3783 Register elements = ToRegister(instr->elements());
3784 ASSERT(receiver.is(eax)); // Used for parameter count.
3785 ASSERT(function.is(edi)); // Required by InvokeFunction.
3786 ASSERT(ToRegister(instr->result()).is(eax));
3788 // Copy the arguments to this function possibly from the
3789 // adaptor frame below it.
3790 const uint32_t kArgumentsLimit = 1 * KB;
3791 __ cmp(length, kArgumentsLimit);
3792 DeoptimizeIf(above, instr->environment());
3794 __ push(receiver);
3795 __ mov(receiver, length);
3797 // Loop through the arguments pushing them onto the execution
3798 // stack.
3799 Label invoke, loop;
3800 // length is a small non-negative integer, due to the test above.
3801 __ test(length, Operand(length));
3802 __ j(zero, &invoke, Label::kNear);
3803 __ bind(&loop);
3804 __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
3805 __ dec(length);
3806 __ j(not_zero, &loop);
3808 // Invoke the function.
3809 __ bind(&invoke);
3810 ASSERT(instr->HasPointerMap());
3811 LPointerMap* pointers = instr->pointer_map();
3812 SafepointGenerator safepoint_generator(
3813 this, pointers, Safepoint::kLazyDeopt);
3814 ParameterCount actual(eax);
3815 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3819 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
3820 __ int3();
3821 }
3824 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3825 LOperand* argument = instr->value();
3826 EmitPushTaggedOperand(argument);
3830 void LCodeGen::DoDrop(LDrop* instr) {
3831 __ Drop(instr->count());
3835 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3836 Register result = ToRegister(instr->result());
3837 __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
3841 void LCodeGen::DoContext(LContext* instr) {
3842 Register result = ToRegister(instr->result());
3843 if (info()->IsOptimizing()) {
3844 __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
3845 } else {
3846 // If there is no frame, the context must be in esi.
3847 ASSERT(result.is(esi));
3852 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3853 ASSERT(ToRegister(instr->context()).is(esi));
3854 __ push(esi); // The context is the first argument.
3855 __ push(Immediate(instr->hydrogen()->pairs()));
3856 __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
3857 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3861 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3862 int formal_parameter_count,
3863 int arity,
3864 LInstruction* instr,
3865 EDIState edi_state) {
3866 bool dont_adapt_arguments =
3867 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3868 bool can_invoke_directly =
3869 dont_adapt_arguments || formal_parameter_count == arity;
3871 if (can_invoke_directly) {
3872 if (edi_state == EDI_UNINITIALIZED) {
3873 __ LoadHeapObject(edi, function);
3877 __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
3879 // Set eax to arguments count if adaption is not needed. Assumes that eax
3880 // is available to write to at this point.
3881 if (dont_adapt_arguments) {
3885 // Invoke function directly.
3886 if (function.is_identical_to(info()->closure())) {
3889 __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
3891 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3893 // We need to adapt arguments.
3894 LPointerMap* pointers = instr->pointer_map();
3895 SafepointGenerator generator(
3896 this, pointers, Safepoint::kLazyDeopt);
3897 ParameterCount count(arity);
3898 ParameterCount expected(formal_parameter_count);
3899 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
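
// Illustrative note (not from the original source): the direct-call path is
// taken when no argument adaptation can be required, e.g. calling a function
// declared with two formal parameters at arity 2. Only in the
// dont_adapt_arguments case is eax loaded with the arity, since otherwise
// the adaptor machinery is responsible for the argument count.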


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  ASSERT(ToRegister(instr->result()).is(eax));

  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

  if (instr->target()->IsConstantOperand()) {
    LConstantOperand* target = LConstantOperand::cast(instr->target());
    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
    __ call(code, RelocInfo::CODE_TARGET);
  } else {
    ASSERT(instr->target()->IsRegister());
    Register target = ToRegister(instr->target());
    generator.BeforeCall(__ CallSize(Operand(target)));
    __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
    __ call(target);
  }
  generator.AfterCall();
}


void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(edi));
  ASSERT(ToRegister(instr->result()).is(eax));

  if (instr->hydrogen()->pass_argument_count()) {
    __ mov(eax, instr->arity());
  }

  // Change context.
  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));

  bool is_self_call = false;
  if (instr->hydrogen()->function()->IsConstant()) {
    HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
    Handle<JSFunction> jsfun =
        Handle<JSFunction>::cast(fun_const->handle(isolate()));
    is_self_call = jsfun.is_identical_to(info()->closure());
  }

  if (is_self_call) {
    __ CallSelf();
  } else {
    __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
  }

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  DeoptimizeIf(not_equal, instr->environment());

  Label slow, allocated, done;
  Register tmp = input_reg.is(eax) ? ecx : eax;
  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it. We do not need to patch the stack since |input| and
  // |result| are the same register and |input| will be restored
  // unchanged by popping safepoint registers.
  __ test(tmp, Immediate(HeapNumber::kSignMask));
  __ j(zero, &done, Label::kNear);

  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
  __ jmp(&allocated, Label::kNear);

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
                          instr, instr->context());
  // Set the pointer to the new heap number in tmp.
  if (!tmp.is(eax)) __ mov(tmp, eax);
  // Restore input_reg after call to runtime.
  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);

  __ bind(&allocated);
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  __ and_(tmp2, ~HeapNumber::kSignMask);
  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
  __ StoreToSafepointRegisterSlot(input_reg, tmp);

  __ bind(&done);
}
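
// Illustration for the deferred Math.abs path above (a sketch, assuming
// IEEE-754 double layout; not part of the build):
//   uint32_t hi = <exponent word of -3.0>;  // 0xC0080000
//   hi &= ~HeapNumber::kSignMask;           // clears bit 31 -> 0x40080000
// Re-assembled with the untouched mantissa word this reads as +3.0; abs()
// on a boxed double only ever flips the sign bit.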


void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ test(input_reg, Operand(input_reg));
  Label is_positive;
  __ j(not_sign, &is_positive, Label::kNear);
  __ neg(input_reg);  // Sets flags.
  DeoptimizeIf(negative, instr->environment());
  __ bind(&is_positive);
}


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LMathAbs* instr,
                                    const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LMathAbs* instr_;
  };

  ASSERT(instr->value()->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();

  CpuFeatureScope scope(masm(), SSE2);
  if (r.IsDouble()) {
    XMMRegister scratch = double_scratch0();
    XMMRegister input_reg = ToDoubleRegister(instr->value());
    __ xorps(scratch, scratch);
    __ subsd(scratch, input_reg);
    __ andps(input_reg, scratch);
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {  // Tagged case.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
    Register input_reg = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input_reg, deferred->entry());
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}
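
// Note on the double case above (illustrative, not from the original
// source): xorps zeroes the scratch register, subsd leaves -x in it, and
// andps keeps only the bits common to x and -x. The two values differ in
// exactly the sign bit, so the result is x with the sign cleared, i.e. |x|,
// computed without a single branch.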


void LCodeGen::DoMathFloor(LMathFloor* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  XMMRegister xmm_scratch = double_scratch0();
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());

  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope scope(masm(), SSE4_1);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Deoptimize on negative zero.
      Label non_zero;
      __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
      __ ucomisd(input_reg, xmm_scratch);
      __ j(not_equal, &non_zero, Label::kNear);
      __ movmskpd(output_reg, input_reg);
      __ test(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
      __ bind(&non_zero);
    }
    __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
    __ cvttsd2si(output_reg, Operand(xmm_scratch));
    // Overflow is signalled with minint.
    __ cmp(output_reg, 0x80000000u);
    DeoptimizeIf(equal, instr->environment());
  } else {
    Label negative_sign, done;
    // Deoptimize on unordered.
    __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
    __ ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(parity_even, instr->environment());
    __ j(below, &negative_sign, Label::kNear);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Check for negative zero.
      Label positive_sign;
      __ j(above, &positive_sign, Label::kNear);
      __ movmskpd(output_reg, input_reg);
      __ test(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
      __ Set(output_reg, Immediate(0));
      __ jmp(&done, Label::kNear);
      __ bind(&positive_sign);
    }

    // Use truncating instruction (OK because input is positive).
    __ cvttsd2si(output_reg, Operand(input_reg));
    // Overflow is signalled with minint.
    __ cmp(output_reg, 0x80000000u);
    DeoptimizeIf(equal, instr->environment());
    __ jmp(&done, Label::kNear);

    // Non-zero negative reaches here.
    __ bind(&negative_sign);
    // Truncate, then compare and compensate.
    __ cvttsd2si(output_reg, Operand(input_reg));
    __ Cvtsi2sd(xmm_scratch, output_reg);
    __ ucomisd(input_reg, xmm_scratch);
    __ j(equal, &done, Label::kNear);
    __ sub(output_reg, Immediate(1));
    DeoptimizeIf(overflow, instr->environment());

    __ bind(&done);
  }
}
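
// Worked example for the truncate-and-compensate tail above (illustrative):
// for input -2.5, cvttsd2si rounds towards zero and yields -2. Converting
// -2 back gives -2.0 != -2.5, so the code subtracts 1 and produces -3,
// which is Math.floor(-2.5). For an exact input such as -2.0 the ucomisd
// compares equal and no compensation is applied.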


void LCodeGen::DoMathRound(LMathRound* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  XMMRegister xmm_scratch = double_scratch0();
  XMMRegister input_temp = ToDoubleRegister(instr->temp());
  ExternalReference one_half = ExternalReference::address_of_one_half();
  ExternalReference minus_one_half =
      ExternalReference::address_of_minus_one_half();

  Label done, round_to_zero, below_one_half, do_not_compensate;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;

  __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
  __ ucomisd(xmm_scratch, input_reg);
  __ j(above, &below_one_half, Label::kNear);

  // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
  __ addsd(xmm_scratch, input_reg);
  __ cvttsd2si(output_reg, Operand(xmm_scratch));
  // Overflow is signalled with minint.
  __ cmp(output_reg, 0x80000000u);
  __ RecordComment("D2I conversion overflow");
  DeoptimizeIf(equal, instr->environment());
  __ jmp(&done, dist);

  __ bind(&below_one_half);
  __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
  __ ucomisd(xmm_scratch, input_reg);
  __ j(below_equal, &round_to_zero, Label::kNear);

  // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
  // compare and compensate.
  __ movaps(input_temp, input_reg);  // Do not alter input_reg.
  __ subsd(input_temp, xmm_scratch);
  __ cvttsd2si(output_reg, Operand(input_temp));
  // Catch minint due to overflow, and to prevent overflow when compensating.
  __ cmp(output_reg, 0x80000000u);
  __ RecordComment("D2I conversion overflow");
  DeoptimizeIf(equal, instr->environment());

  __ Cvtsi2sd(xmm_scratch, output_reg);
  __ ucomisd(xmm_scratch, input_temp);
  __ j(equal, &done, dist);
  __ sub(output_reg, Immediate(1));
  // No overflow because we already ruled out minint.
  __ jmp(&done, dist);

  __ bind(&round_to_zero);
  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
  // we can ignore the difference between a result of -0 and +0.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // If the sign is positive, we return +0.
    __ movmskpd(output_reg, input_reg);
    __ test(output_reg, Immediate(1));
    __ RecordComment("Minus zero");
    DeoptimizeIf(not_zero, instr->environment());
  }
  __ Set(output_reg, Immediate(0));
  __ bind(&done);
}
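
// Worked examples for the rounding strategy above (illustrative, not from
// the original source): for x = 2.5 the first path computes
// trunc(2.5 + 0.5) = 3. For x = -2.5 the second path computes
// trunc(-2.5 + 0.5) = -2, and since the converted result equals input_temp
// no compensation is applied; Math.round(-2.5) is indeed -2 (round half
// up). For x = -2.6, trunc(-2.1) = -2 but -2.0 != -2.1, so 1 is
// subtracted, giving -3.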


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
  __ sqrtsd(input_reg, input_reg);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  XMMRegister xmm_scratch = double_scratch0();
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = ToRegister(instr->temp());
  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done, sqrt;
  // Check base for -Infinity. According to IEEE-754, single-precision
  // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
  __ mov(scratch, 0xFF800000);
  __ movd(xmm_scratch, scratch);
  __ cvtss2sd(xmm_scratch, xmm_scratch);
  __ ucomisd(input_reg, xmm_scratch);
  // Comparing -Infinity with NaN results in "unordered", which sets the
  // zero flag as if both were equal. However, it also sets the carry flag.
  __ j(not_equal, &sqrt, Label::kNear);
  __ j(carry, &sqrt, Label::kNear);
  // If input is -Infinity, return Infinity.
  __ xorps(input_reg, input_reg);
  __ subsd(input_reg, xmm_scratch);
  __ jmp(&done, Label::kNear);

  // Square root.
  __ bind(&sqrt);
  __ xorps(xmm_scratch, xmm_scratch);
  __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
  __ sqrtsd(input_reg, input_reg);
  __ bind(&done);
}
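
// Note (illustrative): per ES5.1 15.8.2.13, Math.pow(-Infinity, 0.5) is
// +Infinity while a plain sqrtsd of -Infinity would give NaN, hence the
// explicit check above. The addsd of +0 before sqrtsd also turns an input
// of -0 into +0: IEEE-754 defines sqrt(-0) as -0, but Math.pow(-0, 0.5)
// must return +0.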


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  ASSERT(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(xmm1));
  ASSERT(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(eax));
  ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(eax, &no_deopt);
    __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
    DeoptimizeIf(not_equal, instr->environment());
    __ bind(&no_deopt);
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  ASSERT(instr->value()->Equals(instr->result()));
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  XMMRegister xmm_scratch = double_scratch0();
  Label positive, done, zero;
  __ xorps(xmm_scratch, xmm_scratch);
  __ ucomisd(input_reg, xmm_scratch);
  __ j(above, &positive, Label::kNear);
  __ j(not_carry, &zero, Label::kNear);
  ExternalReference nan =
      ExternalReference::address_of_canonical_non_hole_nan();
  __ movsd(input_reg, Operand::StaticVariable(nan));
  __ jmp(&done, Label::kNear);
  __ bind(&zero);
  ExternalReference ninf =
      ExternalReference::address_of_negative_infinity();
  __ movsd(input_reg, Operand::StaticVariable(ninf));
  __ jmp(&done, Label::kNear);
  __ bind(&positive);
  __ fldln2();
  __ sub(Operand(esp), Immediate(kDoubleSize));
  __ movsd(Operand(esp, 0), input_reg);
  __ fld_d(Operand(esp, 0));
  __ fyl2x();
  __ fstp_d(Operand(esp, 0));
  __ movsd(input_reg, Operand(esp, 0));
  __ add(Operand(esp), Immediate(kDoubleSize));
  __ bind(&done);
}
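
// Note on the x87 sequence above (illustrative): fldln2 pushes ln(2) and
// fyl2x computes y * log2(x) with y = ln(2), which is exactly ln(x).
// E.g. for x = 8, log2(8) = 3 and ln(2) * 3 ~= 2.0794 = ln(8).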


void LCodeGen::DoMathExp(LMathExp* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  XMMRegister input = ToDoubleRegister(instr->value());
  XMMRegister result = ToDoubleRegister(instr->result());
  XMMRegister temp0 = double_scratch0();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
}


void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->function()).is(edi));
  ASSERT(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(edi, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(),
                      instr,
                      EDI_CONTAINS_TARGET);
  }
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->function()).is(edi));
  ASSERT(ToRegister(instr->result()).is(eax));

  int arity = instr->arity();
  CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->constructor()).is(edi));
  ASSERT(ToRegister(instr->result()).is(eax));

  // No cell in ebx for construct type feedback in optimized code.
  Handle<Object> undefined_value(isolate()->factory()->undefined_value());
  __ mov(ebx, Immediate(undefined_value));
  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
  __ Set(eax, Immediate(instr->arity()));
  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->constructor()).is(edi));
  ASSERT(ToRegister(instr->result()).is(eax));

  __ Set(eax, Immediate(instr->arity()));
  __ mov(ebx, factory()->undefined_value());
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need to transition to a holey kind: look at the first
      // argument (the requested length). A non-zero length needs the holey
      // single-argument stub.
      __ mov(ecx, Operand(esp, 0));
      __ test(ecx, ecx);
      __ j(zero, &packed_case, Label::kNear);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
      CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
      __ jmp(&done, Label::kNear);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  }
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
  __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ lea(result, Operand(base, ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ lea(result, Operand(base, offset, times_1, 0));
  }
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    MemOperand operand = instr->object()->IsConstantOperand()
        ? MemOperand::StaticVariable(
            ToExternalReference(LConstantOperand::cast(instr->object())))
        : MemOperand(ToRegister(instr->object()), offset);
    if (instr->value()->IsConstantOperand()) {
      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
      __ mov(operand, Immediate(ToInteger32(operand_value)));
    } else {
      Register value = ToRegister(instr->value());
      __ Store(value, operand, representation);
    }
    return;
  }

  Register object = ToRegister(instr->object());
  Handle<Map> transition = instr->transition();

  if (FLAG_track_fields && representation.IsSmi()) {
    if (instr->value()->IsConstantOperand()) {
      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
      if (!IsSmi(operand_value)) {
        DeoptimizeIf(no_condition, instr->environment());
      }
    }
  } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
    if (instr->value()->IsConstantOperand()) {
      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
      if (IsInteger32(operand_value)) {
        DeoptimizeIf(no_condition, instr->environment());
      }
    } else {
      if (!instr->hydrogen()->value()->type().IsHeapObject()) {
        Register value = ToRegister(instr->value());
        __ test(value, Immediate(kSmiTagMask));
        DeoptimizeIf(zero, instr->environment());
      }
    }
  } else if (representation.IsDouble()) {
    ASSERT(transition.is_null());
    ASSERT(access.IsInobject());
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope scope(masm(), SSE2);
      XMMRegister value = ToDoubleRegister(instr->value());
      __ movsd(FieldOperand(object, offset), value);
    } else {
      X87Register value = ToX87Register(instr->value());
      X87Mov(FieldOperand(object, offset), value);
    }
    return;
  }

  if (!transition.is_null()) {
    if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
      __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
    } else {
      Register temp = ToRegister(instr->temp());
      Register temp_map = ToRegister(instr->temp_map());
      __ mov(temp_map, transition);
      __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
      // Update the write barrier for the map field.
      __ RecordWriteField(object,
                          HeapObject::kMapOffset,
                          temp_map,
                          temp,
                          GetSaveFPRegsMode(),
                          OMIT_REMEMBERED_SET,
                          OMIT_SMI_CHECK);
    }
  }

  // Do the store.
  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;

  Register write_register = object;
  if (!access.IsInobject()) {
    write_register = ToRegister(instr->temp());
    __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
  }

  MemOperand operand = FieldOperand(write_register, offset);
  if (instr->value()->IsConstantOperand()) {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (operand_value->IsRegister()) {
      Register value = ToRegister(operand_value);
      __ Store(value, operand, representation);
    } else if (representation.IsInteger32()) {
      Immediate immediate = ToImmediate(operand_value, representation);
      ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
      __ mov(operand, immediate);
    } else {
      Handle<Object> handle_value = ToHandle(operand_value);
      ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
      __ mov(operand, handle_value);
    }
  } else {
    Register value = ToRegister(instr->value());
    __ Store(value, operand, representation);
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    Register value = ToRegister(instr->value());
    Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
    // Update the write barrier for the object for in-object properties.
    __ RecordWriteField(write_register,
                        offset,
                        value,
                        temp,
                        GetSaveFPRegsMode(),
                        EMIT_REMEMBERED_SET,
                        check_needed);
  }
}
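
// Note (illustrative, not from the original source): RecordWriteField emits
// the generational write barrier, telling the GC that a pointer field of
// write_register changed. With INLINE_SMI_CHECK the emitted barrier first
// tests the stored value at runtime and skips itself for smis; when the
// hydrogen type already proves the value is a heap object, OMIT_SMI_CHECK
// drops that dynamic test.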


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->object()).is(edx));
  ASSERT(ToRegister(instr->value()).is(eax));

  __ mov(ecx, instr->name());
  Handle<Code> ic = StoreIC::initialize_stub(isolate(),
                                             instr->strict_mode_flag());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
    Label done;
    __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, check->environment());
  }
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  if (instr->hydrogen()->skip_check() && !FLAG_debug_code) return;

  if (instr->index()->IsConstantOperand()) {
    Immediate immediate =
        ToImmediate(LConstantOperand::cast(instr->index()),
                    instr->hydrogen()->length()->representation());
    __ cmp(ToOperand(instr->length()), immediate);
    Condition condition =
        instr->hydrogen()->allow_equality() ? below : below_equal;
    ApplyCheckIf(condition, instr);
  } else {
    __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
    Condition condition =
        instr->hydrogen()->allow_equality() ? above : above_equal;
    ApplyCheckIf(condition, instr);
  }
}
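
// Worked example for the two branches above (illustrative): with a constant
// index the comparison is cmp(length, index), so the failing condition is
// below/below_equal; with a register index it is cmp(index, length) and the
// condition flips to above/above_equal. E.g. length == 4, index == 4 and
// allow_equality == false: cmp(4, 4) sets below_equal and the check deopts,
// since element 4 of a length-4 array is out of range.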


template<class T>
void LCodeGen::DoStoreKeyedSIMD128ExternalArray(LStoreKeyed* instr) {
  LOperand* key = instr->key();
  ElementsKind elements_kind = instr->elements_kind();

  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    Operand operand(BuildFastArrayOperand(
        instr->elements(),
        key,
        instr->hydrogen()->key()->representation(),
        elements_kind,
        0,
        instr->additional_index()));
    __ movups(operand, ToSIMD128Register(instr->value()));
  } else {
    ASSERT(instr->value()->IsRegister());
    Register temp = ToRegister(instr->temp());
    Register input_reg = ToRegister(instr->value());
    __ test(input_reg, Immediate(kSmiTagMask));
    DeoptimizeIf(zero, instr->environment());
    __ CmpObjectType(input_reg, T::kInstanceType, temp);
    DeoptimizeIf(not_equal, instr->environment());

    // Copy the SIMD128 value from the heap object to the external array.
    STATIC_ASSERT(T::kValueSize % kPointerSize == 0);
    for (int offset = 0; offset < T::kValueSize; offset += kPointerSize) {
      Operand operand(BuildFastArrayOperand(
          instr->elements(),
          key,
          instr->hydrogen()->key()->representation(),
          elements_kind,
          offset,
          instr->additional_index()));
      __ mov(temp, FieldOperand(input_reg, T::kValueOffset + offset));
      __ mov(operand, temp);
    }
  }
}


void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand() &&
      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
                                  elements_kind)) {
    HandleExternalArrayOpRequiresTemp(key,
        instr->hydrogen()->key()->representation(), elements_kind);
  }

  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      0,
      instr->additional_index()));
  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == FLOAT32_ELEMENTS) {
    if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
      CpuFeatureScope scope(masm(), SSE2);
      XMMRegister xmm_scratch = double_scratch0();
      __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
      __ movss(operand, xmm_scratch);
    } else {
      __ fld(0);
      __ fstp_s(operand);
    }
  } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
             elements_kind == FLOAT64_ELEMENTS) {
    if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
      CpuFeatureScope scope(masm(), SSE2);
      __ movsd(operand, ToDoubleRegister(instr->value()));
    } else {
      X87Mov(operand, ToX87Register(instr->value()));
    }
  } else if (IsFloat32x4ElementsKind(elements_kind)) {
    DoStoreKeyedSIMD128ExternalArray<Float32x4>(instr);
  } else if (IsInt32x4ElementsKind(elements_kind)) {
    DoStoreKeyedSIMD128ExternalArray<Int32x4>(instr);
  } else {
    Register value = ToRegister(instr->value());
    switch (elements_kind) {
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case EXTERNAL_INT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case INT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ mov_b(operand, value);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case EXTERNAL_UINT16_ELEMENTS:
      case UINT16_ELEMENTS:
      case INT16_ELEMENTS:
        __ mov_w(operand, value);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case EXTERNAL_UINT32_ELEMENTS:
      case UINT32_ELEMENTS:
      case INT32_ELEMENTS:
        __ mov(operand, value);
        break;
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32x4_ELEMENTS:
      case EXTERNAL_INT32x4_ELEMENTS:
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FLOAT32x4_ELEMENTS:
      case INT32x4_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  ExternalReference canonical_nan_reference =
      ExternalReference::address_of_canonical_non_hole_nan();
  Operand double_store_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
      instr->additional_index());

  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    XMMRegister value = ToDoubleRegister(instr->value());

    if (instr->NeedsCanonicalization()) {
      Label have_value;

      __ ucomisd(value, value);
      __ j(parity_odd, &have_value, Label::kNear);  // NaN.

      __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
      __ bind(&have_value);
    }

    __ movsd(double_store_operand, value);
  } else {
    // Can't use SSE2 in the serializer.
    if (instr->hydrogen()->IsConstantHoleStore()) {
      // This means we should store the (double) hole. No floating point
      // registers required.
      double nan_double = FixedDoubleArray::hole_nan_as_double();
      uint64_t int_val = BitCast<uint64_t, double>(nan_double);
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));

      __ mov(double_store_operand, Immediate(lower));
      Operand double_store_operand2 = BuildFastArrayOperand(
          instr->elements(),
          instr->key(),
          instr->hydrogen()->key()->representation(),
          FAST_DOUBLE_ELEMENTS,
          FixedDoubleArray::kHeaderSize - kHeapObjectTag + kPointerSize,
          instr->additional_index());
      __ mov(double_store_operand2, Immediate(upper));
    } else {
      Label no_special_nan_handling;
      X87Register value = ToX87Register(instr->value());
      X87Fxch(value);

      if (instr->NeedsCanonicalization()) {
        __ fld(0);
        __ fld(0);
        __ FCmp();

        __ j(parity_odd, &no_special_nan_handling, Label::kNear);
        __ sub(esp, Immediate(kDoubleSize));
        __ fst_d(MemOperand(esp, 0));
        __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
               Immediate(kHoleNanUpper32));
        __ add(esp, Immediate(kDoubleSize));
        Label canonicalize;
        __ j(not_equal, &canonicalize, Label::kNear);
        __ jmp(&no_special_nan_handling, Label::kNear);
        __ bind(&canonicalize);
        __ fstp(0);
        __ fld_d(Operand::StaticVariable(canonical_nan_reference));
      }

      __ bind(&no_special_nan_handling);
      __ fst_d(double_store_operand);
    }
  }
}
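
// Note on NeedsCanonicalization above (illustrative): ucomisd of a value
// against itself is unordered (PF set) exactly when the value is NaN, so
// j(parity_odd, ...) skips the canonicalization for every ordinary double.
// Only genuine NaNs are rewritten to the single canonical non-hole NaN
// pattern, which keeps them distinguishable from the hole NaN used to mark
// missing elements.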


void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;

  Operand operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_ELEMENTS,
      FixedArray::kHeaderSize - kHeapObjectTag,
      instr->additional_index());
  if (instr->value()->IsRegister()) {
    __ mov(operand, ToRegister(instr->value()));
  } else {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (IsSmi(operand_value)) {
      Immediate immediate = ToImmediate(operand_value, Representation::Smi());
      __ mov(operand, immediate);
    } else {
      ASSERT(!IsInteger32(operand_value));
      Handle<Object> handle_value = ToHandle(operand_value);
      __ mov(operand, handle_value);
    }
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    ASSERT(instr->value()->IsRegister());
    Register value = ToRegister(instr->value());
    ASSERT(!instr->key()->IsConstantOperand());
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ lea(key, operand);
    __ RecordWrite(elements,
                   key,
                   value,
                   GetSaveFPRegsMode(),
                   EMIT_REMEMBERED_SET,
                   check_needed);
  }
}


void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // Dispatch by cases: external/typed arrays, fast double arrays, then
  // fast arrays.
  if (instr->is_typed_elements()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->object()).is(edx));
  ASSERT(ToRegister(instr->key()).is(ecx));
  ASSERT(ToRegister(instr->value()).is(eax));

  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(equal, instr->environment());
  __ bind(&no_memento_found);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  bool is_simple_map_transition =
      IsSimpleMapChangeTransition(from_kind, to_kind);
  Label::Distance branch_distance =
      is_simple_map_transition ? Label::kNear : Label::kFar;
  __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
  __ j(not_equal, &not_applicable, branch_distance);
  if (is_simple_map_transition) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
           Immediate(to_map));
    // Write barrier.
    ASSERT_NE(instr->temp(), NULL);
    __ RecordWriteForMap(object_reg, to_map, new_map_reg,
                         ToRegister(instr->temp()),
                         kDontSaveFPRegs);
  } else {
    ASSERT(ToRegister(instr->context()).is(esi));
    PushSafepointRegistersScope scope(this);
    if (!object_reg.is(eax)) {
      __ mov(eax, object_reg);
    }
    __ mov(ebx, to_map);
    TransitionElementsKindStub stub(from_kind, to_kind);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen,
                             LStringCharCodeAt* instr,
                             const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharCodeAt(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);

  StringCharLoadGenerator::Generate(masm(),
                                    factory(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
  if (instr->index()->IsConstantOperand()) {
    Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
                                      Representation::Smi());
    __ push(immediate);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2,
                          instr, instr->context());
  __ AssertSmi(eax);
  __ SmiUntag(eax);
  __ StoreToSafepointRegisterSlot(result, eax);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen,
                               LStringCharFromCode* instr,
                               const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  ASSERT(!char_code.is(result));

  __ cmp(char_code, String::kMaxOneByteCharCode);
  __ j(above, deferred->entry());
  __ Set(result, Immediate(factory()->single_character_string_cache()));
  __ mov(result, FieldOperand(result,
                              char_code, times_pointer_size,
                              FixedArray::kHeaderSize));
  __ cmp(result, factory()->undefined_value());
  __ j(equal, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, eax);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->left()).is(edx));
  ASSERT(ToRegister(instr->right()).is(eax));
  StringAddStub stub(instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  ASSERT(input->IsRegister() || input->IsStackSlot());
  ASSERT(output->IsDoubleRegister());
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
  } else if (input->IsRegister()) {
    Register input_reg = ToRegister(input);
    __ push(input_reg);
    X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
    __ pop(input_reg);
  } else {
    X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
  }
}


void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
  Register input = ToRegister(instr->value());
  __ SmiTag(input);
  if (!instr->hydrogen()->value()->HasRange() ||
      !instr->hydrogen()->value()->range()->IsInSmiRange()) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    LOperand* temp = instr->temp();

    __ LoadUint32(ToDoubleRegister(output),
                  ToRegister(input),
                  ToDoubleRegister(temp));
  } else {
    X87Register res = ToX87Register(output);
    X87PrepareToWrite(res);
    __ LoadUint32NoSSE2(ToRegister(input));
    X87CommitWrite(res);
  }
}


void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
  Register input = ToRegister(instr->value());
  if (!instr->hydrogen()->value()->HasRange() ||
      !instr->hydrogen()->value()->range()->IsInSmiRange()) {
    __ test(input, Immediate(0xc0000000));
    DeoptimizeIf(not_zero, instr->environment());
  }
  __ SmiTag(input);
}
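
// Note (illustrative): a uint32 fits in a 31-bit smi iff its top two bits
// are clear, hence the test against 0xc0000000. E.g. 0x3FFFFFFF passes and
// tags to 0x7FFFFFFE, while 0x40000000, or any value with bit 31 set, fails
// the test and deoptimizes.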


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen,
                       LNumberTagI* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagI(instr_, instr_->value(), SIGNED_INT32);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagI* deferred =
      new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
  __ SmiTag(reg);
  __ j(overflow, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen,
                       LNumberTagU* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagI(instr_, instr_->value(), UNSIGNED_INT32);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred =
      new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
  __ cmp(reg, Immediate(Smi::kMaxValue));
  __ j(above, deferred->entry());
  __ SmiTag(reg);
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
                                    LOperand* value,
                                    IntegerSignedness signedness) {
  Label slow;
  Register reg = ToRegister(value);
  Register tmp = reg.is(eax) ? ecx : eax;
  XMMRegister xmm_scratch = double_scratch0();

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  Label done;

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    __ SmiUntag(reg);
    __ xor_(reg, 0x80000000);
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope feature_scope(masm(), SSE2);
      __ Cvtsi2sd(xmm_scratch, Operand(reg));
    } else {
      __ push(reg);
      __ fild_s(Operand(esp, 0));
      __ pop(reg);
    }
  } else {
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope feature_scope(masm(), SSE2);
      __ LoadUint32(xmm_scratch, reg,
                    ToDoubleRegister(LNumberTagU::cast(instr)->temp()));
    } else {
      // There's no fild variant for unsigned values, so zero-extend to a
      // 64-bit int manually.
      __ push(Immediate(0));
      __ push(reg);
      __ fild_d(Operand(esp, 0));
      __ pop(reg);
      __ pop(reg);
    }
  }

  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
    __ jmp(&done, Label::kNear);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  // TODO(3095996): Put a valid pointer value in the stack slot where the
  // result register is stored, as this register is in the pointer map, but
  // contains an integer value.
  __ StoreToSafepointRegisterSlot(reg, Immediate(0));
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  if (!reg.is(eax)) __ mov(reg, eax);

  // Done. Put the value in xmm_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope feature_scope(masm(), SSE2);
    __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
  } else {
    __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
  }
  __ StoreToSafepointRegisterSlot(reg, reg);
}
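
// Worked example for the SIGNED_INT32 path above (illustrative): SmiTag is
// an `add reg, reg`. Tagging 0x40000000 (2^30) produces 0x80000000 with the
// overflow flag set, which routes here; SmiUntag arithmetic-shifts that
// back to 0xC0000000, and the xor with 0x80000000 restores the original
// 0x40000000, which is then stored into a freshly allocated heap number.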


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen,
                       LNumberTagD* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagD(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  Register reg = ToRegister(instr->result());

  bool use_sse2 = CpuFeatures::IsSupported(SSE2);
  if (!use_sse2) {
    // Put the value on top of the FPU stack.
    X87Register src = ToX87Register(instr->value());
    X87LoadForUsage(src);
  }

  DeferredNumberTagD* deferred =
      new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
  if (FLAG_inline_new) {
    Register tmp = ToRegister(instr->temp());
    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  if (use_sse2) {
    CpuFeatureScope scope(masm(), SSE2);
    XMMRegister input_reg = ToDoubleRegister(instr->value());
    __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
  } else {
    __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
  }
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Set(reg, Immediate(0));

  PushSafepointRegistersScope scope(this);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(reg, eax);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  __ SmiTag(ToRegister(input));
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  LOperand* input = instr->value();
  Register result = ToRegister(input);
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  if (instr->needs_check()) {
    __ test(result, Immediate(kSmiTagMask));
    DeoptimizeIf(not_zero, instr->environment());
  } else {
    __ AssertSmi(result);
  }
  __ SmiUntag(result);
}


void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
                                      Register temp_reg,
                                      X87Register res_reg,
                                      bool can_convert_undefined_to_nan,
                                      bool deoptimize_on_minus_zero,
                                      LEnvironment* env,
                                      NumberUntagDMode mode) {
  Label load_smi, done;

  X87PrepareToWrite(res_reg);
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);

    // Heap number map check.
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           factory()->heap_number_map());
    if (!can_convert_undefined_to_nan) {
      DeoptimizeIf(not_equal, env);
    } else {
      Label heap_number, convert;
      __ j(equal, &heap_number, Label::kNear);

      // Convert undefined (or hole) to NaN.
      __ cmp(input_reg, factory()->undefined_value());
      DeoptimizeIf(not_equal, env);

      __ bind(&convert);
      ExternalReference nan =
          ExternalReference::address_of_canonical_non_hole_nan();
      __ fld_d(Operand::StaticVariable(nan));
      __ jmp(&done, Label::kNear);

      __ bind(&heap_number);
    }
    // Heap number to x87 conversion.
    __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
      __ fldz();
      __ FCmp();
      __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
      __ j(not_zero, &done, Label::kNear);

      // Use general purpose registers to check if we have -0.0.
      __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
      __ test(temp_reg, Immediate(HeapNumber::kSignMask));
      __ j(zero, &done, Label::kNear);

      // Pop FPU stack before deoptimizing.
      __ fstp(0);
      DeoptimizeIf(not_zero, env);
    }
    __ jmp(&done, Label::kNear);
  } else {
    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
  }

  __ bind(&load_smi);
  // Clobbering a temp is faster than re-tagging the
  // input register since we avoid dependencies.
  __ mov(temp_reg, input_reg);
  __ SmiUntag(temp_reg);  // Untag smi before converting to float.
  __ push(temp_reg);
  __ fild_s(Operand(esp, 0));
  __ add(esp, Immediate(kPointerSize));
  __ bind(&done);
  X87CommitWrite(res_reg);
}


void LCodeGen::EmitNumberUntagD(Register input_reg,
                                Register temp_reg,
                                XMMRegister result_reg,
                                bool can_convert_undefined_to_nan,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env,
                                NumberUntagDMode mode) {
  Label convert, load_smi, done;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);

    // Heap number map check.
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           factory()->heap_number_map());
    if (can_convert_undefined_to_nan) {
      __ j(not_equal, &convert, Label::kNear);
    } else {
      DeoptimizeIf(not_equal, env);
    }

    // Heap number to XMM conversion.
    __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));

    if (deoptimize_on_minus_zero) {
      XMMRegister xmm_scratch = double_scratch0();
      __ xorps(xmm_scratch, xmm_scratch);
      __ ucomisd(result_reg, xmm_scratch);
      __ j(not_zero, &done, Label::kNear);
      __ movmskpd(temp_reg, result_reg);
      __ test_b(temp_reg, 1);
      DeoptimizeIf(not_zero, env);
    }
    __ jmp(&done, Label::kNear);

    if (can_convert_undefined_to_nan) {
      __ bind(&convert);

      // Convert undefined (and hole) to NaN.
      __ cmp(input_reg, factory()->undefined_value());
      DeoptimizeIf(not_equal, env);

      ExternalReference nan =
          ExternalReference::address_of_canonical_non_hole_nan();
      __ movsd(result_reg, Operand::StaticVariable(nan));
      __ jmp(&done, Label::kNear);
    }
  } else {
    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
  }

  __ bind(&load_smi);
  // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the
  // input register since we avoid dependencies.
  __ mov(temp_reg, input_reg);
  __ SmiUntag(temp_reg);  // Untag smi before converting to float.
  __ Cvtsi2sd(result_reg, Operand(temp_reg));
  __ bind(&done);
}
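
// Note on deoptimize_on_minus_zero above (illustrative): ucomisd treats
// -0.0 and +0.0 as equal, so a compare against zero cannot tell them apart.
// movmskpd copies the sign bits of the double lanes into the low bits of a
// GPR; bit 0 set after an equal-to-zero compare means the loaded value was
// -0.0, and the code deoptimizes.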


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
  Register input_reg = ToRegister(instr->value());

  if (instr->truncating()) {
    Label no_heap_number, check_bools, check_false;

    // Heap number map check.
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           factory()->heap_number_map());
    __ j(not_equal, &no_heap_number, Label::kNear);
    __ TruncateHeapNumberToI(input_reg, input_reg);
    __ jmp(done);

    __ bind(&no_heap_number);
    // Check for Oddballs. Undefined/False is converted to zero and True to
    // one for truncating conversions.
    __ cmp(input_reg, factory()->undefined_value());
    __ j(not_equal, &check_bools, Label::kNear);
    __ Set(input_reg, Immediate(0));
    __ jmp(done);

    __ bind(&check_bools);
    __ cmp(input_reg, factory()->true_value());
    __ j(not_equal, &check_false, Label::kNear);
    __ Set(input_reg, Immediate(1));
    __ jmp(done);

    __ bind(&check_false);
    __ cmp(input_reg, factory()->false_value());
    __ RecordComment("Deferred TaggedToI: cannot truncate");
    DeoptimizeIf(not_equal, instr->environment());
    __ Set(input_reg, Immediate(0));
    __ jmp(done);
  } else {
    Label bailout;
    XMMRegister scratch = (instr->temp() != NULL)
        ? ToDoubleRegister(instr->temp())
        : no_xmm_reg;
    __ TaggedToI(input_reg, input_reg, scratch,
                 instr->hydrogen()->GetMinusZeroMode(), &bailout);
    __ jmp(done);
    __ bind(&bailout);
    DeoptimizeIf(no_condition, instr->environment());
  }
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen,
                      LTaggedToI* instr,
                      const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredTaggedToI(instr_, done());
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register input_reg = ToRegister(input);
  ASSERT(input_reg.is(ToRegister(instr->result())));

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred =
        new(zone()) DeferredTaggedToI(this, instr, x87_stack_);

    __ JumpIfNotSmi(input_reg, deferred->entry());
    __ SmiUntag(input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* temp = instr->temp();
  ASSERT(temp->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  bool deoptimize_on_minus_zero =
      instr->hydrogen()->deoptimize_on_minus_zero();
  Register temp_reg = ToRegister(temp);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    XMMRegister result_reg = ToDoubleRegister(result);
    EmitNumberUntagD(input_reg,
                     temp_reg,
                     result_reg,
                     instr->hydrogen()->can_convert_undefined_to_nan(),
                     deoptimize_on_minus_zero,
                     instr->environment(),
                     mode);
  } else {
    EmitNumberUntagDNoSSE2(input_reg,
                           temp_reg,
                           ToX87Register(instr->result()),
                           instr->hydrogen()->can_convert_undefined_to_nan(),
                           deoptimize_on_minus_zero,
                           instr->environment(),
                           mode);
  }
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
      CpuFeatureScope scope(masm(), SSE2);
      XMMRegister input_reg = ToDoubleRegister(input);
      __ TruncateDoubleToI(result_reg, input_reg);
    } else {
      X87Register input_reg = ToX87Register(input);
      X87Fxch(input_reg);
      __ TruncateX87TOSToI(result_reg);
    }
  } else {
    Label bailout, done;
    if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
      CpuFeatureScope scope(masm(), SSE2);
      XMMRegister input_reg = ToDoubleRegister(input);
      XMMRegister xmm_scratch = double_scratch0();
      __ DoubleToI(result_reg, input_reg, xmm_scratch,
          instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
    } else {
      X87Register input_reg = ToX87Register(input);
      X87Fxch(input_reg);
      __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
                   &bailout, Label::kNear);
    }
    __ jmp(&done, Label::kNear);
    __ bind(&bailout);
    DeoptimizeIf(no_condition, instr->environment());
    __ bind(&done);
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());
  Register result_reg = ToRegister(result);

  Label bailout, done;
  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    XMMRegister input_reg = ToDoubleRegister(input);
    XMMRegister xmm_scratch = double_scratch0();
    __ DoubleToI(result_reg, input_reg, xmm_scratch,
        instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
  } else {
    X87Register input_reg = ToX87Register(input);
    X87Fxch(input_reg);
    __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
                 &bailout, Label::kNear);
  }
  __ jmp(&done, Label::kNear);
  __ bind(&bailout);
  DeoptimizeIf(no_condition, instr->environment());
  __ bind(&done);

  __ SmiTag(result_reg);
  DeoptimizeIf(overflow, instr->environment());
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ test(ToOperand(input), Immediate(kSmiTagMask));
  DeoptimizeIf(not_zero, instr->environment());
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    LOperand* input = instr->value();
    __ test(ToOperand(input), Immediate(kSmiTagMask));
    DeoptimizeIf(zero, instr->environment());
  }
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
            static_cast<int8_t>(first));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr->environment());
    } else {
      DeoptimizeIf(below, instr->environment());
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
                static_cast<int8_t>(last));
        DeoptimizeIf(above, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
    } else {
      __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
      __ and_(temp, mask);
      __ cmp(temp, tag);
      DeoptimizeIf(not_equal, instr->environment());
    }
  }
}
void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  if (instr->hydrogen()->object_in_new_space()) {
    Register reg = ToRegister(instr->value());
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ cmp(reg, Operand::ForCell(cell));
  } else {
    Operand operand = ToOperand(instr->value());
    __ cmp(operand, object);
  }
  DeoptimizeIf(not_equal, instr->environment());
}


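// Deferred code for LCheckMaps when the map has a migration target: ask the
// runtime to migrate the instance. kTryMigrateInstance signals failure by
// returning a smi, in which case we deoptimize.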
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ xor_(esi, esi);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);

    __ test(eax, Immediate(kSmiTagMask));
  }
  DeoptimizeIf(zero, instr->environment());
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen,
                      LCheckMaps* instr,
                      Register object,
                      const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->CanOmitMapChecks()) return;

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->has_migration_target()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
    __ bind(deferred->check_maps());
  }

  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
  Label success;
  for (int i = 0; i < map_set.size() - 1; i++) {
    Handle<Map> map = map_set.at(i).handle();
    __ CompareMap(reg, map);
    __ j(equal, &success, Label::kNear);
  }

  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
  __ CompareMap(reg, map);
  if (instr->hydrogen()->has_migration_target()) {
    __ j(not_equal, deferred->entry());
  } else {
    DeoptimizeIf(not_equal, instr->environment());
  }

  __ bind(&success);
}


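// The clamp instructions below squeeze a value into the uint8 range [0, 255],
// as required e.g. for pixel array element stores: one variant each for an
// untagged double, an untagged int32, and an arbitrary tagged input.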
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
  XMMRegister xmm_scratch = double_scratch0();
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  CpuFeatureScope scope(masm(), SSE2);

  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register input_reg = ToRegister(instr->unclamped());
  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
  XMMRegister xmm_scratch = double_scratch0();
  Label is_smi, done, heap_number;

  __ JumpIfSmi(input_reg, &is_smi);

  // Check for heap number.
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr->environment());
  __ mov(input_reg, 0);
  __ jmp(&done, Label::kNear);

  // Heap number.
  __ bind(&heap_number);
  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // Smi.
  __ bind(&is_smi);
  __ SmiUntag(input_reg);
  __ ClampUint8(input_reg);
  __ bind(&done);
}


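// SSE2-free variant of DoClampTToUint8 that clamps via integer bit twiddling
// on the raw double words. Rounding is round-half-to-even ("banker's
// rounding"): for example 126.5 clamps to 126 and 127.5 to 128, while 255.5
// rounds up to 256, overflows bit 31, and is caught by the largest_value
// path, yielding 255.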
void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  Register scratch = ToRegister(instr->scratch());
  Register scratch2 = ToRegister(instr->scratch2());
  Register scratch3 = ToRegister(instr->scratch3());
  Label is_smi, done, heap_number, valid_exponent,
      largest_value, zero_result, maybe_nan_or_infinity;

  __ JumpIfSmi(input_reg, &is_smi);

  // Check for heap number.
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr->environment());
  __ jmp(&zero_result, Label::kNear);

  // Heap number.
  __ bind(&heap_number);

  // Surprisingly, all of the hand-crafted bit-manipulations below are much
  // faster than the x86 FPU built-in instruction, especially since "banker's
  // rounding" would be very expensive to implement there.

  // Get exponent word.
  __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));

  // Test for negative values --> clamp to zero.
  __ test(scratch, scratch);
  __ j(negative, &zero_result, Label::kNear);

  // Get exponent alone in scratch2.
  __ mov(scratch2, scratch);
  __ and_(scratch2, HeapNumber::kExponentMask);
  __ shr(scratch2, HeapNumber::kExponentShift);
  __ j(zero, &zero_result, Label::kNear);
  __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
  __ j(negative, &zero_result, Label::kNear);

  const uint32_t non_int8_exponent = 7;
  __ cmp(scratch2, Immediate(non_int8_exponent + 1));
  // If the exponent is too big, check for special values.
  __ j(greater, &maybe_nan_or_infinity, Label::kNear);

  __ bind(&valid_exponent);
  // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
  // < 7. The shift bias is the number of bits to shift the mantissa such that
  // with an exponent of 7 the top-most one ends up in bit 30, allowing
  // detection of the rounding overflow of 255.5 to 256 (bit 31 goes from 0
  // to 1).
  int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
  __ lea(result_reg, MemOperand(scratch2, shift_bias));
  // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the
  // top bits of the mantissa.
  __ and_(scratch, HeapNumber::kMantissaMask);
  // Put back the implicit 1 of the mantissa.
  __ or_(scratch, 1 << HeapNumber::kExponentShift);
  // Shift up to round.
  __ shl_cl(scratch);
  // Use "banker's rounding" to spec: If fractional part of number is 0.5, then
  // use the bit in the "ones" place and add it to the "halves" place, which
  // has the effect of rounding to even.
  __ mov(scratch2, scratch);
  const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
  const uint32_t one_bit_shift = one_half_bit_shift + 1;
  __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
  __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
  Label no_round;
  __ j(less, &no_round, Label::kNear);
  Label round_up;
  __ mov(scratch2, Immediate(1 << one_half_bit_shift));
  __ j(greater, &round_up, Label::kNear);
  __ test(scratch3, scratch3);
  __ j(not_zero, &round_up, Label::kNear);
  __ mov(scratch2, scratch);
  __ and_(scratch2, Immediate(1 << one_bit_shift));
  __ shr(scratch2, 1);
  __ bind(&round_up);
  __ add(scratch, scratch2);
  __ j(overflow, &largest_value, Label::kNear);
  __ bind(&no_round);
  __ shr(scratch, 23);
  __ mov(result_reg, scratch);
  __ jmp(&done, Label::kNear);

  __ bind(&maybe_nan_or_infinity);
  // Check for NaN/Infinity, all other values map to 255.
  __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
  __ j(not_equal, &largest_value, Label::kNear);

  // Check for NaN, which differs from Infinity in that at least one mantissa
  // bit is set.
  __ and_(scratch, HeapNumber::kMantissaMask);
  __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
  __ j(not_zero, &zero_result, Label::kNear);  // M != 0 --> NaN.
  // Infinity -> Fall through to map to 255.

  __ bind(&largest_value);
  __ mov(result_reg, Immediate(255));
  __ jmp(&done, Label::kNear);

  __ bind(&zero_result);
  __ xor_(result_reg, result_reg);
  __ jmp(&done, Label::kNear);

  // Smi.
  __ bind(&is_smi);
  if (!input_reg.is(result_reg)) {
    __ mov(result_reg, input_reg);
  }
  __ SmiUntag(result_reg);
  __ ClampUint8(result_reg);
  __ bind(&done);
}


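// Emits the inline allocation fast path. The deferred slow path (see
// DoDeferredAllocate below) calls into the runtime when inline allocation
// fails or when the constant size exceeds the regular heap-object limit.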
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate V8_FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen,
                     LAllocate* instr,
                     const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredAllocate(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr, x87_stack_);

  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
    } else {
      __ jmp(deferred->entry());
    }
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ mov(temp, (size / kPointerSize) - 1);
    } else {
      temp = ToRegister(instr->size());
      __ shr(temp, kPointerSizeLog2);
      __ dec(temp);
    }
    Label loop;
    __ bind(&loop);
    __ mov(FieldOperand(result, temp, times_pointer_size, 0),
        isolate()->factory()->one_pointer_filler_map());
    __ dec(temp);
    __ j(not_zero, &loop);
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, Immediate(Smi::FromInt(0)));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    __ SmiTag(ToRegister(instr->size()));
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ push(Immediate(Smi::FromInt(size)));
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ push(Immediate(Smi::FromInt(flags)));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, eax);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(eax));
  __ push(eax);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


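// A regexp literal is materialized at most once per literal site and cached
// in the literals array; later executions only clone the boilerplate object
// with the unrolled copy loop below.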
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  Label materialized;
  // Registers will be used as follows:
  // ecx = literals array.
  // ebx = regexp literal.
  // eax = regexp literal clone.
  // esi = context.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadHeapObject(ecx, instr->hydrogen()->literals());
  __ mov(ebx, FieldOperand(ecx, literal_offset));
  __ cmp(ebx, factory()->undefined_value());
  __ j(not_equal, &materialized, Label::kNear);

  // Create regexp literal using runtime function.
  // Result will be in eax.
  __ push(ecx);
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->pattern()));
  __ push(Immediate(instr->hydrogen()->flags()));
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(ebx, eax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated, Label::kNear);

  __ bind(&runtime_allocate);
  __ push(ebx);
  __ push(Immediate(Smi::FromInt(size)));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(ebx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ mov(edx, FieldOperand(ebx, i));
    __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
    __ mov(FieldOperand(eax, i), edx);
    __ mov(FieldOperand(eax, i + kPointerSize), ecx);
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ mov(edx, FieldOperand(ebx, size - kPointerSize));
    __ mov(FieldOperand(eax, size - kPointerSize), edx);
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(instr->hydrogen()->language_mode(),
                            instr->hydrogen()->is_generator());
    __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  } else {
    __ push(esi);
    __ push(Immediate(instr->hydrogen()->shared_info()));
    __ push(Immediate(pretenure ? factory()->true_value()
                                : factory()->false_value()));
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  LOperand* input = instr->value();
  EmitPushTaggedOperand(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Condition final_branch_condition = EmitTypeofIs(instr, input);
  if (final_branch_condition != no_condition) {
    EmitBranch(instr, final_branch_condition);
  }
}


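// Emits the compare for "typeof x == 'literal'" and returns the condition to
// branch on; e.g. 'number' becomes a smi check plus a heap-number map
// compare. Returns no_condition if the literal matches no type and an
// unconditional jump to the false label was emitted instead.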
Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Handle<String> type_name = instr->type_literal();
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);
  int next_block = GetNextEmittedBlock();

  Label::Distance true_distance = left_block == next_block ? Label::kNear
                                                           : Label::kFar;
  Label::Distance false_distance = right_block == next_block ? Label::kNear
                                                             : Label::kFar;
  Condition final_branch_condition = no_condition;
  if (type_name->Equals(heap()->number_string())) {
    __ JumpIfSmi(input, true_label, true_distance);
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
           factory()->heap_number_map());
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->float32x4_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FLOAT32x4_TYPE, input);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->int32x4_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, INT32x4_TYPE, input);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->string_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label, false_distance);
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = zero;

  } else if (type_name->Equals(heap()->symbol_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->boolean_string())) {
    __ cmp(input, factory()->true_value());
    __ j(equal, true_label, true_distance);
    __ cmp(input, factory()->false_value());
    final_branch_condition = equal;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
    __ cmp(input, factory()->null_value());
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->undefined_string())) {
    __ cmp(input, factory()->undefined_value());
    __ j(equal, true_label, true_distance);
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = not_zero;

  } else if (type_name->Equals(heap()->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label, true_distance);
    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->object_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    if (!FLAG_harmony_typeof) {
      __ cmp(input, factory()->null_value());
      __ j(equal, true_label, true_distance);
    }
    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
    __ j(below, false_label, false_distance);
    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ j(above, false_label, false_distance);
    // Check for undetectable objects => false.
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = zero;

  } else {
    __ jmp(false_label, false_distance);
  }
  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->temp());

  EmitIsConstructCall(temp);
  EmitBranch(instr, equal);
}


void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(not_equal, &check_frame_marker, Label::kNear);
  __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons
  // (the needed return address), even though the implementation of LAZY and
  // EAGER is now identical. When LAZY is eventually completely folded into
  // EAGER, remove the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }
  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(no_condition, instr->environment(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


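// Stack checks come in two flavors. At function entry the StackCheck builtin
// is called inline; at backwards branches the call is moved into deferred
// code so the loop back-edge stays short.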
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen,
                       LStackCheck* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(above_equal, &done, Label::kNear);

    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(esi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr, x87_stack_);
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  __ cmp(eax, isolate()->factory()->undefined_value());
  DeoptimizeIf(equal, instr->environment());

  __ cmp(eax, isolate()->factory()->null_value());
  DeoptimizeIf(equal, instr->environment());

  __ test(eax, Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr->environment());

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
  DeoptimizeIf(below_equal, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(&call_runtime);

  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(eax);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
         isolate()->factory()->meta_map());
  DeoptimizeIf(not_equal, instr->environment());
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ cmp(result, Immediate(Smi::FromInt(0)));
  __ j(not_equal, &load_cache, Label::kNear);
  __ mov(result, isolate()->factory()->empty_fixed_array());
  __ jmp(&done, Label::kNear);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ mov(result,
         FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ mov(result,
         FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  __ test(result, result);
  DeoptimizeIf(equal, instr->environment());
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmp(ToRegister(instr->map()),
         FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  Label out_of_object, done;
  __ cmp(index, Immediate(0));
  __ j(less, &out_of_object, Label::kNear);
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ neg(index);
  // Index is now equal to out of object property index plus 1.
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}


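// Boxes a 128-bit SIMD value of type T (Float32x4 or Int32x4): allocate the
// heap object, falling back to a deferred runtime call when inline
// allocation fails or is disabled, then store the raw 16 bytes into the
// value field.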
template<class T>
void LCodeGen::HandleSIMD128ToTagged(LSIMD128ToTagged* instr) {
  class DeferredSIMD128ToTagged V8_FINAL : public LDeferredCode {
   public:
    DeferredSIMD128ToTagged(LCodeGen* codegen,
                            LInstruction* instr,
                            Runtime::FunctionId id,
                            const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr), id_(id) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LInstruction* instr_;
    Runtime::FunctionId id_;
  };

  CpuFeatureScope scope(masm(), SSE2);
  XMMRegister input_reg = ToSIMD128Register(instr->value());
  Register reg = ToRegister(instr->result());
  Register tmp = ToRegister(instr->temp());

  DeferredSIMD128ToTagged* deferred = new(zone()) DeferredSIMD128ToTagged(
      this, instr, static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()),
      x87_stack_);
  if (FLAG_inline_new) {
    __ AllocateSIMDHeapObject(T::kSize, reg, tmp, deferred->entry(),
        static_cast<Heap::RootListIndex>(T::kMapRootIndex()));
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ movups(FieldOperand(reg, T::kValueOffset), input_reg);
}


void LCodeGen::DoSIMD128ToTagged(LSIMD128ToTagged* instr) {
  if (instr->value()->IsFloat32x4Register()) {
    HandleSIMD128ToTagged<Float32x4>(instr);
  } else {
    ASSERT(instr->value()->IsInt32x4Register());
    HandleSIMD128ToTagged<Int32x4>(instr);
  }
}


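// Unboxes a tagged value into a SIMD register, deoptimizing unless the input
// is a heap object with T's instance type.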
template<class T>
void LCodeGen::HandleTaggedToSIMD128(LTaggedToSIMD128* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsSIMD128Register());

  Register input_reg = ToRegister(input);
  Register temp_reg = ToRegister(instr->temp());
  XMMRegister result_reg = ToSIMD128Register(result);

  CpuFeatureScope scope(masm(), SSE2);
  __ test(input_reg, Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr->environment());
  __ CmpObjectType(input_reg, T::kInstanceType, temp_reg);
  DeoptimizeIf(not_equal, instr->environment());
  __ movups(result_reg, FieldOperand(input_reg, T::kValueOffset));
}


void LCodeGen::DoTaggedToSIMD128(LTaggedToSIMD128* instr) {
  if (instr->representation().IsFloat32x4()) {
    HandleTaggedToSIMD128<Float32x4>(instr);
  } else {
    ASSERT(instr->representation().IsInt32x4());
    HandleTaggedToSIMD128<Int32x4>(instr);
  }
}


void LCodeGen::DoNullarySIMDOperation(LNullarySIMDOperation* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  switch (instr->op()) {
    case kFloat32x4Zero: {
      XMMRegister result_reg = ToFloat32x4Register(instr->result());
      __ xorps(result_reg, result_reg);
      break;
    }
    case kInt32x4Zero: {
      XMMRegister result_reg = ToInt32x4Register(instr->result());
      __ xorps(result_reg, result_reg);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoUnarySIMDOperation(LUnarySIMDOperation* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  uint8_t select = 0;
  switch (instr->op()) {
    case kSIMD128Change: {
      Comment(";;; deoptimize: can not perform representation change "
              "for float32x4 or int32x4");
      DeoptimizeIf(no_condition, instr->environment());
      break;
    }
    case kFloat32x4Abs:
    case kFloat32x4Neg:
    case kFloat32x4Reciprocal:
    case kFloat32x4ReciprocalSqrt:
    case kFloat32x4Sqrt: {
      ASSERT(instr->value()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
      XMMRegister input_reg = ToFloat32x4Register(instr->value());
      switch (instr->op()) {
        case kFloat32x4Abs:
          __ absps(input_reg);
          break;
        case kFloat32x4Neg:
          __ negateps(input_reg);
          break;
        case kFloat32x4Reciprocal:
          __ rcpps(input_reg, input_reg);
          break;
        case kFloat32x4ReciprocalSqrt:
          __ rsqrtps(input_reg, input_reg);
          break;
        case kFloat32x4Sqrt:
          __ sqrtps(input_reg, input_reg);
          break;
        default:
          UNREACHABLE();
          break;
      }
      break;
    }
    case kInt32x4Not:
    case kInt32x4Neg: {
      ASSERT(instr->value()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
      XMMRegister input_reg = ToInt32x4Register(instr->value());
      switch (instr->op()) {
        case kInt32x4Not:
          __ notps(input_reg);
          break;
        case kInt32x4Neg:
          __ pnegd(input_reg);
          break;
        default:
          UNREACHABLE();
          break;
      }
      break;
    }
    case kFloat32x4BitsToInt32x4:
    case kFloat32x4ToInt32x4: {
      ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
      XMMRegister input_reg = ToFloat32x4Register(instr->value());
      XMMRegister result_reg = ToInt32x4Register(instr->result());
      if (instr->op() == kFloat32x4BitsToInt32x4) {
        if (!result_reg.is(input_reg)) {
          __ movaps(result_reg, input_reg);
        }
      } else {
        ASSERT(instr->op() == kFloat32x4ToInt32x4);
        __ cvtps2dq(result_reg, input_reg);
      }
      break;
    }
    case kInt32x4BitsToFloat32x4:
    case kInt32x4ToFloat32x4: {
      ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
      XMMRegister input_reg = ToInt32x4Register(instr->value());
      XMMRegister result_reg = ToFloat32x4Register(instr->result());
      if (instr->op() == kInt32x4BitsToFloat32x4) {
        if (!result_reg.is(input_reg)) {
          __ movaps(result_reg, input_reg);
        }
      } else {
        ASSERT(instr->op() == kInt32x4ToFloat32x4);
        __ cvtdq2ps(result_reg, input_reg);
      }
      break;
    }
    case kFloat32x4Splat: {
      ASSERT(instr->hydrogen()->value()->representation().IsDouble());
      XMMRegister input_reg = ToDoubleRegister(instr->value());
      XMMRegister result_reg = ToFloat32x4Register(instr->result());
      XMMRegister xmm_scratch = xmm0;
      __ xorps(xmm_scratch, xmm_scratch);
      __ cvtsd2ss(xmm_scratch, input_reg);
      __ shufps(xmm_scratch, xmm_scratch, 0x0);
      __ movaps(result_reg, xmm_scratch);
      break;
    }
    case kInt32x4Splat: {
      ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
      Register input_reg = ToRegister(instr->value());
      XMMRegister result_reg = ToInt32x4Register(instr->result());
      __ movd(result_reg, input_reg);
      __ shufps(result_reg, result_reg, 0x0);
      break;
    }
    case kInt32x4GetSignMask: {
      ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
      XMMRegister input_reg = ToInt32x4Register(instr->value());
      Register result = ToRegister(instr->result());
      __ movmskps(result, input_reg);
      break;
    }
    case kFloat32x4GetSignMask: {
      ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
      XMMRegister input_reg = ToFloat32x4Register(instr->value());
      Register result = ToRegister(instr->result());
      __ movmskps(result, input_reg);
      break;
    }
    // Deliberate fall-through: each label increments select once, so GetW
    // ends up with lane 3, GetZ with lane 2, GetY with lane 1, GetX with 0.
    case kFloat32x4GetW:
      select++;
    case kFloat32x4GetZ:
      select++;
    case kFloat32x4GetY:
      select++;
    case kFloat32x4GetX: {
      ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
      XMMRegister input_reg = ToFloat32x4Register(instr->value());
      XMMRegister result = ToDoubleRegister(instr->result());
      XMMRegister xmm_scratch = result.is(input_reg) ? xmm0 : result;

      if (select == 0x0) {
        __ xorps(xmm_scratch, xmm_scratch);
        __ cvtss2sd(xmm_scratch, input_reg);
        if (!xmm_scratch.is(result)) {
          __ movaps(result, xmm_scratch);
        }
      } else {
        __ pshufd(xmm_scratch, input_reg, select);
        if (!xmm_scratch.is(result)) {
          __ xorps(result, result);
        }
        __ cvtss2sd(result, xmm_scratch);
      }
      break;
    }
    case kInt32x4GetFlagX:
    case kInt32x4GetFlagY:
    case kInt32x4GetFlagZ:
    case kInt32x4GetFlagW: {
      ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());

      switch (instr->op()) {
        case kInt32x4GetFlagX:
          select = 0x0;
          break;
        case kInt32x4GetFlagY:
          select = 0x1;
          break;
        case kInt32x4GetFlagZ:
          select = 0x2;
          break;
        case kInt32x4GetFlagW:
          select = 0x3;
          break;
        default:
          UNREACHABLE();
          break;
      }

      XMMRegister input_reg = ToInt32x4Register(instr->value());
      Register result = ToRegister(instr->result());
      if (select == 0x0) {
        __ movd(result, input_reg);
      } else {
        if (CpuFeatures::IsSupported(SSE4_1)) {
          CpuFeatureScope scope(masm(), SSE4_1);
          __ extractps(result, input_reg, select);
        } else {
          XMMRegister xmm_scratch = xmm0;
          __ pshufd(xmm_scratch, input_reg, select);
          __ movd(result, xmm_scratch);
        }
      }

      Label false_value, done;
      __ test(result, result);
      __ j(zero, &false_value, Label::kNear);
      __ LoadRoot(result, Heap::kTrueValueRootIndex);
      __ jmp(&done, Label::kNear);
      __ bind(&false_value);
      __ LoadRoot(result, Heap::kFalseValueRootIndex);
      __ bind(&done);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


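// Note the deliberate case fall-throughs for the kFloat32x4With* and
// kInt32x4With* groups below: each label increments imm8 once, so entering
// at WithW yields lane 3, WithZ lane 2, WithY lane 1, and WithX lane 0.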
void LCodeGen::DoBinarySIMDOperation(LBinarySIMDOperation* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  uint8_t imm8 = 0;  // for with operation
  switch (instr->op()) {
    case kFloat32x4Add:
    case kFloat32x4Sub:
    case kFloat32x4Mul:
    case kFloat32x4Div:
    case kFloat32x4Min:
    case kFloat32x4Max: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsFloat32x4());
      XMMRegister left_reg = ToFloat32x4Register(instr->left());
      XMMRegister right_reg = ToFloat32x4Register(instr->right());
      switch (instr->op()) {
        case kFloat32x4Add:
          __ addps(left_reg, right_reg);
          break;
        case kFloat32x4Sub:
          __ subps(left_reg, right_reg);
          break;
        case kFloat32x4Mul:
          __ mulps(left_reg, right_reg);
          break;
        case kFloat32x4Div:
          __ divps(left_reg, right_reg);
          break;
        case kFloat32x4Min:
          __ minps(left_reg, right_reg);
          break;
        case kFloat32x4Max:
          __ maxps(left_reg, right_reg);
          break;
        default:
          UNREACHABLE();
          break;
      }
      break;
    }
    case kFloat32x4Scale: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsDouble());
      XMMRegister left_reg = ToFloat32x4Register(instr->left());
      XMMRegister right_reg = ToDoubleRegister(instr->right());
      XMMRegister scratch_reg = xmm0;
      __ xorps(scratch_reg, scratch_reg);
      __ cvtsd2ss(scratch_reg, right_reg);
      __ shufps(scratch_reg, scratch_reg, 0x0);
      __ mulps(left_reg, scratch_reg);
      break;
    }
    case kFloat32x4Shuffle: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
      if (instr->hydrogen()->right()->IsConstant() &&
          HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
        int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
        uint8_t select = static_cast<uint8_t>(value & 0xFF);
        XMMRegister left_reg = ToFloat32x4Register(instr->left());
        __ shufps(left_reg, left_reg, select);
        break;
      } else {
        Comment(";;; deoptimize: non-constant selector for shuffle");
        DeoptimizeIf(no_condition, instr->environment());
        break;
      }
    }
    case kInt32x4Shuffle: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
      if (instr->hydrogen()->right()->IsConstant() &&
          HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
        int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
        uint8_t select = static_cast<uint8_t>(value & 0xFF);
        XMMRegister left_reg = ToInt32x4Register(instr->left());
        __ pshufd(left_reg, left_reg, select);
        break;
      } else {
        Comment(";;; deoptimize: non-constant selector for shuffle");
        DeoptimizeIf(no_condition, instr->environment());
        break;
      }
    }
    case kInt32x4ShiftLeft:
    case kInt32x4ShiftRight:
    case kInt32x4ShiftRightArithmetic: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
      if (instr->hydrogen()->right()->IsConstant() &&
          HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
        int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
        uint8_t shift = static_cast<uint8_t>(value & 0xFF);
        XMMRegister left_reg = ToInt32x4Register(instr->left());
        switch (instr->op()) {
          case kInt32x4ShiftLeft:
            __ pslld(left_reg, shift);
            break;
          case kInt32x4ShiftRight:
            __ psrld(left_reg, shift);
            break;
          case kInt32x4ShiftRightArithmetic:
            __ psrad(left_reg, shift);
            break;
          default:
            UNREACHABLE();
            break;
        }
      } else {
        XMMRegister left_reg = ToInt32x4Register(instr->left());
        Register shift = ToRegister(instr->right());
        XMMRegister xmm_scratch = double_scratch0();
        __ movd(xmm_scratch, shift);
        switch (instr->op()) {
          case kInt32x4ShiftLeft:
            __ pslld(left_reg, xmm_scratch);
            break;
          case kInt32x4ShiftRight:
            __ psrld(left_reg, xmm_scratch);
            break;
          case kInt32x4ShiftRightArithmetic:
            __ psrad(left_reg, xmm_scratch);
            break;
          default:
            UNREACHABLE();
            break;
        }
      }
      break;
    }
    case kFloat32x4LessThan:
    case kFloat32x4LessThanOrEqual:
    case kFloat32x4Equal:
    case kFloat32x4NotEqual:
    case kFloat32x4GreaterThanOrEqual:
    case kFloat32x4GreaterThan: {
      ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsFloat32x4());
      XMMRegister left_reg = ToFloat32x4Register(instr->left());
      XMMRegister right_reg = ToFloat32x4Register(instr->right());
      XMMRegister result_reg = ToInt32x4Register(instr->result());
      // When the result register aliases the right operand, the compare is
      // emitted with reversed operands: the strict predicate becomes the
      // negated non-strict one and vice versa.
      switch (instr->op()) {
        case kFloat32x4LessThan:
          if (result_reg.is(left_reg)) {
            __ cmpltps(result_reg, right_reg);
          } else if (result_reg.is(right_reg)) {
            __ cmpnleps(result_reg, left_reg);
          } else {
            __ movaps(result_reg, left_reg);
            __ cmpltps(result_reg, right_reg);
          }
          break;
        case kFloat32x4LessThanOrEqual:
          if (result_reg.is(left_reg)) {
            __ cmpleps(result_reg, right_reg);
          } else if (result_reg.is(right_reg)) {
            __ cmpnltps(result_reg, left_reg);
          } else {
            __ movaps(result_reg, left_reg);
            __ cmpleps(result_reg, right_reg);
          }
          break;
        case kFloat32x4Equal:
          if (result_reg.is(left_reg)) {
            __ cmpeqps(result_reg, right_reg);
          } else if (result_reg.is(right_reg)) {
            __ cmpeqps(result_reg, left_reg);
          } else {
            __ movaps(result_reg, left_reg);
            __ cmpeqps(result_reg, right_reg);
          }
          break;
        case kFloat32x4NotEqual:
          if (result_reg.is(left_reg)) {
            __ cmpneqps(result_reg, right_reg);
          } else if (result_reg.is(right_reg)) {
            __ cmpneqps(result_reg, left_reg);
          } else {
            __ movaps(result_reg, left_reg);
            __ cmpneqps(result_reg, right_reg);
          }
          break;
        case kFloat32x4GreaterThanOrEqual:
          if (result_reg.is(left_reg)) {
            __ cmpnltps(result_reg, right_reg);
          } else if (result_reg.is(right_reg)) {
            __ cmpleps(result_reg, left_reg);
          } else {
            __ movaps(result_reg, left_reg);
            __ cmpnltps(result_reg, right_reg);
          }
          break;
        case kFloat32x4GreaterThan:
          if (result_reg.is(left_reg)) {
            __ cmpnleps(result_reg, right_reg);
          } else if (result_reg.is(right_reg)) {
            __ cmpltps(result_reg, left_reg);
          } else {
            __ movaps(result_reg, left_reg);
            __ cmpnleps(result_reg, right_reg);
          }
          break;
        default:
          UNREACHABLE();
          break;
      }
      break;
    }
    case kInt32x4And:
    case kInt32x4Or:
    case kInt32x4Xor:
    case kInt32x4Add:
    case kInt32x4Sub:
    case kInt32x4Mul:
    case kInt32x4GreaterThan:
    case kInt32x4Equal:
    case kInt32x4LessThan: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsInt32x4());
      XMMRegister left_reg = ToInt32x4Register(instr->left());
      XMMRegister right_reg = ToInt32x4Register(instr->right());
      switch (instr->op()) {
        case kInt32x4And:
          __ andps(left_reg, right_reg);
          break;
        case kInt32x4Or:
          __ orps(left_reg, right_reg);
          break;
        case kInt32x4Xor:
          __ xorps(left_reg, right_reg);
          break;
        case kInt32x4Add:
          __ paddd(left_reg, right_reg);
          break;
        case kInt32x4Sub:
          __ psubd(left_reg, right_reg);
          break;
        case kInt32x4Mul:
          if (CpuFeatures::IsSupported(SSE4_1)) {
            CpuFeatureScope scope(masm(), SSE4_1);
            __ pmulld(left_reg, right_reg);
          } else {
            // The algorithm is from http://stackoverflow.com/questions/10500766/sse-multiplication-of-4-32-bit-integers
            XMMRegister xmm_scratch = xmm0;
            __ movaps(xmm_scratch, left_reg);
            __ pmuludq(left_reg, right_reg);
            // Shift the copies down by four bytes so lanes 1 and 3 line up
            // for the second multiply.
            __ psrldq(xmm_scratch, 4);
            __ psrldq(right_reg, 4);
            __ pmuludq(xmm_scratch, right_reg);
            __ pshufd(left_reg, left_reg, 8);
            __ pshufd(xmm_scratch, xmm_scratch, 8);
            __ punpackldq(left_reg, xmm_scratch);
          }
          break;
        case kInt32x4GreaterThan:
          __ pcmpgtd(left_reg, right_reg);
          break;
        case kInt32x4Equal:
          __ pcmpeqd(left_reg, right_reg);
          break;
        case kInt32x4LessThan: {
          XMMRegister xmm_scratch = xmm0;
          __ movaps(xmm_scratch, right_reg);
          __ pcmpgtd(xmm_scratch, left_reg);
          __ movaps(left_reg, xmm_scratch);
          break;
        }
        default:
          UNREACHABLE();
          break;
      }
      break;
    }
    case kFloat32x4WithW:
      imm8++;
    case kFloat32x4WithZ:
      imm8++;
    case kFloat32x4WithY:
      imm8++;
    case kFloat32x4WithX: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsDouble());
      XMMRegister left_reg = ToFloat32x4Register(instr->left());
      XMMRegister right_reg = ToDoubleRegister(instr->right());
      XMMRegister xmm_scratch = xmm0;
      __ xorps(xmm_scratch, xmm_scratch);
      __ cvtsd2ss(xmm_scratch, right_reg);
      if (CpuFeatures::IsSupported(SSE4_1)) {
        imm8 = imm8 << 4;
        CpuFeatureScope scope(masm(), SSE4_1);
        __ insertps(left_reg, xmm_scratch, imm8);
      } else {
        __ sub(esp, Immediate(kFloat32x4Size));
        __ movups(Operand(esp, 0), left_reg);
        __ movss(Operand(esp, imm8 * kFloatSize), xmm_scratch);
        __ movups(left_reg, Operand(esp, 0));
        __ add(esp, Immediate(kFloat32x4Size));
      }
      break;
    }
    case kInt32x4WithW:
      imm8++;
    case kInt32x4WithZ:
      imm8++;
    case kInt32x4WithY:
      imm8++;
    case kInt32x4WithX: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsInteger32());
      XMMRegister left_reg = ToInt32x4Register(instr->left());
      Register right_reg = ToRegister(instr->right());
      if (CpuFeatures::IsSupported(SSE4_1)) {
        CpuFeatureScope scope(masm(), SSE4_1);
        __ pinsrd(left_reg, right_reg, imm8);
      } else {
        __ sub(esp, Immediate(kInt32x4Size));
        __ movdqu(Operand(esp, 0), left_reg);
        __ mov(Operand(esp, imm8 * kFloatSize), right_reg);
        __ movdqu(left_reg, Operand(esp, 0));
        __ add(esp, Immediate(kInt32x4Size));
      }
      break;
    }
    case kInt32x4WithFlagW:
      imm8++;
    case kInt32x4WithFlagZ:
      imm8++;
    case kInt32x4WithFlagY:
      imm8++;
    case kInt32x4WithFlagX: {
      ASSERT(instr->left()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
      ASSERT(instr->hydrogen()->right()->representation().IsTagged());
      HType type = instr->hydrogen()->right()->type();
      XMMRegister left_reg = ToInt32x4Register(instr->left());
      Register right_reg = ToRegister(instr->right());
      Label load_false_value, done;
      if (type.IsBoolean()) {
        __ sub(esp, Immediate(kInt32x4Size));
        __ movups(Operand(esp, 0), left_reg);
        __ CompareRoot(right_reg, Heap::kTrueValueRootIndex);
        __ j(not_equal, &load_false_value, Label::kNear);
      } else {
        Comment(";;; deoptimize: other types for int32x4.withFlagX/Y/Z/W.");
        DeoptimizeIf(no_condition, instr->environment());
        break;
      }
      // Load the true value (an all-ones lane).
      __ mov(Operand(esp, imm8 * kFloatSize), Immediate(0xFFFFFFFF));
      __ jmp(&done, Label::kNear);
      __ bind(&load_false_value);
      __ mov(Operand(esp, imm8 * kFloatSize), Immediate(0x0));
      __ bind(&done);
      __ movups(left_reg, Operand(esp, 0));
      __ add(esp, Immediate(kInt32x4Size));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


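// kInt32x4Select computes (mask & trueValue) | (~mask & falseValue) with
// plain bitwise ops; ~mask & falseValue is formed first in a scratch
// register so the sequence tolerates the result aliasing any of the inputs.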
void LCodeGen::DoTernarySIMDOperation(LTernarySIMDOperation* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  switch (instr->op()) {
    case kInt32x4Select: {
      ASSERT(instr->hydrogen()->first()->representation().IsInt32x4());
      ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->third()->representation().IsFloat32x4());

      XMMRegister mask_reg = ToInt32x4Register(instr->first());
      XMMRegister left_reg = ToFloat32x4Register(instr->second());
      XMMRegister right_reg = ToFloat32x4Register(instr->third());
      XMMRegister result_reg = ToFloat32x4Register(instr->result());
      XMMRegister temp_reg = xmm0;

      // Copy mask.
      __ movaps(temp_reg, mask_reg);
      // Invert it.
      __ notps(temp_reg);
      // temp_reg = temp_reg & falseValue.
      __ andps(temp_reg, right_reg);

      if (!result_reg.is(mask_reg)) {
        if (result_reg.is(left_reg)) {
          // result_reg = result_reg & trueValue.
          __ andps(result_reg, mask_reg);
          // out = result_reg | temp_reg.
          __ orps(result_reg, temp_reg);
        } else {
          __ movaps(result_reg, mask_reg);
          // result_reg = result_reg & trueValue.
          __ andps(result_reg, left_reg);
          // out = result_reg | temp_reg.
          __ orps(result_reg, temp_reg);
        }
      } else {
        // result_reg = result_reg & trueValue.
        __ andps(result_reg, left_reg);
        // out = result_reg | temp_reg.
        __ orps(result_reg, temp_reg);
      }
      break;
    }
    case kFloat32x4ShuffleMix: {
      ASSERT(instr->first()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->first()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->third()->representation().IsInteger32());
      if (instr->hydrogen()->third()->IsConstant() &&
          HConstant::cast(instr->hydrogen()->third())->HasInteger32Value()) {
        int32_t value = ToInteger32(LConstantOperand::cast(instr->third()));
        uint8_t select = static_cast<uint8_t>(value & 0xFF);
        XMMRegister first_reg = ToFloat32x4Register(instr->first());
        XMMRegister second_reg = ToFloat32x4Register(instr->second());
        __ shufps(first_reg, second_reg, select);
        break;
      } else {
        Comment(";;; deoptimize: non-constant selector for shuffle");
        DeoptimizeIf(no_condition, instr->environment());
        break;
      }
    }
    case kFloat32x4Clamp: {
      ASSERT(instr->first()->Equals(instr->result()));
      ASSERT(instr->hydrogen()->first()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
      ASSERT(instr->hydrogen()->third()->representation().IsFloat32x4());

      XMMRegister value_reg = ToFloat32x4Register(instr->first());
      XMMRegister lower_reg = ToFloat32x4Register(instr->second());
      XMMRegister upper_reg = ToFloat32x4Register(instr->third());
      __ minps(value_reg, upper_reg);
      __ maxps(value_reg, lower_reg);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


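// The constructors assemble the four lanes in a stack slot and load the
// result with a single movups; kInt32x4Bool additionally turns each tagged
// boolean into an all-ones or all-zeros lane mask (and deoptimizes for
// non-boolean inputs).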
void LCodeGen::DoQuarternarySIMDOperation(LQuarternarySIMDOperation* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  switch (instr->op()) {
    case kFloat32x4Constructor: {
      ASSERT(instr->hydrogen()->x()->representation().IsDouble());
      ASSERT(instr->hydrogen()->y()->representation().IsDouble());
      ASSERT(instr->hydrogen()->z()->representation().IsDouble());
      ASSERT(instr->hydrogen()->w()->representation().IsDouble());
      XMMRegister x_reg = ToDoubleRegister(instr->x());
      XMMRegister y_reg = ToDoubleRegister(instr->y());
      XMMRegister z_reg = ToDoubleRegister(instr->z());
      XMMRegister w_reg = ToDoubleRegister(instr->w());
      XMMRegister result_reg = ToFloat32x4Register(instr->result());
      __ sub(esp, Immediate(kFloat32x4Size));
      __ xorps(xmm0, xmm0);
      __ cvtsd2ss(xmm0, x_reg);
      __ movss(Operand(esp, 0 * kFloatSize), xmm0);
      __ xorps(xmm0, xmm0);
      __ cvtsd2ss(xmm0, y_reg);
      __ movss(Operand(esp, 1 * kFloatSize), xmm0);
      __ xorps(xmm0, xmm0);
      __ cvtsd2ss(xmm0, z_reg);
      __ movss(Operand(esp, 2 * kFloatSize), xmm0);
      __ xorps(xmm0, xmm0);
      __ cvtsd2ss(xmm0, w_reg);
      __ movss(Operand(esp, 3 * kFloatSize), xmm0);
      __ movups(result_reg, Operand(esp, 0 * kFloatSize));
      __ add(esp, Immediate(kFloat32x4Size));
      break;
    }
    case kInt32x4Constructor: {
      ASSERT(instr->hydrogen()->x()->representation().IsInteger32());
      ASSERT(instr->hydrogen()->y()->representation().IsInteger32());
      ASSERT(instr->hydrogen()->z()->representation().IsInteger32());
      ASSERT(instr->hydrogen()->w()->representation().IsInteger32());
      Register x_reg = ToRegister(instr->x());
      Register y_reg = ToRegister(instr->y());
      Register z_reg = ToRegister(instr->z());
      Register w_reg = ToRegister(instr->w());
      XMMRegister result_reg = ToInt32x4Register(instr->result());
      __ sub(esp, Immediate(kInt32x4Size));
      __ mov(Operand(esp, 0 * kInt32Size), x_reg);
      __ mov(Operand(esp, 1 * kInt32Size), y_reg);
      __ mov(Operand(esp, 2 * kInt32Size), z_reg);
      __ mov(Operand(esp, 3 * kInt32Size), w_reg);
      __ movups(result_reg, Operand(esp, 0 * kInt32Size));
      __ add(esp, Immediate(kInt32x4Size));
      break;
    }
    case kInt32x4Bool: {
      ASSERT(instr->hydrogen()->x()->representation().IsTagged());
      ASSERT(instr->hydrogen()->y()->representation().IsTagged());
      ASSERT(instr->hydrogen()->z()->representation().IsTagged());
      ASSERT(instr->hydrogen()->w()->representation().IsTagged());
      HType x_type = instr->hydrogen()->x()->type();
      HType y_type = instr->hydrogen()->y()->type();
      HType z_type = instr->hydrogen()->z()->type();
      HType w_type = instr->hydrogen()->w()->type();
      if (!x_type.IsBoolean() || !y_type.IsBoolean() ||
          !z_type.IsBoolean() || !w_type.IsBoolean()) {
        Comment(";;; deoptimize: other types for int32x4.bool.");
        DeoptimizeIf(no_condition, instr->environment());
        break;
      }
      XMMRegister result_reg = ToInt32x4Register(instr->result());
      Register x_reg = ToRegister(instr->x());
      Register y_reg = ToRegister(instr->y());
      Register z_reg = ToRegister(instr->z());
      Register w_reg = ToRegister(instr->w());
      Label load_false_x, done_x, load_false_y, done_y,
            load_false_z, done_z, load_false_w, done_w;
      __ sub(esp, Immediate(kInt32x4Size));

      __ CompareRoot(x_reg, Heap::kTrueValueRootIndex);
      __ j(not_equal, &load_false_x, Label::kNear);
      __ mov(Operand(esp, 0 * kInt32Size), Immediate(-1));
      __ jmp(&done_x, Label::kNear);
      __ bind(&load_false_x);
      __ mov(Operand(esp, 0 * kInt32Size), Immediate(0x0));
      __ bind(&done_x);

      __ CompareRoot(y_reg, Heap::kTrueValueRootIndex);
      __ j(not_equal, &load_false_y, Label::kNear);
      __ mov(Operand(esp, 1 * kInt32Size), Immediate(-1));
      __ jmp(&done_y, Label::kNear);
      __ bind(&load_false_y);
      __ mov(Operand(esp, 1 * kInt32Size), Immediate(0x0));
      __ bind(&done_y);

      __ CompareRoot(z_reg, Heap::kTrueValueRootIndex);
      __ j(not_equal, &load_false_z, Label::kNear);
      __ mov(Operand(esp, 2 * kInt32Size), Immediate(-1));
      __ jmp(&done_z, Label::kNear);
      __ bind(&load_false_z);
      __ mov(Operand(esp, 2 * kInt32Size), Immediate(0x0));
      __ bind(&done_z);

      __ CompareRoot(w_reg, Heap::kTrueValueRootIndex);
      __ j(not_equal, &load_false_w, Label::kNear);
      __ mov(Operand(esp, 3 * kInt32Size), Immediate(-1));
      __ jmp(&done_w, Label::kNear);
      __ bind(&load_false_w);
      __ mov(Operand(esp, 3 * kInt32Size), Immediate(0x0));
      __ bind(&done_w);

      __ movups(result_reg, Operand(esp, 0));
      __ add(esp, Immediate(kInt32x4Size));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32