1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #if V8_TARGET_ARCH_IA32
32 #include "ia32/lithium-codegen-ia32.h"
34 #include "code-stubs.h"
35 #include "deoptimizer.h"
36 #include "stub-cache.h"
38 #include "hydrogen-osr.h"
44 static SaveFPRegsMode GetSaveFPRegsMode() {
45 // We don't need to save floating point regs when generating the snapshot
46 return CpuFeatures::IsSafeForSnapshot(SSE2) ? kSaveFPRegs : kDontSaveFPRegs;
50 // When invoking builtins, we need to record the safepoint in the middle of
51 // the invoke instruction sequence generated by the macro assembler.
52 class SafepointGenerator V8_FINAL : public CallWrapper {
54 SafepointGenerator(LCodeGen* codegen,
55 LPointerMap* pointers,
56 Safepoint::DeoptMode mode)
60 virtual ~SafepointGenerator() {}
62 virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
64 virtual void AfterCall() const V8_OVERRIDE {
65 codegen_->RecordSafepoint(pointers_, deopt_mode_);
70 LPointerMap* pointers_;
71 Safepoint::DeoptMode deopt_mode_;
77 bool LCodeGen::GenerateCode() {
78 LPhase phase("Z_Code generation", chunk());
82 // Open a frame scope to indicate that there is a frame on the stack. The
83 // MANUAL indicates that the scope shouldn't actually generate code to set up
84 // the frame (that is done in GeneratePrologue).
85 FrameScope frame_scope(masm_, StackFrame::MANUAL);
87 support_aligned_spilled_doubles_ = info()->IsOptimizing();
89 dynamic_frame_alignment_ = info()->IsOptimizing() &&
90 ((chunk()->num_double_slots() > 2 &&
91 !chunk()->graph()->is_recursive()) ||
92 !info()->osr_ast_id().IsNone());
94 return GeneratePrologue() &&
96 GenerateDeferredCode() &&
97 GenerateJumpTable() &&
98 GenerateSafepointTable();
102 void LCodeGen::FinishCode(Handle<Code> code) {
104 code->set_stack_slots(GetStackSlotCount());
105 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
106 if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
107 PopulateDeoptimizationData(code);
108 if (!info()->IsStub()) {
109 Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
111 info()->CommitDependencies(code);
115 void LCodeGen::Abort(BailoutReason reason) {
116 info()->set_bailout_reason(reason);
122 void LCodeGen::MakeSureStackPagesMapped(int offset) {
123 const int kPageSize = 4 * KB;
124 for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
125 __ mov(Operand(esp, offset), eax);
131 void LCodeGen::SaveCallerDoubles() {
132 ASSERT(info()->saves_caller_doubles());
133 ASSERT(NeedsEagerFrame());
134 Comment(";;; Save clobbered callee double registers");
135 CpuFeatureScope scope(masm(), SSE2);
137 BitVector* doubles = chunk()->allocated_double_registers();
138 BitVector::Iterator save_iterator(doubles);
139 while (!save_iterator.Done()) {
140 __ movsd(MemOperand(esp, count * kDoubleSize),
141 XMMRegister::FromAllocationIndex(save_iterator.Current()));
142 save_iterator.Advance();
148 void LCodeGen::RestoreCallerDoubles() {
149 ASSERT(info()->saves_caller_doubles());
150 ASSERT(NeedsEagerFrame());
151 Comment(";;; Restore clobbered callee double registers");
152 CpuFeatureScope scope(masm(), SSE2);
153 BitVector* doubles = chunk()->allocated_double_registers();
154 BitVector::Iterator save_iterator(doubles);
156 while (!save_iterator.Done()) {
157 __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
158 MemOperand(esp, count * kDoubleSize));
159 save_iterator.Advance();
165 bool LCodeGen::GeneratePrologue() {
166 ASSERT(is_generating());
168 if (info()->IsOptimizing()) {
169 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
172 if (strlen(FLAG_stop_at) > 0 &&
173 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
178 // Sloppy mode functions and builtins need to replace the receiver with the
179 // global proxy when called as functions (without an explicit receiver
181 if (info_->this_has_uses() &&
182 info_->strict_mode() == SLOPPY &&
183 !info_->is_native()) {
185 // +1 for return address.
186 int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
187 __ mov(ecx, Operand(esp, receiver_offset));
189 __ cmp(ecx, isolate()->factory()->undefined_value());
190 __ j(not_equal, &ok, Label::kNear);
192 __ mov(ecx, GlobalObjectOperand());
193 __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
195 __ mov(Operand(esp, receiver_offset), ecx);
200 if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
201 // Move state of dynamic frame alignment into edx.
202 __ Move(edx, Immediate(kNoAlignmentPadding));
204 Label do_not_pad, align_loop;
205 STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
206 // Align esp + 4 to a multiple of 2 * kPointerSize.
207 __ test(esp, Immediate(kPointerSize));
208 __ j(not_zero, &do_not_pad, Label::kNear);
209 __ push(Immediate(0));
211 __ mov(edx, Immediate(kAlignmentPaddingPushed));
212 // Copy arguments, receiver, and return address.
213 __ mov(ecx, Immediate(scope()->num_parameters() + 2));
215 __ bind(&align_loop);
216 __ mov(eax, Operand(ebx, 1 * kPointerSize));
217 __ mov(Operand(ebx, 0), eax);
218 __ add(Operand(ebx), Immediate(kPointerSize));
220 __ j(not_zero, &align_loop, Label::kNear);
221 __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
222 __ bind(&do_not_pad);
226 info()->set_prologue_offset(masm_->pc_offset());
227 if (NeedsEagerFrame()) {
228 ASSERT(!frame_is_built_);
229 frame_is_built_ = true;
230 __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
231 info()->AddNoFrameRange(0, masm_->pc_offset());
234 if (info()->IsOptimizing() &&
235 dynamic_frame_alignment_ &&
237 __ test(esp, Immediate(kPointerSize));
238 __ Assert(zero, kFrameIsExpectedToBeAligned);
241 // Reserve space for the stack slots needed by the code.
242 int slots = GetStackSlotCount();
243 ASSERT(slots != 0 || !info()->IsOptimizing());
246 if (dynamic_frame_alignment_) {
249 __ push(Immediate(kNoAlignmentPadding));
252 if (FLAG_debug_code) {
253 __ sub(Operand(esp), Immediate(slots * kPointerSize));
255 MakeSureStackPagesMapped(slots * kPointerSize);
258 __ mov(Operand(eax), Immediate(slots));
261 __ mov(MemOperand(esp, eax, times_4, 0),
262 Immediate(kSlotsZapValue));
264 __ j(not_zero, &loop);
267 __ sub(Operand(esp), Immediate(slots * kPointerSize));
269 MakeSureStackPagesMapped(slots * kPointerSize);
273 if (support_aligned_spilled_doubles_) {
274 Comment(";;; Store dynamic frame alignment tag for spilled doubles");
275 // Store dynamic frame alignment state in the first local.
276 int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
277 if (dynamic_frame_alignment_) {
278 __ mov(Operand(ebp, offset), edx);
280 __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
285 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
290 // Possibly allocate a local context.
291 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
292 if (heap_slots > 0) {
293 Comment(";;; Allocate local context");
294 // Argument to NewContext is the function, which is still in edi.
295 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
296 FastNewContextStub stub(heap_slots);
300 __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
302 RecordSafepoint(Safepoint::kNoLazyDeopt);
303 // Context is returned in eax. It replaces the context passed to us.
304 // It's saved in the stack and kept live in esi.
306 __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
308 // Copy parameters into context if necessary.
309 int num_parameters = scope()->num_parameters();
310 for (int i = 0; i < num_parameters; i++) {
311 Variable* var = scope()->parameter(i);
312 if (var->IsContextSlot()) {
313 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
314 (num_parameters - 1 - i) * kPointerSize;
315 // Load parameter from stack.
316 __ mov(eax, Operand(ebp, parameter_offset));
317 // Store it in the context.
318 int context_offset = Context::SlotOffset(var->index());
319 __ mov(Operand(esi, context_offset), eax);
320 // Update the write barrier. This clobbers eax and ebx.
321 __ RecordWriteContextSlot(esi,
328 Comment(";;; End allocate local context");
332 if (FLAG_trace && info()->IsOptimizing()) {
333 // We have not executed any compiled code yet, so esi still holds the
335 __ CallRuntime(Runtime::kTraceEnter, 0);
337 return !is_aborted();
341 void LCodeGen::GenerateOsrPrologue() {
342 // Generate the OSR entry prologue at the first unknown OSR value, or if there
343 // are none, at the OSR entrypoint instruction.
344 if (osr_pc_offset_ >= 0) return;
346 osr_pc_offset_ = masm()->pc_offset();
348 // Move state of dynamic frame alignment into edx.
349 __ Move(edx, Immediate(kNoAlignmentPadding));
351 if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
352 Label do_not_pad, align_loop;
353 // Align ebp + 4 to a multiple of 2 * kPointerSize.
354 __ test(ebp, Immediate(kPointerSize));
355 __ j(zero, &do_not_pad, Label::kNear);
356 __ push(Immediate(0));
358 __ mov(edx, Immediate(kAlignmentPaddingPushed));
360 // Move all parts of the frame over one word. The frame consists of:
361 // unoptimized frame slots, alignment state, context, frame pointer, return
362 // address, receiver, and the arguments.
363 __ mov(ecx, Immediate(scope()->num_parameters() +
364 5 + graph()->osr()->UnoptimizedFrameSlots()));
366 __ bind(&align_loop);
367 __ mov(eax, Operand(ebx, 1 * kPointerSize));
368 __ mov(Operand(ebx, 0), eax);
369 __ add(Operand(ebx), Immediate(kPointerSize));
371 __ j(not_zero, &align_loop, Label::kNear);
372 __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
373 __ sub(Operand(ebp), Immediate(kPointerSize));
374 __ bind(&do_not_pad);
377 // Save the first local, which is overwritten by the alignment state.
378 Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
379 __ push(alignment_loc);
381 // Set the dynamic frame alignment state.
382 __ mov(alignment_loc, edx);
384 // Adjust the frame size, subsuming the unoptimized frame into the
386 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
388 __ sub(esp, Immediate((slots - 1) * kPointerSize));
392 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
393 if (instr->IsCall()) {
394 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
396 if (!instr->IsLazyBailout() && !instr->IsGap()) {
397 safepoints_.BumpLastLazySafepointIndex();
399 if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
403 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
404 if (!CpuFeatures::IsSupported(SSE2)) {
405 if (instr->IsGoto()) {
406 x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
407 } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
408 !instr->IsGap() && !instr->IsReturn()) {
409 if (instr->ClobbersDoubleRegisters()) {
410 if (instr->HasDoubleRegisterResult()) {
411 ASSERT_EQ(1, x87_stack_.depth());
413 ASSERT_EQ(0, x87_stack_.depth());
416 __ VerifyX87StackDepth(x87_stack_.depth());
422 bool LCodeGen::GenerateJumpTable() {
424 if (jump_table_.length() > 0) {
425 Comment(";;; -------------------- Jump table --------------------");
427 for (int i = 0; i < jump_table_.length(); i++) {
428 __ bind(&jump_table_[i].label);
429 Address entry = jump_table_[i].address;
430 Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
431 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
432 if (id == Deoptimizer::kNotDeoptimizationEntry) {
433 Comment(";;; jump table entry %d.", i);
435 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
437 if (jump_table_[i].needs_frame) {
438 ASSERT(!info()->saves_caller_doubles());
439 __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
440 if (needs_frame.is_bound()) {
441 __ jmp(&needs_frame);
443 __ bind(&needs_frame);
444 __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
445 // This variant of deopt can only be used with stubs. Since we don't
446 // have a function pointer to install in the stack frame that we're
447 // building, install a special marker there instead.
448 ASSERT(info()->IsStub());
449 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
450 // Push a PC inside the function so that the deopt code can find where
451 // the deopt comes from. It doesn't have to be the precise return
452 // address of a "calling" LAZY deopt, it only has to be somewhere
453 // inside the code body.
454 Label push_approx_pc;
455 __ call(&push_approx_pc);
456 __ bind(&push_approx_pc);
457 // Push the continuation which was stashed were the ebp should
458 // be. Replace it with the saved ebp.
459 __ push(MemOperand(esp, 3 * kPointerSize));
460 __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
461 __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
462 __ ret(0); // Call the continuation without clobbering registers.
465 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
466 RestoreCallerDoubles();
468 __ call(entry, RelocInfo::RUNTIME_ENTRY);
471 return !is_aborted();
475 bool LCodeGen::GenerateDeferredCode() {
476 ASSERT(is_generating());
477 if (deferred_.length() > 0) {
478 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
479 LDeferredCode* code = deferred_[i];
480 X87Stack copy(code->x87_stack());
484 instructions_->at(code->instruction_index())->hydrogen_value();
485 RecordAndWritePosition(
486 chunk()->graph()->SourcePositionToScriptPosition(value->position()));
488 Comment(";;; <@%d,#%d> "
489 "-------------------- Deferred %s --------------------",
490 code->instruction_index(),
491 code->instr()->hydrogen_value()->id(),
492 code->instr()->Mnemonic());
493 __ bind(code->entry());
494 if (NeedsDeferredFrame()) {
495 Comment(";;; Build frame");
496 ASSERT(!frame_is_built_);
497 ASSERT(info()->IsStub());
498 frame_is_built_ = true;
499 // Build the frame in such a way that esi isn't trashed.
500 __ push(ebp); // Caller's frame pointer.
501 __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
502 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
503 __ lea(ebp, Operand(esp, 2 * kPointerSize));
504 Comment(";;; Deferred code");
507 if (NeedsDeferredFrame()) {
508 __ bind(code->done());
509 Comment(";;; Destroy frame");
510 ASSERT(frame_is_built_);
511 frame_is_built_ = false;
515 __ jmp(code->exit());
519 // Deferred code is the last part of the instruction sequence. Mark
520 // the generated code as done unless we bailed out.
521 if (!is_aborted()) status_ = DONE;
522 return !is_aborted();
526 bool LCodeGen::GenerateSafepointTable() {
528 if (!info()->IsStub()) {
529 // For lazy deoptimization we need space to patch a call after every call.
530 // Ensure there is always space for such patching, even if the code ends
532 int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
533 while (masm()->pc_offset() < target_offset) {
537 safepoints_.Emit(masm(), GetStackSlotCount());
538 return !is_aborted();
542 Register LCodeGen::ToRegister(int index) const {
543 return Register::FromAllocationIndex(index);
547 X87Register LCodeGen::ToX87Register(int index) const {
548 return X87Register::FromAllocationIndex(index);
552 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
553 return XMMRegister::FromAllocationIndex(index);
557 XMMRegister LCodeGen::ToSIMD128Register(int index) const {
558 return XMMRegister::FromAllocationIndex(index);
562 void LCodeGen::X87LoadForUsage(X87Register reg) {
563 ASSERT(x87_stack_.Contains(reg));
564 x87_stack_.Fxch(reg);
569 void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
570 ASSERT(x87_stack_.Contains(reg1));
571 ASSERT(x87_stack_.Contains(reg2));
572 x87_stack_.Fxch(reg1, 1);
573 x87_stack_.Fxch(reg2);
579 void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
581 ASSERT(Contains(reg) && stack_depth_ > other_slot);
582 int i = ArrayIndex(reg);
584 if (st != other_slot) {
585 int other_i = st2idx(other_slot);
586 X87Register other = stack_[other_i];
587 stack_[other_i] = reg;
591 } else if (other_slot == 0) {
602 int LCodeGen::X87Stack::st2idx(int pos) {
603 return stack_depth_ - pos - 1;
607 int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
608 for (int i = 0; i < stack_depth_; i++) {
609 if (stack_[i].is(reg)) return i;
616 bool LCodeGen::X87Stack::Contains(X87Register reg) {
617 for (int i = 0; i < stack_depth_; i++) {
618 if (stack_[i].is(reg)) return true;
624 void LCodeGen::X87Stack::Free(X87Register reg) {
626 ASSERT(Contains(reg));
627 int i = ArrayIndex(reg);
630 // keep track of how fstp(i) changes the order of elements
631 int tos_i = st2idx(0);
632 stack_[i] = stack_[tos_i];
639 void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
640 if (x87_stack_.Contains(dst)) {
641 x87_stack_.Fxch(dst);
644 x87_stack_.push(dst);
650 void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
651 ASSERT(!src.is_reg_only());
653 case kX87DoubleOperand:
656 case kX87FloatOperand:
668 void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
669 ASSERT(!dst.is_reg_only());
670 x87_stack_.Fxch(src);
672 case kX87DoubleOperand:
684 void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
689 // Mark this register as the next register to write to
690 stack_[stack_depth_] = reg;
694 void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
696 // Assert the reg is prepared to write, but not on the virtual stack yet
697 ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) &&
698 stack_depth_ < X87Register::kNumAllocatableRegisters);
703 void LCodeGen::X87PrepareBinaryOp(
704 X87Register left, X87Register right, X87Register result) {
705 // You need to use DefineSameAsFirst for x87 instructions
706 ASSERT(result.is(left));
707 x87_stack_.Fxch(right, 1);
708 x87_stack_.Fxch(left);
712 void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
713 if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) {
714 bool double_inputs = instr->HasDoubleRegisterInput();
716 // Flush stack from tos down, since FreeX87() will mess with tos
717 for (int i = stack_depth_-1; i >= 0; i--) {
718 X87Register reg = stack_[i];
719 // Skip registers which contain the inputs for the next instruction
720 // when flushing the stack
721 if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
725 if (i < stack_depth_-1) i++;
728 if (instr->IsReturn()) {
729 while (stack_depth_ > 0) {
733 if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
738 void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
739 ASSERT(stack_depth_ <= 1);
740 // If ever used for new stubs producing two pairs of doubles joined into two
741 // phis this assert hits. That situation is not handled, since the two stacks
742 // might have st0 and st1 swapped.
743 if (current_block_id + 1 != goto_instr->block_id()) {
744 // If we have a value on the x87 stack on leaving a block, it must be a
745 // phi input. If the next block we compile is not the join block, we have
746 // to discard the stack state.
752 void LCodeGen::EmitFlushX87ForDeopt() {
753 // The deoptimizer does not support X87 Registers. But as long as we
754 // deopt from a stub its not a problem, since we will re-materialize the
755 // original stub inputs, which can't be double registers.
756 ASSERT(info()->IsStub());
757 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
759 __ VerifyX87StackDepth(x87_stack_.depth());
762 for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0);
766 Register LCodeGen::ToRegister(LOperand* op) const {
767 ASSERT(op->IsRegister());
768 return ToRegister(op->index());
772 X87Register LCodeGen::ToX87Register(LOperand* op) const {
773 ASSERT(op->IsDoubleRegister());
774 return ToX87Register(op->index());
778 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
779 ASSERT(op->IsDoubleRegister());
780 return ToDoubleRegister(op->index());
784 XMMRegister LCodeGen::ToFloat32x4Register(LOperand* op) const {
785 ASSERT(op->IsFloat32x4Register());
786 return ToSIMD128Register(op->index());
790 XMMRegister LCodeGen::ToInt32x4Register(LOperand* op) const {
791 ASSERT(op->IsInt32x4Register());
792 return ToSIMD128Register(op->index());
796 XMMRegister LCodeGen::ToSIMD128Register(LOperand* op) const {
797 ASSERT(op->IsFloat32x4Register() || op->IsInt32x4Register());
798 return ToSIMD128Register(op->index());
802 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
803 return ToRepresentation(op, Representation::Integer32());
807 int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
808 const Representation& r) const {
809 HConstant* constant = chunk_->LookupConstant(op);
810 int32_t value = constant->Integer32Value();
811 if (r.IsInteger32()) return value;
812 ASSERT(r.IsSmiOrTagged());
813 return reinterpret_cast<int32_t>(Smi::FromInt(value));
817 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
818 HConstant* constant = chunk_->LookupConstant(op);
819 ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
820 return constant->handle(isolate());
824 double LCodeGen::ToDouble(LConstantOperand* op) const {
825 HConstant* constant = chunk_->LookupConstant(op);
826 ASSERT(constant->HasDoubleValue());
827 return constant->DoubleValue();
831 ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
832 HConstant* constant = chunk_->LookupConstant(op);
833 ASSERT(constant->HasExternalReferenceValue());
834 return constant->ExternalReferenceValue();
838 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
839 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
843 bool LCodeGen::IsSmi(LConstantOperand* op) const {
844 return chunk_->LookupLiteralRepresentation(op).IsSmi();
848 static int ArgumentsOffsetWithoutFrame(int index) {
850 return -(index + 1) * kPointerSize + kPCOnStackSize;
854 Operand LCodeGen::ToOperand(LOperand* op) const {
855 if (op->IsRegister()) return Operand(ToRegister(op));
856 if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
857 if (op->IsFloat32x4Register()) return Operand(ToFloat32x4Register(op));
858 if (op->IsInt32x4Register()) return Operand(ToInt32x4Register(op));
859 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot() ||
860 op->IsFloat32x4StackSlot() || op->IsInt32x4StackSlot());
861 if (NeedsEagerFrame()) {
862 return Operand(ebp, StackSlotOffset(op->index()));
864 // Retrieve parameter without eager stack-frame relative to the
866 return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
871 Operand LCodeGen::HighOperand(LOperand* op) {
872 ASSERT(op->IsDoubleStackSlot());
873 if (NeedsEagerFrame()) {
874 return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
876 // Retrieve parameter without eager stack-frame relative to the
879 esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
884 void LCodeGen::WriteTranslation(LEnvironment* environment,
885 Translation* translation) {
886 if (environment == NULL) return;
888 // The translation includes one command per value in the environment.
889 int translation_size = environment->translation_size();
890 // The output frame height does not include the parameters.
891 int height = translation_size - environment->parameter_count();
893 WriteTranslation(environment->outer(), translation);
894 bool has_closure_id = !info()->closure().is_null() &&
895 !info()->closure().is_identical_to(environment->closure());
896 int closure_id = has_closure_id
897 ? DefineDeoptimizationLiteral(environment->closure())
898 : Translation::kSelfLiteralId;
899 switch (environment->frame_type()) {
901 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
904 translation->BeginConstructStubFrame(closure_id, translation_size);
907 ASSERT(translation_size == 1);
909 translation->BeginGetterStubFrame(closure_id);
912 ASSERT(translation_size == 2);
914 translation->BeginSetterStubFrame(closure_id);
916 case ARGUMENTS_ADAPTOR:
917 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
920 translation->BeginCompiledStubFrame();
926 int object_index = 0;
927 int dematerialized_index = 0;
928 for (int i = 0; i < translation_size; ++i) {
929 LOperand* value = environment->values()->at(i);
930 AddToTranslation(environment,
933 environment->HasTaggedValueAt(i),
934 environment->HasUint32ValueAt(i),
936 &dematerialized_index);
941 void LCodeGen::AddToTranslation(LEnvironment* environment,
942 Translation* translation,
946 int* object_index_pointer,
947 int* dematerialized_index_pointer) {
948 if (op == LEnvironment::materialization_marker()) {
949 int object_index = (*object_index_pointer)++;
950 if (environment->ObjectIsDuplicateAt(object_index)) {
951 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
952 translation->DuplicateObject(dupe_of);
955 int object_length = environment->ObjectLengthAt(object_index);
956 if (environment->ObjectIsArgumentsAt(object_index)) {
957 translation->BeginArgumentsObject(object_length);
959 translation->BeginCapturedObject(object_length);
961 int dematerialized_index = *dematerialized_index_pointer;
962 int env_offset = environment->translation_size() + dematerialized_index;
963 *dematerialized_index_pointer += object_length;
964 for (int i = 0; i < object_length; ++i) {
965 LOperand* value = environment->values()->at(env_offset + i);
966 AddToTranslation(environment,
969 environment->HasTaggedValueAt(env_offset + i),
970 environment->HasUint32ValueAt(env_offset + i),
971 object_index_pointer,
972 dematerialized_index_pointer);
977 if (op->IsStackSlot()) {
979 translation->StoreStackSlot(op->index());
980 } else if (is_uint32) {
981 translation->StoreUint32StackSlot(op->index());
983 translation->StoreInt32StackSlot(op->index());
985 } else if (op->IsDoubleStackSlot()) {
986 translation->StoreDoubleStackSlot(op->index());
987 } else if (op->IsFloat32x4StackSlot()) {
988 translation->StoreSIMD128StackSlot(op->index(),
989 Translation::FLOAT32x4_STACK_SLOT);
990 } else if (op->IsInt32x4StackSlot()) {
991 translation->StoreSIMD128StackSlot(op->index(),
992 Translation::INT32x4_STACK_SLOT);
993 } else if (op->IsRegister()) {
994 Register reg = ToRegister(op);
996 translation->StoreRegister(reg);
997 } else if (is_uint32) {
998 translation->StoreUint32Register(reg);
1000 translation->StoreInt32Register(reg);
1002 } else if (op->IsDoubleRegister()) {
1003 XMMRegister reg = ToDoubleRegister(op);
1004 translation->StoreDoubleRegister(reg);
1005 } else if (op->IsFloat32x4Register()) {
1006 XMMRegister reg = ToFloat32x4Register(op);
1007 translation->StoreSIMD128Register(reg, Translation::FLOAT32x4_REGISTER);
1008 } else if (op->IsInt32x4Register()) {
1009 XMMRegister reg = ToInt32x4Register(op);
1010 translation->StoreSIMD128Register(reg, Translation::INT32x4_REGISTER);
1011 } else if (op->IsConstantOperand()) {
1012 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
1013 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
1014 translation->StoreLiteral(src_index);
1021 void LCodeGen::CallCodeGeneric(Handle<Code> code,
1022 RelocInfo::Mode mode,
1023 LInstruction* instr,
1024 SafepointMode safepoint_mode) {
1025 ASSERT(instr != NULL);
1026 __ call(code, mode);
1027 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
1029 // Signal that we don't inline smi code before these stubs in the
1030 // optimizing code generator.
1031 if (code->kind() == Code::BINARY_OP_IC ||
1032 code->kind() == Code::COMPARE_IC) {
1038 void LCodeGen::CallCode(Handle<Code> code,
1039 RelocInfo::Mode mode,
1040 LInstruction* instr) {
1041 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
1045 void LCodeGen::CallRuntime(const Runtime::Function* fun,
1047 LInstruction* instr,
1048 SaveFPRegsMode save_doubles) {
1049 ASSERT(instr != NULL);
1050 ASSERT(instr->HasPointerMap());
1052 __ CallRuntime(fun, argc, save_doubles);
1054 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
1056 ASSERT(info()->is_calling());
1060 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
1061 if (context->IsRegister()) {
1062 if (!ToRegister(context).is(esi)) {
1063 __ mov(esi, ToRegister(context));
1065 } else if (context->IsStackSlot()) {
1066 __ mov(esi, ToOperand(context));
1067 } else if (context->IsConstantOperand()) {
1068 HConstant* constant =
1069 chunk_->LookupConstant(LConstantOperand::cast(context));
1070 __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
1076 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
1078 LInstruction* instr,
1079 LOperand* context) {
1080 LoadContextFromDeferred(context);
1082 __ CallRuntimeSaveDoubles(id);
1083 RecordSafepointWithRegisters(
1084 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
1086 ASSERT(info()->is_calling());
1090 void LCodeGen::RegisterEnvironmentForDeoptimization(
1091 LEnvironment* environment, Safepoint::DeoptMode mode) {
1092 if (!environment->HasBeenRegistered()) {
1093 // Physical stack frame layout:
1094 // -x ............. -4 0 ..................................... y
1095 // [incoming arguments] [spill slots] [pushed outgoing arguments]
1097 // Layout of the environment:
1098 // 0 ..................................................... size-1
1099 // [parameters] [locals] [expression stack including arguments]
1101 // Layout of the translation:
1102 // 0 ........................................................ size - 1 + 4
1103 // [expression stack including arguments] [locals] [4 words] [parameters]
1104 // |>------------ translation_size ------------<|
1106 int frame_count = 0;
1107 int jsframe_count = 0;
1108 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
1110 if (e->frame_type() == JS_FUNCTION) {
1114 Translation translation(&translations_, frame_count, jsframe_count, zone());
1115 WriteTranslation(environment, &translation);
1116 int deoptimization_index = deoptimizations_.length();
1117 int pc_offset = masm()->pc_offset();
1118 environment->Register(deoptimization_index,
1119 translation.index(),
1120 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
1121 deoptimizations_.Add(environment, zone());
1126 void LCodeGen::DeoptimizeIf(Condition cc,
1127 LEnvironment* environment,
1128 Deoptimizer::BailoutType bailout_type) {
1129 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
1130 ASSERT(environment->HasBeenRegistered());
1131 int id = environment->deoptimization_index();
1132 ASSERT(info()->IsOptimizing() || info()->IsStub());
1134 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
1135 if (entry == NULL) {
1136 Abort(kBailoutWasNotPrepared);
1140 if (DeoptEveryNTimes()) {
1141 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
1145 __ mov(eax, Operand::StaticVariable(count));
1146 __ sub(eax, Immediate(1));
1147 __ j(not_zero, &no_deopt, Label::kNear);
1148 if (FLAG_trap_on_deopt) __ int3();
1149 __ mov(eax, Immediate(FLAG_deopt_every_n_times));
1150 __ mov(Operand::StaticVariable(count), eax);
1153 ASSERT(frame_is_built_);
1154 __ call(entry, RelocInfo::RUNTIME_ENTRY);
1156 __ mov(Operand::StaticVariable(count), eax);
1161 // Before Instructions which can deopt, we normally flush the x87 stack. But
1162 // we can have inputs or outputs of the current instruction on the stack,
1163 // thus we need to flush them here from the physical stack to leave it in a
1164 // consistent state.
1165 if (x87_stack_.depth() > 0) {
1167 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
1168 EmitFlushX87ForDeopt();
1172 if (info()->ShouldTrapOnDeopt()) {
1174 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
1179 ASSERT(info()->IsStub() || frame_is_built_);
1180 if (cc == no_condition && frame_is_built_) {
1181 __ call(entry, RelocInfo::RUNTIME_ENTRY);
1183 // We often have several deopts to the same entry, reuse the last
1184 // jump entry if this is the case.
1185 if (jump_table_.is_empty() ||
1186 jump_table_.last().address != entry ||
1187 jump_table_.last().needs_frame != !frame_is_built_ ||
1188 jump_table_.last().bailout_type != bailout_type) {
1189 Deoptimizer::JumpTableEntry table_entry(entry,
1192 jump_table_.Add(table_entry, zone());
1194 if (cc == no_condition) {
1195 __ jmp(&jump_table_.last().label);
1197 __ j(cc, &jump_table_.last().label);
1203 void LCodeGen::DeoptimizeIf(Condition cc,
1204 LEnvironment* environment) {
1205 Deoptimizer::BailoutType bailout_type = info()->IsStub()
1207 : Deoptimizer::EAGER;
1208 DeoptimizeIf(cc, environment, bailout_type);
1212 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
1213 int length = deoptimizations_.length();
1214 if (length == 0) return;
1215 Handle<DeoptimizationInputData> data =
1216 factory()->NewDeoptimizationInputData(length, TENURED);
1218 Handle<ByteArray> translations =
1219 translations_.CreateByteArray(isolate()->factory());
1220 data->SetTranslationByteArray(*translations);
1221 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
1222 data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
1223 if (info_->IsOptimizing()) {
1224 // Reference to shared function info does not change between phases.
1225 AllowDeferredHandleDereference allow_handle_dereference;
1226 data->SetSharedFunctionInfo(*info_->shared_info());
1228 data->SetSharedFunctionInfo(Smi::FromInt(0));
1231 Handle<FixedArray> literals =
1232 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
1233 { AllowDeferredHandleDereference copy_handles;
1234 for (int i = 0; i < deoptimization_literals_.length(); i++) {
1235 literals->set(i, *deoptimization_literals_[i]);
1237 data->SetLiteralArray(*literals);
1240 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
1241 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
1243 // Populate the deoptimization entries.
1244 for (int i = 0; i < length; i++) {
1245 LEnvironment* env = deoptimizations_[i];
1246 data->SetAstId(i, env->ast_id());
1247 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
1248 data->SetArgumentsStackHeight(i,
1249 Smi::FromInt(env->arguments_stack_height()));
1250 data->SetPc(i, Smi::FromInt(env->pc_offset()));
1252 code->set_deoptimization_data(*data);
1256 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
1257 int result = deoptimization_literals_.length();
1258 for (int i = 0; i < deoptimization_literals_.length(); ++i) {
1259 if (deoptimization_literals_[i].is_identical_to(literal)) return i;
1261 deoptimization_literals_.Add(literal, zone());
1266 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
1267 ASSERT(deoptimization_literals_.length() == 0);
1269 const ZoneList<Handle<JSFunction> >* inlined_closures =
1270 chunk()->inlined_closures();
1272 for (int i = 0, length = inlined_closures->length();
1275 DefineDeoptimizationLiteral(inlined_closures->at(i));
1278 inlined_function_count_ = deoptimization_literals_.length();
1282 void LCodeGen::RecordSafepointWithLazyDeopt(
1283 LInstruction* instr, SafepointMode safepoint_mode) {
1284 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
1285 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
1287 ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
1288 RecordSafepointWithRegisters(
1289 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
1294 void LCodeGen::RecordSafepoint(
1295 LPointerMap* pointers,
1296 Safepoint::Kind kind,
1298 Safepoint::DeoptMode deopt_mode) {
1299 ASSERT(kind == expected_safepoint_kind_);
1300 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
1301 Safepoint safepoint =
1302 safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
1303 for (int i = 0; i < operands->length(); i++) {
1304 LOperand* pointer = operands->at(i);
1305 if (pointer->IsStackSlot()) {
1306 safepoint.DefinePointerSlot(pointer->index(), zone());
1307 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
1308 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
1314 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
1315 Safepoint::DeoptMode mode) {
1316 RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
1320 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
1321 LPointerMap empty_pointers(zone());
1322 RecordSafepoint(&empty_pointers, mode);
1326 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
1328 Safepoint::DeoptMode mode) {
1329 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
1333 void LCodeGen::RecordAndWritePosition(int position) {
1334 if (position == RelocInfo::kNoPosition) return;
1335 masm()->positions_recorder()->RecordPosition(position);
1336 masm()->positions_recorder()->WriteRecordedPositions();
1340 static const char* LabelType(LLabel* label) {
1341 if (label->is_loop_header()) return " (loop header)";
1342 if (label->is_osr_entry()) return " (OSR entry)";
1347 void LCodeGen::DoLabel(LLabel* label) {
1348 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
1349 current_instruction_,
1350 label->hydrogen_value()->id(),
1353 __ bind(label->label());
1354 current_block_ = label->block_id();
1359 void LCodeGen::DoParallelMove(LParallelMove* move) {
1360 resolver_.Resolve(move);
1364 void LCodeGen::DoGap(LGap* gap) {
1365 for (int i = LGap::FIRST_INNER_POSITION;
1366 i <= LGap::LAST_INNER_POSITION;
1368 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1369 LParallelMove* move = gap->GetParallelMove(inner_pos);
1370 if (move != NULL) DoParallelMove(move);
1375 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
1380 void LCodeGen::DoParameter(LParameter* instr) {
1385 void LCodeGen::DoCallStub(LCallStub* instr) {
1386 ASSERT(ToRegister(instr->context()).is(esi));
1387 ASSERT(ToRegister(instr->result()).is(eax));
1388 switch (instr->hydrogen()->major_key()) {
1389 case CodeStub::RegExpExec: {
1390 RegExpExecStub stub;
1391 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1394 case CodeStub::SubString: {
1396 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1399 case CodeStub::StringCompare: {
1400 StringCompareStub stub;
1401 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1410 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1411 GenerateOsrPrologue();
1415 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
1416 Register dividend = ToRegister(instr->dividend());
1417 int32_t divisor = instr->divisor();
1418 ASSERT(dividend.is(ToRegister(instr->result())));
1420 // Theoretically, a variation of the branch-free code for integer division by
1421 // a power of 2 (calculating the remainder via an additional multiplication
1422 // (which gets simplified to an 'and') and subtraction) should be faster, and
1423 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1424 // indicate that positive dividends are heavily favored, so the branching
1425 // version performs better.
1426 HMod* hmod = instr->hydrogen();
1427 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1428 Label dividend_is_not_negative, done;
1429 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1430 __ test(dividend, dividend);
1431 __ j(not_sign, ÷nd_is_not_negative, Label::kNear);
1432 // Note that this is correct even for kMinInt operands.
1434 __ and_(dividend, mask);
1436 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1437 DeoptimizeIf(zero, instr->environment());
1439 __ jmp(&done, Label::kNear);
1442 __ bind(÷nd_is_not_negative);
1443 __ and_(dividend, mask);
1448 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1449 Register dividend = ToRegister(instr->dividend());
1450 int32_t divisor = instr->divisor();
1451 ASSERT(ToRegister(instr->result()).is(eax));
1454 DeoptimizeIf(no_condition, instr->environment());
1458 __ TruncatingDiv(dividend, Abs(divisor));
1459 __ imul(edx, edx, Abs(divisor));
1460 __ mov(eax, dividend);
1463 // Check for negative zero.
1464 HMod* hmod = instr->hydrogen();
1465 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1466 Label remainder_not_zero;
1467 __ j(not_zero, &remainder_not_zero, Label::kNear);
1468 __ cmp(dividend, Immediate(0));
1469 DeoptimizeIf(less, instr->environment());
1470 __ bind(&remainder_not_zero);
1475 void LCodeGen::DoModI(LModI* instr) {
1476 HMod* hmod = instr->hydrogen();
1478 Register left_reg = ToRegister(instr->left());
1479 ASSERT(left_reg.is(eax));
1480 Register right_reg = ToRegister(instr->right());
1481 ASSERT(!right_reg.is(eax));
1482 ASSERT(!right_reg.is(edx));
1483 Register result_reg = ToRegister(instr->result());
1484 ASSERT(result_reg.is(edx));
1487 // Check for x % 0, idiv would signal a divide error. We have to
1488 // deopt in this case because we can't return a NaN.
1489 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1490 __ test(right_reg, Operand(right_reg));
1491 DeoptimizeIf(zero, instr->environment());
1494 // Check for kMinInt % -1, idiv would signal a divide error. We
1495 // have to deopt if we care about -0, because we can't return that.
1496 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1497 Label no_overflow_possible;
1498 __ cmp(left_reg, kMinInt);
1499 __ j(not_equal, &no_overflow_possible, Label::kNear);
1500 __ cmp(right_reg, -1);
1501 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1502 DeoptimizeIf(equal, instr->environment());
1504 __ j(not_equal, &no_overflow_possible, Label::kNear);
1505 __ Move(result_reg, Immediate(0));
1506 __ jmp(&done, Label::kNear);
1508 __ bind(&no_overflow_possible);
1511 // Sign extend dividend in eax into edx:eax.
1514 // If we care about -0, test if the dividend is <0 and the result is 0.
1515 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1516 Label positive_left;
1517 __ test(left_reg, Operand(left_reg));
1518 __ j(not_sign, &positive_left, Label::kNear);
1520 __ test(result_reg, Operand(result_reg));
1521 DeoptimizeIf(zero, instr->environment());
1522 __ jmp(&done, Label::kNear);
1523 __ bind(&positive_left);
1530 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1531 Register dividend = ToRegister(instr->dividend());
1532 int32_t divisor = instr->divisor();
1533 Register result = ToRegister(instr->result());
1534 ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
1535 ASSERT(!result.is(dividend));
1537 // Check for (0 / -x) that will produce negative zero.
1538 HDiv* hdiv = instr->hydrogen();
1539 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1540 __ test(dividend, dividend);
1541 DeoptimizeIf(zero, instr->environment());
1543 // Check for (kMinInt / -1).
1544 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1545 __ cmp(dividend, kMinInt);
1546 DeoptimizeIf(zero, instr->environment());
1548 // Deoptimize if remainder will not be 0.
1549 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1550 divisor != 1 && divisor != -1) {
1551 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1552 __ test(dividend, Immediate(mask));
1553 DeoptimizeIf(not_zero, instr->environment());
1555 __ Move(result, dividend);
1556 int32_t shift = WhichPowerOf2Abs(divisor);
1558 // The arithmetic shift is always OK, the 'if' is an optimization only.
1559 if (shift > 1) __ sar(result, 31);
1560 __ shr(result, 32 - shift);
1561 __ add(result, dividend);
1562 __ sar(result, shift);
1564 if (divisor < 0) __ neg(result);
1568 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1569 Register dividend = ToRegister(instr->dividend());
1570 int32_t divisor = instr->divisor();
1571 ASSERT(ToRegister(instr->result()).is(edx));
1574 DeoptimizeIf(no_condition, instr->environment());
1578 // Check for (0 / -x) that will produce negative zero.
1579 HDiv* hdiv = instr->hydrogen();
1580 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1581 __ test(dividend, dividend);
1582 DeoptimizeIf(zero, instr->environment());
1585 __ TruncatingDiv(dividend, Abs(divisor));
1586 if (divisor < 0) __ neg(edx);
1588 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1590 __ imul(eax, eax, divisor);
1591 __ sub(eax, dividend);
1592 DeoptimizeIf(not_equal, instr->environment());
1597 void LCodeGen::DoDivI(LDivI* instr) {
1598 HBinaryOperation* hdiv = instr->hydrogen();
1599 Register dividend = ToRegister(instr->left());
1600 Register divisor = ToRegister(instr->right());
1601 Register remainder = ToRegister(instr->temp());
1602 Register result = ToRegister(instr->result());
1603 ASSERT(dividend.is(eax));
1604 ASSERT(remainder.is(edx));
1605 ASSERT(result.is(eax));
1606 ASSERT(!divisor.is(eax));
1607 ASSERT(!divisor.is(edx));
1610 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1611 __ test(divisor, divisor);
1612 DeoptimizeIf(zero, instr->environment());
1615 // Check for (0 / -x) that will produce negative zero.
1616 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1617 Label dividend_not_zero;
1618 __ test(dividend, dividend);
1619 __ j(not_zero, ÷nd_not_zero, Label::kNear);
1620 __ test(divisor, divisor);
1621 DeoptimizeIf(sign, instr->environment());
1622 __ bind(÷nd_not_zero);
1625 // Check for (kMinInt / -1).
1626 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1627 Label dividend_not_min_int;
1628 __ cmp(dividend, kMinInt);
1629 __ j(not_zero, ÷nd_not_min_int, Label::kNear);
1630 __ cmp(divisor, -1);
1631 DeoptimizeIf(zero, instr->environment());
1632 __ bind(÷nd_not_min_int);
1635 // Sign extend to edx (= remainder).
1639 if (hdiv->IsMathFloorOfDiv()) {
1641 __ test(remainder, remainder);
1642 __ j(zero, &done, Label::kNear);
1643 __ xor_(remainder, divisor);
1644 __ sar(remainder, 31);
1645 __ add(result, remainder);
1647 } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1648 // Deoptimize if remainder is not 0.
1649 __ test(remainder, remainder);
1650 DeoptimizeIf(not_zero, instr->environment());
1655 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1656 Register dividend = ToRegister(instr->dividend());
1657 int32_t divisor = instr->divisor();
1658 ASSERT(dividend.is(ToRegister(instr->result())));
1660 // If the divisor is positive, things are easy: There can be no deopts and we
1661 // can simply do an arithmetic right shift.
1662 if (divisor == 1) return;
1663 int32_t shift = WhichPowerOf2Abs(divisor);
1665 __ sar(dividend, shift);
1669 // If the divisor is negative, we have to negate and handle edge cases.
1670 Label not_kmin_int, done;
1672 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1673 DeoptimizeIf(zero, instr->environment());
1675 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1676 // Note that we could emit branch-free code, but that would need one more
1678 if (divisor == -1) {
1679 DeoptimizeIf(overflow, instr->environment());
1681 __ j(no_overflow, ¬_kmin_int, Label::kNear);
1682 __ mov(dividend, Immediate(kMinInt / divisor));
1683 __ jmp(&done, Label::kNear);
1686 __ bind(¬_kmin_int);
1687 __ sar(dividend, shift);
1692 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1693 Register dividend = ToRegister(instr->dividend());
1694 int32_t divisor = instr->divisor();
1695 ASSERT(ToRegister(instr->result()).is(edx));
1698 DeoptimizeIf(no_condition, instr->environment());
1702 // Check for (0 / -x) that will produce negative zero.
1703 HMathFloorOfDiv* hdiv = instr->hydrogen();
1704 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1705 __ test(dividend, dividend);
1706 DeoptimizeIf(zero, instr->environment());
1709 // Easy case: We need no dynamic check for the dividend and the flooring
1710 // division is the same as the truncating division.
1711 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1712 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1713 __ TruncatingDiv(dividend, Abs(divisor));
1714 if (divisor < 0) __ neg(edx);
1718 // In the general case we may need to adjust before and after the truncating
1719 // division to get a flooring division.
1720 Register temp = ToRegister(instr->temp3());
1721 ASSERT(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
1722 Label needs_adjustment, done;
1723 __ cmp(dividend, Immediate(0));
1724 __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
1725 __ TruncatingDiv(dividend, Abs(divisor));
1726 if (divisor < 0) __ neg(edx);
1727 __ jmp(&done, Label::kNear);
1728 __ bind(&needs_adjustment);
1729 __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
1730 __ TruncatingDiv(temp, Abs(divisor));
1731 if (divisor < 0) __ neg(edx);
1737 void LCodeGen::DoMulI(LMulI* instr) {
1738 Register left = ToRegister(instr->left());
1739 LOperand* right = instr->right();
1741 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1742 __ mov(ToRegister(instr->temp()), left);
1745 if (right->IsConstantOperand()) {
1746 // Try strength reductions on the multiplication.
1747 // All replacement instructions are at most as long as the imul
1748 // and have better latency.
1749 int constant = ToInteger32(LConstantOperand::cast(right));
1750 if (constant == -1) {
1752 } else if (constant == 0) {
1753 __ xor_(left, Operand(left));
1754 } else if (constant == 2) {
1755 __ add(left, Operand(left));
1756 } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1757 // If we know that the multiplication can't overflow, it's safe to
1758 // use instructions that don't set the overflow flag for the
1765 __ lea(left, Operand(left, left, times_2, 0));
1771 __ lea(left, Operand(left, left, times_4, 0));
1777 __ lea(left, Operand(left, left, times_8, 0));
1783 __ imul(left, left, constant);
1787 __ imul(left, left, constant);
1790 if (instr->hydrogen()->representation().IsSmi()) {
1793 __ imul(left, ToOperand(right));
1796 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1797 DeoptimizeIf(overflow, instr->environment());
1800 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1801 // Bail out if the result is supposed to be negative zero.
1803 __ test(left, Operand(left));
1804 __ j(not_zero, &done, Label::kNear);
1805 if (right->IsConstantOperand()) {
1806 if (ToInteger32(LConstantOperand::cast(right)) < 0) {
1807 DeoptimizeIf(no_condition, instr->environment());
1808 } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
1809 __ cmp(ToRegister(instr->temp()), Immediate(0));
1810 DeoptimizeIf(less, instr->environment());
1813 // Test the non-zero operand for negative sign.
1814 __ or_(ToRegister(instr->temp()), ToOperand(right));
1815 DeoptimizeIf(sign, instr->environment());
1822 void LCodeGen::DoBitI(LBitI* instr) {
1823 LOperand* left = instr->left();
1824 LOperand* right = instr->right();
1825 ASSERT(left->Equals(instr->result()));
1826 ASSERT(left->IsRegister());
1828 if (right->IsConstantOperand()) {
1829 int32_t right_operand =
1830 ToRepresentation(LConstantOperand::cast(right),
1831 instr->hydrogen()->representation());
1832 switch (instr->op()) {
1833 case Token::BIT_AND:
1834 __ and_(ToRegister(left), right_operand);
1837 __ or_(ToRegister(left), right_operand);
1839 case Token::BIT_XOR:
1840 if (right_operand == int32_t(~0)) {
1841 __ not_(ToRegister(left));
1843 __ xor_(ToRegister(left), right_operand);
1851 switch (instr->op()) {
1852 case Token::BIT_AND:
1853 __ and_(ToRegister(left), ToOperand(right));
1856 __ or_(ToRegister(left), ToOperand(right));
1858 case Token::BIT_XOR:
1859 __ xor_(ToRegister(left), ToOperand(right));
1869 void LCodeGen::DoShiftI(LShiftI* instr) {
1870 LOperand* left = instr->left();
1871 LOperand* right = instr->right();
1872 ASSERT(left->Equals(instr->result()));
1873 ASSERT(left->IsRegister());
1874 if (right->IsRegister()) {
1875 ASSERT(ToRegister(right).is(ecx));
1877 switch (instr->op()) {
1879 __ ror_cl(ToRegister(left));
1880 if (instr->can_deopt()) {
1881 __ test(ToRegister(left), ToRegister(left));
1882 DeoptimizeIf(sign, instr->environment());
1886 __ sar_cl(ToRegister(left));
1889 __ shr_cl(ToRegister(left));
1890 if (instr->can_deopt()) {
1891 __ test(ToRegister(left), ToRegister(left));
1892 DeoptimizeIf(sign, instr->environment());
1896 __ shl_cl(ToRegister(left));
1903 int value = ToInteger32(LConstantOperand::cast(right));
1904 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1905 switch (instr->op()) {
1907 if (shift_count == 0 && instr->can_deopt()) {
1908 __ test(ToRegister(left), ToRegister(left));
1909 DeoptimizeIf(sign, instr->environment());
1911 __ ror(ToRegister(left), shift_count);
1915 if (shift_count != 0) {
1916 __ sar(ToRegister(left), shift_count);
1920 if (shift_count == 0 && instr->can_deopt()) {
1921 __ test(ToRegister(left), ToRegister(left));
1922 DeoptimizeIf(sign, instr->environment());
1924 __ shr(ToRegister(left), shift_count);
1928 if (shift_count != 0) {
1929 if (instr->hydrogen_value()->representation().IsSmi() &&
1930 instr->can_deopt()) {
1931 if (shift_count != 1) {
1932 __ shl(ToRegister(left), shift_count - 1);
1934 __ SmiTag(ToRegister(left));
1935 DeoptimizeIf(overflow, instr->environment());
1937 __ shl(ToRegister(left), shift_count);
1949 void LCodeGen::DoSubI(LSubI* instr) {
1950 LOperand* left = instr->left();
1951 LOperand* right = instr->right();
1952 ASSERT(left->Equals(instr->result()));
1954 if (right->IsConstantOperand()) {
1955 __ sub(ToOperand(left),
1956 ToImmediate(right, instr->hydrogen()->representation()));
1958 __ sub(ToRegister(left), ToOperand(right));
1960 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1961 DeoptimizeIf(overflow, instr->environment());
1966 void LCodeGen::DoConstantI(LConstantI* instr) {
1967 __ Move(ToRegister(instr->result()), Immediate(instr->value()));
1971 void LCodeGen::DoConstantS(LConstantS* instr) {
1972 __ Move(ToRegister(instr->result()), Immediate(instr->value()));
1976 void LCodeGen::DoConstantD(LConstantD* instr) {
1977 double v = instr->value();
1978 uint64_t int_val = BitCast<uint64_t, double>(v);
1979 int32_t lower = static_cast<int32_t>(int_val);
1980 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
1981 ASSERT(instr->result()->IsDoubleRegister());
1983 if (!CpuFeatures::IsSafeForSnapshot(SSE2)) {
1984 __ push(Immediate(upper));
1985 __ push(Immediate(lower));
1986 X87Register reg = ToX87Register(instr->result());
1987 X87Mov(reg, Operand(esp, 0));
1988 __ add(Operand(esp), Immediate(kDoubleSize));
1990 CpuFeatureScope scope1(masm(), SSE2);
1991 XMMRegister res = ToDoubleRegister(instr->result());
1995 Register temp = ToRegister(instr->temp());
1996 if (CpuFeatures::IsSupported(SSE4_1)) {
1997 CpuFeatureScope scope2(masm(), SSE4_1);
1999 __ Move(temp, Immediate(lower));
2000 __ movd(res, Operand(temp));
2001 __ Move(temp, Immediate(upper));
2002 __ pinsrd(res, Operand(temp), 1);
2005 __ Move(temp, Immediate(upper));
2006 __ pinsrd(res, Operand(temp), 1);
2009 __ Move(temp, Immediate(upper));
2010 __ movd(res, Operand(temp));
2013 XMMRegister xmm_scratch = double_scratch0();
2014 __ Move(temp, Immediate(lower));
2015 __ movd(xmm_scratch, Operand(temp));
2016 __ orps(res, xmm_scratch);
2024 void LCodeGen::DoConstantE(LConstantE* instr) {
2025 __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
2029 void LCodeGen::DoConstantT(LConstantT* instr) {
2030 Register reg = ToRegister(instr->result());
2031 Handle<Object> handle = instr->value(isolate());
2032 AllowDeferredHandleDereference smi_check;
2033 __ LoadObject(reg, handle);
2037 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
2038 Register result = ToRegister(instr->result());
2039 Register map = ToRegister(instr->value());
2040 __ EnumLength(result, map);
2044 void LCodeGen::DoDateField(LDateField* instr) {
2045 Register object = ToRegister(instr->date());
2046 Register result = ToRegister(instr->result());
2047 Register scratch = ToRegister(instr->temp());
2048 Smi* index = instr->index();
2049 Label runtime, done;
2050 ASSERT(object.is(result));
2051 ASSERT(object.is(eax));
2053 __ test(object, Immediate(kSmiTagMask));
2054 DeoptimizeIf(zero, instr->environment());
2055 __ CmpObjectType(object, JS_DATE_TYPE, scratch);
2056 DeoptimizeIf(not_equal, instr->environment());
2058 if (index->value() == 0) {
2059 __ mov(result, FieldOperand(object, JSDate::kValueOffset));
2061 if (index->value() < JSDate::kFirstUncachedField) {
2062 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
2063 __ mov(scratch, Operand::StaticVariable(stamp));
2064 __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
2065 __ j(not_equal, &runtime, Label::kNear);
2066 __ mov(result, FieldOperand(object, JSDate::kValueOffset +
2067 kPointerSize * index->value()));
2068 __ jmp(&done, Label::kNear);
2071 __ PrepareCallCFunction(2, scratch);
2072 __ mov(Operand(esp, 0), object);
2073 __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
2074 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
2080 Operand LCodeGen::BuildSeqStringOperand(Register string,
2082 String::Encoding encoding) {
2083 if (index->IsConstantOperand()) {
2084 int offset = ToRepresentation(LConstantOperand::cast(index),
2085 Representation::Integer32());
2086 if (encoding == String::TWO_BYTE_ENCODING) {
2087 offset *= kUC16Size;
2089 STATIC_ASSERT(kCharSize == 1);
2090 return FieldOperand(string, SeqString::kHeaderSize + offset);
2092 return FieldOperand(
2093 string, ToRegister(index),
2094 encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
2095 SeqString::kHeaderSize);
2099 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
2100 String::Encoding encoding = instr->hydrogen()->encoding();
2101 Register result = ToRegister(instr->result());
2102 Register string = ToRegister(instr->string());
2104 if (FLAG_debug_code) {
2106 __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
2107 __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
2109 __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
2110 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2111 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2112 __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
2113 ? one_byte_seq_type : two_byte_seq_type));
2114 __ Check(equal, kUnexpectedStringType);
2118 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2119 if (encoding == String::ONE_BYTE_ENCODING) {
2120 __ movzx_b(result, operand);
2122 __ movzx_w(result, operand);
2127 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
2128 String::Encoding encoding = instr->hydrogen()->encoding();
2129 Register string = ToRegister(instr->string());
2131 if (FLAG_debug_code) {
2132 Register value = ToRegister(instr->value());
2133 Register index = ToRegister(instr->index());
2134 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2135 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2137 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
2138 ? one_byte_seq_type : two_byte_seq_type;
2139 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
2142 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2143 if (instr->value()->IsConstantOperand()) {
2144 int value = ToRepresentation(LConstantOperand::cast(instr->value()),
2145 Representation::Integer32());
2146 ASSERT_LE(0, value);
2147 if (encoding == String::ONE_BYTE_ENCODING) {
2148 ASSERT_LE(value, String::kMaxOneByteCharCode);
2149 __ mov_b(operand, static_cast<int8_t>(value));
2151 ASSERT_LE(value, String::kMaxUtf16CodeUnit);
2152 __ mov_w(operand, static_cast<int16_t>(value));
2155 Register value = ToRegister(instr->value());
2156 if (encoding == String::ONE_BYTE_ENCODING) {
2157 __ mov_b(operand, value);
2159 __ mov_w(operand, value);
2165 void LCodeGen::DoAddI(LAddI* instr) {
2166 LOperand* left = instr->left();
2167 LOperand* right = instr->right();
2169 if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
2170 if (right->IsConstantOperand()) {
2171 int32_t offset = ToRepresentation(LConstantOperand::cast(right),
2172 instr->hydrogen()->representation());
2173 __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
2175 Operand address(ToRegister(left), ToRegister(right), times_1, 0);
2176 __ lea(ToRegister(instr->result()), address);
2179 if (right->IsConstantOperand()) {
2180 __ add(ToOperand(left),
2181 ToImmediate(right, instr->hydrogen()->representation()));
2183 __ add(ToRegister(left), ToOperand(right));
2185 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
2186 DeoptimizeIf(overflow, instr->environment());
2192 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
2193 CpuFeatureScope scope(masm(), SSE2);
2194 LOperand* left = instr->left();
2195 LOperand* right = instr->right();
2196 ASSERT(left->Equals(instr->result()));
2197 HMathMinMax::Operation operation = instr->hydrogen()->operation();
2198 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
2200 Condition condition = (operation == HMathMinMax::kMathMin)
2203 if (right->IsConstantOperand()) {
2204 Operand left_op = ToOperand(left);
2205 Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
2206 instr->hydrogen()->representation());
2207 __ cmp(left_op, immediate);
2208 __ j(condition, &return_left, Label::kNear);
2209 __ mov(left_op, immediate);
2211 Register left_reg = ToRegister(left);
2212 Operand right_op = ToOperand(right);
2213 __ cmp(left_reg, right_op);
2214 __ j(condition, &return_left, Label::kNear);
2215 __ mov(left_reg, right_op);
2217 __ bind(&return_left);
2219 ASSERT(instr->hydrogen()->representation().IsDouble());
2220 Label check_nan_left, check_zero, return_left, return_right;
2221 Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
2222 XMMRegister left_reg = ToDoubleRegister(left);
2223 XMMRegister right_reg = ToDoubleRegister(right);
2224 __ ucomisd(left_reg, right_reg);
2225 __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
2226 __ j(equal, &check_zero, Label::kNear); // left == right.
2227 __ j(condition, &return_left, Label::kNear);
2228 __ jmp(&return_right, Label::kNear);
2230 __ bind(&check_zero);
2231 XMMRegister xmm_scratch = double_scratch0();
2232 __ xorps(xmm_scratch, xmm_scratch);
2233 __ ucomisd(left_reg, xmm_scratch);
2234 __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
2235 // At this point, both left and right are either 0 or -0.
2236 if (operation == HMathMinMax::kMathMin) {
2237 __ orpd(left_reg, right_reg);
2239 // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
2240 __ addsd(left_reg, right_reg);
2242 __ jmp(&return_left, Label::kNear);
2244 __ bind(&check_nan_left);
2245 __ ucomisd(left_reg, left_reg); // NaN check.
2246 __ j(parity_even, &return_left, Label::kNear); // left == NaN.
2247 __ bind(&return_right);
2248 __ movaps(left_reg, right_reg);
2250 __ bind(&return_left);
2255 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2256 if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
2257 CpuFeatureScope scope(masm(), SSE2);
2258 XMMRegister left = ToDoubleRegister(instr->left());
2259 XMMRegister right = ToDoubleRegister(instr->right());
2260 XMMRegister result = ToDoubleRegister(instr->result());
2261 switch (instr->op()) {
2263 __ addsd(left, right);
2266 __ subsd(left, right);
2269 __ mulsd(left, right);
2272 __ divsd(left, right);
2273 // Don't delete this mov. It may improve performance on some CPUs,
2274 // when there is a mulsd depending on the result
2275 __ movaps(left, left);
2278 // Pass two doubles as arguments on the stack.
2279 __ PrepareCallCFunction(4, eax);
2280 __ movsd(Operand(esp, 0 * kDoubleSize), left);
2281 __ movsd(Operand(esp, 1 * kDoubleSize), right);
2283 ExternalReference::mod_two_doubles_operation(isolate()),
2286 // Return value is in st(0) on ia32.
2287 // Store it into the result register.
2288 __ sub(Operand(esp), Immediate(kDoubleSize));
2289 __ fstp_d(Operand(esp, 0));
2290 __ movsd(result, Operand(esp, 0));
2291 __ add(Operand(esp), Immediate(kDoubleSize));
2299 X87Register left = ToX87Register(instr->left());
2300 X87Register right = ToX87Register(instr->right());
2301 X87Register result = ToX87Register(instr->result());
2302 if (instr->op() != Token::MOD) {
2303 X87PrepareBinaryOp(left, right, result);
2305 switch (instr->op()) {
2319 // Pass two doubles as arguments on the stack.
2320 __ PrepareCallCFunction(4, eax);
2321 X87Mov(Operand(esp, 1 * kDoubleSize), right);
2322 X87Mov(Operand(esp, 0), left);
2324 ASSERT(left.is(result));
2325 X87PrepareToWrite(result);
2327 ExternalReference::mod_two_doubles_operation(isolate()),
2330 // Return value is in st(0) on ia32.
2331 X87CommitWrite(result);
2342 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2343 ASSERT(ToRegister(instr->context()).is(esi));
2344 ASSERT(ToRegister(instr->left()).is(edx));
2345 ASSERT(ToRegister(instr->right()).is(eax));
2346 ASSERT(ToRegister(instr->result()).is(eax));
2348 BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
2349 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2353 template<class InstrType>
2354 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2355 int left_block = instr->TrueDestination(chunk_);
2356 int right_block = instr->FalseDestination(chunk_);
2358 int next_block = GetNextEmittedBlock();
2360 if (right_block == left_block || cc == no_condition) {
2361 EmitGoto(left_block);
2362 } else if (left_block == next_block) {
2363 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
2364 } else if (right_block == next_block) {
2365 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2367 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2368 __ jmp(chunk_->GetAssemblyLabel(right_block));
2373 template<class InstrType>
2374 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2375 int false_block = instr->FalseDestination(chunk_);
2376 if (cc == no_condition) {
2377 __ jmp(chunk_->GetAssemblyLabel(false_block));
2379 __ j(cc, chunk_->GetAssemblyLabel(false_block));
2384 void LCodeGen::DoBranch(LBranch* instr) {
2385 Representation r = instr->hydrogen()->value()->representation();
2386 if (r.IsSmiOrInteger32()) {
2387 Register reg = ToRegister(instr->value());
2388 __ test(reg, Operand(reg));
2389 EmitBranch(instr, not_zero);
2390 } else if (r.IsDouble()) {
2391 ASSERT(!info()->IsStub());
2392 CpuFeatureScope scope(masm(), SSE2);
2393 XMMRegister reg = ToDoubleRegister(instr->value());
2394 XMMRegister xmm_scratch = double_scratch0();
2395 __ xorps(xmm_scratch, xmm_scratch);
2396 __ ucomisd(reg, xmm_scratch);
2397 EmitBranch(instr, not_equal);
2398 } else if (r.IsSIMD128()) {
2399 ASSERT(!info()->IsStub());
2400 EmitBranch(instr, no_condition);
2402 ASSERT(r.IsTagged());
2403 Register reg = ToRegister(instr->value());
2404 HType type = instr->hydrogen()->value()->type();
2405 if (type.IsBoolean()) {
2406 ASSERT(!info()->IsStub());
2407 __ cmp(reg, factory()->true_value());
2408 EmitBranch(instr, equal);
2409 } else if (type.IsSmi()) {
2410 ASSERT(!info()->IsStub());
2411 __ test(reg, Operand(reg));
2412 EmitBranch(instr, not_equal);
2413 } else if (type.IsJSArray()) {
2414 ASSERT(!info()->IsStub());
2415 EmitBranch(instr, no_condition);
2416 } else if (type.IsSIMD128()) {
2417 ASSERT(!info()->IsStub());
2418 EmitBranch(instr, no_condition);
2419 } else if (type.IsHeapNumber()) {
2420 ASSERT(!info()->IsStub());
2421 CpuFeatureScope scope(masm(), SSE2);
2422 XMMRegister xmm_scratch = double_scratch0();
2423 __ xorps(xmm_scratch, xmm_scratch);
2424 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2425 EmitBranch(instr, not_equal);
2426 } else if (type.IsString()) {
2427 ASSERT(!info()->IsStub());
2428 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2429 EmitBranch(instr, not_equal);
2431 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2432 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2434 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2435 // undefined -> false.
2436 __ cmp(reg, factory()->undefined_value());
2437 __ j(equal, instr->FalseLabel(chunk_));
2439 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2441 __ cmp(reg, factory()->true_value());
2442 __ j(equal, instr->TrueLabel(chunk_));
2444 __ cmp(reg, factory()->false_value());
2445 __ j(equal, instr->FalseLabel(chunk_));
2447 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2449 __ cmp(reg, factory()->null_value());
2450 __ j(equal, instr->FalseLabel(chunk_));
2453 if (expected.Contains(ToBooleanStub::SMI)) {
2454 // Smis: 0 -> false, all other -> true.
2455 __ test(reg, Operand(reg));
2456 __ j(equal, instr->FalseLabel(chunk_));
2457 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2458 } else if (expected.NeedsMap()) {
2459 // If we need a map later and have a Smi -> deopt.
2460 __ test(reg, Immediate(kSmiTagMask));
2461 DeoptimizeIf(zero, instr->environment());
2464 Register map = no_reg; // Keep the compiler happy.
2465 if (expected.NeedsMap()) {
2466 map = ToRegister(instr->temp());
2467 ASSERT(!map.is(reg));
2468 __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
2470 if (expected.CanBeUndetectable()) {
2471 // Undetectable -> false.
2472 __ test_b(FieldOperand(map, Map::kBitFieldOffset),
2473 1 << Map::kIsUndetectable);
2474 __ j(not_zero, instr->FalseLabel(chunk_));
2478 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2479 // spec object -> true.
2480 __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2481 __ j(above_equal, instr->TrueLabel(chunk_));
2484 if (expected.Contains(ToBooleanStub::STRING)) {
2485 // String value -> false iff empty.
2487 __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2488 __ j(above_equal, ¬_string, Label::kNear);
2489 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2490 __ j(not_zero, instr->TrueLabel(chunk_));
2491 __ jmp(instr->FalseLabel(chunk_));
2492 __ bind(¬_string);
2495 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2496 // Symbol value -> true.
2497 __ CmpInstanceType(map, SYMBOL_TYPE);
2498 __ j(equal, instr->TrueLabel(chunk_));
2501 if (expected.Contains(ToBooleanStub::FLOAT32x4)) {
2502 // Float32x4 value -> true.
2503 __ CmpInstanceType(map, FLOAT32x4_TYPE);
2504 __ j(equal, instr->TrueLabel(chunk_));
2507 if (expected.Contains(ToBooleanStub::INT32x4)) {
2508 // Int32x4 value -> true.
2509 __ CmpInstanceType(map, INT32x4_TYPE);
2510 __ j(equal, instr->TrueLabel(chunk_));
2513 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2514 // heap number -> false iff +0, -0, or NaN.
2515 Label not_heap_number;
2516 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2517 factory()->heap_number_map());
2518 __ j(not_equal, ¬_heap_number, Label::kNear);
2519 if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
2520 CpuFeatureScope scope(masm(), SSE2);
2521 XMMRegister xmm_scratch = double_scratch0();
2522 __ xorps(xmm_scratch, xmm_scratch);
2523 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2526 __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
2529 __ j(zero, instr->FalseLabel(chunk_));
2530 __ jmp(instr->TrueLabel(chunk_));
2531 __ bind(¬_heap_number);
2534 if (!expected.IsGeneric()) {
2535 // We've seen something for the first time -> deopt.
2536 // This can only happen if we are not generic already.
2537 DeoptimizeIf(no_condition, instr->environment());
2544 void LCodeGen::EmitGoto(int block) {
2545 if (!IsNextEmittedBlock(block)) {
2546 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2551 void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
2555 void LCodeGen::DoGoto(LGoto* instr) {
2556 EmitGoto(instr->block_id());
2560 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2561 Condition cond = no_condition;
2564 case Token::EQ_STRICT:
2568 case Token::NE_STRICT:
2572 cond = is_unsigned ? below : less;
2575 cond = is_unsigned ? above : greater;
2578 cond = is_unsigned ? below_equal : less_equal;
2581 cond = is_unsigned ? above_equal : greater_equal;
2584 case Token::INSTANCEOF:
2592 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2593 LOperand* left = instr->left();
2594 LOperand* right = instr->right();
2595 Condition cc = TokenToCondition(instr->op(), instr->is_double());
2597 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2598 // We can statically evaluate the comparison.
2599 double left_val = ToDouble(LConstantOperand::cast(left));
2600 double right_val = ToDouble(LConstantOperand::cast(right));
2601 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2602 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2603 EmitGoto(next_block);
2605 if (instr->is_double()) {
2606 if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
2607 CpuFeatureScope scope(masm(), SSE2);
2608 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
2610 X87LoadForUsage(ToX87Register(right), ToX87Register(left));
2613 // Don't base result on EFLAGS when a NaN is involved. Instead
2614 // jump to the false block.
2615 __ j(parity_even, instr->FalseLabel(chunk_));
2617 if (right->IsConstantOperand()) {
2618 __ cmp(ToOperand(left),
2619 ToImmediate(right, instr->hydrogen()->representation()));
2620 } else if (left->IsConstantOperand()) {
2621 __ cmp(ToOperand(right),
2622 ToImmediate(left, instr->hydrogen()->representation()));
2623 // We transposed the operands. Reverse the condition.
2624 cc = ReverseCondition(cc);
2626 __ cmp(ToRegister(left), ToOperand(right));
2629 EmitBranch(instr, cc);
2634 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2635 Register left = ToRegister(instr->left());
2637 if (instr->right()->IsConstantOperand()) {
2638 Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2639 __ CmpObject(left, right);
2641 Operand right = ToOperand(instr->right());
2642 __ cmp(left, right);
2644 EmitBranch(instr, equal);
2648 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2649 if (instr->hydrogen()->representation().IsTagged()) {
2650 Register input_reg = ToRegister(instr->object());
2651 __ cmp(input_reg, factory()->the_hole_value());
2652 EmitBranch(instr, equal);
2656 bool use_sse2 = CpuFeatures::IsSupported(SSE2);
2658 CpuFeatureScope scope(masm(), SSE2);
2659 XMMRegister input_reg = ToDoubleRegister(instr->object());
2660 __ ucomisd(input_reg, input_reg);
2661 EmitFalseBranch(instr, parity_odd);
2663 // Put the value to the top of stack
2664 X87Register src = ToX87Register(instr->object());
2665 X87LoadForUsage(src);
2670 __ j(parity_even, &ok, Label::kNear);
2672 EmitFalseBranch(instr, no_condition);
2677 __ sub(esp, Immediate(kDoubleSize));
2679 CpuFeatureScope scope(masm(), SSE2);
2680 XMMRegister input_reg = ToDoubleRegister(instr->object());
2681 __ movsd(MemOperand(esp, 0), input_reg);
2683 __ fstp_d(MemOperand(esp, 0));
2686 __ add(esp, Immediate(kDoubleSize));
2687 int offset = sizeof(kHoleNanUpper32);
2688 __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
2689 EmitBranch(instr, equal);
2693 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2694 Representation rep = instr->hydrogen()->value()->representation();
2695 ASSERT(!rep.IsInteger32());
2696 Register scratch = ToRegister(instr->temp());
2698 if (rep.IsDouble()) {
2699 CpuFeatureScope use_sse2(masm(), SSE2);
2700 XMMRegister value = ToDoubleRegister(instr->value());
2701 XMMRegister xmm_scratch = double_scratch0();
2702 __ xorps(xmm_scratch, xmm_scratch);
2703 __ ucomisd(xmm_scratch, value);
2704 EmitFalseBranch(instr, not_equal);
2705 __ movmskpd(scratch, value);
2706 __ test(scratch, Immediate(1));
2707 EmitBranch(instr, not_zero);
2709 Register value = ToRegister(instr->value());
2710 Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2711 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
2712 __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
2714 EmitFalseBranch(instr, no_overflow);
2715 __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
2716 Immediate(0x00000000));
2717 EmitBranch(instr, equal);
2722 Condition LCodeGen::EmitIsObject(Register input,
2724 Label* is_not_object,
2726 __ JumpIfSmi(input, is_not_object);
2728 __ cmp(input, isolate()->factory()->null_value());
2729 __ j(equal, is_object);
2731 __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
2732 // Undetectable objects behave like undefined.
2733 __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
2734 1 << Map::kIsUndetectable);
2735 __ j(not_zero, is_not_object);
2737 __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
2738 __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
2739 __ j(below, is_not_object);
2740 __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
2745 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2746 Register reg = ToRegister(instr->value());
2747 Register temp = ToRegister(instr->temp());
2749 Condition true_cond = EmitIsObject(
2750 reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2752 EmitBranch(instr, true_cond);
2756 Condition LCodeGen::EmitIsString(Register input,
2758 Label* is_not_string,
2759 SmiCheck check_needed = INLINE_SMI_CHECK) {
2760 if (check_needed == INLINE_SMI_CHECK) {
2761 __ JumpIfSmi(input, is_not_string);
2764 Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2770 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2771 Register reg = ToRegister(instr->value());
2772 Register temp = ToRegister(instr->temp());
2774 SmiCheck check_needed =
2775 instr->hydrogen()->value()->IsHeapObject()
2776 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2778 Condition true_cond = EmitIsString(
2779 reg, temp, instr->FalseLabel(chunk_), check_needed);
2781 EmitBranch(instr, true_cond);
2785 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2786 Operand input = ToOperand(instr->value());
2788 __ test(input, Immediate(kSmiTagMask));
2789 EmitBranch(instr, zero);
2793 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2794 Register input = ToRegister(instr->value());
2795 Register temp = ToRegister(instr->temp());
2797 if (!instr->hydrogen()->value()->IsHeapObject()) {
2798 STATIC_ASSERT(kSmiTag == 0);
2799 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2801 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2802 __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
2803 1 << Map::kIsUndetectable);
2804 EmitBranch(instr, not_zero);
2808 static Condition ComputeCompareCondition(Token::Value op) {
2810 case Token::EQ_STRICT:
2820 return greater_equal;
2823 return no_condition;
2828 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2829 Token::Value op = instr->op();
2831 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2832 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2834 Condition condition = ComputeCompareCondition(op);
2835 __ test(eax, Operand(eax));
2837 EmitBranch(instr, condition);
2841 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2842 InstanceType from = instr->from();
2843 InstanceType to = instr->to();
2844 if (from == FIRST_TYPE) return to;
2845 ASSERT(from == to || to == LAST_TYPE);
2850 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2851 InstanceType from = instr->from();
2852 InstanceType to = instr->to();
2853 if (from == to) return equal;
2854 if (to == LAST_TYPE) return above_equal;
2855 if (from == FIRST_TYPE) return below_equal;
2861 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2862 Register input = ToRegister(instr->value());
2863 Register temp = ToRegister(instr->temp());
2865 if (!instr->hydrogen()->value()->IsHeapObject()) {
2866 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2869 __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
2870 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2874 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2875 Register input = ToRegister(instr->value());
2876 Register result = ToRegister(instr->result());
2878 __ AssertString(input);
2880 __ mov(result, FieldOperand(input, String::kHashFieldOffset));
2881 __ IndexFromHash(result, result);
2885 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2886 LHasCachedArrayIndexAndBranch* instr) {
2887 Register input = ToRegister(instr->value());
2889 __ test(FieldOperand(input, String::kHashFieldOffset),
2890 Immediate(String::kContainsCachedArrayIndexMask));
2891 EmitBranch(instr, equal);
2895 // Branches to a label or falls through with the answer in the z flag. Trashes
2896 // the temp registers, but not the input.
2897 void LCodeGen::EmitClassOfTest(Label* is_true,
2899 Handle<String>class_name,
2903 ASSERT(!input.is(temp));
2904 ASSERT(!input.is(temp2));
2905 ASSERT(!temp.is(temp2));
2906 __ JumpIfSmi(input, is_false);
2908 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
2909 // Assuming the following assertions, we can use the same compares to test
2910 // for both being a function type and being in the object type range.
2911 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2912 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2913 FIRST_SPEC_OBJECT_TYPE + 1);
2914 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2915 LAST_SPEC_OBJECT_TYPE - 1);
2916 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2917 __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2918 __ j(below, is_false);
2919 __ j(equal, is_true);
2920 __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2921 __ j(equal, is_true);
2923 // Faster code path to avoid two compares: subtract lower bound from the
2924 // actual type and do a signed compare with the width of the type range.
2925 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2926 __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2927 __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2928 __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2929 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2930 __ j(above, is_false);
2933 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2934 // Check if the constructor in the map is a function.
2935 __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
2936 // Objects with a non-function constructor have class 'Object'.
2937 __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
2938 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
2939 __ j(not_equal, is_true);
2941 __ j(not_equal, is_false);
2944 // temp now contains the constructor function. Grab the
2945 // instance class name from there.
2946 __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2947 __ mov(temp, FieldOperand(temp,
2948 SharedFunctionInfo::kInstanceClassNameOffset));
2949 // The class name we are testing against is internalized since it's a literal.
2950 // The name in the constructor is internalized because of the way the context
2951 // is booted. This routine isn't expected to work for random API-created
2952 // classes and it doesn't have to because you can't access it with natives
2953 // syntax. Since both sides are internalized it is sufficient to use an
2954 // identity comparison.
2955 __ cmp(temp, class_name);
2956 // End with the answer in the z flag.
2960 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2961 Register input = ToRegister(instr->value());
2962 Register temp = ToRegister(instr->temp());
2963 Register temp2 = ToRegister(instr->temp2());
2965 Handle<String> class_name = instr->hydrogen()->class_name();
2967 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2968 class_name, input, temp, temp2);
2970 EmitBranch(instr, equal);
2974 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2975 Register reg = ToRegister(instr->value());
2976 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2977 EmitBranch(instr, equal);
2981 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2982 // Object and function are in fixed registers defined by the stub.
2983 ASSERT(ToRegister(instr->context()).is(esi));
2984 InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2985 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2987 Label true_value, done;
2988 __ test(eax, Operand(eax));
2989 __ j(zero, &true_value, Label::kNear);
2990 __ mov(ToRegister(instr->result()), factory()->false_value());
2991 __ jmp(&done, Label::kNear);
2992 __ bind(&true_value);
2993 __ mov(ToRegister(instr->result()), factory()->true_value());
2998 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2999 class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
3001 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
3002 LInstanceOfKnownGlobal* instr,
3003 const X87Stack& x87_stack)
3004 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
3005 virtual void Generate() V8_OVERRIDE {
3006 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
3008 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3009 Label* map_check() { return &map_check_; }
3011 LInstanceOfKnownGlobal* instr_;
3015 DeferredInstanceOfKnownGlobal* deferred;
3016 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_);
3018 Label done, false_result;
3019 Register object = ToRegister(instr->value());
3020 Register temp = ToRegister(instr->temp());
3022 // A Smi is not an instance of anything.
3023 __ JumpIfSmi(object, &false_result, Label::kNear);
3025 // This is the inlined call site instanceof cache. The two occurences of the
3026 // hole value will be patched to the last map/result pair generated by the
3029 Register map = ToRegister(instr->temp());
3030 __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
3031 __ bind(deferred->map_check()); // Label for calculating code patching.
3032 Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
3033 __ cmp(map, Operand::ForCell(cache_cell)); // Patched to cached map.
3034 __ j(not_equal, &cache_miss, Label::kNear);
3035 __ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
3036 __ jmp(&done, Label::kNear);
3038 // The inlined call site cache did not match. Check for null and string
3039 // before calling the deferred code.
3040 __ bind(&cache_miss);
3041 // Null is not an instance of anything.
3042 __ cmp(object, factory()->null_value());
3043 __ j(equal, &false_result, Label::kNear);
3045 // String values are not instances of anything.
3046 Condition is_string = masm_->IsObjectStringType(object, temp, temp);
3047 __ j(is_string, &false_result, Label::kNear);
3049 // Go to the deferred code.
3050 __ jmp(deferred->entry());
3052 __ bind(&false_result);
3053 __ mov(ToRegister(instr->result()), factory()->false_value());
3055 // Here result has either true or false. Deferred code also produces true or
3057 __ bind(deferred->exit());
3062 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
3064 PushSafepointRegistersScope scope(this);
3066 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
3067 flags = static_cast<InstanceofStub::Flags>(
3068 flags | InstanceofStub::kArgsInRegisters);
3069 flags = static_cast<InstanceofStub::Flags>(
3070 flags | InstanceofStub::kCallSiteInlineCheck);
3071 flags = static_cast<InstanceofStub::Flags>(
3072 flags | InstanceofStub::kReturnTrueFalseObject);
3073 InstanceofStub stub(flags);
3075 // Get the temp register reserved by the instruction. This needs to be a
3076 // register which is pushed last by PushSafepointRegisters as top of the
3077 // stack is used to pass the offset to the location of the map check to
3079 Register temp = ToRegister(instr->temp());
3080 ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
3081 __ LoadHeapObject(InstanceofStub::right(), instr->function());
3082 static const int kAdditionalDelta = 13;
3083 int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
3084 __ mov(temp, Immediate(delta));
3085 __ StoreToSafepointRegisterSlot(temp, temp);
3086 CallCodeGeneric(stub.GetCode(isolate()),
3087 RelocInfo::CODE_TARGET,
3089 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
3090 // Get the deoptimization index of the LLazyBailout-environment that
3091 // corresponds to this instruction.
3092 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
3093 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
3095 // Put the result value into the eax slot and restore all registers.
3096 __ StoreToSafepointRegisterSlot(eax, eax);
3100 void LCodeGen::DoCmpT(LCmpT* instr) {
3101 Token::Value op = instr->op();
3103 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
3104 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3106 Condition condition = ComputeCompareCondition(op);
3107 Label true_value, done;
3108 __ test(eax, Operand(eax));
3109 __ j(condition, &true_value, Label::kNear);
3110 __ mov(ToRegister(instr->result()), factory()->false_value());
3111 __ jmp(&done, Label::kNear);
3112 __ bind(&true_value);
3113 __ mov(ToRegister(instr->result()), factory()->true_value());
3118 void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
3119 int extra_value_count = dynamic_frame_alignment ? 2 : 1;
3121 if (instr->has_constant_parameter_count()) {
3122 int parameter_count = ToInteger32(instr->constant_parameter_count());
3123 if (dynamic_frame_alignment && FLAG_debug_code) {
3125 (parameter_count + extra_value_count) * kPointerSize),
3126 Immediate(kAlignmentZapValue));
3127 __ Assert(equal, kExpectedAlignmentMarker);
3129 __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
3131 Register reg = ToRegister(instr->parameter_count());
3132 // The argument count parameter is a smi
3134 Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
3135 if (dynamic_frame_alignment && FLAG_debug_code) {
3136 ASSERT(extra_value_count == 2);
3137 __ cmp(Operand(esp, reg, times_pointer_size,
3138 extra_value_count * kPointerSize),
3139 Immediate(kAlignmentZapValue));
3140 __ Assert(equal, kExpectedAlignmentMarker);
3143 // emit code to restore stack based on instr->parameter_count()
3144 __ pop(return_addr_reg); // save return address
3145 if (dynamic_frame_alignment) {
3146 __ inc(reg); // 1 more for alignment
3148 __ shl(reg, kPointerSizeLog2);
3150 __ jmp(return_addr_reg);
3155 void LCodeGen::DoReturn(LReturn* instr) {
3156 if (FLAG_trace && info()->IsOptimizing()) {
3157 // Preserve the return value on the stack and rely on the runtime call
3158 // to return the value in the same register. We're leaving the code
3159 // managed by the register allocator and tearing down the frame, it's
3160 // safe to write to the context register.
3162 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
3163 __ CallRuntime(Runtime::kTraceExit, 1);
3165 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
3166 RestoreCallerDoubles();
3168 if (dynamic_frame_alignment_) {
3169 // Fetch the state of the dynamic frame alignment.
3170 __ mov(edx, Operand(ebp,
3171 JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
3173 int no_frame_start = -1;
3174 if (NeedsEagerFrame()) {
3177 no_frame_start = masm_->pc_offset();
3179 if (dynamic_frame_alignment_) {
3181 __ cmp(edx, Immediate(kNoAlignmentPadding));
3182 __ j(equal, &no_padding, Label::kNear);
3184 EmitReturn(instr, true);
3185 __ bind(&no_padding);
3188 EmitReturn(instr, false);
3189 if (no_frame_start != -1) {
3190 info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
3195 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
3196 Register result = ToRegister(instr->result());
3197 __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
3198 if (instr->hydrogen()->RequiresHoleCheck()) {
3199 __ cmp(result, factory()->the_hole_value());
3200 DeoptimizeIf(equal, instr->environment());
3205 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3206 ASSERT(ToRegister(instr->context()).is(esi));
3207 ASSERT(ToRegister(instr->global_object()).is(edx));
3208 ASSERT(ToRegister(instr->result()).is(eax));
3210 __ mov(ecx, instr->name());
3211 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
3212 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
3213 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3217 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
3218 Register value = ToRegister(instr->value());
3219 Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();
3221 // If the cell we are storing to contains the hole it could have
3222 // been deleted from the property dictionary. In that case, we need
3223 // to update the property details in the property dictionary to mark
3224 // it as no longer deleted. We deoptimize in that case.
3225 if (instr->hydrogen()->RequiresHoleCheck()) {
3226 __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
3227 DeoptimizeIf(equal, instr->environment());
3231 __ mov(Operand::ForCell(cell_handle), value);
3232 // Cells are always rescanned, so no write barrier here.
3236 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3237 Register context = ToRegister(instr->context());
3238 Register result = ToRegister(instr->result());
3239 __ mov(result, ContextOperand(context, instr->slot_index()));
3241 if (instr->hydrogen()->RequiresHoleCheck()) {
3242 __ cmp(result, factory()->the_hole_value());
3243 if (instr->hydrogen()->DeoptimizesOnHole()) {
3244 DeoptimizeIf(equal, instr->environment());
3247 __ j(not_equal, &is_not_hole, Label::kNear);
3248 __ mov(result, factory()->undefined_value());
3249 __ bind(&is_not_hole);
3255 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
3256 Register context = ToRegister(instr->context());
3257 Register value = ToRegister(instr->value());
3259 Label skip_assignment;
3261 Operand target = ContextOperand(context, instr->slot_index());
3262 if (instr->hydrogen()->RequiresHoleCheck()) {
3263 __ cmp(target, factory()->the_hole_value());
3264 if (instr->hydrogen()->DeoptimizesOnHole()) {
3265 DeoptimizeIf(equal, instr->environment());
3267 __ j(not_equal, &skip_assignment, Label::kNear);
3271 __ mov(target, value);
3272 if (instr->hydrogen()->NeedsWriteBarrier()) {
3273 SmiCheck check_needed =
3274 instr->hydrogen()->value()->IsHeapObject()
3275 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3276 Register temp = ToRegister(instr->temp());
3277 int offset = Context::SlotOffset(instr->slot_index());
3278 __ RecordWriteContextSlot(context,
3282 GetSaveFPRegsMode(),
3283 EMIT_REMEMBERED_SET,
3287 __ bind(&skip_assignment);
3291 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3292 HObjectAccess access = instr->hydrogen()->access();
3293 int offset = access.offset();
3295 if (access.IsExternalMemory()) {
3296 Register result = ToRegister(instr->result());
3297 MemOperand operand = instr->object()->IsConstantOperand()
3298 ? MemOperand::StaticVariable(ToExternalReference(
3299 LConstantOperand::cast(instr->object())))
3300 : MemOperand(ToRegister(instr->object()), offset);
3301 __ Load(result, operand, access.representation());
3305 Register object = ToRegister(instr->object());
3306 if (instr->hydrogen()->representation().IsDouble()) {
3307 if (CpuFeatures::IsSupported(SSE2)) {
3308 CpuFeatureScope scope(masm(), SSE2);
3309 XMMRegister result = ToDoubleRegister(instr->result());
3310 __ movsd(result, FieldOperand(object, offset));
3312 X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
3317 Register result = ToRegister(instr->result());
3318 if (!access.IsInobject()) {
3319 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
3322 __ Load(result, FieldOperand(object, offset), access.representation());
3326 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
3327 ASSERT(!operand->IsDoubleRegister());
3328 if (operand->IsConstantOperand()) {
3329 Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
3330 AllowDeferredHandleDereference smi_check;
3331 if (object->IsSmi()) {
3332 __ Push(Handle<Smi>::cast(object));
3334 __ PushHeapObject(Handle<HeapObject>::cast(object));
3336 } else if (operand->IsRegister()) {
3337 __ push(ToRegister(operand));
3339 __ push(ToOperand(operand));
3344 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3345 ASSERT(ToRegister(instr->context()).is(esi));
3346 ASSERT(ToRegister(instr->object()).is(edx));
3347 ASSERT(ToRegister(instr->result()).is(eax));
3349 __ mov(ecx, instr->name());
3350 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
3351 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3355 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3356 Register function = ToRegister(instr->function());
3357 Register temp = ToRegister(instr->temp());
3358 Register result = ToRegister(instr->result());
3360 // Check that the function really is a function.
3361 __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
3362 DeoptimizeIf(not_equal, instr->environment());
3364 // Check whether the function has an instance prototype.
3366 __ test_b(FieldOperand(result, Map::kBitFieldOffset),
3367 1 << Map::kHasNonInstancePrototype);
3368 __ j(not_zero, &non_instance, Label::kNear);
3370 // Get the prototype or initial map from the function.
3372 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3374 // Check that the function has a prototype or an initial map.
3375 __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
3376 DeoptimizeIf(equal, instr->environment());
3378 // If the function does not have an initial map, we're done.
3380 __ CmpObjectType(result, MAP_TYPE, temp);
3381 __ j(not_equal, &done, Label::kNear);
3383 // Get the prototype from the initial map.
3384 __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
3385 __ jmp(&done, Label::kNear);
3387 // Non-instance prototype: Fetch prototype from constructor field
3388 // in the function's map.
3389 __ bind(&non_instance);
3390 __ mov(result, FieldOperand(result, Map::kConstructorOffset));
3397 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3398 Register result = ToRegister(instr->result());
3399 __ LoadRoot(result, instr->index());
3403 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3404 Register arguments = ToRegister(instr->arguments());
3405 Register result = ToRegister(instr->result());
3406 if (instr->length()->IsConstantOperand() &&
3407 instr->index()->IsConstantOperand()) {
3408 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3409 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3410 int index = (const_length - const_index) + 1;
3411 __ mov(result, Operand(arguments, index * kPointerSize));
3413 Register length = ToRegister(instr->length());
3414 Operand index = ToOperand(instr->index());
3415 // There are two words between the frame pointer and the last argument.
3416 // Subtracting from length accounts for one of them add one more.
3417 __ sub(length, index);
3418 __ mov(result, Operand(arguments, length, times_4, kPointerSize));
3423 void LCodeGen::DoDeferredSIMD128ToTagged(LInstruction* instr,
3424 Runtime::FunctionId id) {
3425 // TODO(3095996): Get rid of this. For now, we need to make the
3426 // result register contain a valid pointer because it is already
3427 // contained in the register pointer map.
3428 Register reg = ToRegister(instr->result());
3429 __ Move(reg, Immediate(0));
3431 PushSafepointRegistersScope scope(this);
3432 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
3433 __ CallRuntimeSaveDoubles(id);
3434 RecordSafepointWithRegisters(
3435 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
3436 __ StoreToSafepointRegisterSlot(reg, eax);
3440 void LCodeGen::HandleExternalArrayOpRequiresTemp(
3442 Representation key_representation,
3443 ElementsKind elements_kind) {
3444 if (ExternalArrayOpRequiresPreScale(key_representation, elements_kind)) {
3445 int pre_shift_size = ElementsKindToShiftSize(elements_kind) -
3446 static_cast<int>(maximal_scale_factor);
3447 if (key_representation.IsSmi()) {
3448 pre_shift_size -= kSmiTagSize;
3450 ASSERT(pre_shift_size > 0);
3451 __ shl(ToRegister(key), pre_shift_size);
3453 __ SmiUntag(ToRegister(key));
3459 void LCodeGen::DoLoadKeyedSIMD128ExternalArray(LLoadKeyed* instr) {
3460 class DeferredSIMD128ToTagged V8_FINAL : public LDeferredCode {
3462 DeferredSIMD128ToTagged(LCodeGen* codegen,
3463 LInstruction* instr,
3464 Runtime::FunctionId id,
3465 const X87Stack& x87_stack)
3466 : LDeferredCode(codegen, x87_stack), instr_(instr), id_(id) { }
3467 virtual void Generate() V8_OVERRIDE {
3468 codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
3470 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3472 LInstruction* instr_;
3473 Runtime::FunctionId id_;
3476 LOperand* key = instr->key();
3477 ElementsKind elements_kind = instr->elements_kind();
3479 if (CpuFeatures::IsSupported(SSE2)) {
3480 CpuFeatureScope scope(masm(), SSE2);
3481 Operand operand(BuildFastArrayOperand(
3484 instr->hydrogen()->key()->representation(),
3487 instr->additional_index()));
3488 __ movups(ToSIMD128Register(instr->result()), operand);
3490 // Allocate a SIMD128 object on the heap.
3491 Register reg = ToRegister(instr->result());
3492 Register tmp = ToRegister(instr->temp());
3493 DeferredSIMD128ToTagged* deferred = new(zone()) DeferredSIMD128ToTagged(
3494 this, instr, static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()),
3496 if (FLAG_inline_new) {
3497 __ AllocateSIMDHeapObject(T::kSize, reg, tmp, deferred->entry(),
3498 static_cast<Heap::RootListIndex>(T::kMapRootIndex()));
3500 __ jmp(deferred->entry());
3502 __ bind(deferred->exit());
3504 // Copy the SIMD128 value from the external array to the heap object.
3505 STATIC_ASSERT(T::kValueSize % kPointerSize == 0);
3506 for (int offset = 0; offset < T::kValueSize; offset += kPointerSize) {
3507 Operand operand(BuildFastArrayOperand(
3510 instr->hydrogen()->key()->representation(),
3513 instr->additional_index()));
3514 __ mov(tmp, operand);
3515 __ mov(FieldOperand(reg, T::kValueOffset + offset), tmp);
3521 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3522 ElementsKind elements_kind = instr->elements_kind();
3523 LOperand* key = instr->key();
3524 if (!key->IsConstantOperand() &&
3525 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
3527 HandleExternalArrayOpRequiresTemp(key,
3528 instr->hydrogen()->key()->representation(), elements_kind);
3531 Operand operand(BuildFastArrayOperand(
3534 instr->hydrogen()->key()->representation(),
3537 instr->additional_index()));
3538 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3539 elements_kind == FLOAT32_ELEMENTS) {
3540 if (CpuFeatures::IsSupported(SSE2)) {
3541 CpuFeatureScope scope(masm(), SSE2);
3542 XMMRegister result(ToDoubleRegister(instr->result()));
3543 __ movss(result, operand);
3544 __ cvtss2sd(result, result);
3546 X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
3548 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3549 elements_kind == FLOAT64_ELEMENTS) {
3550 if (CpuFeatures::IsSupported(SSE2)) {
3551 CpuFeatureScope scope(masm(), SSE2);
3552 __ movsd(ToDoubleRegister(instr->result()), operand);
3554 X87Mov(ToX87Register(instr->result()), operand);
3556 } else if (IsFloat32x4ElementsKind(elements_kind)) {
3557 DoLoadKeyedSIMD128ExternalArray<Float32x4>(instr);
3558 } else if (IsInt32x4ElementsKind(elements_kind)) {
3559 DoLoadKeyedSIMD128ExternalArray<Int32x4>(instr);
3561 Register result(ToRegister(instr->result()));
3562 switch (elements_kind) {
3563 case EXTERNAL_INT8_ELEMENTS:
3565 __ movsx_b(result, operand);
3567 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3568 case EXTERNAL_UINT8_ELEMENTS:
3569 case UINT8_ELEMENTS:
3570 case UINT8_CLAMPED_ELEMENTS:
3571 __ movzx_b(result, operand);
3573 case EXTERNAL_INT16_ELEMENTS:
3574 case INT16_ELEMENTS:
3575 __ movsx_w(result, operand);
3577 case EXTERNAL_UINT16_ELEMENTS:
3578 case UINT16_ELEMENTS:
3579 __ movzx_w(result, operand);
3581 case EXTERNAL_INT32_ELEMENTS:
3582 case INT32_ELEMENTS:
3583 __ mov(result, operand);
3585 case EXTERNAL_UINT32_ELEMENTS:
3586 case UINT32_ELEMENTS:
3587 __ mov(result, operand);
3588 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3589 __ test(result, Operand(result));
3590 DeoptimizeIf(negative, instr->environment());
3593 case EXTERNAL_FLOAT32_ELEMENTS:
3594 case EXTERNAL_FLOAT64_ELEMENTS:
3595 case EXTERNAL_FLOAT32x4_ELEMENTS:
3596 case EXTERNAL_INT32x4_ELEMENTS:
3597 case FLOAT32_ELEMENTS:
3598 case FLOAT64_ELEMENTS:
3599 case FLOAT32x4_ELEMENTS:
3600 case INT32x4_ELEMENTS:
3601 case FAST_SMI_ELEMENTS:
3603 case FAST_DOUBLE_ELEMENTS:
3604 case FAST_HOLEY_SMI_ELEMENTS:
3605 case FAST_HOLEY_ELEMENTS:
3606 case FAST_HOLEY_DOUBLE_ELEMENTS:
3607 case DICTIONARY_ELEMENTS:
3608 case SLOPPY_ARGUMENTS_ELEMENTS:
3616 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3617 if (instr->hydrogen()->RequiresHoleCheck()) {
3618 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
3619 sizeof(kHoleNanLower32);
3620 Operand hole_check_operand = BuildFastArrayOperand(
3621 instr->elements(), instr->key(),
3622 instr->hydrogen()->key()->representation(),
3623 FAST_DOUBLE_ELEMENTS,
3625 instr->additional_index());
3626 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
3627 DeoptimizeIf(equal, instr->environment());
3630 Operand double_load_operand = BuildFastArrayOperand(
3633 instr->hydrogen()->key()->representation(),
3634 FAST_DOUBLE_ELEMENTS,
3635 FixedDoubleArray::kHeaderSize - kHeapObjectTag,
3636 instr->additional_index());
3637 if (CpuFeatures::IsSupported(SSE2)) {
3638 CpuFeatureScope scope(masm(), SSE2);
3639 XMMRegister result = ToDoubleRegister(instr->result());
3640 __ movsd(result, double_load_operand);
3642 X87Mov(ToX87Register(instr->result()), double_load_operand);
3647 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3648 Register result = ToRegister(instr->result());
3652 BuildFastArrayOperand(instr->elements(),
3654 instr->hydrogen()->key()->representation(),
3656 FixedArray::kHeaderSize - kHeapObjectTag,
3657 instr->additional_index()));
3659 // Check for the hole value.
3660 if (instr->hydrogen()->RequiresHoleCheck()) {
3661 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3662 __ test(result, Immediate(kSmiTagMask));
3663 DeoptimizeIf(not_equal, instr->environment());
3665 __ cmp(result, factory()->the_hole_value());
3666 DeoptimizeIf(equal, instr->environment());
3672 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3673 if (instr->is_typed_elements()) {
3674 DoLoadKeyedExternalArray(instr);
3675 } else if (instr->hydrogen()->representation().IsDouble()) {
3676 DoLoadKeyedFixedDoubleArray(instr);
3678 DoLoadKeyedFixedArray(instr);
3683 Operand LCodeGen::BuildFastArrayOperand(
3684 LOperand* elements_pointer,
3686 Representation key_representation,
3687 ElementsKind elements_kind,
3689 uint32_t additional_index) {
3690 Register elements_pointer_reg = ToRegister(elements_pointer);
3691 int element_shift_size = ElementsKindToShiftSize(elements_kind);
3692 if (IsFixedTypedArrayElementsKind(elements_kind)) {
3693 offset += FixedTypedArrayBase::kDataOffset - kHeapObjectTag;
3695 int shift_size = element_shift_size;
3696 if (key->IsConstantOperand()) {
3697 int constant_value = ToInteger32(LConstantOperand::cast(key));
3698 if (constant_value & 0xF0000000) {
3699 Abort(kArrayIndexConstantValueTooBig);
3701 return Operand(elements_pointer_reg,
3702 ((constant_value + additional_index) << shift_size)
3705 if (ExternalArrayOpRequiresPreScale(key_representation, elements_kind)) {
3706 // Make sure the key is pre-scaled against maximal_scale_factor.
3707 shift_size = static_cast<int>(maximal_scale_factor);
3708 } else if (key_representation.IsSmi() && (shift_size >= 1)) {
3709 // Take the tag bit into account while computing the shift size.
3710 shift_size -= kSmiTagSize;
3712 ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3713 return Operand(elements_pointer_reg,
3716 offset + (additional_index << element_shift_size));
3721 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3722 ASSERT(ToRegister(instr->context()).is(esi));
3723 ASSERT(ToRegister(instr->object()).is(edx));
3724 ASSERT(ToRegister(instr->key()).is(ecx));
3726 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3727 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3731 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3732 Register result = ToRegister(instr->result());
3734 if (instr->hydrogen()->from_inlined()) {
3735 __ lea(result, Operand(esp, -2 * kPointerSize));
3737 // Check for arguments adapter frame.
3738 Label done, adapted;
3739 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3740 __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
3741 __ cmp(Operand(result),
3742 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3743 __ j(equal, &adapted, Label::kNear);
3745 // No arguments adaptor frame.
3746 __ mov(result, Operand(ebp));
3747 __ jmp(&done, Label::kNear);
3749 // Arguments adaptor frame present.
3751 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3753 // Result is the frame pointer for the frame if not adapted and for the real
3754 // frame below the adaptor frame if adapted.
3760 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3761 Operand elem = ToOperand(instr->elements());
3762 Register result = ToRegister(instr->result());
3766 // If no arguments adaptor frame the number of arguments is fixed.
3768 __ mov(result, Immediate(scope()->num_parameters()));
3769 __ j(equal, &done, Label::kNear);
3771 // Arguments adaptor frame present. Get argument length from there.
3772 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3773 __ mov(result, Operand(result,
3774 ArgumentsAdaptorFrameConstants::kLengthOffset));
3775 __ SmiUntag(result);
3777 // Argument length is in result register.
3782 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3783 Register receiver = ToRegister(instr->receiver());
3784 Register function = ToRegister(instr->function());
3786 // If the receiver is null or undefined, we have to pass the global
3787 // object as a receiver to normal functions. Values have to be
3788 // passed unchanged to builtins and strict-mode functions.
3789 Label receiver_ok, global_object;
3790 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3791 Register scratch = ToRegister(instr->temp());
3793 if (!instr->hydrogen()->known_function()) {
3794 // Do not transform the receiver to object for strict mode
3797 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3798 __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
3799 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
3800 __ j(not_equal, &receiver_ok, dist);
3802 // Do not transform the receiver to object for builtins.
3803 __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
3804 1 << SharedFunctionInfo::kNativeBitWithinByte);
3805 __ j(not_equal, &receiver_ok, dist);
3808 // Normal function. Replace undefined or null with global receiver.
3809 __ cmp(receiver, factory()->null_value());
3810 __ j(equal, &global_object, Label::kNear);
3811 __ cmp(receiver, factory()->undefined_value());
3812 __ j(equal, &global_object, Label::kNear);
3814 // The receiver should be a JS object.
3815 __ test(receiver, Immediate(kSmiTagMask));
3816 DeoptimizeIf(equal, instr->environment());
3817 __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
3818 DeoptimizeIf(below, instr->environment());
3820 __ jmp(&receiver_ok, Label::kNear);
3821 __ bind(&global_object);
3822 __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
3823 const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
3824 __ mov(receiver, Operand(receiver, global_offset));
3825 const int receiver_offset = GlobalObject::kGlobalReceiverOffset;
3826 __ mov(receiver, FieldOperand(receiver, receiver_offset));
3827 __ bind(&receiver_ok);
3831 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3832 Register receiver = ToRegister(instr->receiver());
3833 Register function = ToRegister(instr->function());
3834 Register length = ToRegister(instr->length());
3835 Register elements = ToRegister(instr->elements());
3836 ASSERT(receiver.is(eax)); // Used for parameter count.
3837 ASSERT(function.is(edi)); // Required by InvokeFunction.
3838 ASSERT(ToRegister(instr->result()).is(eax));
3840 // Copy the arguments to this function possibly from the
3841 // adaptor frame below it.
3842 const uint32_t kArgumentsLimit = 1 * KB;
3843 __ cmp(length, kArgumentsLimit);
3844 DeoptimizeIf(above, instr->environment());
3847 __ mov(receiver, length);
3849 // Loop through the arguments pushing them onto the execution
3852 // length is a small non-negative integer, due to the test above.
3853 __ test(length, Operand(length));
3854 __ j(zero, &invoke, Label::kNear);
3856 __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
3858 __ j(not_zero, &loop);
3860 // Invoke the function.
3862 ASSERT(instr->HasPointerMap());
3863 LPointerMap* pointers = instr->pointer_map();
3864 SafepointGenerator safepoint_generator(
3865 this, pointers, Safepoint::kLazyDeopt);
3866 ParameterCount actual(eax);
3867 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3871 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
3876 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3877 LOperand* argument = instr->value();
3878 EmitPushTaggedOperand(argument);
3882 void LCodeGen::DoDrop(LDrop* instr) {
3883 __ Drop(instr->count());
3887 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3888 Register result = ToRegister(instr->result());
3889 __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
3893 void LCodeGen::DoContext(LContext* instr) {
3894 Register result = ToRegister(instr->result());
3895 if (info()->IsOptimizing()) {
3896 __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
3898 // If there is no frame, the context must be in esi.
3899 ASSERT(result.is(esi));
3904 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3905 ASSERT(ToRegister(instr->context()).is(esi));
3906 __ push(esi); // The context is the first argument.
3907 __ push(Immediate(instr->hydrogen()->pairs()));
3908 __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
3909 CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
3913 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3914 int formal_parameter_count,
3916 LInstruction* instr,
3917 EDIState edi_state) {
3918 bool dont_adapt_arguments =
3919 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3920 bool can_invoke_directly =
3921 dont_adapt_arguments || formal_parameter_count == arity;
3923 if (can_invoke_directly) {
3924 if (edi_state == EDI_UNINITIALIZED) {
3925 __ LoadHeapObject(edi, function);
3929 __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
3931 // Set eax to arguments count if adaption is not needed. Assumes that eax
3932 // is available to write to at this point.
3933 if (dont_adapt_arguments) {
3937 // Invoke function directly.
3938 if (function.is_identical_to(info()->closure())) {
3941 __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
3943 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3945 // We need to adapt arguments.
3946 LPointerMap* pointers = instr->pointer_map();
3947 SafepointGenerator generator(
3948 this, pointers, Safepoint::kLazyDeopt);
3949 ParameterCount count(arity);
3950 ParameterCount expected(formal_parameter_count);
3951 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3956 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3957 ASSERT(ToRegister(instr->result()).is(eax));
3959 LPointerMap* pointers = instr->pointer_map();
3960 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3962 if (instr->target()->IsConstantOperand()) {
3963 LConstantOperand* target = LConstantOperand::cast(instr->target());
3964 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3965 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3966 __ call(code, RelocInfo::CODE_TARGET);
3968 ASSERT(instr->target()->IsRegister());
3969 Register target = ToRegister(instr->target());
3970 generator.BeforeCall(__ CallSize(Operand(target)));
3971 __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3974 generator.AfterCall();
3978 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3979 ASSERT(ToRegister(instr->function()).is(edi));
3980 ASSERT(ToRegister(instr->result()).is(eax));
3982 if (instr->hydrogen()->pass_argument_count()) {
3983 __ mov(eax, instr->arity());
3987 __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
3989 bool is_self_call = false;
3990 if (instr->hydrogen()->function()->IsConstant()) {
3991 HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3992 Handle<JSFunction> jsfun =
3993 Handle<JSFunction>::cast(fun_const->handle(isolate()));
3994 is_self_call = jsfun.is_identical_to(info()->closure());
4000 __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
4003 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
4007 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
4008 Register input_reg = ToRegister(instr->value());
4009 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4010 factory()->heap_number_map());
4011 DeoptimizeIf(not_equal, instr->environment());
4013 Label slow, allocated, done;
4014 Register tmp = input_reg.is(eax) ? ecx : eax;
4015 Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
4017 // Preserve the value of all registers.
4018 PushSafepointRegistersScope scope(this);
4020 __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
4021 // Check the sign of the argument. If the argument is positive, just
4022 // return it. We do not need to patch the stack since |input| and
4023 // |result| are the same register and |input| will be restored
4024 // unchanged by popping safepoint registers.
4025 __ test(tmp, Immediate(HeapNumber::kSignMask));
4026 __ j(zero, &done, Label::kNear);
4028 __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
4029 __ jmp(&allocated, Label::kNear);
4031 // Slow case: Call the runtime system to do the number allocation.
4033 CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0,
4034 instr, instr->context());
4035 // Set the pointer to the new heap number in tmp.
4036 if (!tmp.is(eax)) __ mov(tmp, eax);
4037 // Restore input_reg after call to runtime.
4038 __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
4040 __ bind(&allocated);
4041 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
4042 __ and_(tmp2, ~HeapNumber::kSignMask);
4043 __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
4044 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
4045 __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
4046 __ StoreToSafepointRegisterSlot(input_reg, tmp);
4052 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
4053 Register input_reg = ToRegister(instr->value());
4054 __ test(input_reg, Operand(input_reg));
4056 __ j(not_sign, &is_positive, Label::kNear);
4057 __ neg(input_reg); // Sets flags.
4058 DeoptimizeIf(negative, instr->environment());
4059 __ bind(&is_positive);
4063 void LCodeGen::DoMathAbs(LMathAbs* instr) {
4064 // Class for deferred case.
4065 class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
4067 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
4069 const X87Stack& x87_stack)
4070 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4071 virtual void Generate() V8_OVERRIDE {
4072 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
4074 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4079 ASSERT(instr->value()->Equals(instr->result()));
4080 Representation r = instr->hydrogen()->value()->representation();
4082 CpuFeatureScope scope(masm(), SSE2);
4084 XMMRegister scratch = double_scratch0();
4085 XMMRegister input_reg = ToDoubleRegister(instr->value());
4086 __ xorps(scratch, scratch);
4087 __ subsd(scratch, input_reg);
4088 __ andps(input_reg, scratch);
4089 } else if (r.IsSmiOrInteger32()) {
4090 EmitIntegerMathAbs(instr);
4091 } else { // Tagged case.
4092 DeferredMathAbsTaggedHeapNumber* deferred =
4093 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
4094 Register input_reg = ToRegister(instr->value());
4096 __ JumpIfNotSmi(input_reg, deferred->entry());
4097 EmitIntegerMathAbs(instr);
4098 __ bind(deferred->exit());
4103 void LCodeGen::DoMathFloor(LMathFloor* instr) {
4104 CpuFeatureScope scope(masm(), SSE2);
4105 XMMRegister xmm_scratch = double_scratch0();
4106 Register output_reg = ToRegister(instr->result());
4107 XMMRegister input_reg = ToDoubleRegister(instr->value());
4109 if (CpuFeatures::IsSupported(SSE4_1)) {
4110 CpuFeatureScope scope(masm(), SSE4_1);
4111 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4112 // Deoptimize on negative zero.
4114 __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
4115 __ ucomisd(input_reg, xmm_scratch);
4116 __ j(not_equal, &non_zero, Label::kNear);
4117 __ movmskpd(output_reg, input_reg);
4118 __ test(output_reg, Immediate(1));
4119 DeoptimizeIf(not_zero, instr->environment());
4122 __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
4123 __ cvttsd2si(output_reg, Operand(xmm_scratch));
4124 // Overflow is signalled with minint.
4125 __ cmp(output_reg, 0x1);
4126 DeoptimizeIf(overflow, instr->environment());
4128 Label negative_sign, done;
4129 // Deoptimize on unordered.
4130 __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
4131 __ ucomisd(input_reg, xmm_scratch);
4132 DeoptimizeIf(parity_even, instr->environment());
4133 __ j(below, &negative_sign, Label::kNear);
4135 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4136 // Check for negative zero.
4137 Label positive_sign;
4138 __ j(above, &positive_sign, Label::kNear);
4139 __ movmskpd(output_reg, input_reg);
4140 __ test(output_reg, Immediate(1));
4141 DeoptimizeIf(not_zero, instr->environment());
4142 __ Move(output_reg, Immediate(0));
4143 __ jmp(&done, Label::kNear);
4144 __ bind(&positive_sign);
4147 // Use truncating instruction (OK because input is positive).
4148 __ cvttsd2si(output_reg, Operand(input_reg));
4149 // Overflow is signalled with minint.
4150 __ cmp(output_reg, 0x1);
4151 DeoptimizeIf(overflow, instr->environment());
4152 __ jmp(&done, Label::kNear);
4154 // Non-zero negative reaches here.
4155 __ bind(&negative_sign);
4156 // Truncate, then compare and compensate.
4157 __ cvttsd2si(output_reg, Operand(input_reg));
4158 __ Cvtsi2sd(xmm_scratch, output_reg);
4159 __ ucomisd(input_reg, xmm_scratch);
4160 __ j(equal, &done, Label::kNear);
4161 __ sub(output_reg, Immediate(1));
4162 DeoptimizeIf(overflow, instr->environment());
4169 void LCodeGen::DoMathRound(LMathRound* instr) {
4170 CpuFeatureScope scope(masm(), SSE2);
4171 Register output_reg = ToRegister(instr->result());
4172 XMMRegister input_reg = ToDoubleRegister(instr->value());
4173 XMMRegister xmm_scratch = double_scratch0();
4174 XMMRegister input_temp = ToDoubleRegister(instr->temp());
4175 ExternalReference one_half = ExternalReference::address_of_one_half();
4176 ExternalReference minus_one_half =
4177 ExternalReference::address_of_minus_one_half();
4179 Label done, round_to_zero, below_one_half, do_not_compensate;
4180 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
4182 __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
4183 __ ucomisd(xmm_scratch, input_reg);
4184 __ j(above, &below_one_half, Label::kNear);
4186 // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
4187 __ addsd(xmm_scratch, input_reg);
4188 __ cvttsd2si(output_reg, Operand(xmm_scratch));
4189 // Overflow is signalled with minint.
4190 __ cmp(output_reg, 0x1);
4191 __ RecordComment("D2I conversion overflow");
4192 DeoptimizeIf(overflow, instr->environment());
4193 __ jmp(&done, dist);
4195 __ bind(&below_one_half);
4196 __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
4197 __ ucomisd(xmm_scratch, input_reg);
4198 __ j(below_equal, &round_to_zero, Label::kNear);
4200 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
4201 // compare and compensate.
4202 __ movaps(input_temp, input_reg); // Do not alter input_reg.
4203 __ subsd(input_temp, xmm_scratch);
4204 __ cvttsd2si(output_reg, Operand(input_temp));
4205 // Catch minint due to overflow, and to prevent overflow when compensating.
4206 __ cmp(output_reg, 0x1);
4207 __ RecordComment("D2I conversion overflow");
4208 DeoptimizeIf(overflow, instr->environment());
4210 __ Cvtsi2sd(xmm_scratch, output_reg);
4211 __ ucomisd(xmm_scratch, input_temp);
4212 __ j(equal, &done, dist);
4213 __ sub(output_reg, Immediate(1));
4214 // No overflow because we already ruled out minint.
4215 __ jmp(&done, dist);
4217 __ bind(&round_to_zero);
4218 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
4219 // we can ignore the difference between a result of -0 and +0.
4220 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4221 // If the sign is positive, we return +0.
4222 __ movmskpd(output_reg, input_reg);
4223 __ test(output_reg, Immediate(1));
4224 __ RecordComment("Minus zero");
4225 DeoptimizeIf(not_zero, instr->environment());
4227 __ Move(output_reg, Immediate(0));
4232 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
4233 CpuFeatureScope scope(masm(), SSE2);
4234 XMMRegister input_reg = ToDoubleRegister(instr->value());
4235 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
4236 __ sqrtsd(input_reg, input_reg);
4240 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
4241 CpuFeatureScope scope(masm(), SSE2);
4242 XMMRegister xmm_scratch = double_scratch0();
4243 XMMRegister input_reg = ToDoubleRegister(instr->value());
4244 Register scratch = ToRegister(instr->temp());
4245 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
4247 // Note that according to ECMA-262 15.8.2.13:
4248 // Math.pow(-Infinity, 0.5) == Infinity
4249 // Math.sqrt(-Infinity) == NaN
4251 // Check base for -Infinity. According to IEEE-754, single-precision
4252 // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
4253 __ mov(scratch, 0xFF800000);
4254 __ movd(xmm_scratch, scratch);
4255 __ cvtss2sd(xmm_scratch, xmm_scratch);
4256 __ ucomisd(input_reg, xmm_scratch);
4257 // Comparing -Infinity with NaN results in "unordered", which sets the
4258 // zero flag as if both were equal. However, it also sets the carry flag.
4259 __ j(not_equal, &sqrt, Label::kNear);
4260 __ j(carry, &sqrt, Label::kNear);
4261 // If input is -Infinity, return Infinity.
4262 __ xorps(input_reg, input_reg);
4263 __ subsd(input_reg, xmm_scratch);
4264 __ jmp(&done, Label::kNear);
4268 __ xorps(xmm_scratch, xmm_scratch);
4269 __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
4270 __ sqrtsd(input_reg, input_reg);
4275 void LCodeGen::DoPower(LPower* instr) {
4276 Representation exponent_type = instr->hydrogen()->right()->representation();
4277 // Having marked this as a call, we can use any registers.
4278 // Just make sure that the input/output registers are the expected ones.
4279 ASSERT(!instr->right()->IsDoubleRegister() ||
4280 ToDoubleRegister(instr->right()).is(xmm1));
4281 ASSERT(!instr->right()->IsRegister() ||
4282 ToRegister(instr->right()).is(eax));
4283 ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
4284 ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
4286 if (exponent_type.IsSmi()) {
4287 MathPowStub stub(MathPowStub::TAGGED);
4289 } else if (exponent_type.IsTagged()) {
4291 __ JumpIfSmi(eax, &no_deopt);
4292 __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
4293 DeoptimizeIf(not_equal, instr->environment());
4295 MathPowStub stub(MathPowStub::TAGGED);
4297 } else if (exponent_type.IsInteger32()) {
4298 MathPowStub stub(MathPowStub::INTEGER);
4301 ASSERT(exponent_type.IsDouble());
4302 MathPowStub stub(MathPowStub::DOUBLE);
4308 void LCodeGen::DoMathLog(LMathLog* instr) {
4309 CpuFeatureScope scope(masm(), SSE2);
4310 ASSERT(instr->value()->Equals(instr->result()));
4311 XMMRegister input_reg = ToDoubleRegister(instr->value());
4312 XMMRegister xmm_scratch = double_scratch0();
4313 Label positive, done, zero;
4314 __ xorps(xmm_scratch, xmm_scratch);
4315 __ ucomisd(input_reg, xmm_scratch);
4316 __ j(above, &positive, Label::kNear);
4317 __ j(not_carry, &zero, Label::kNear);
4318 ExternalReference nan =
4319 ExternalReference::address_of_canonical_non_hole_nan();
4320 __ movsd(input_reg, Operand::StaticVariable(nan));
4321 __ jmp(&done, Label::kNear);
4323 ExternalReference ninf =
4324 ExternalReference::address_of_negative_infinity();
4325 __ movsd(input_reg, Operand::StaticVariable(ninf));
4326 __ jmp(&done, Label::kNear);
4329 __ sub(Operand(esp), Immediate(kDoubleSize));
4330 __ movsd(Operand(esp, 0), input_reg);
4331 __ fld_d(Operand(esp, 0));
4333 __ fstp_d(Operand(esp, 0));
4334 __ movsd(input_reg, Operand(esp, 0));
4335 __ add(Operand(esp), Immediate(kDoubleSize));
4340 void LCodeGen::DoMathClz32(LMathClz32* instr) {
4341 CpuFeatureScope scope(masm(), SSE2);
4342 Register input = ToRegister(instr->value());
4343 Register result = ToRegister(instr->result());
4344 Label not_zero_input;
4345 __ bsr(result, input);
4347 __ j(not_zero, ¬_zero_input);
4348 __ Move(result, Immediate(63)); // 63^31 == 32
4350 __ bind(¬_zero_input);
4351 __ xor_(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
4355 void LCodeGen::DoMathExp(LMathExp* instr) {
4356 CpuFeatureScope scope(masm(), SSE2);
4357 XMMRegister input = ToDoubleRegister(instr->value());
4358 XMMRegister result = ToDoubleRegister(instr->result());
4359 XMMRegister temp0 = double_scratch0();
4360 Register temp1 = ToRegister(instr->temp1());
4361 Register temp2 = ToRegister(instr->temp2());
4363 MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
4367 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
4368 ASSERT(ToRegister(instr->context()).is(esi));
4369 ASSERT(ToRegister(instr->function()).is(edi));
4370 ASSERT(instr->HasPointerMap());
4372 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
4373 if (known_function.is_null()) {
4374 LPointerMap* pointers = instr->pointer_map();
4375 SafepointGenerator generator(
4376 this, pointers, Safepoint::kLazyDeopt);
4377 ParameterCount count(instr->arity());
4378 __ InvokeFunction(edi, count, CALL_FUNCTION, generator);
4380 CallKnownFunction(known_function,
4381 instr->hydrogen()->formal_parameter_count(),
4384 EDI_CONTAINS_TARGET);
4389 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4390 ASSERT(ToRegister(instr->context()).is(esi));
4391 ASSERT(ToRegister(instr->function()).is(edi));
4392 ASSERT(ToRegister(instr->result()).is(eax));
4394 int arity = instr->arity();
4395 CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
4396 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4400 void LCodeGen::DoCallNew(LCallNew* instr) {
4401 ASSERT(ToRegister(instr->context()).is(esi));
4402 ASSERT(ToRegister(instr->constructor()).is(edi));
4403 ASSERT(ToRegister(instr->result()).is(eax));
4405 // No cell in ebx for construct type feedback in optimized code
4406 __ mov(ebx, isolate()->factory()->undefined_value());
4407 CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
4408 __ Move(eax, Immediate(instr->arity()));
4409 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4413 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4414 ASSERT(ToRegister(instr->context()).is(esi));
4415 ASSERT(ToRegister(instr->constructor()).is(edi));
4416 ASSERT(ToRegister(instr->result()).is(eax));
4418 __ Move(eax, Immediate(instr->arity()));
4419 __ mov(ebx, isolate()->factory()->undefined_value());
4420 ElementsKind kind = instr->hydrogen()->elements_kind();
4421 AllocationSiteOverrideMode override_mode =
4422 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4423 ? DISABLE_ALLOCATION_SITES
4426 if (instr->arity() == 0) {
4427 ArrayNoArgumentConstructorStub stub(kind, override_mode);
4428 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4429 } else if (instr->arity() == 1) {
4431 if (IsFastPackedElementsKind(kind)) {
4433 // We might need a change here
4434 // look at the first argument
4435 __ mov(ecx, Operand(esp, 0));
4437 __ j(zero, &packed_case, Label::kNear);
4439 ElementsKind holey_kind = GetHoleyElementsKind(kind);
4440 ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
4441 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4442 __ jmp(&done, Label::kNear);
4443 __ bind(&packed_case);
4446 ArraySingleArgumentConstructorStub stub(kind, override_mode);
4447 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4450 ArrayNArgumentsConstructorStub stub(kind, override_mode);
4451 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4456 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4457 ASSERT(ToRegister(instr->context()).is(esi));
4458 CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
4462 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4463 Register function = ToRegister(instr->function());
4464 Register code_object = ToRegister(instr->code_object());
4465 __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
4466 __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
4470 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4471 Register result = ToRegister(instr->result());
4472 Register base = ToRegister(instr->base_object());
4473 if (instr->offset()->IsConstantOperand()) {
4474 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4475 __ lea(result, Operand(base, ToInteger32(offset)));
4477 Register offset = ToRegister(instr->offset());
4478 __ lea(result, Operand(base, offset, times_1, 0));
4483 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4484 Representation representation = instr->representation();
4486 HObjectAccess access = instr->hydrogen()->access();
4487 int offset = access.offset();
4489 if (access.IsExternalMemory()) {
4490 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4491 MemOperand operand = instr->object()->IsConstantOperand()
4492 ? MemOperand::StaticVariable(
4493 ToExternalReference(LConstantOperand::cast(instr->object())))
4494 : MemOperand(ToRegister(instr->object()), offset);
4495 if (instr->value()->IsConstantOperand()) {
4496 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4497 __ mov(operand, Immediate(ToInteger32(operand_value)));
4499 Register value = ToRegister(instr->value());
4500 __ Store(value, operand, representation);
4505 Register object = ToRegister(instr->object());
4506 Handle<Map> transition = instr->transition();
4507 SmiCheck check_needed =
4508 instr->hydrogen()->value()->IsHeapObject()
4509 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4511 ASSERT(!(representation.IsSmi() &&
4512 instr->value()->IsConstantOperand() &&
4513 !IsSmi(LConstantOperand::cast(instr->value()))));
4514 if (representation.IsHeapObject()) {
4515 if (instr->value()->IsConstantOperand()) {
4516 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4517 if (chunk_->LookupConstant(operand_value)->HasSmiValue()) {
4518 DeoptimizeIf(no_condition, instr->environment());
4521 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4522 Register value = ToRegister(instr->value());
4523 __ test(value, Immediate(kSmiTagMask));
4524 DeoptimizeIf(zero, instr->environment());
4526 // We know that value is a smi now, so we can omit the check below.
4527 check_needed = OMIT_SMI_CHECK;
4530 } else if (representation.IsDouble()) {
4531 ASSERT(transition.is_null());
4532 ASSERT(access.IsInobject());
4533 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4534 if (CpuFeatures::IsSupported(SSE2)) {
4535 CpuFeatureScope scope(masm(), SSE2);
4536 XMMRegister value = ToDoubleRegister(instr->value());
4537 __ movsd(FieldOperand(object, offset), value);
4539 X87Register value = ToX87Register(instr->value());
4540 X87Mov(FieldOperand(object, offset), value);
4545 if (!transition.is_null()) {
4546 if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
4547 __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
4549 Register temp = ToRegister(instr->temp());
4550 Register temp_map = ToRegister(instr->temp_map());
4551 __ mov(temp_map, transition);
4552 __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
4553 // Update the write barrier for the map field.
4554 __ RecordWriteField(object,
4555 HeapObject::kMapOffset,
4558 GetSaveFPRegsMode(),
4559 OMIT_REMEMBERED_SET,
4565 Register write_register = object;
4566 if (!access.IsInobject()) {
4567 write_register = ToRegister(instr->temp());
4568 __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
4571 MemOperand operand = FieldOperand(write_register, offset);
4572 if (instr->value()->IsConstantOperand()) {
4573 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4574 if (operand_value->IsRegister()) {
4575 Register value = ToRegister(operand_value);
4576 __ Store(value, operand, representation);
4577 } else if (representation.IsInteger32()) {
4578 Immediate immediate = ToImmediate(operand_value, representation);
4579 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4580 __ mov(operand, immediate);
4582 Handle<Object> handle_value = ToHandle(operand_value);
4583 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4584 __ mov(operand, handle_value);
4587 Register value = ToRegister(instr->value());
4588 __ Store(value, operand, representation);
4591 if (instr->hydrogen()->NeedsWriteBarrier()) {
4592 Register value = ToRegister(instr->value());
4593 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
4594 // Update the write barrier for the object for in-object properties.
4595 __ RecordWriteField(write_register,
4599 GetSaveFPRegsMode(),
4600 EMIT_REMEMBERED_SET,
4606 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4607 ASSERT(ToRegister(instr->context()).is(esi));
4608 ASSERT(ToRegister(instr->object()).is(edx));
4609 ASSERT(ToRegister(instr->value()).is(eax));
4611 __ mov(ecx, instr->name());
4612 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4613 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4617 void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
4618 if (FLAG_debug_code && check->hydrogen()->skip_check()) {
4620 __ j(NegateCondition(cc), &done, Label::kNear);
4624 DeoptimizeIf(cc, check->environment());
4629 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4630 if (instr->hydrogen()->skip_check() && !FLAG_debug_code) return;
4632 if (instr->index()->IsConstantOperand()) {
4633 Immediate immediate =
4634 ToImmediate(LConstantOperand::cast(instr->index()),
4635 instr->hydrogen()->length()->representation());
4636 __ cmp(ToOperand(instr->length()), immediate);
4637 Condition condition =
4638 instr->hydrogen()->allow_equality() ? below : below_equal;
4639 ApplyCheckIf(condition, instr);
4641 __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
4642 Condition condition =
4643 instr->hydrogen()->allow_equality() ? above : above_equal;
4644 ApplyCheckIf(condition, instr);
4650 void LCodeGen::DoStoreKeyedSIMD128ExternalArray(LStoreKeyed* instr) {
4651 LOperand* key = instr->key();
4652 ElementsKind elements_kind = instr->elements_kind();
4654 if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
4655 CpuFeatureScope scope(masm(), SSE2);
4656 Operand operand(BuildFastArrayOperand(
4659 instr->hydrogen()->key()->representation(),
4662 instr->additional_index()));
4663 __ movups(operand, ToSIMD128Register(instr->value()));
4665 ASSERT(instr->value()->IsRegister());
4666 Register temp = ToRegister(instr->temp());
4667 Register input_reg = ToRegister(instr->value());
4668 __ test(input_reg, Immediate(kSmiTagMask));
4669 DeoptimizeIf(zero, instr->environment());
4670 __ CmpObjectType(input_reg, T::kInstanceType, temp);
4671 DeoptimizeIf(not_equal, instr->environment());
4673 // Copy the SIMD128 value from the heap object to the external array.
4674 STATIC_ASSERT(T::kValueSize % kPointerSize == 0);
4675 for (int offset = 0; offset < T::kValueSize; offset += kPointerSize) {
4676 Operand operand(BuildFastArrayOperand(
4679 instr->hydrogen()->key()->representation(),
4682 instr->additional_index()));
4683 __ mov(temp, FieldOperand(input_reg, T::kValueOffset + offset));
4684 __ mov(operand, temp);
4690 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4691 ElementsKind elements_kind = instr->elements_kind();
4692 LOperand* key = instr->key();
4693 if (!key->IsConstantOperand() &&
4694 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
4696 HandleExternalArrayOpRequiresTemp(key,
4697 instr->hydrogen()->key()->representation(), elements_kind);
4700 Operand operand(BuildFastArrayOperand(
4703 instr->hydrogen()->key()->representation(),
4706 instr->additional_index()));
4707 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4708 elements_kind == FLOAT32_ELEMENTS) {
4709 if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
4710 CpuFeatureScope scope(masm(), SSE2);
4711 XMMRegister xmm_scratch = double_scratch0();
4712 __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
4713 __ movss(operand, xmm_scratch);
4718 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4719 elements_kind == FLOAT64_ELEMENTS) {
4720 if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
4721 CpuFeatureScope scope(masm(), SSE2);
4722 __ movsd(operand, ToDoubleRegister(instr->value()));
4724 X87Mov(operand, ToX87Register(instr->value()));
4726 } else if (IsFloat32x4ElementsKind(elements_kind)) {
4727 DoStoreKeyedSIMD128ExternalArray<Float32x4>(instr);
4728 } else if (IsInt32x4ElementsKind(elements_kind)) {
4729 DoStoreKeyedSIMD128ExternalArray<Int32x4>(instr);
4731 Register value = ToRegister(instr->value());
4732 switch (elements_kind) {
4733 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4734 case EXTERNAL_UINT8_ELEMENTS:
4735 case EXTERNAL_INT8_ELEMENTS:
4736 case UINT8_ELEMENTS:
4738 case UINT8_CLAMPED_ELEMENTS:
4739 __ mov_b(operand, value);
4741 case EXTERNAL_INT16_ELEMENTS:
4742 case EXTERNAL_UINT16_ELEMENTS:
4743 case UINT16_ELEMENTS:
4744 case INT16_ELEMENTS:
4745 __ mov_w(operand, value);
4747 case EXTERNAL_INT32_ELEMENTS:
4748 case EXTERNAL_UINT32_ELEMENTS:
4749 case UINT32_ELEMENTS:
4750 case INT32_ELEMENTS:
4751 __ mov(operand, value);
4753 case EXTERNAL_FLOAT32_ELEMENTS:
4754 case EXTERNAL_FLOAT64_ELEMENTS:
4755 case EXTERNAL_FLOAT32x4_ELEMENTS:
4756 case EXTERNAL_INT32x4_ELEMENTS:
4757 case FLOAT32_ELEMENTS:
4758 case FLOAT64_ELEMENTS:
4759 case FLOAT32x4_ELEMENTS:
4760 case INT32x4_ELEMENTS:
4761 case FAST_SMI_ELEMENTS:
4763 case FAST_DOUBLE_ELEMENTS:
4764 case FAST_HOLEY_SMI_ELEMENTS:
4765 case FAST_HOLEY_ELEMENTS:
4766 case FAST_HOLEY_DOUBLE_ELEMENTS:
4767 case DICTIONARY_ELEMENTS:
4768 case SLOPPY_ARGUMENTS_ELEMENTS:
4776 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4777 ExternalReference canonical_nan_reference =
4778 ExternalReference::address_of_canonical_non_hole_nan();
4779 Operand double_store_operand = BuildFastArrayOperand(
4782 instr->hydrogen()->key()->representation(),
4783 FAST_DOUBLE_ELEMENTS,
4784 FixedDoubleArray::kHeaderSize - kHeapObjectTag,
4785 instr->additional_index());
4787 if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
4788 CpuFeatureScope scope(masm(), SSE2);
4789 XMMRegister value = ToDoubleRegister(instr->value());
4791 if (instr->NeedsCanonicalization()) {
4794 __ ucomisd(value, value);
4795 __ j(parity_odd, &have_value, Label::kNear); // NaN.
4797 __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
4798 __ bind(&have_value);
4801 __ movsd(double_store_operand, value);
4803 // Can't use SSE2 in the serializer
4804 if (instr->hydrogen()->IsConstantHoleStore()) {
4805 // This means we should store the (double) hole. No floating point
4806 // registers required.
4807 double nan_double = FixedDoubleArray::hole_nan_as_double();
4808 uint64_t int_val = BitCast<uint64_t, double>(nan_double);
4809 int32_t lower = static_cast<int32_t>(int_val);
4810 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
4812 __ mov(double_store_operand, Immediate(lower));
4813 Operand double_store_operand2 = BuildFastArrayOperand(
4816 instr->hydrogen()->key()->representation(),
4817 FAST_DOUBLE_ELEMENTS,
4818 FixedDoubleArray::kHeaderSize - kHeapObjectTag + kPointerSize,
4819 instr->additional_index());
4820 __ mov(double_store_operand2, Immediate(upper));
4822 Label no_special_nan_handling;
4823 X87Register value = ToX87Register(instr->value());
4826 if (instr->NeedsCanonicalization()) {
4831 __ j(parity_odd, &no_special_nan_handling, Label::kNear);
4832 __ sub(esp, Immediate(kDoubleSize));
4833 __ fst_d(MemOperand(esp, 0));
4834 __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
4835 Immediate(kHoleNanUpper32));
4836 __ add(esp, Immediate(kDoubleSize));
4838 __ j(not_equal, &canonicalize, Label::kNear);
4839 __ jmp(&no_special_nan_handling, Label::kNear);
4840 __ bind(&canonicalize);
4842 __ fld_d(Operand::StaticVariable(canonical_nan_reference));
4845 __ bind(&no_special_nan_handling);
4846 __ fst_d(double_store_operand);
4852 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4853 Register elements = ToRegister(instr->elements());
4854 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4856 Operand operand = BuildFastArrayOperand(
4859 instr->hydrogen()->key()->representation(),
4861 FixedArray::kHeaderSize - kHeapObjectTag,
4862 instr->additional_index());
4863 if (instr->value()->IsRegister()) {
4864 __ mov(operand, ToRegister(instr->value()));
4866 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4867 if (IsSmi(operand_value)) {
4868 Immediate immediate = ToImmediate(operand_value, Representation::Smi());
4869 __ mov(operand, immediate);
4871 ASSERT(!IsInteger32(operand_value));
4872 Handle<Object> handle_value = ToHandle(operand_value);
4873 __ mov(operand, handle_value);
4877 if (instr->hydrogen()->NeedsWriteBarrier()) {
4878 ASSERT(instr->value()->IsRegister());
4879 Register value = ToRegister(instr->value());
4880 ASSERT(!instr->key()->IsConstantOperand());
4881 SmiCheck check_needed =
4882 instr->hydrogen()->value()->IsHeapObject()
4883 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4884 // Compute address of modified element and store it into key register.
4885 __ lea(key, operand);
4886 __ RecordWrite(elements,
4889 GetSaveFPRegsMode(),
4890 EMIT_REMEMBERED_SET,
4896 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4897 // By cases...external, fast-double, fast
4898 if (instr->is_typed_elements()) {
4899 DoStoreKeyedExternalArray(instr);
4900 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4901 DoStoreKeyedFixedDoubleArray(instr);
4903 DoStoreKeyedFixedArray(instr);
4908 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4909 ASSERT(ToRegister(instr->context()).is(esi));
4910 ASSERT(ToRegister(instr->object()).is(edx));
4911 ASSERT(ToRegister(instr->key()).is(ecx));
4912 ASSERT(ToRegister(instr->value()).is(eax));
4914 Handle<Code> ic = instr->strict_mode() == STRICT
4915 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4916 : isolate()->builtins()->KeyedStoreIC_Initialize();
4917 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4921 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4922 Register object = ToRegister(instr->object());
4923 Register temp = ToRegister(instr->temp());
4924 Label no_memento_found;
4925 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4926 DeoptimizeIf(equal, instr->environment());
4927 __ bind(&no_memento_found);
4931 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4932 Register object_reg = ToRegister(instr->object());
4934 Handle<Map> from_map = instr->original_map();
4935 Handle<Map> to_map = instr->transitioned_map();
4936 ElementsKind from_kind = instr->from_kind();
4937 ElementsKind to_kind = instr->to_kind();
4939 Label not_applicable;
4940 bool is_simple_map_transition =
4941 IsSimpleMapChangeTransition(from_kind, to_kind);
4942 Label::Distance branch_distance =
4943 is_simple_map_transition ? Label::kNear : Label::kFar;
4944 __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
4945 __ j(not_equal, ¬_applicable, branch_distance);
4946 if (is_simple_map_transition) {
4947 Register new_map_reg = ToRegister(instr->new_map_temp());
4948 __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
4951 ASSERT_NE(instr->temp(), NULL);
4952 __ RecordWriteForMap(object_reg, to_map, new_map_reg,
4953 ToRegister(instr->temp()),
4956 ASSERT(ToRegister(instr->context()).is(esi));
4957 PushSafepointRegistersScope scope(this);
4958 if (!object_reg.is(eax)) {
4959 __ mov(eax, object_reg);
4961 __ mov(ebx, to_map);
4962 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4963 TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
4965 RecordSafepointWithRegisters(
4966 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4968 __ bind(¬_applicable);
4972 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4973 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
4975 DeferredStringCharCodeAt(LCodeGen* codegen,
4976 LStringCharCodeAt* instr,
4977 const X87Stack& x87_stack)
4978 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4979 virtual void Generate() V8_OVERRIDE {
4980 codegen()->DoDeferredStringCharCodeAt(instr_);
4982 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4984 LStringCharCodeAt* instr_;
4987 DeferredStringCharCodeAt* deferred =
4988 new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);
4990 StringCharLoadGenerator::Generate(masm(),
4992 ToRegister(instr->string()),
4993 ToRegister(instr->index()),
4994 ToRegister(instr->result()),
4996 __ bind(deferred->exit());
5000 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
5001 Register string = ToRegister(instr->string());
5002 Register result = ToRegister(instr->result());
5004 // TODO(3095996): Get rid of this. For now, we need to make the
5005 // result register contain a valid pointer because it is already
5006 // contained in the register pointer map.
5007 __ Move(result, Immediate(0));
5009 PushSafepointRegistersScope scope(this);
5011 // Push the index as a smi. This is safe because of the checks in
5012 // DoStringCharCodeAt above.
5013 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
5014 if (instr->index()->IsConstantOperand()) {
5015 Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
5016 Representation::Smi());
5019 Register index = ToRegister(instr->index());
5023 CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2,
5024 instr, instr->context());
5027 __ StoreToSafepointRegisterSlot(result, eax);
5031 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
5032 class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
5034 DeferredStringCharFromCode(LCodeGen* codegen,
5035 LStringCharFromCode* instr,
5036 const X87Stack& x87_stack)
5037 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5038 virtual void Generate() V8_OVERRIDE {
5039 codegen()->DoDeferredStringCharFromCode(instr_);
5041 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5043 LStringCharFromCode* instr_;
5046 DeferredStringCharFromCode* deferred =
5047 new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);
5049 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
5050 Register char_code = ToRegister(instr->char_code());
5051 Register result = ToRegister(instr->result());
5052 ASSERT(!char_code.is(result));
5054 __ cmp(char_code, String::kMaxOneByteCharCode);
5055 __ j(above, deferred->entry());
5056 __ Move(result, Immediate(factory()->single_character_string_cache()));
5057 __ mov(result, FieldOperand(result,
5058 char_code, times_pointer_size,
5059 FixedArray::kHeaderSize));
5060 __ cmp(result, factory()->undefined_value());
5061 __ j(equal, deferred->entry());
5062 __ bind(deferred->exit());
5066 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
5067 Register char_code = ToRegister(instr->char_code());
5068 Register result = ToRegister(instr->result());
5070 // TODO(3095996): Get rid of this. For now, we need to make the
5071 // result register contain a valid pointer because it is already
5072 // contained in the register pointer map.
5073 __ Move(result, Immediate(0));
5075 PushSafepointRegistersScope scope(this);
5076 __ SmiTag(char_code);
5078 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
5079 __ StoreToSafepointRegisterSlot(result, eax);
5083 void LCodeGen::DoStringAdd(LStringAdd* instr) {
5084 ASSERT(ToRegister(instr->context()).is(esi));
5085 ASSERT(ToRegister(instr->left()).is(edx));
5086 ASSERT(ToRegister(instr->right()).is(eax));
5087 StringAddStub stub(instr->hydrogen()->flags(),
5088 instr->hydrogen()->pretenure_flag());
5089 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
5093 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
5094 LOperand* input = instr->value();
5095 LOperand* output = instr->result();
5096 ASSERT(input->IsRegister() || input->IsStackSlot());
5097 ASSERT(output->IsDoubleRegister());
5098 if (CpuFeatures::IsSupported(SSE2)) {
5099 CpuFeatureScope scope(masm(), SSE2);
5100 __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
5101 } else if (input->IsRegister()) {
5102 Register input_reg = ToRegister(input);
5104 X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
5107 X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
5112 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
5113 LOperand* input = instr->value();
5114 LOperand* output = instr->result();
5115 if (CpuFeatures::IsSupported(SSE2)) {
5116 CpuFeatureScope scope(masm(), SSE2);
5117 LOperand* temp = instr->temp();
5119 __ LoadUint32(ToDoubleRegister(output),
5121 ToDoubleRegister(temp));
5123 X87Register res = ToX87Register(output);
5124 X87PrepareToWrite(res);
5125 __ LoadUint32NoSSE2(ToRegister(input));
5126 X87CommitWrite(res);
5131 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
5132 class DeferredNumberTagI V8_FINAL : public LDeferredCode {
5134 DeferredNumberTagI(LCodeGen* codegen,
5136 const X87Stack& x87_stack)
5137 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5138 virtual void Generate() V8_OVERRIDE {
5139 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
5140 NULL, SIGNED_INT32);
5142 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5144 LNumberTagI* instr_;
5147 LOperand* input = instr->value();
5148 ASSERT(input->IsRegister() && input->Equals(instr->result()));
5149 Register reg = ToRegister(input);
5151 DeferredNumberTagI* deferred =
5152 new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
5154 __ j(overflow, deferred->entry());
5155 __ bind(deferred->exit());
5159 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
5160 class DeferredNumberTagU V8_FINAL : public LDeferredCode {
5162 DeferredNumberTagU(LCodeGen* codegen,
5164 const X87Stack& x87_stack)
5165 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5166 virtual void Generate() V8_OVERRIDE {
5167 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
5168 instr_->temp2(), UNSIGNED_INT32);
5170 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5172 LNumberTagU* instr_;
5175 LOperand* input = instr->value();
5176 ASSERT(input->IsRegister() && input->Equals(instr->result()));
5177 Register reg = ToRegister(input);
5179 DeferredNumberTagU* deferred =
5180 new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
5181 __ cmp(reg, Immediate(Smi::kMaxValue));
5182 __ j(above, deferred->entry());
5184 __ bind(deferred->exit());
5188 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
5192 IntegerSignedness signedness) {
5194 Register reg = ToRegister(value);
5195 Register tmp = ToRegister(temp1);
5196 XMMRegister xmm_scratch = double_scratch0();
5198 if (signedness == SIGNED_INT32) {
5199 // There was overflow, so bits 30 and 31 of the original integer
5200 // disagree. Try to allocate a heap number in new space and store
5201 // the value in there. If that fails, call the runtime system.
5203 __ xor_(reg, 0x80000000);
5204 if (CpuFeatures::IsSupported(SSE2)) {
5205 CpuFeatureScope feature_scope(masm(), SSE2);
5206 __ Cvtsi2sd(xmm_scratch, Operand(reg));
5209 __ fild_s(Operand(esp, 0));
5213 if (CpuFeatures::IsSupported(SSE2)) {
5214 CpuFeatureScope feature_scope(masm(), SSE2);
5215 __ LoadUint32(xmm_scratch, reg, ToDoubleRegister(temp2));
5217 // There's no fild variant for unsigned values, so zero-extend to a 64-bit
5219 __ push(Immediate(0));
5221 __ fild_d(Operand(esp, 0));
5227 if (FLAG_inline_new) {
5228 __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
5229 __ jmp(&done, Label::kNear);
5232 // Slow case: Call the runtime system to do the number allocation.
5235 // TODO(3095996): Put a valid pointer value in the stack slot where the
5236 // result register is stored, as this register is in the pointer map, but
5237 // contains an integer value.
5238 __ Move(reg, Immediate(0));
5240 // Preserve the value of all registers.
5241 PushSafepointRegistersScope scope(this);
5243 // NumberTagI and NumberTagD use the context from the frame, rather than
5244 // the environment's HContext or HInlinedContext value.
5245 // They only call Runtime::kHiddenAllocateHeapNumber.
5246 // The corresponding HChange instructions are added in a phase that does
5247 // not have easy access to the local context.
5248 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
5249 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
5250 RecordSafepointWithRegisters(
5251 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5252 __ StoreToSafepointRegisterSlot(reg, eax);
5255 // Done. Put the value in xmm_scratch into the value of the allocated heap
5258 if (CpuFeatures::IsSupported(SSE2)) {
5259 CpuFeatureScope feature_scope(masm(), SSE2);
5260 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
5262 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
5267 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
5268 class DeferredNumberTagD V8_FINAL : public LDeferredCode {
5270 DeferredNumberTagD(LCodeGen* codegen,
5272 const X87Stack& x87_stack)
5273 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5274 virtual void Generate() V8_OVERRIDE {
5275 codegen()->DoDeferredNumberTagD(instr_);
5277 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5279 LNumberTagD* instr_;
5282 Register reg = ToRegister(instr->result());
5284 bool use_sse2 = CpuFeatures::IsSupported(SSE2);
5286 // Put the value to the top of stack
5287 X87Register src = ToX87Register(instr->value());
5288 X87LoadForUsage(src);
5291 DeferredNumberTagD* deferred =
5292 new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
5293 if (FLAG_inline_new) {
5294 Register tmp = ToRegister(instr->temp());
5295 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
5297 __ jmp(deferred->entry());
5299 __ bind(deferred->exit());
5301 CpuFeatureScope scope(masm(), SSE2);
5302 XMMRegister input_reg = ToDoubleRegister(instr->value());
5303 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
5305 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
5310 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
5311 // TODO(3095996): Get rid of this. For now, we need to make the
5312 // result register contain a valid pointer because it is already
5313 // contained in the register pointer map.
5314 Register reg = ToRegister(instr->result());
5315 __ Move(reg, Immediate(0));
5317 PushSafepointRegistersScope scope(this);
5318 // NumberTagI and NumberTagD use the context from the frame, rather than
5319 // the environment's HContext or HInlinedContext value.
5320 // They only call Runtime::kHiddenAllocateHeapNumber.
5321 // The corresponding HChange instructions are added in a phase that does
5322 // not have easy access to the local context.
5323 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
5324 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
5325 RecordSafepointWithRegisters(
5326 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5327 __ StoreToSafepointRegisterSlot(reg, eax);
5331 void LCodeGen::DoSmiTag(LSmiTag* instr) {
5332 HChange* hchange = instr->hydrogen();
5333 Register input = ToRegister(instr->value());
5334 if (hchange->CheckFlag(HValue::kCanOverflow) &&
5335 hchange->value()->CheckFlag(HValue::kUint32)) {
5336 __ test(input, Immediate(0xc0000000));
5337 DeoptimizeIf(not_zero, instr->environment());
5340 if (hchange->CheckFlag(HValue::kCanOverflow) &&
5341 !hchange->value()->CheckFlag(HValue::kUint32)) {
5342 DeoptimizeIf(overflow, instr->environment());
5347 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
5348 LOperand* input = instr->value();
5349 Register result = ToRegister(input);
5350 ASSERT(input->IsRegister() && input->Equals(instr->result()));
5351 if (instr->needs_check()) {
5352 __ test(result, Immediate(kSmiTagMask));
5353 DeoptimizeIf(not_zero, instr->environment());
5355 __ AssertSmi(result);
5357 __ SmiUntag(result);
5361 void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
5363 X87Register res_reg,
5364 bool can_convert_undefined_to_nan,
5365 bool deoptimize_on_minus_zero,
5367 NumberUntagDMode mode) {
5368 Label load_smi, done;
5370 X87PrepareToWrite(res_reg);
5371 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
5373 __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
5375 // Heap number map check.
5376 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5377 factory()->heap_number_map());
5378 if (!can_convert_undefined_to_nan) {
5379 DeoptimizeIf(not_equal, env);
5381 Label heap_number, convert;
5382 __ j(equal, &heap_number, Label::kNear);
5384 // Convert undefined (or hole) to NaN.
5385 __ cmp(input_reg, factory()->undefined_value());
5386 DeoptimizeIf(not_equal, env);
5389 ExternalReference nan =
5390 ExternalReference::address_of_canonical_non_hole_nan();
5391 __ fld_d(Operand::StaticVariable(nan));
5392 __ jmp(&done, Label::kNear);
5394 __ bind(&heap_number);
5396 // Heap number to x87 conversion.
5397 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
5398 if (deoptimize_on_minus_zero) {
5401 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
5402 __ j(not_zero, &done, Label::kNear);
5404 // Use general purpose registers to check if we have -0.0
5405 __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
5406 __ test(temp_reg, Immediate(HeapNumber::kSignMask));
5407 __ j(zero, &done, Label::kNear);
5409 // Pop FPU stack before deoptimizing.
5411 DeoptimizeIf(not_zero, env);
5413 __ jmp(&done, Label::kNear);
5415 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
5419 // Clobbering a temp is faster than re-tagging the
5420 // input register since we avoid dependencies.
5421 __ mov(temp_reg, input_reg);
5422 __ SmiUntag(temp_reg); // Untag smi before converting to float.
5424 __ fild_s(Operand(esp, 0));
5425 __ add(esp, Immediate(kPointerSize));
5427 X87CommitWrite(res_reg);
5431 void LCodeGen::EmitNumberUntagD(Register input_reg,
5433 XMMRegister result_reg,
5434 bool can_convert_undefined_to_nan,
5435 bool deoptimize_on_minus_zero,
5437 NumberUntagDMode mode) {
5438 Label convert, load_smi, done;
5440 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
5442 __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
5444 // Heap number map check.
5445 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5446 factory()->heap_number_map());
5447 if (can_convert_undefined_to_nan) {
5448 __ j(not_equal, &convert, Label::kNear);
5450 DeoptimizeIf(not_equal, env);
5453 // Heap number to XMM conversion.
5454 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
5456 if (deoptimize_on_minus_zero) {
5457 XMMRegister xmm_scratch = double_scratch0();
5458 __ xorps(xmm_scratch, xmm_scratch);
5459 __ ucomisd(result_reg, xmm_scratch);
5460 __ j(not_zero, &done, Label::kNear);
5461 __ movmskpd(temp_reg, result_reg);
5462 __ test_b(temp_reg, 1);
5463 DeoptimizeIf(not_zero, env);
5465 __ jmp(&done, Label::kNear);
5467 if (can_convert_undefined_to_nan) {
5470 // Convert undefined (and hole) to NaN.
5471 __ cmp(input_reg, factory()->undefined_value());
5472 DeoptimizeIf(not_equal, env);
5474 ExternalReference nan =
5475 ExternalReference::address_of_canonical_non_hole_nan();
5476 __ movsd(result_reg, Operand::StaticVariable(nan));
5477 __ jmp(&done, Label::kNear);
5480 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
5484 // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the
5485 // input register since we avoid dependencies.
5486 __ mov(temp_reg, input_reg);
5487 __ SmiUntag(temp_reg); // Untag smi before converting to float.
5488 __ Cvtsi2sd(result_reg, Operand(temp_reg));
5493 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
5494 Register input_reg = ToRegister(instr->value());
5496 // The input was optimistically untagged; revert it.
5497 STATIC_ASSERT(kSmiTagSize == 1);
5498 __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
5500 if (instr->truncating()) {
5501 Label no_heap_number, check_bools, check_false;
5503 // Heap number map check.
5504 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5505 factory()->heap_number_map());
5506 __ j(not_equal, &no_heap_number, Label::kNear);
5507 __ TruncateHeapNumberToI(input_reg, input_reg);
5510 __ bind(&no_heap_number);
5511 // Check for Oddballs. Undefined/False is converted to zero and True to one
5512 // for truncating conversions.
5513 __ cmp(input_reg, factory()->undefined_value());
5514 __ j(not_equal, &check_bools, Label::kNear);
5515 __ Move(input_reg, Immediate(0));
5518 __ bind(&check_bools);
5519 __ cmp(input_reg, factory()->true_value());
5520 __ j(not_equal, &check_false, Label::kNear);
5521 __ Move(input_reg, Immediate(1));
5524 __ bind(&check_false);
5525 __ cmp(input_reg, factory()->false_value());
5526 __ RecordComment("Deferred TaggedToI: cannot truncate");
5527 DeoptimizeIf(not_equal, instr->environment());
5528 __ Move(input_reg, Immediate(0));
5531 XMMRegister scratch = (instr->temp() != NULL)
5532 ? ToDoubleRegister(instr->temp())
5534 __ TaggedToI(input_reg, input_reg, scratch,
5535 instr->hydrogen()->GetMinusZeroMode(), &bailout);
5538 DeoptimizeIf(no_condition, instr->environment());
5543 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5544 class DeferredTaggedToI V8_FINAL : public LDeferredCode {
5546 DeferredTaggedToI(LCodeGen* codegen,
5548 const X87Stack& x87_stack)
5549 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5550 virtual void Generate() V8_OVERRIDE {
5551 codegen()->DoDeferredTaggedToI(instr_, done());
5553 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5558 LOperand* input = instr->value();
5559 ASSERT(input->IsRegister());
5560 Register input_reg = ToRegister(input);
5561 ASSERT(input_reg.is(ToRegister(instr->result())));
5563 if (instr->hydrogen()->value()->representation().IsSmi()) {
5564 __ SmiUntag(input_reg);
5566 DeferredTaggedToI* deferred =
5567 new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
5568 // Optimistically untag the input.
5569 // If the input is a HeapObject, SmiUntag will set the carry flag.
5570 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
5571 __ SmiUntag(input_reg);
5572 // Branch to deferred code if the input was tagged.
5573 // The deferred code will take care of restoring the tag.
5574 __ j(carry, deferred->entry());
5575 __ bind(deferred->exit());
5580 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5581 LOperand* input = instr->value();
5582 ASSERT(input->IsRegister());
5583 LOperand* temp = instr->temp();
5584 ASSERT(temp->IsRegister());
5585 LOperand* result = instr->result();
5586 ASSERT(result->IsDoubleRegister());
5588 Register input_reg = ToRegister(input);
5589 bool deoptimize_on_minus_zero =
5590 instr->hydrogen()->deoptimize_on_minus_zero();
5591 Register temp_reg = ToRegister(temp);
5593 HValue* value = instr->hydrogen()->value();
5594 NumberUntagDMode mode = value->representation().IsSmi()
5595 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5597 if (CpuFeatures::IsSupported(SSE2)) {
5598 CpuFeatureScope scope(masm(), SSE2);
5599 XMMRegister result_reg = ToDoubleRegister(result);
5600 EmitNumberUntagD(input_reg,
5603 instr->hydrogen()->can_convert_undefined_to_nan(),
5604 deoptimize_on_minus_zero,
5605 instr->environment(),
5608 EmitNumberUntagDNoSSE2(input_reg,
5610 ToX87Register(instr->result()),
5611 instr->hydrogen()->can_convert_undefined_to_nan(),
5612 deoptimize_on_minus_zero,
5613 instr->environment(),
5619 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5620 LOperand* input = instr->value();
5621 ASSERT(input->IsDoubleRegister());
5622 LOperand* result = instr->result();
5623 ASSERT(result->IsRegister());
5624 Register result_reg = ToRegister(result);
5626 if (instr->truncating()) {
5627 if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
5628 CpuFeatureScope scope(masm(), SSE2);
5629 XMMRegister input_reg = ToDoubleRegister(input);
5630 __ TruncateDoubleToI(result_reg, input_reg);
5632 X87Register input_reg = ToX87Register(input);
5634 __ TruncateX87TOSToI(result_reg);
5637 Label bailout, done;
5638 if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
5639 CpuFeatureScope scope(masm(), SSE2);
5640 XMMRegister input_reg = ToDoubleRegister(input);
5641 XMMRegister xmm_scratch = double_scratch0();
5642 __ DoubleToI(result_reg, input_reg, xmm_scratch,
5643 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
5645 X87Register input_reg = ToX87Register(input);
5647 __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
5648 &bailout, Label::kNear);
5650 __ jmp(&done, Label::kNear);
5652 DeoptimizeIf(no_condition, instr->environment());
5658 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5659 LOperand* input = instr->value();
5660 ASSERT(input->IsDoubleRegister());
5661 LOperand* result = instr->result();
5662 ASSERT(result->IsRegister());
5663 Register result_reg = ToRegister(result);
5665 Label bailout, done;
5666 if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
5667 CpuFeatureScope scope(masm(), SSE2);
5668 XMMRegister input_reg = ToDoubleRegister(input);
5669 XMMRegister xmm_scratch = double_scratch0();
5670 __ DoubleToI(result_reg, input_reg, xmm_scratch,
5671 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
5673 X87Register input_reg = ToX87Register(input);
5675 __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
5676 &bailout, Label::kNear);
5678 __ jmp(&done, Label::kNear);
5680 DeoptimizeIf(no_condition, instr->environment());
5683 __ SmiTag(result_reg);
5684 DeoptimizeIf(overflow, instr->environment());
5688 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5689 LOperand* input = instr->value();
5690 __ test(ToOperand(input), Immediate(kSmiTagMask));
5691 DeoptimizeIf(not_zero, instr->environment());
5695 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5696 if (!instr->hydrogen()->value()->IsHeapObject()) {
5697 LOperand* input = instr->value();
5698 __ test(ToOperand(input), Immediate(kSmiTagMask));
5699 DeoptimizeIf(zero, instr->environment());
5704 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5705 Register input = ToRegister(instr->value());
5706 Register temp = ToRegister(instr->temp());
5708 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
5710 if (instr->hydrogen()->is_interval_check()) {
5713 instr->hydrogen()->GetCheckInterval(&first, &last);
5715 __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
5716 static_cast<int8_t>(first));
5718 // If there is only one type in the interval check for equality.
5719 if (first == last) {
5720 DeoptimizeIf(not_equal, instr->environment());
5722 DeoptimizeIf(below, instr->environment());
5723 // Omit check for the last type.
5724 if (last != LAST_TYPE) {
5725 __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
5726 static_cast<int8_t>(last));
5727 DeoptimizeIf(above, instr->environment());
5733 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5735 if (IsPowerOf2(mask)) {
5736 ASSERT(tag == 0 || IsPowerOf2(tag));
5737 __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
5738 DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
5740 __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
5741 __ and_(temp, mask);
5743 DeoptimizeIf(not_equal, instr->environment());
5749 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5750 Handle<HeapObject> object = instr->hydrogen()->object().handle();
5751 if (instr->hydrogen()->object_in_new_space()) {
5752 Register reg = ToRegister(instr->value());
5753 Handle<Cell> cell = isolate()->factory()->NewCell(object);
5754 __ cmp(reg, Operand::ForCell(cell));
5756 Operand operand = ToOperand(instr->value());
5757 __ cmp(operand, object);
5759 DeoptimizeIf(not_equal, instr->environment());
5763 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5765 PushSafepointRegistersScope scope(this);
5768 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5769 RecordSafepointWithRegisters(
5770 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5772 __ test(eax, Immediate(kSmiTagMask));
5774 DeoptimizeIf(zero, instr->environment());
5778 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5779 class DeferredCheckMaps V8_FINAL : public LDeferredCode {
5781 DeferredCheckMaps(LCodeGen* codegen,
5784 const X87Stack& x87_stack)
5785 : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
5786 SetExit(check_maps());
5788 virtual void Generate() V8_OVERRIDE {
5789 codegen()->DoDeferredInstanceMigration(instr_, object_);
5791 Label* check_maps() { return &check_maps_; }
5792 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5799 if (instr->hydrogen()->CanOmitMapChecks()) return;
5801 LOperand* input = instr->value();
5802 ASSERT(input->IsRegister());
5803 Register reg = ToRegister(input);
5805 DeferredCheckMaps* deferred = NULL;
5806 if (instr->hydrogen()->has_migration_target()) {
5807 deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
5808 __ bind(deferred->check_maps());
5811 UniqueSet<Map> map_set = instr->hydrogen()->map_set();
5813 for (int i = 0; i < map_set.size() - 1; i++) {
5814 Handle<Map> map = map_set.at(i).handle();
5815 __ CompareMap(reg, map);
5816 __ j(equal, &success, Label::kNear);
5819 Handle<Map> map = map_set.at(map_set.size() - 1).handle();
5820 __ CompareMap(reg, map);
5821 if (instr->hydrogen()->has_migration_target()) {
5822 __ j(not_equal, deferred->entry());
5824 DeoptimizeIf(not_equal, instr->environment());
5831 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5832 CpuFeatureScope scope(masm(), SSE2);
5833 XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
5834 XMMRegister xmm_scratch = double_scratch0();
5835 Register result_reg = ToRegister(instr->result());
5836 __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
5840 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5841 ASSERT(instr->unclamped()->Equals(instr->result()));
5842 Register value_reg = ToRegister(instr->result());
5843 __ ClampUint8(value_reg);
5847 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5848 CpuFeatureScope scope(masm(), SSE2);
5850 ASSERT(instr->unclamped()->Equals(instr->result()));
5851 Register input_reg = ToRegister(instr->unclamped());
5852 XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
5853 XMMRegister xmm_scratch = double_scratch0();
5854 Label is_smi, done, heap_number;
5856 __ JumpIfSmi(input_reg, &is_smi);
5858 // Check for heap number
5859 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5860 factory()->heap_number_map());
5861 __ j(equal, &heap_number, Label::kNear);
5863 // Check for undefined. Undefined is converted to zero for clamping
5865 __ cmp(input_reg, factory()->undefined_value());
5866 DeoptimizeIf(not_equal, instr->environment());
5867 __ mov(input_reg, 0);
5868 __ jmp(&done, Label::kNear);
5871 __ bind(&heap_number);
5872 __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
5873 __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
5874 __ jmp(&done, Label::kNear);
5878 __ SmiUntag(input_reg);
5879 __ ClampUint8(input_reg);
5884 void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
5885 Register input_reg = ToRegister(instr->unclamped());
5886 Register result_reg = ToRegister(instr->result());
5887 Register scratch = ToRegister(instr->scratch());
5888 Register scratch2 = ToRegister(instr->scratch2());
5889 Register scratch3 = ToRegister(instr->scratch3());
5890 Label is_smi, done, heap_number, valid_exponent,
5891 largest_value, zero_result, maybe_nan_or_infinity;
5893 __ JumpIfSmi(input_reg, &is_smi);
5895 // Check for heap number
5896 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5897 factory()->heap_number_map());
5898 __ j(equal, &heap_number, Label::kNear);
5900 // Check for undefined. Undefined is converted to zero for clamping
5902 __ cmp(input_reg, factory()->undefined_value());
5903 DeoptimizeIf(not_equal, instr->environment());
5904 __ jmp(&zero_result, Label::kNear);
5907 __ bind(&heap_number);
5909 // Surprisingly, all of the hand-crafted bit-manipulations below are much
5910 // faster than the x86 FPU built-in instruction, especially since "banker's
5911 // rounding" would be additionally very expensive
5913 // Get exponent word.
5914 __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
5915 __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5917 // Test for negative values --> clamp to zero
5918 __ test(scratch, scratch);
5919 __ j(negative, &zero_result, Label::kNear);
5921 // Get exponent alone in scratch2.
5922 __ mov(scratch2, scratch);
5923 __ and_(scratch2, HeapNumber::kExponentMask);
5924 __ shr(scratch2, HeapNumber::kExponentShift);
5925 __ j(zero, &zero_result, Label::kNear);
5926 __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
5927 __ j(negative, &zero_result, Label::kNear);
5929 const uint32_t non_int8_exponent = 7;
5930 __ cmp(scratch2, Immediate(non_int8_exponent + 1));
5931 // If the exponent is too big, check for special values.
5932 __ j(greater, &maybe_nan_or_infinity, Label::kNear);
5934 __ bind(&valid_exponent);
5935 // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
5936 // < 7. The shift bias is the number of bits to shift the mantissa such that
5937 // with an exponent of 7 such the that top-most one is in bit 30, allowing
5938 // detection the rounding overflow of a 255.5 to 256 (bit 31 goes from 0 to
5940 int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
5941 __ lea(result_reg, MemOperand(scratch2, shift_bias));
5942 // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the
5943 // top bits of the mantissa.
5944 __ and_(scratch, HeapNumber::kMantissaMask);
5945 // Put back the implicit 1 of the mantissa
5946 __ or_(scratch, 1 << HeapNumber::kExponentShift);
5947 // Shift up to round
5949 // Use "banker's rounding" to spec: If fractional part of number is 0.5, then
5950 // use the bit in the "ones" place and add it to the "halves" place, which has
5951 // the effect of rounding to even.
5952 __ mov(scratch2, scratch);
5953 const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
5954 const uint32_t one_bit_shift = one_half_bit_shift + 1;
5955 __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
5956 __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
5958 __ j(less, &no_round, Label::kNear);
5960 __ mov(scratch2, Immediate(1 << one_half_bit_shift));
5961 __ j(greater, &round_up, Label::kNear);
5962 __ test(scratch3, scratch3);
5963 __ j(not_zero, &round_up, Label::kNear);
5964 __ mov(scratch2, scratch);
5965 __ and_(scratch2, Immediate(1 << one_bit_shift));
5966 __ shr(scratch2, 1);
5968 __ add(scratch, scratch2);
5969 __ j(overflow, &largest_value, Label::kNear);
5971 __ shr(scratch, 23);
5972 __ mov(result_reg, scratch);
5973 __ jmp(&done, Label::kNear);
5975 __ bind(&maybe_nan_or_infinity);
5976 // Check for NaN/Infinity, all other values map to 255
5977 __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
5978 __ j(not_equal, &largest_value, Label::kNear);
5980 // Check for NaN, which differs from Infinity in that at least one mantissa
5982 __ and_(scratch, HeapNumber::kMantissaMask);
5983 __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5984 __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN
5985 // Infinity -> Fall through to map to 255.
5987 __ bind(&largest_value);
5988 __ mov(result_reg, Immediate(255));
5989 __ jmp(&done, Label::kNear);
5991 __ bind(&zero_result);
5992 __ xor_(result_reg, result_reg);
5993 __ jmp(&done, Label::kNear);
5997 if (!input_reg.is(result_reg)) {
5998 __ mov(result_reg, input_reg);
6000 __ SmiUntag(result_reg);
6001 __ ClampUint8(result_reg);
6006 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
6007 CpuFeatureScope scope(masm(), SSE2);
6008 XMMRegister value_reg = ToDoubleRegister(instr->value());
6009 Register result_reg = ToRegister(instr->result());
6010 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
6011 if (CpuFeatures::IsSupported(SSE4_1)) {
6012 CpuFeatureScope scope2(masm(), SSE4_1);
6013 __ pextrd(result_reg, value_reg, 1);
6015 XMMRegister xmm_scratch = double_scratch0();
6016 __ pshufd(xmm_scratch, value_reg, 1);
6017 __ movd(result_reg, xmm_scratch);
6020 __ movd(result_reg, value_reg);
6025 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
6026 Register hi_reg = ToRegister(instr->hi());
6027 Register lo_reg = ToRegister(instr->lo());
6028 XMMRegister result_reg = ToDoubleRegister(instr->result());
6029 CpuFeatureScope scope(masm(), SSE2);
6031 if (CpuFeatures::IsSupported(SSE4_1)) {
6032 CpuFeatureScope scope2(masm(), SSE4_1);
6033 __ movd(result_reg, lo_reg);
6034 __ pinsrd(result_reg, hi_reg, 1);
6036 XMMRegister xmm_scratch = double_scratch0();
6037 __ movd(result_reg, hi_reg);
6038 __ psllq(result_reg, 32);
6039 __ movd(xmm_scratch, lo_reg);
6040 __ orps(result_reg, xmm_scratch);
6045 void LCodeGen::DoAllocate(LAllocate* instr) {
6046 class DeferredAllocate V8_FINAL : public LDeferredCode {
6048 DeferredAllocate(LCodeGen* codegen,
6050 const X87Stack& x87_stack)
6051 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
6052 virtual void Generate() V8_OVERRIDE {
6053 codegen()->DoDeferredAllocate(instr_);
6055 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
6060 DeferredAllocate* deferred =
6061 new(zone()) DeferredAllocate(this, instr, x87_stack_);
6063 Register result = ToRegister(instr->result());
6064 Register temp = ToRegister(instr->temp());
6066 // Allocate memory for the object.
6067 AllocationFlags flags = TAG_OBJECT;
6068 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
6069 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
6071 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
6072 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
6073 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
6074 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
6075 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
6076 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
6077 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
6080 if (instr->size()->IsConstantOperand()) {
6081 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
6082 if (size <= Page::kMaxRegularHeapObjectSize) {
6083 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
6085 __ jmp(deferred->entry());
6088 Register size = ToRegister(instr->size());
6089 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
6092 __ bind(deferred->exit());
6094 if (instr->hydrogen()->MustPrefillWithFiller()) {
6095 if (instr->size()->IsConstantOperand()) {
6096 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
6097 __ mov(temp, (size / kPointerSize) - 1);
6099 temp = ToRegister(instr->size());
6100 __ shr(temp, kPointerSizeLog2);
6105 __ mov(FieldOperand(result, temp, times_pointer_size, 0),
6106 isolate()->factory()->one_pointer_filler_map());
6108 __ j(not_zero, &loop);
6113 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
6114 Register result = ToRegister(instr->result());
6116 // TODO(3095996): Get rid of this. For now, we need to make the
6117 // result register contain a valid pointer because it is already
6118 // contained in the register pointer map.
6119 __ Move(result, Immediate(Smi::FromInt(0)));
6121 PushSafepointRegistersScope scope(this);
6122 if (instr->size()->IsRegister()) {
6123 Register size = ToRegister(instr->size());
6124 ASSERT(!size.is(result));
6125 __ SmiTag(ToRegister(instr->size()));
6128 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
6129 __ push(Immediate(Smi::FromInt(size)));
6132 int flags = AllocateDoubleAlignFlag::encode(
6133 instr->hydrogen()->MustAllocateDoubleAligned());
6134 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
6135 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
6136 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
6137 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
6138 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
6139 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
6140 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
6142 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
6144 __ push(Immediate(Smi::FromInt(flags)));
6146 CallRuntimeFromDeferred(
6147 Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
6148 __ StoreToSafepointRegisterSlot(result, eax);
6152 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
6153 ASSERT(ToRegister(instr->value()).is(eax));
6155 CallRuntime(Runtime::kToFastProperties, 1, instr);
6159 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
6160 ASSERT(ToRegister(instr->context()).is(esi));
6162 // Registers will be used as follows:
6163 // ecx = literals array.
6164 // ebx = regexp literal.
6165 // eax = regexp literal clone.
6167 int literal_offset =
6168 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
6169 __ LoadHeapObject(ecx, instr->hydrogen()->literals());
6170 __ mov(ebx, FieldOperand(ecx, literal_offset));
6171 __ cmp(ebx, factory()->undefined_value());
6172 __ j(not_equal, &materialized, Label::kNear);
6174 // Create regexp literal using runtime function
6175 // Result will be in eax.
6177 __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
6178 __ push(Immediate(instr->hydrogen()->pattern()));
6179 __ push(Immediate(instr->hydrogen()->flags()));
6180 CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
6183 __ bind(&materialized);
6184 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
6185 Label allocated, runtime_allocate;
6186 __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
6187 __ jmp(&allocated, Label::kNear);
6189 __ bind(&runtime_allocate);
6191 __ push(Immediate(Smi::FromInt(size)));
6192 CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
6195 __ bind(&allocated);
6196 // Copy the content into the newly allocated memory.
6197 // (Unroll copy loop once for better throughput).
6198 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
6199 __ mov(edx, FieldOperand(ebx, i));
6200 __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
6201 __ mov(FieldOperand(eax, i), edx);
6202 __ mov(FieldOperand(eax, i + kPointerSize), ecx);
6204 if ((size % (2 * kPointerSize)) != 0) {
6205 __ mov(edx, FieldOperand(ebx, size - kPointerSize));
6206 __ mov(FieldOperand(eax, size - kPointerSize), edx);
6211 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
6212 ASSERT(ToRegister(instr->context()).is(esi));
6213 // Use the fast case closure allocation code that allocates in new
6214 // space for nested functions that don't need literals cloning.
6215 bool pretenure = instr->hydrogen()->pretenure();
6216 if (!pretenure && instr->hydrogen()->has_no_literals()) {
6217 FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
6218 instr->hydrogen()->is_generator());
6219 __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
6220 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
6223 __ push(Immediate(instr->hydrogen()->shared_info()));
6224 __ push(Immediate(pretenure ? factory()->true_value()
6225 : factory()->false_value()));
6226 CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
6231 void LCodeGen::DoTypeof(LTypeof* instr) {
6232 ASSERT(ToRegister(instr->context()).is(esi));
6233 LOperand* input = instr->value();
6234 EmitPushTaggedOperand(input);
6235 CallRuntime(Runtime::kTypeof, 1, instr);
6239 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
6240 Register input = ToRegister(instr->value());
6241 Condition final_branch_condition = EmitTypeofIs(instr, input);
6242 if (final_branch_condition != no_condition) {
6243 EmitBranch(instr, final_branch_condition);
6248 Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
6249 Label* true_label = instr->TrueLabel(chunk_);
6250 Label* false_label = instr->FalseLabel(chunk_);
6251 Handle<String> type_name = instr->type_literal();
6252 int left_block = instr->TrueDestination(chunk_);
6253 int right_block = instr->FalseDestination(chunk_);
6254 int next_block = GetNextEmittedBlock();
6256 Label::Distance true_distance = left_block == next_block ? Label::kNear
6258 Label::Distance false_distance = right_block == next_block ? Label::kNear
6260 Condition final_branch_condition = no_condition;
6261 if (type_name->Equals(heap()->number_string())) {
6262 __ JumpIfSmi(input, true_label, true_distance);
6263 __ cmp(FieldOperand(input, HeapObject::kMapOffset),
6264 factory()->heap_number_map());
6265 final_branch_condition = equal;
6267 } else if (type_name->Equals(heap()->float32x4_string())) {
6268 __ JumpIfSmi(input, false_label, false_distance);
6269 __ CmpObjectType(input, FLOAT32x4_TYPE, input);
6270 final_branch_condition = equal;
6272 } else if (type_name->Equals(heap()->int32x4_string())) {
6273 __ JumpIfSmi(input, false_label, false_distance);
6274 __ CmpObjectType(input, INT32x4_TYPE, input);
6275 final_branch_condition = equal;
6277 } else if (type_name->Equals(heap()->string_string())) {
6278 __ JumpIfSmi(input, false_label, false_distance);
6279 __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
6280 __ j(above_equal, false_label, false_distance);
6281 __ test_b(FieldOperand(input, Map::kBitFieldOffset),
6282 1 << Map::kIsUndetectable);
6283 final_branch_condition = zero;
6285 } else if (type_name->Equals(heap()->symbol_string())) {
6286 __ JumpIfSmi(input, false_label, false_distance);
6287 __ CmpObjectType(input, SYMBOL_TYPE, input);
6288 final_branch_condition = equal;
6290 } else if (type_name->Equals(heap()->boolean_string())) {
6291 __ cmp(input, factory()->true_value());
6292 __ j(equal, true_label, true_distance);
6293 __ cmp(input, factory()->false_value());
6294 final_branch_condition = equal;
6296 } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
6297 __ cmp(input, factory()->null_value());
6298 final_branch_condition = equal;
6300 } else if (type_name->Equals(heap()->undefined_string())) {
6301 __ cmp(input, factory()->undefined_value());
6302 __ j(equal, true_label, true_distance);
6303 __ JumpIfSmi(input, false_label, false_distance);
6304 // Check for undetectable objects => true.
6305 __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
6306 __ test_b(FieldOperand(input, Map::kBitFieldOffset),
6307 1 << Map::kIsUndetectable);
6308 final_branch_condition = not_zero;
6310 } else if (type_name->Equals(heap()->function_string())) {
6311 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
6312 __ JumpIfSmi(input, false_label, false_distance);
6313 __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
6314 __ j(equal, true_label, true_distance);
6315 __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
6316 final_branch_condition = equal;
6318 } else if (type_name->Equals(heap()->object_string())) {
6319 __ JumpIfSmi(input, false_label, false_distance);
6320 if (!FLAG_harmony_typeof) {
6321 __ cmp(input, factory()->null_value());
6322 __ j(equal, true_label, true_distance);
6324 __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
6325 __ j(below, false_label, false_distance);
6326 __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
6327 __ j(above, false_label, false_distance);
6328 // Check for undetectable objects => false.
6329 __ test_b(FieldOperand(input, Map::kBitFieldOffset),
6330 1 << Map::kIsUndetectable);
6331 final_branch_condition = zero;
6334 __ jmp(false_label, false_distance);
6336 return final_branch_condition;
6340 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
6341 Register temp = ToRegister(instr->temp());
6343 EmitIsConstructCall(temp);
6344 EmitBranch(instr, equal);
6348 void LCodeGen::EmitIsConstructCall(Register temp) {
6349 // Get the frame pointer for the calling frame.
6350 __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
6352 // Skip the arguments adaptor frame if it exists.
6353 Label check_frame_marker;
6354 __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
6355 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
6356 __ j(not_equal, &check_frame_marker, Label::kNear);
6357 __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
6359 // Check the marker in the calling frame.
6360 __ bind(&check_frame_marker);
6361 __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
6362 Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
6366 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
6367 if (!info()->IsStub()) {
6368 // Ensure that we have enough space after the previous lazy-bailout
6369 // instruction for patching the code here.
6370 int current_pc = masm()->pc_offset();
6371 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
6372 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
6373 __ Nop(padding_size);
6376 last_lazy_deopt_pc_ = masm()->pc_offset();
6380 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
6381 last_lazy_deopt_pc_ = masm()->pc_offset();
6382 ASSERT(instr->HasEnvironment());
6383 LEnvironment* env = instr->environment();
6384 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
6385 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
6389 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
6390 Deoptimizer::BailoutType type = instr->hydrogen()->type();
6391 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
6392 // needed return address), even though the implementation of LAZY and EAGER is
6393 // now identical. When LAZY is eventually completely folded into EAGER, remove
6394 // the special case below.
6395 if (info()->IsStub() && type == Deoptimizer::EAGER) {
6396 type = Deoptimizer::LAZY;
6398 Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
6399 DeoptimizeIf(no_condition, instr->environment(), type);
6403 void LCodeGen::DoDummy(LDummy* instr) {
6404 // Nothing to see here, move on!
6408 void LCodeGen::DoDummyUse(LDummyUse* instr) {
6409 // Nothing to see here, move on!
6413 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
6414 PushSafepointRegistersScope scope(this);
6415 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
6416 __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
6417 RecordSafepointWithLazyDeopt(
6418 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
6419 ASSERT(instr->HasEnvironment());
6420 LEnvironment* env = instr->environment();
6421 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
6425 void LCodeGen::DoStackCheck(LStackCheck* instr) {
6426 class DeferredStackCheck V8_FINAL : public LDeferredCode {
6428 DeferredStackCheck(LCodeGen* codegen,
6430 const X87Stack& x87_stack)
6431 : LDeferredCode(codegen, x87_stack), instr_(instr) { }
6432 virtual void Generate() V8_OVERRIDE {
6433 codegen()->DoDeferredStackCheck(instr_);
6435 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
6437 LStackCheck* instr_;
6440 ASSERT(instr->HasEnvironment());
6441 LEnvironment* env = instr->environment();
6442 // There is no LLazyBailout instruction for stack-checks. We have to
6443 // prepare for lazy deoptimization explicitly here.
6444 if (instr->hydrogen()->is_function_entry()) {
6445 // Perform stack overflow check.
6447 ExternalReference stack_limit =
6448 ExternalReference::address_of_stack_limit(isolate());
6449 __ cmp(esp, Operand::StaticVariable(stack_limit));
6450 __ j(above_equal, &done, Label::kNear);
6452 ASSERT(instr->context()->IsRegister());
6453 ASSERT(ToRegister(instr->context()).is(esi));
6454 CallCode(isolate()->builtins()->StackCheck(),
6455 RelocInfo::CODE_TARGET,
6459 ASSERT(instr->hydrogen()->is_backwards_branch());
6460 // Perform stack overflow check if this goto needs it before jumping.
6461 DeferredStackCheck* deferred_stack_check =
6462 new(zone()) DeferredStackCheck(this, instr, x87_stack_);
6463 ExternalReference stack_limit =
6464 ExternalReference::address_of_stack_limit(isolate());
6465 __ cmp(esp, Operand::StaticVariable(stack_limit));
6466 __ j(below, deferred_stack_check->entry());
6467 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
6468 __ bind(instr->done_label());
6469 deferred_stack_check->SetExit(instr->done_label());
6470 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
6471 // Don't record a deoptimization index for the safepoint here.
6472 // This will be done explicitly when emitting call and the safepoint in
6473 // the deferred code.
6478 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
6479 // This is a pseudo-instruction that ensures that the environment here is
6480 // properly registered for deoptimization and records the assembler's PC
6482 LEnvironment* environment = instr->environment();
6484 // If the environment were already registered, we would have no way of
6485 // backpatching it with the spill slot operands.
6486 ASSERT(!environment->HasBeenRegistered());
6487 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
6489 GenerateOsrPrologue();
6493 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
6494 ASSERT(ToRegister(instr->context()).is(esi));
6495 __ cmp(eax, isolate()->factory()->undefined_value());
6496 DeoptimizeIf(equal, instr->environment());
6498 __ cmp(eax, isolate()->factory()->null_value());
6499 DeoptimizeIf(equal, instr->environment());
6501 __ test(eax, Immediate(kSmiTagMask));
6502 DeoptimizeIf(zero, instr->environment());
6504 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
6505 __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
6506 DeoptimizeIf(below_equal, instr->environment());
6508 Label use_cache, call_runtime;
6509 __ CheckEnumCache(&call_runtime);
6511 __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
6512 __ jmp(&use_cache, Label::kNear);
6514 // Get the set of properties to enumerate.
6515 __ bind(&call_runtime);
6517 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
6519 __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
6520 isolate()->factory()->meta_map());
6521 DeoptimizeIf(not_equal, instr->environment());
6522 __ bind(&use_cache);
6526 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
6527 Register map = ToRegister(instr->map());
6528 Register result = ToRegister(instr->result());
6529 Label load_cache, done;
6530 __ EnumLength(result, map);
6531 __ cmp(result, Immediate(Smi::FromInt(0)));
6532 __ j(not_equal, &load_cache, Label::kNear);
6533 __ mov(result, isolate()->factory()->empty_fixed_array());
6534 __ jmp(&done, Label::kNear);
6536 __ bind(&load_cache);
6537 __ LoadInstanceDescriptors(map, result);
6539 FieldOperand(result, DescriptorArray::kEnumCacheOffset));
6541 FieldOperand(result, FixedArray::SizeFor(instr->idx())));
6543 __ test(result, result);
6544 DeoptimizeIf(equal, instr->environment());
6548 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
6549 Register object = ToRegister(instr->value());
6550 __ cmp(ToRegister(instr->map()),
6551 FieldOperand(object, HeapObject::kMapOffset));
6552 DeoptimizeIf(not_equal, instr->environment());
6556 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
6557 Register object = ToRegister(instr->object());
6558 Register index = ToRegister(instr->index());
6560 Label out_of_object, done;
6561 __ cmp(index, Immediate(0));
6562 __ j(less, &out_of_object, Label::kNear);
6563 __ mov(object, FieldOperand(object,
6565 times_half_pointer_size,
6566 JSObject::kHeaderSize));
6567 __ jmp(&done, Label::kNear);
6569 __ bind(&out_of_object);
6570 __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
6572 // Index is now equal to out of object property index plus 1.
6573 __ mov(object, FieldOperand(object,
6575 times_half_pointer_size,
6576 FixedArray::kHeaderSize - kPointerSize));
6582 void LCodeGen::HandleSIMD128ToTagged(LSIMD128ToTagged* instr) {
6583 class DeferredSIMD128ToTagged V8_FINAL : public LDeferredCode {
6585 DeferredSIMD128ToTagged(LCodeGen* codegen,
6586 LInstruction* instr,
6587 Runtime::FunctionId id,
6588 const X87Stack& x87_stack)
6589 : LDeferredCode(codegen, x87_stack), instr_(instr), id_(id) { }
6590 virtual void Generate() V8_OVERRIDE {
6591 codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
6593 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
6595 LInstruction* instr_;
6596 Runtime::FunctionId id_;
6599 CpuFeatureScope scope(masm(), SSE2);
6600 XMMRegister input_reg = ToSIMD128Register(instr->value());
6601 Register reg = ToRegister(instr->result());
6602 Register tmp = ToRegister(instr->temp());
6604 DeferredSIMD128ToTagged* deferred = new(zone()) DeferredSIMD128ToTagged(
6605 this, instr, static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()),
6607 if (FLAG_inline_new) {
6608 __ AllocateSIMDHeapObject(T::kSize, reg, tmp, deferred->entry(),
6609 static_cast<Heap::RootListIndex>(T::kMapRootIndex()));
6611 __ jmp(deferred->entry());
6613 __ bind(deferred->exit());
6614 __ movups(FieldOperand(reg, T::kValueOffset), input_reg);
6618 void LCodeGen::DoSIMD128ToTagged(LSIMD128ToTagged* instr) {
6619 if (instr->value()->IsFloat32x4Register()) {
6620 HandleSIMD128ToTagged<Float32x4>(instr);
6622 ASSERT(instr->value()->IsInt32x4Register());
6623 HandleSIMD128ToTagged<Int32x4>(instr);
6629 void LCodeGen::HandleTaggedToSIMD128(LTaggedToSIMD128* instr) {
6630 LOperand* input = instr->value();
6631 ASSERT(input->IsRegister());
6632 LOperand* result = instr->result();
6633 ASSERT(result->IsSIMD128Register());
6635 Register input_reg = ToRegister(input);
6636 Register temp_reg = ToRegister(instr->temp());
6637 XMMRegister result_reg = ToSIMD128Register(result);
6639 CpuFeatureScope scope(masm(), SSE2);
6640 __ test(input_reg, Immediate(kSmiTagMask));
6641 DeoptimizeIf(zero, instr->environment());
6642 __ CmpObjectType(input_reg, T::kInstanceType, temp_reg);
6643 DeoptimizeIf(not_equal, instr->environment());
6644 __ movups(result_reg, FieldOperand(input_reg, T::kValueOffset));
6648 void LCodeGen::DoTaggedToSIMD128(LTaggedToSIMD128* instr) {
6649 if (instr->representation().IsFloat32x4()) {
6650 HandleTaggedToSIMD128<Float32x4>(instr);
6652 ASSERT(instr->representation().IsInt32x4());
6653 HandleTaggedToSIMD128<Int32x4>(instr);
6658 void LCodeGen::DoNullarySIMDOperation(LNullarySIMDOperation* instr) {
6659 CpuFeatureScope scope(masm(), SSE2);
6660 switch (instr->op()) {
6661 case kFloat32x4Zero: {
6662 XMMRegister result_reg = ToFloat32x4Register(instr->result());
6663 __ xorps(result_reg, result_reg);
6666 case kInt32x4Zero: {
6667 XMMRegister result_reg = ToInt32x4Register(instr->result());
6668 __ xorps(result_reg, result_reg);
6678 void LCodeGen::DoUnarySIMDOperation(LUnarySIMDOperation* instr) {
6679 CpuFeatureScope scope(masm(), SSE2);
6681 switch (instr->op()) {
6682 case kSIMD128Change: {
6683 Comment(";;; deoptimize: can not perform representation change"
6684 "for float32x4 or int32x4");
6685 DeoptimizeIf(no_condition, instr->environment());
6690 case kFloat32x4Reciprocal:
6691 case kFloat32x4ReciprocalSqrt:
6692 case kFloat32x4Sqrt: {
6693 ASSERT(instr->value()->Equals(instr->result()));
6694 ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
6695 XMMRegister input_reg = ToFloat32x4Register(instr->value());
6696 switch (instr->op()) {
6698 __ absps(input_reg);
6701 __ negateps(input_reg);
6703 case kFloat32x4Reciprocal:
6704 __ rcpps(input_reg, input_reg);
6706 case kFloat32x4ReciprocalSqrt:
6707 __ rsqrtps(input_reg, input_reg);
6709 case kFloat32x4Sqrt:
6710 __ sqrtps(input_reg, input_reg);
6720 ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
6721 XMMRegister input_reg = ToInt32x4Register(instr->value());
6722 switch (instr->op()) {
6724 __ notps(input_reg);
6727 __ pnegd(input_reg);
6735 case kFloat32x4BitsToInt32x4:
6736 case kFloat32x4ToInt32x4: {
6737 ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
6738 XMMRegister input_reg = ToFloat32x4Register(instr->value());
6739 XMMRegister result_reg = ToInt32x4Register(instr->result());
6740 if (instr->op() == kFloat32x4BitsToInt32x4) {
6741 if (!result_reg.is(input_reg)) {
6742 __ movaps(result_reg, input_reg);
6745 ASSERT(instr->op() == kFloat32x4ToInt32x4);
6746 __ cvtps2dq(result_reg, input_reg);
6750 case kInt32x4BitsToFloat32x4:
6751 case kInt32x4ToFloat32x4: {
6752 ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
6753 XMMRegister input_reg = ToInt32x4Register(instr->value());
6754 XMMRegister result_reg = ToFloat32x4Register(instr->result());
6755 if (instr->op() == kInt32x4BitsToFloat32x4) {
6756 if (!result_reg.is(input_reg)) {
6757 __ movaps(result_reg, input_reg);
6760 ASSERT(instr->op() == kInt32x4ToFloat32x4);
6761 __ cvtdq2ps(result_reg, input_reg);
6765 case kFloat32x4Splat: {
6766 ASSERT(instr->hydrogen()->value()->representation().IsDouble());
6767 XMMRegister input_reg = ToDoubleRegister(instr->value());
6768 XMMRegister result_reg = ToFloat32x4Register(instr->result());
6769 XMMRegister xmm_scratch = xmm0;
6770 __ xorps(xmm_scratch, xmm_scratch);
6771 __ cvtsd2ss(xmm_scratch, input_reg);
6772 __ shufps(xmm_scratch, xmm_scratch, 0x0);
6773 __ movaps(result_reg, xmm_scratch);
6776 case kInt32x4Splat: {
6777 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
6778 Register input_reg = ToRegister(instr->value());
6779 XMMRegister result_reg = ToInt32x4Register(instr->result());
6780 __ movd(result_reg, input_reg);
6781 __ shufps(result_reg, result_reg, 0x0);
6784 case kInt32x4GetSignMask: {
6785 ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
6786 XMMRegister input_reg = ToInt32x4Register(instr->value());
6787 Register result = ToRegister(instr->result());
6788 __ movmskps(result, input_reg);
6791 case kFloat32x4GetSignMask: {
6792 ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
6793 XMMRegister input_reg = ToFloat32x4Register(instr->value());
6794 Register result = ToRegister(instr->result());
6795 __ movmskps(result, input_reg);
6798 case kFloat32x4GetW:
6800 case kFloat32x4GetZ:
6802 case kFloat32x4GetY:
6804 case kFloat32x4GetX: {
6805 ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
6806 XMMRegister input_reg = ToFloat32x4Register(instr->value());
6807 XMMRegister result = ToDoubleRegister(instr->result());
6808 XMMRegister xmm_scratch = result.is(input_reg) ? xmm0 : result;
6810 if (select == 0x0) {
6811 __ xorps(xmm_scratch, xmm_scratch);
6812 __ cvtss2sd(xmm_scratch, input_reg);
6813 if (!xmm_scratch.is(result)) {
6814 __ movaps(result, xmm_scratch);
6817 __ pshufd(xmm_scratch, input_reg, select);
6818 if (!xmm_scratch.is(result)) {
6819 __ xorps(result, result);
6821 __ cvtss2sd(result, xmm_scratch);
6829 case kInt32x4GetFlagX:
6830 case kInt32x4GetFlagY:
6831 case kInt32x4GetFlagZ:
6832 case kInt32x4GetFlagW: {
6833 ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
6835 switch (instr->op()) {
6836 case kInt32x4GetFlagX:
6840 case kInt32x4GetFlagY:
6845 case kInt32x4GetFlagZ:
6850 case kInt32x4GetFlagW:
6859 XMMRegister input_reg = ToInt32x4Register(instr->value());
6860 Register result = ToRegister(instr->result());
6861 if (select == 0x0) {
6862 __ movd(result, input_reg);
6864 if (CpuFeatures::IsSupported(SSE4_1)) {
6865 CpuFeatureScope scope(masm(), SSE4_1);
6866 __ extractps(result, input_reg, select);
6868 XMMRegister xmm_scratch = xmm0;
6869 __ pshufd(xmm_scratch, input_reg, select);
6870 __ movd(result, xmm_scratch);
6875 Label false_value, done;
6876 __ test(result, result);
6877 __ j(zero, &false_value, Label::kNear);
6878 __ LoadRoot(result, Heap::kTrueValueRootIndex);
6879 __ jmp(&done, Label::kNear);
6880 __ bind(&false_value);
6881 __ LoadRoot(result, Heap::kFalseValueRootIndex);
6893 void LCodeGen::DoBinarySIMDOperation(LBinarySIMDOperation* instr) {
6894 CpuFeatureScope scope(masm(), SSE2);
6895 uint8_t imm8 = 0; // for with operation
6896 switch (instr->op()) {
6902 case kFloat32x4Max: {
6903 ASSERT(instr->left()->Equals(instr->result()));
6904 ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
6905 ASSERT(instr->hydrogen()->right()->representation().IsFloat32x4());
6906 XMMRegister left_reg = ToFloat32x4Register(instr->left());
6907 XMMRegister right_reg = ToFloat32x4Register(instr->right());
6908 switch (instr->op()) {
6910 __ addps(left_reg, right_reg);
6913 __ subps(left_reg, right_reg);
6916 __ mulps(left_reg, right_reg);
6919 __ divps(left_reg, right_reg);
6922 __ minps(left_reg, right_reg);
6925 __ maxps(left_reg, right_reg);
6933 case kFloat32x4Scale: {
6934 ASSERT(instr->left()->Equals(instr->result()));
6935 ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
6936 ASSERT(instr->hydrogen()->right()->representation().IsDouble());
6937 XMMRegister left_reg = ToFloat32x4Register(instr->left());
6938 XMMRegister right_reg = ToDoubleRegister(instr->right());
6939 XMMRegister scratch_reg = xmm0;
6940 __ xorps(scratch_reg, scratch_reg);
6941 __ cvtsd2ss(scratch_reg, right_reg);
6942 __ shufps(scratch_reg, scratch_reg, 0x0);
6943 __ mulps(left_reg, scratch_reg);
6946 case kFloat32x4Shuffle: {
6947 ASSERT(instr->left()->Equals(instr->result()));
6948 ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
6949 if (instr->hydrogen()->right()->IsConstant() &&
6950 HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
6951 int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
6952 uint8_t select = static_cast<uint8_t>(value & 0xFF);
6953 XMMRegister left_reg = ToFloat32x4Register(instr->left());
6954 __ shufps(left_reg, left_reg, select);
6957 Comment(";;; deoptimize: non-constant selector for shuffle");
6958 DeoptimizeIf(no_condition, instr->environment());
6962 case kInt32x4Shuffle: {
6963 ASSERT(instr->left()->Equals(instr->result()));
6964 ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
6965 if (instr->hydrogen()->right()->IsConstant() &&
6966 HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
6967 int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
6968 uint8_t select = static_cast<uint8_t>(value & 0xFF);
6969 XMMRegister left_reg = ToInt32x4Register(instr->left());
6970 __ pshufd(left_reg, left_reg, select);
6973 Comment(";;; deoptimize: non-constant selector for shuffle");
6974 DeoptimizeIf(no_condition, instr->environment());
6978 case kInt32x4ShiftLeft:
6979 case kInt32x4ShiftRight:
6980 case kInt32x4ShiftRightArithmetic: {
6981 ASSERT(instr->left()->Equals(instr->result()));
6982 ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
6983 if (instr->hydrogen()->right()->IsConstant() &&
6984 HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
6985 int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
6986 uint8_t shift = static_cast<uint8_t>(value & 0xFF);
6987 XMMRegister left_reg = ToInt32x4Register(instr->left());
6988 switch (instr->op()) {
6989 case kInt32x4ShiftLeft:
6990 __ pslld(left_reg, shift);
6992 case kInt32x4ShiftRight:
6993 __ psrld(left_reg, shift);
6995 case kInt32x4ShiftRightArithmetic:
6996 __ psrad(left_reg, shift);
7003 XMMRegister left_reg = ToInt32x4Register(instr->left());
7004 Register shift = ToRegister(instr->right());
7005 XMMRegister xmm_scratch = double_scratch0();
7006 __ movd(xmm_scratch, shift);
7007 switch (instr->op()) {
7008 case kInt32x4ShiftLeft:
7009 __ pslld(left_reg, xmm_scratch);
7011 case kInt32x4ShiftRight:
7012 __ psrld(left_reg, xmm_scratch);
7014 case kInt32x4ShiftRightArithmetic:
7015 __ psrad(left_reg, xmm_scratch);
7023 case kFloat32x4LessThan:
7024 case kFloat32x4LessThanOrEqual:
7025 case kFloat32x4Equal:
7026 case kFloat32x4NotEqual:
7027 case kFloat32x4GreaterThanOrEqual:
7028 case kFloat32x4GreaterThan: {
7029 ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
7030 ASSERT(instr->hydrogen()->right()->representation().IsFloat32x4());
7031 XMMRegister left_reg = ToFloat32x4Register(instr->left());
7032 XMMRegister right_reg = ToFloat32x4Register(instr->right());
7033 XMMRegister result_reg = ToInt32x4Register(instr->result());
7034 switch (instr->op()) {
7035 case kFloat32x4LessThan:
7036 if (result_reg.is(left_reg)) {
7037 __ cmpltps(result_reg, right_reg);
7038 } else if (result_reg.is(right_reg)) {
7039 __ cmpnltps(result_reg, left_reg);
7041 __ movaps(result_reg, left_reg);
7042 __ cmpltps(result_reg, right_reg);
7045 case kFloat32x4LessThanOrEqual:
7046 if (result_reg.is(left_reg)) {
7047 __ cmpleps(result_reg, right_reg);
7048 } else if (result_reg.is(right_reg)) {
7049 __ cmpnleps(result_reg, left_reg);
7051 __ movaps(result_reg, left_reg);
7052 __ cmpleps(result_reg, right_reg);
7055 case kFloat32x4Equal:
7056 if (result_reg.is(left_reg)) {
7057 __ cmpeqps(result_reg, right_reg);
7058 } else if (result_reg.is(right_reg)) {
7059 __ cmpeqps(result_reg, left_reg);
7061 __ movaps(result_reg, left_reg);
7062 __ cmpeqps(result_reg, right_reg);
7065 case kFloat32x4NotEqual:
7066 if (result_reg.is(left_reg)) {
7067 __ cmpneqps(result_reg, right_reg);
7068 } else if (result_reg.is(right_reg)) {
7069 __ cmpneqps(result_reg, left_reg);
7071 __ movaps(result_reg, left_reg);
7072 __ cmpneqps(result_reg, right_reg);
7075 case kFloat32x4GreaterThanOrEqual:
7076 if (result_reg.is(left_reg)) {
7077 __ cmpnltps(result_reg, right_reg);
7078 } else if (result_reg.is(right_reg)) {
7079 __ cmpltps(result_reg, left_reg);
7081 __ movaps(result_reg, left_reg);
7082 __ cmpnltps(result_reg, right_reg);
7085 case kFloat32x4GreaterThan:
7086 if (result_reg.is(left_reg)) {
7087 __ cmpnleps(result_reg, right_reg);
7088 } else if (result_reg.is(right_reg)) {
7089 __ cmpleps(result_reg, left_reg);
7091 __ movaps(result_reg, left_reg);
7092 __ cmpnleps(result_reg, right_reg);
7107 case kInt32x4GreaterThan:
7109 case kInt32x4LessThan: {
7110 ASSERT(instr->left()->Equals(instr->result()));
7111 ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
7112 ASSERT(instr->hydrogen()->right()->representation().IsInt32x4());
7113 XMMRegister left_reg = ToInt32x4Register(instr->left());
7114 XMMRegister right_reg = ToInt32x4Register(instr->right());
7115 switch (instr->op()) {
7117 __ andps(left_reg, right_reg);
7120 __ orps(left_reg, right_reg);
7123 __ xorps(left_reg, right_reg);
7126 __ paddd(left_reg, right_reg);
7129 __ psubd(left_reg, right_reg);
7132 if (CpuFeatures::IsSupported(SSE4_1)) {
7133 CpuFeatureScope scope(masm(), SSE4_1);
7134 __ pmulld(left_reg, right_reg);
7136 // The algorithm is from http://stackoverflow.com/questions/10500766/sse-multiplication-of-4-32-bit-integers
7137 XMMRegister xmm_scratch = xmm0;
7138 __ movaps(xmm_scratch, left_reg);
7139 __ pmuludq(left_reg, right_reg);
7140 __ psrlq(xmm_scratch, 4);
7141 __ psrlq(right_reg, 4);
7142 __ pmuludq(xmm_scratch, right_reg);
7143 __ pshufd(left_reg, left_reg, 8);
7144 __ pshufd(xmm_scratch, xmm_scratch, 8);
7145 __ punpackldq(left_reg, xmm_scratch);
7148 case kInt32x4GreaterThan:
7149 __ pcmpgtd(left_reg, right_reg);
7152 __ pcmpeqd(left_reg, right_reg);
7154 case kInt32x4LessThan: {
7155 XMMRegister xmm_scratch = xmm0;
7156 __ movaps(xmm_scratch, right_reg);
7157 __ pcmpgtd(xmm_scratch, left_reg);
7158 __ movaps(left_reg, xmm_scratch);
7167 case kFloat32x4WithW:
7169 case kFloat32x4WithZ:
7171 case kFloat32x4WithY:
7173 case kFloat32x4WithX: {
7174 ASSERT(instr->left()->Equals(instr->result()));
7175 ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
7176 ASSERT(instr->hydrogen()->right()->representation().IsDouble());
7177 XMMRegister left_reg = ToFloat32x4Register(instr->left());
7178 XMMRegister right_reg = ToDoubleRegister(instr->right());
7179 XMMRegister xmm_scratch = xmm0;
7180 __ xorps(xmm_scratch, xmm_scratch);
7181 __ cvtsd2ss(xmm_scratch, right_reg);
7182 if (CpuFeatures::IsSupported(SSE4_1)) {
7184 CpuFeatureScope scope(masm(), SSE4_1);
7185 __ insertps(left_reg, xmm_scratch, imm8);
7187 __ sub(esp, Immediate(kFloat32x4Size));
7188 __ movups(Operand(esp, 0), left_reg);
7189 __ movss(Operand(esp, imm8 * kFloatSize), xmm_scratch);
7190 __ movups(left_reg, Operand(esp, 0));
7191 __ add(esp, Immediate(kFloat32x4Size));
7201 case kInt32x4WithX: {
7202 ASSERT(instr->left()->Equals(instr->result()));
7203 ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
7204 ASSERT(instr->hydrogen()->right()->representation().IsInteger32());
7205 XMMRegister left_reg = ToInt32x4Register(instr->left());
7206 Register right_reg = ToRegister(instr->right());
7207 if (CpuFeatures::IsSupported(SSE4_1)) {
7208 CpuFeatureScope scope(masm(), SSE4_1);
7209 __ pinsrd(left_reg, right_reg, imm8);
7211 __ sub(esp, Immediate(kInt32x4Size));
7212 __ movdqu(Operand(esp, 0), left_reg);
7213 __ mov(Operand(esp, imm8 * kFloatSize), right_reg);
7214 __ movdqu(left_reg, Operand(esp, 0));
7215 __ add(esp, Immediate(kInt32x4Size));
7219 case kInt32x4WithFlagW:
7221 case kInt32x4WithFlagZ:
7223 case kInt32x4WithFlagY:
7225 case kInt32x4WithFlagX: {
7226 ASSERT(instr->left()->Equals(instr->result()));
7227 ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
7228 ASSERT(instr->hydrogen()->right()->representation().IsTagged());
7229 HType type = instr->hydrogen()->right()->type();
7230 XMMRegister left_reg = ToInt32x4Register(instr->left());
7231 Register right_reg = ToRegister(instr->right());
7232 Label load_false_value, done;
7233 if (type.IsBoolean()) {
7234 __ sub(esp, Immediate(kInt32x4Size));
7235 __ movups(Operand(esp, 0), left_reg);
7236 __ CompareRoot(right_reg, Heap::kTrueValueRootIndex);
7237 __ j(not_equal, &load_false_value, Label::kNear);
7239 Comment(";;; deoptimize: other types for int32x4.withFlagX/Y/Z/W.");
7240 DeoptimizeIf(no_condition, instr->environment());
7244 __ mov(Operand(esp, imm8 * kFloatSize), Immediate(0xFFFFFFFF));
7245 __ jmp(&done, Label::kNear);
7246 __ bind(&load_false_value);
7247 __ mov(Operand(esp, imm8 * kFloatSize), Immediate(0x0));
7249 __ movups(left_reg, Operand(esp, 0));
7250 __ add(esp, Immediate(kInt32x4Size));
7260 void LCodeGen::DoTernarySIMDOperation(LTernarySIMDOperation* instr) {
7261 CpuFeatureScope scope(masm(), SSE2);
7262 switch (instr->op()) {
7263 case kInt32x4Select: {
7264 ASSERT(instr->hydrogen()->first()->representation().IsInt32x4());
7265 ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
7266 ASSERT(instr->hydrogen()->third()->representation().IsFloat32x4());
7268 XMMRegister mask_reg = ToInt32x4Register(instr->first());
7269 XMMRegister left_reg = ToFloat32x4Register(instr->second());
7270 XMMRegister right_reg = ToFloat32x4Register(instr->third());
7271 XMMRegister result_reg = ToFloat32x4Register(instr->result());
7272 XMMRegister temp_reg = xmm0;
7275 __ movaps(temp_reg, mask_reg);
7278 // temp_reg = temp_reg & falseValue.
7279 __ andps(temp_reg, right_reg);
7281 if (!result_reg.is(mask_reg)) {
7282 if (result_reg.is(left_reg)) {
7283 // result_reg = result_reg & trueValue.
7284 __ andps(result_reg, mask_reg);
7285 // out = result_reg | temp_reg.
7286 __ orps(result_reg, temp_reg);
7288 __ movaps(result_reg, mask_reg);
7289 // result_reg = result_reg & trueValue.
7290 __ andps(result_reg, left_reg);
7291 // out = result_reg | temp_reg.
7292 __ orps(result_reg, temp_reg);
7295 // result_reg = result_reg & trueValue.
7296 __ andps(result_reg, left_reg);
7297 // out = result_reg | temp_reg.
7298 __ orps(result_reg, temp_reg);
7302 case kFloat32x4ShuffleMix: {
7303 ASSERT(instr->first()->Equals(instr->result()));
7304 ASSERT(instr->hydrogen()->first()->representation().IsFloat32x4());
7305 ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
7306 ASSERT(instr->hydrogen()->third()->representation().IsInteger32());
7307 if (instr->hydrogen()->third()->IsConstant() &&
7308 HConstant::cast(instr->hydrogen()->third())->HasInteger32Value()) {
7309 int32_t value = ToInteger32(LConstantOperand::cast(instr->third()));
7310 uint8_t select = static_cast<uint8_t>(value & 0xFF);
7311 XMMRegister first_reg = ToFloat32x4Register(instr->first());
7312 XMMRegister second_reg = ToFloat32x4Register(instr->second());
7313 __ shufps(first_reg, second_reg, select);
7316 Comment(";;; deoptimize: non-constant selector for shuffle");
7317 DeoptimizeIf(no_condition, instr->environment());
7321 case kFloat32x4Clamp: {
7322 ASSERT(instr->first()->Equals(instr->result()));
7323 ASSERT(instr->hydrogen()->first()->representation().IsFloat32x4());
7324 ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
7325 ASSERT(instr->hydrogen()->third()->representation().IsFloat32x4());
7327 XMMRegister value_reg = ToFloat32x4Register(instr->first());
7328 XMMRegister lower_reg = ToFloat32x4Register(instr->second());
7329 XMMRegister upper_reg = ToFloat32x4Register(instr->third());
7330 __ minps(value_reg, upper_reg);
7331 __ maxps(value_reg, lower_reg);
7341 void LCodeGen::DoQuarternarySIMDOperation(LQuarternarySIMDOperation* instr) {
7342 CpuFeatureScope scope(masm(), SSE2);
7343 switch (instr->op()) {
7344 case kFloat32x4Constructor: {
7345 ASSERT(instr->hydrogen()->x()->representation().IsDouble());
7346 ASSERT(instr->hydrogen()->y()->representation().IsDouble());
7347 ASSERT(instr->hydrogen()->z()->representation().IsDouble());
7348 ASSERT(instr->hydrogen()->w()->representation().IsDouble());
7349 XMMRegister x_reg = ToDoubleRegister(instr->x());
7350 XMMRegister y_reg = ToDoubleRegister(instr->y());
7351 XMMRegister z_reg = ToDoubleRegister(instr->z());
7352 XMMRegister w_reg = ToDoubleRegister(instr->w());
7353 XMMRegister result_reg = ToFloat32x4Register(instr->result());
7354 __ sub(esp, Immediate(kFloat32x4Size));
7355 __ xorps(xmm0, xmm0);
7356 __ cvtsd2ss(xmm0, x_reg);
7357 __ movss(Operand(esp, 0 * kFloatSize), xmm0);
7358 __ xorps(xmm0, xmm0);
7359 __ cvtsd2ss(xmm0, y_reg);
7360 __ movss(Operand(esp, 1 * kFloatSize), xmm0);
7361 __ xorps(xmm0, xmm0);
7362 __ cvtsd2ss(xmm0, z_reg);
7363 __ movss(Operand(esp, 2 * kFloatSize), xmm0);
7364 __ xorps(xmm0, xmm0);
7365 __ cvtsd2ss(xmm0, w_reg);
7366 __ movss(Operand(esp, 3 * kFloatSize), xmm0);
7367 __ movups(result_reg, Operand(esp, 0 * kFloatSize));
7368 __ add(esp, Immediate(kFloat32x4Size));
7371 case kInt32x4Constructor: {
7372 ASSERT(instr->hydrogen()->x()->representation().IsInteger32());
7373 ASSERT(instr->hydrogen()->y()->representation().IsInteger32());
7374 ASSERT(instr->hydrogen()->z()->representation().IsInteger32());
7375 ASSERT(instr->hydrogen()->w()->representation().IsInteger32());
7376 Register x_reg = ToRegister(instr->x());
7377 Register y_reg = ToRegister(instr->y());
7378 Register z_reg = ToRegister(instr->z());
7379 Register w_reg = ToRegister(instr->w());
7380 XMMRegister result_reg = ToInt32x4Register(instr->result());
7381 __ sub(esp, Immediate(kInt32x4Size));
7382 __ mov(Operand(esp, 0 * kInt32Size), x_reg);
7383 __ mov(Operand(esp, 1 * kInt32Size), y_reg);
7384 __ mov(Operand(esp, 2 * kInt32Size), z_reg);
7385 __ mov(Operand(esp, 3 * kInt32Size), w_reg);
7386 __ movups(result_reg, Operand(esp, 0 * kInt32Size));
7387 __ add(esp, Immediate(kInt32x4Size));
7390 case kInt32x4Bool: {
7391 ASSERT(instr->hydrogen()->x()->representation().IsTagged());
7392 ASSERT(instr->hydrogen()->y()->representation().IsTagged());
7393 ASSERT(instr->hydrogen()->z()->representation().IsTagged());
7394 ASSERT(instr->hydrogen()->w()->representation().IsTagged());
7395 HType x_type = instr->hydrogen()->x()->type();
7396 HType y_type = instr->hydrogen()->y()->type();
7397 HType z_type = instr->hydrogen()->z()->type();
7398 HType w_type = instr->hydrogen()->w()->type();
7399 if (!x_type.IsBoolean() || !y_type.IsBoolean() ||
7400 !z_type.IsBoolean() || !w_type.IsBoolean()) {
7401 Comment(";;; deoptimize: other types for int32x4.bool.");
7402 DeoptimizeIf(no_condition, instr->environment());
7405 XMMRegister result_reg = ToInt32x4Register(instr->result());
7406 Register x_reg = ToRegister(instr->x());
7407 Register y_reg = ToRegister(instr->y());
7408 Register z_reg = ToRegister(instr->z());
7409 Register w_reg = ToRegister(instr->w());
7410 Label load_false_x, done_x, load_false_y, done_y,
7411 load_false_z, done_z, load_false_w, done_w;
7412 __ sub(esp, Immediate(kInt32x4Size));
7414 __ CompareRoot(x_reg, Heap::kTrueValueRootIndex);
7415 __ j(not_equal, &load_false_x, Label::kNear);
7416 __ mov(Operand(esp, 0 * kInt32Size), Immediate(-1));
7417 __ jmp(&done_x, Label::kNear);
7418 __ bind(&load_false_x);
7419 __ mov(Operand(esp, 0 * kInt32Size), Immediate(0x0));
7422 __ CompareRoot(y_reg, Heap::kTrueValueRootIndex);
7423 __ j(not_equal, &load_false_y, Label::kNear);
7424 __ mov(Operand(esp, 1 * kInt32Size), Immediate(-1));
7425 __ jmp(&done_y, Label::kNear);
7426 __ bind(&load_false_y);
7427 __ mov(Operand(esp, 1 * kInt32Size), Immediate(0x0));
7430 __ CompareRoot(z_reg, Heap::kTrueValueRootIndex);
7431 __ j(not_equal, &load_false_z, Label::kNear);
7432 __ mov(Operand(esp, 2 * kInt32Size), Immediate(-1));
7433 __ jmp(&done_z, Label::kNear);
7434 __ bind(&load_false_z);
7435 __ mov(Operand(esp, 2 * kInt32Size), Immediate(0x0));
7438 __ CompareRoot(w_reg, Heap::kTrueValueRootIndex);
7439 __ j(not_equal, &load_false_w, Label::kNear);
7440 __ mov(Operand(esp, 3 * kInt32Size), Immediate(-1));
7441 __ jmp(&done_w, Label::kNear);
7442 __ bind(&load_false_w);
7443 __ mov(Operand(esp, 3 * kInt32Size), Immediate(0x0));
7446 __ movups(result_reg, Operand(esp, 0));
7447 __ add(esp, Immediate(kInt32x4Size));
7459 } } // namespace v8::internal
7461 #endif // V8_TARGET_ARCH_IA32