1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/compiler/code-generator.h"
7 #include "src/compiler/code-generator-impl.h"
8 #include "src/compiler/gap-resolver.h"
9 #include "src/compiler/node-matchers.h"
10 #include "src/compiler/node-properties-inl.h"
11 #include "src/ia32/assembler-ia32.h"
12 #include "src/ia32/macro-assembler-ia32.h"
13 #include "src/scopes.h"
22 // Adds IA-32 specific methods for decoding operands.
23 class IA32OperandConverter : public InstructionOperandConverter {
// NOTE(review): this listing keeps the file's original line numbers in the
// left margin and elides lines (e.g. the access specifier, 'break'
// statements and closing braces); all code lines are reproduced verbatim.
25 IA32OperandConverter(CodeGenerator* gen, Instruction* instr)
26 : InstructionOperandConverter(gen, instr) {}
// Input at |index| as a register-or-memory assembler Operand.
28 Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
// Input at |index| lowered to an assembler Immediate (must be a constant).
30 Immediate InputImmediate(int index) {
31 return ToImmediate(instr_->InputAt(index));
34 Operand OutputOperand() { return ToOperand(instr_->Output()); }
36 Operand TempOperand(int index) { return ToOperand(instr_->TempAt(index)); }
// Converts |op| to an assembler Operand.  |extra| is a byte offset added for
// stack-slot operands (used by HighOperand below).
38 Operand ToOperand(InstructionOperand* op, int extra = 0) {
39 if (op->IsRegister()) {
41 return Operand(ToRegister(op));
42 } else if (op->IsDoubleRegister()) {
44 return Operand(ToDoubleRegister(op));
46 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
47 // The linkage computes where all spill slots are located.
48 FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
49 return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
// Operand addressing the upper 32-bit word of a double-width stack slot.
52 Operand HighOperand(InstructionOperand* op) {
53 DCHECK(op->IsDoubleStackSlot());
54 return ToOperand(op, kPointerSize);
// Lowers a Constant to an Immediate.  For kFloat64 the value is boxed as a
// TENURED heap number (the 'return Immediate(' lead-in appears to be elided
// here); the kInt64 arm's body is elided as well.
57 Immediate ToImmediate(InstructionOperand* operand) {
58 Constant constant = ToConstant(operand);
59 switch (constant.type()) {
60 case Constant::kInt32:
61 return Immediate(constant.ToInt32());
62 case Constant::kFloat64:
64 isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
65 case Constant::kExternalReference:
66 return Immediate(constant.ToExternalReference());
67 case Constant::kHeapObject:
68 return Immediate(constant.ToHeapObject());
69 case Constant::kInt64:
// Decodes the addressing mode encoded in the instruction's opcode into a
// memory Operand; |*first_input| is the index of the first address input.
// The elided case labels presumably name the addressing modes -- TODO
// confirm against the full source.
76 Operand MemoryOperand(int* first_input) {
77 const int offset = *first_input;
78 switch (AddressingModeField::decode(instr_->opcode())) {
81 return Operand(InputRegister(offset + 0), InputRegister(offset + 1),
83 0);  // TODO(dcarney): K != 0
86 return Operand::ForRegisterPlusImmediate(InputRegister(offset + 0),
87 InputImmediate(offset + 1));
90 return Operand(InputImmediate(offset + 0));
93 return Operand(no_reg);
// Convenience overload starting at the first input (the local declaration of
// 'first_input', original line 98, is elided from this listing).
97 Operand MemoryOperand() {
99 return MemoryOperand(&first_input);
104 static bool HasImmediateInput(Instruction* instr, int index) {
105 return instr->InputAt(index)->IsImmediate();
109 // Assembles an instruction after register allocation, producing machine code.
110 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// NOTE(review): this listing keeps the file's original line numbers in the
// left margin; most 'case k...' labels, 'break's and closing braces are
// elided.  Code lines are reproduced verbatim.  '__' is presumably the usual
// '#define __ masm()->' shorthand -- its definition is not in view.
111 IA32OperandConverter i(this, instr);
113 switch (ArchOpcodeField::decode(instr->opcode())) {
// Unconditional jump to another basic block (case label elided).
115 __ jmp(code()->GetLabel(i.InputBlock(0)));
118 // don't emit code for nops.
123 case kArchDeoptimize: {
124 int deoptimization_id = MiscField::decode(instr->opcode());
125 BuildTranslation(instr, deoptimization_id);
127 Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
128 isolate(), deoptimization_id, Deoptimizer::LAZY);
129 __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
// Integer ALU ops (add/and/cmp/test/or/xor/sub below): the right-hand side
// is either an immediate or a register/memory operand (case labels elided).
133 if (HasImmediateInput(instr, 1)) {
134 __ add(i.InputOperand(0), i.InputImmediate(1));
136 __ add(i.InputRegister(0), i.InputOperand(1));
140 if (HasImmediateInput(instr, 1)) {
141 __ and_(i.InputOperand(0), i.InputImmediate(1));
143 __ and_(i.InputRegister(0), i.InputOperand(1));
147 if (HasImmediateInput(instr, 1)) {
148 __ cmp(i.InputOperand(0), i.InputImmediate(1));
150 __ cmp(i.InputRegister(0), i.InputOperand(1));
154 if (HasImmediateInput(instr, 1)) {
155 __ test(i.InputOperand(0), i.InputImmediate(1));
157 __ test(i.InputRegister(0), i.InputOperand(1));
// Multiply: three-operand form for an immediate RHS, two-operand otherwise.
161 if (HasImmediateInput(instr, 1)) {
162 __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
164 __ imul(i.OutputRegister(), i.InputOperand(1));
// idiv/div implicitly use the fixed edx:eax register pair (x86 ISA).
169 __ idiv(i.InputOperand(1));
173 __ div(i.InputOperand(1));
176 __ not_(i.OutputOperand());
179 __ neg(i.OutputOperand());
182 if (HasImmediateInput(instr, 1)) {
183 __ or_(i.InputOperand(0), i.InputImmediate(1));
185 __ or_(i.InputRegister(0), i.InputOperand(1));
189 if (HasImmediateInput(instr, 1)) {
190 __ xor_(i.InputOperand(0), i.InputImmediate(1));
192 __ xor_(i.InputRegister(0), i.InputOperand(1));
196 if (HasImmediateInput(instr, 1)) {
197 __ sub(i.InputOperand(0), i.InputImmediate(1));
199 __ sub(i.InputRegister(0), i.InputOperand(1));
// Shifts: 5-bit immediate count, otherwise the count is taken from cl.
203 if (HasImmediateInput(instr, 1)) {
204 __ shl(i.OutputRegister(), i.InputInt5(1));
206 __ shl_cl(i.OutputRegister());
210 if (HasImmediateInput(instr, 1)) {
211 __ shr(i.OutputRegister(), i.InputInt5(1));
213 __ shr_cl(i.OutputRegister());
217 if (HasImmediateInput(instr, 1)) {
218 __ sar(i.OutputRegister(), i.InputInt5(1));
220 __ sar_cl(i.OutputRegister());
224 if (HasImmediateInput(instr, 0)) {
225 __ push(i.InputImmediate(0));
227 __ push(i.InputOperand(0));
230 case kIA32CallCodeObject: {
231 if (HasImmediateInput(instr, 0)) {
232 Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
233 __ call(code, RelocInfo::CODE_TARGET);
235 Register reg = i.InputRegister(0);
// Indirect call: skip the Code object's header to reach the entry point.
236 int entry = Code::kHeaderSize - kHeapObjectTag;
237 __ call(Operand(reg, entry));
239 RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
240 Safepoint::kNoLazyDeopt);
242 bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
244 RecordLazyDeoptimizationEntry(instr);
246 AddNopForSmiCodeInlining();
249 case kIA32CallAddress:
250 if (HasImmediateInput(instr, 0)) {
251 // TODO(dcarney): wire up EXTERNAL_REFERENCE instead of RUNTIME_ENTRY.
252 __ call(reinterpret_cast<byte*>(i.InputInt32(0)),
253 RelocInfo::RUNTIME_ENTRY);
255 __ call(i.InputRegister(0));
// Pops caller-pushed arguments after a call (case label elided).
259 int words = MiscField::decode(instr->opcode());
260 __ add(esp, Immediate(kPointerSize * words));
263 case kIA32CallJSFunction: {
264 Register func = i.InputRegister(0);
266 // TODO(jarin) The load of the context should be separated from the call.
267 __ mov(esi, FieldOperand(func, JSFunction::kContextOffset));
268 __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
270 RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
271 Safepoint::kNoLazyDeopt);
272 RecordLazyDeoptimizationEntry(instr);
// SSE double-precision compare/arithmetic (case labels elided).
276 __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
279 __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
282 __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
285 __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
288 __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
290 case kSSEFloat64Mod: {
291 // TODO(dcarney): alignment is wrong.
292 __ sub(esp, Immediate(kDoubleSize));
293 // Move values to st(0) and st(1).
294 __ movsd(Operand(esp, 0), i.InputDoubleRegister(1));
295 __ fld_d(Operand(esp, 0));
296 __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
297 __ fld_d(Operand(esp, 0));
298 // Loop while fprem isn't done.
// NOTE(review): the loop label, the fprem itself, and the fnstsw/sahf pair
// that feed the parity-flag test below appear to be elided in this listing.
301 // This instruction traps on all kinds of inputs, but we are assuming the
302 // floating point control word is set to ignore them all.
304 // The following 2 instructions implicitly use eax.
307 __ j(parity_even, &mod_loop);
308 // Move output to stack and clean up.
310 __ fstp_d(Operand(esp, 0));
311 __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));
312 __ add(esp, Immediate(kDoubleSize));
315 case kSSEFloat64ToInt32:
316 __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
318 case kSSEFloat64ToUint32: {
// Unsigned conversion via the signed cvttsd2si: bias the input by -2^31,
// convert, then undo the bias by adding 0x80000000.
319 XMMRegister scratch = xmm0;
320 __ Move(scratch, -2147483648.0);
321 // TODO(turbofan): IA32 SSE subsd() should take an operand.
322 __ addsd(scratch, i.InputDoubleRegister(0));
323 __ cvttsd2si(i.OutputRegister(), scratch);
324 __ add(i.OutputRegister(), Immediate(0x80000000));
327 case kSSEInt32ToFloat64:
328 __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
330 case kSSEUint32ToFloat64:
331 // TODO(turbofan): IA32 SSE LoadUint32() should take an operand.
332 __ LoadUint32(i.OutputDoubleRegister(), i.InputRegister(0));
335 __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
339 Operand operand = i.MemoryOperand(&index);
340 __ movsd(operand, i.InputDoubleRegister(index));
// Narrow loads zero-extend into the full 32-bit destination register.
344 __ movzx_b(i.OutputRegister(), i.MemoryOperand());
346 case kIA32StoreWord8: {
348 Operand operand = i.MemoryOperand(&index);
349 __ mov_b(operand, i.InputRegister(index));
352 case kIA32StoreWord8I: {
354 Operand operand = i.MemoryOperand(&index);
355 __ mov_b(operand, i.InputInt8(index));
358 case kIA32LoadWord16:
359 __ movzx_w(i.OutputRegister(), i.MemoryOperand());
361 case kIA32StoreWord16: {
363 Operand operand = i.MemoryOperand(&index);
364 __ mov_w(operand, i.InputRegister(index));
367 case kIA32StoreWord16I: {
369 Operand operand = i.MemoryOperand(&index);
370 __ mov_w(operand, i.InputInt16(index));
373 case kIA32LoadWord32:
374 __ mov(i.OutputRegister(), i.MemoryOperand());
376 case kIA32StoreWord32: {
378 Operand operand = i.MemoryOperand(&index);
379 __ mov(operand, i.InputRegister(index));
382 case kIA32StoreWord32I: {
384 Operand operand = i.MemoryOperand(&index);
385 __ mov(operand, i.InputImmediate(index));
388 case kIA32StoreWriteBarrier: {
389 Register object = i.InputRegister(0);
390 Register index = i.InputRegister(1);
391 Register value = i.InputRegister(2);
392 __ mov(Operand(object, index, times_1, 0), value);
// lea folds object+index into |index| so RecordWrite sees the slot address.
393 __ lea(index, Operand(object, index, times_1, 0));
// NOTE(review): the two arms of this ternary (kSaveFPRegs / kDontSaveFPRegs,
// presumably) are elided from this listing.
394 SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
397 __ RecordWrite(object, index, value, mode);
404 // Assembles branches after an instruction.
405 void CodeGenerator::AssembleArchBranch(Instruction* instr,
406 FlagsCondition condition) {
407 IA32OperandConverter i(this, instr);
// NOTE(review): the 'Label done;' declaration, the 'switch (condition)'
// header, several case/jump/break lines and the final bind of 'done' are
// elided from this listing; the code lines are reproduced verbatim.
410 // Emit a branch. The true and false targets are always the last two inputs
411 // to the instruction.
412 BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
413 BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
// If the false block is next in assembly order we can fall through to it.
414 bool fallthru = IsNextInAssemblyOrder(fblock);
415 Label* tlabel = code()->GetLabel(tblock);
416 Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
417 Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
// The kUnordered* cases first branch on the parity flag: ucomisd sets PF
// when either operand is NaN (unordered), per the x86 ISA.
419 case kUnorderedEqual:
420 __ j(parity_even, flabel, flabel_distance);
425 case kUnorderedNotEqual:
426 __ j(parity_even, tlabel);
429 __ j(not_equal, tlabel);
431 case kSignedLessThan:
434 case kSignedGreaterThanOrEqual:
435 __ j(greater_equal, tlabel);
437 case kSignedLessThanOrEqual:
438 __ j(less_equal, tlabel);
440 case kSignedGreaterThan:
441 __ j(greater, tlabel);
443 case kUnorderedLessThan:
444 __ j(parity_even, flabel, flabel_distance);
446 case kUnsignedLessThan:
449 case kUnorderedGreaterThanOrEqual:
450 __ j(parity_even, tlabel);
452 case kUnsignedGreaterThanOrEqual:
453 __ j(above_equal, tlabel);
455 case kUnorderedLessThanOrEqual:
456 __ j(parity_even, flabel, flabel_distance);
458 case kUnsignedLessThanOrEqual:
459 __ j(below_equal, tlabel);
461 case kUnorderedGreaterThan:
462 __ j(parity_even, tlabel);
464 case kUnsignedGreaterThan:
// Overflow conditions branch directly on the OF flag.
468 __ j(overflow, tlabel);
471 __ j(no_overflow, tlabel);
474 if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
479 // Assembles boolean materializations after an instruction.
480 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
481 FlagsCondition condition) {
482 IA32OperandConverter i(this, instr);
// NOTE(review): the 'Label done, check, set;' declarations, the
// 'switch (condition)' header, the per-case 'cc = ...;' assignments and the
// setcc emission are elided from this listing; code lines are verbatim.
485 // Materialize a full 32-bit 1 or 0 value. The result register is always the
486 // last output of the instruction.
488 DCHECK_NE(0, instr->OutputCount());
489 Register reg = i.OutputRegister(instr->OutputCount() - 1);
490 Condition cc = no_condition;
// kUnordered* cases: if the parity flag is clear (ordered operands), fall
// through to the ordinary flag check at 'check'; otherwise the NaN case
// forces the materialized result to 0 or 1 directly.
492 case kUnorderedEqual:
493 __ j(parity_odd, &check, Label::kNear);
494 __ mov(reg, Immediate(0));
495 __ jmp(&done, Label::kNear);
500 case kUnorderedNotEqual:
501 __ j(parity_odd, &check, Label::kNear);
502 __ mov(reg, Immediate(1));
503 __ jmp(&done, Label::kNear);
508 case kSignedLessThan:
511 case kSignedGreaterThanOrEqual:
514 case kSignedLessThanOrEqual:
517 case kSignedGreaterThan:
520 case kUnorderedLessThan:
521 __ j(parity_odd, &check, Label::kNear);
522 __ mov(reg, Immediate(0));
523 __ jmp(&done, Label::kNear);
525 case kUnsignedLessThan:
528 case kUnorderedGreaterThanOrEqual:
529 __ j(parity_odd, &check, Label::kNear);
530 __ mov(reg, Immediate(1));
531 __ jmp(&done, Label::kNear);
533 case kUnsignedGreaterThanOrEqual:
536 case kUnorderedLessThanOrEqual:
537 __ j(parity_odd, &check, Label::kNear);
538 __ mov(reg, Immediate(0));
539 __ jmp(&done, Label::kNear);
541 case kUnsignedLessThanOrEqual:
544 case kUnorderedGreaterThan:
545 __ j(parity_odd, &check, Label::kNear);
546 __ mov(reg, Immediate(1));
547 __ jmp(&done, Label::kNear);
549 case kUnsignedGreaterThan:
560 if (reg.is_byte_register()) {
561 // setcc for byte registers (al, bl, cl, dl).
// NOTE(review): the setcc itself appears to be elided; movzx_b widens its
// 8-bit result to the full 32-bit register.
563 __ movzx_b(reg, reg);
565 // Emit a branch to set a register to either 1 or 0.
567 __ j(cc, &set, Label::kNear);
568 __ mov(reg, Immediate(0));
569 __ jmp(&done, Label::kNear);
571 __ mov(reg, Immediate(1));
577 // The calling convention for JSFunctions on IA32 passes arguments on the
578 // stack and the JSFunction and context in EDI and ESI, respectively, thus
579 // the steps of the call look as follows:
581 // --{ before the call instruction }--------------------------------------------
585 // --{ push arguments and setup ESI, EDI }--------------------------------------
586 // | args + receiver | caller frame |
588 // [edi = JSFunction, esi = context]
590 // --{ call [edi + kCodeEntryOffset] }------------------------------------------
591 // | RET | args + receiver | caller frame |
594 // =={ prologue of called function }============================================
595 // --{ push ebp }---------------------------------------------------------------
596 // | FP | RET | args + receiver | caller frame |
599 // --{ mov ebp, esp }-----------------------------------------------------------
600 // | FP | RET | args + receiver | caller frame |
603 // --{ push esi }---------------------------------------------------------------
604 // | CTX | FP | RET | args + receiver | caller frame |
607 // --{ push edi }---------------------------------------------------------------
608 // | FNC | CTX | FP | RET | args + receiver | caller frame |
611 // --{ subi esp, #N }-----------------------------------------------------------
612 // | callee frame | FNC | CTX | FP | RET | args + receiver | caller frame |
615 // =={ body of called function }================================================
617 // =={ epilogue of called function }============================================
618 // --{ mov esp, ebp }-----------------------------------------------------------
619 // | FP | RET | args + receiver | caller frame |
622 // --{ pop ebp }-----------------------------------------------------------
623 // | | RET | args + receiver | caller frame |
626 // --{ ret #A+1 }-----------------------------------------------------------
627 // | | caller frame |
631 // Runtime function calls are accomplished by doing a stub call to the
632 // CEntryStub (a real code object). On IA32 arguments are passed on the
633 // stack, the number of arguments in EAX, the address of the runtime function
634 // in EBX, and the context in ESI.
636 // --{ before the call instruction }--------------------------------------------
640 // --{ push arguments and setup EAX, EBX, and ESI }-----------------------------
641 // | args + receiver | caller frame |
643 // [eax = #args, ebx = runtime function, esi = context]
645 // --{ call #CEntryStub }-------------------------------------------------------
646 // | RET | args + receiver | caller frame |
649 // =={ body of runtime function }===============================================
651 // --{ runtime returns }--------------------------------------------------------
655 // Other custom linkages (e.g. for calling directly into and out of C++) may
656 // need to save callee-saved registers on the stack, which is done in the
657 // function prologue of generated code.
659 // --{ before the call instruction }--------------------------------------------
663 // --{ set up arguments in registers on stack }---------------------------------
664 // | args | caller frame |
666 // [r0 = arg0, r1 = arg1, ...]
668 // --{ call code }--------------------------------------------------------------
669 // | RET | args | caller frame |
672 // =={ prologue of called function }============================================
673 // --{ push ebp }---------------------------------------------------------------
674 // | FP | RET | args | caller frame |
677 // --{ mov ebp, esp }-----------------------------------------------------------
678 // | FP | RET | args | caller frame |
681 // --{ save registers }---------------------------------------------------------
682 // | regs | FP | RET | args | caller frame |
685 // --{ subi esp, #N }-----------------------------------------------------------
686 // | callee frame | regs | FP | RET | args | caller frame |
689 // =={ body of called function }================================================
691 // =={ epilogue of called function }============================================
692 // --{ restore registers }------------------------------------------------------
693 // | regs | FP | RET | args | caller frame |
696 // --{ mov esp, ebp }-----------------------------------------------------------
697 // | FP | RET | args | caller frame |
700 // --{ pop ebp }----------------------------------------------------------------
701 // | RET | args | caller frame |
// Builds the stack frame on function entry.  The layout depends on the kind
// of call: C (kCallAddress), JSFunction, or the remaining (stub-like) kinds;
// see the frame diagrams in the comment block above this function.
705 void CodeGenerator::AssemblePrologue() {
706 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
707 Frame* frame = code_->frame();
708 int stack_slots = frame->GetSpillSlotCount();
709 if (descriptor->kind() == CallDescriptor::kCallAddress) {
710 // Assemble a prologue similar to the cdecl calling convention.
// NOTE(review): the 'push ebp' / 'mov ebp, esp' pair appears to be elided
// from this listing (original lines 711-712).
713 const RegList saves = descriptor->CalleeSavedRegisters();
714 if (saves != 0) {  // Save callee-saved registers.
715 int register_save_area_size = 0;
// Push callee-saved registers from the highest register code down.
716 for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
717 if (!((1 << i) & saves)) continue;
718 __ push(Register::from_code(i));
719 register_save_area_size += kPointerSize;
721 frame->SetRegisterSaveAreaSize(register_save_area_size);
723 } else if (descriptor->IsJSFunctionCall()) {
724 CompilationInfo* info = linkage()->info();
725 __ Prologue(info->IsCodePreAgingActive());
726 frame->SetRegisterSaveAreaSize(
727 StandardFrameConstants::kFixedFrameSizeFromFp);
729 // Sloppy mode functions and builtins need to replace the receiver with the
730 // global proxy when called as functions (without an explicit receiver
732 // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
733 if (info->strict_mode() == SLOPPY && !info->is_native()) {
// NOTE(review): the 'Label ok;' declaration and the '__ bind(&ok);' that the
// jump below targets are elided from this listing.
735 // +2 for return address and saved frame pointer.
736 int receiver_slot = info->scope()->num_parameters() + 2;
737 __ mov(ecx, Operand(ebp, receiver_slot * kPointerSize));
// Only an undefined receiver is replaced by the global proxy.
738 __ cmp(ecx, isolate()->factory()->undefined_value());
739 __ j(not_equal, &ok, Label::kNear);
740 __ mov(ecx, GlobalObjectOperand());
741 __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
742 __ mov(Operand(ebp, receiver_slot * kPointerSize), ecx);
// NOTE(review): the 'else' header for the remaining call kinds is elided.
748 frame->SetRegisterSaveAreaSize(
749 StandardFrameConstants::kFixedFrameSizeFromFp);
// Finally, reserve space for the spill slots of this frame.
751 if (stack_slots > 0) {
752 __ sub(esp, Immediate(stack_slots * kPointerSize));
// Tears down the frame and returns, mirroring AssemblePrologue.
757 void CodeGenerator::AssembleReturn() {
758 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
759 if (descriptor->kind() == CallDescriptor::kCallAddress) {
760 const RegList saves = descriptor->CalleeSavedRegisters();
761 if (frame()->GetRegisterSaveAreaSize() > 0) {
762 // Remove this frame's spill slots first.
763 int stack_slots = frame()->GetSpillSlotCount();
764 if (stack_slots > 0) {
765 __ add(esp, Immediate(stack_slots * kPointerSize));
767 // Restore registers.
// Pop in ascending register-code order -- the reverse of the descending
// pushes in AssemblePrologue.
769 for (int i = 0; i < Register::kNumRegisters; i++) {
770 if (!((1 << i) & saves)) continue;
771 __ pop(Register::from_code(i));
774 __ pop(ebp);  // Pop caller's frame pointer.
777 // No saved registers.
778 __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
779 __ pop(ebp);  // Pop caller's frame pointer.
783 __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
784 __ pop(ebp);  // Pop caller's frame pointer.
// NOTE(review): the 'int pop_count =' lead-in for the next line (and the
// surrounding branch structure) is elided from this listing.
786 descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
787 __ ret(pop_count * kPointerSize);
// Moves a value between two allocated locations: general register, stack
// slot, double (XMM) register, double stack slot, or a constant source.
792 void CodeGenerator::AssembleMove(InstructionOperand* source,
793 InstructionOperand* destination) {
794 IA32OperandConverter g(this, NULL);
// NOTE(review): several '__ mov'/'__ movsd' emission lines and closing
// braces are elided from this listing; code lines are reproduced verbatim.
795 // Dispatch on the source and destination operand kinds. Not all
796 // combinations are possible.
797 if (source->IsRegister()) {
798 DCHECK(destination->IsRegister() || destination->IsStackSlot());
799 Register src = g.ToRegister(source);
800 Operand dst = g.ToOperand(destination);
802 } else if (source->IsStackSlot()) {
803 DCHECK(destination->IsRegister() || destination->IsStackSlot());
804 Operand src = g.ToOperand(source);
805 if (destination->IsRegister()) {
806 Register dst = g.ToRegister(destination);
809 Operand dst = g.ToOperand(destination);
813 } else if (source->IsConstant()) {
814 Constant src_constant = g.ToConstant(source);
815 if (src_constant.type() == Constant::kHeapObject) {
// Heap-object constants: new-space objects go through the PushHeapObject
// path (the pop into the slot appears to be elided below).
816 Handle<HeapObject> src = src_constant.ToHeapObject();
817 if (destination->IsRegister()) {
818 Register dst = g.ToRegister(destination);
819 __ LoadHeapObject(dst, src);
821 DCHECK(destination->IsStackSlot());
822 Operand dst = g.ToOperand(destination);
823 AllowDeferredHandleDereference embedding_raw_address;
824 if (isolate()->heap()->InNewSpace(*src)) {
825 __ PushHeapObject(src);
831 } else if (destination->IsRegister()) {
832 Register dst = g.ToRegister(destination);
833 __ mov(dst, g.ToImmediate(source));
834 } else if (destination->IsStackSlot()) {
835 Operand dst = g.ToOperand(destination);
836 __ mov(dst, g.ToImmediate(source));
// Double constants are split into their two 32-bit halves.
838 double v = g.ToDouble(source);
839 uint64_t int_val = BitCast<uint64_t, double>(v);
840 int32_t lower = static_cast<int32_t>(int_val);
841 int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
842 if (destination->IsDoubleRegister()) {
843 XMMRegister dst = g.ToDoubleRegister(destination);
846 DCHECK(destination->IsDoubleStackSlot());
847 Operand dst0 = g.ToOperand(destination);
848 Operand dst1 = g.HighOperand(destination);
849 __ mov(dst0, Immediate(lower));
850 __ mov(dst1, Immediate(upper));
853 } else if (source->IsDoubleRegister()) {
854 XMMRegister src = g.ToDoubleRegister(source);
855 if (destination->IsDoubleRegister()) {
856 XMMRegister dst = g.ToDoubleRegister(destination);
859 DCHECK(destination->IsDoubleStackSlot());
860 Operand dst = g.ToOperand(destination);
863 } else if (source->IsDoubleStackSlot()) {
864 DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
865 Operand src = g.ToOperand(source);
866 if (destination->IsDoubleRegister()) {
867 XMMRegister dst = g.ToDoubleRegister(destination);
870 // We rely on having xmm0 available as a fixed scratch register.
871 Operand dst = g.ToOperand(destination);
881 void CodeGenerator::AssembleSwap(InstructionOperand* source,
882 InstructionOperand* destination) {
883 IA32OperandConverter g(this, NULL);
884 // Dispatch on the source and destination operand kinds. Not all
885 // combinations are possible.
886 if (source->IsRegister() && destination->IsRegister()) {
887 // Register-register.
888 Register src = g.ToRegister(source);
889 Register dst = g.ToRegister(destination);
891 } else if (source->IsRegister() && destination->IsStackSlot()) {
893 __ xchg(g.ToRegister(source), g.ToOperand(destination));
894 } else if (source->IsStackSlot() && destination->IsStackSlot()) {
896 Operand src = g.ToOperand(source);
897 Operand dst = g.ToOperand(destination);
902 } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
903 // XMM register-register swap. We rely on having xmm0
904 // available as a fixed scratch register.
905 XMMRegister src = g.ToDoubleRegister(source);
906 XMMRegister dst = g.ToDoubleRegister(destination);
907 __ movaps(xmm0, src);
909 __ movaps(dst, xmm0);
910 } else if (source->IsDoubleRegister() && source->IsDoubleStackSlot()) {
911 // XMM register-memory swap. We rely on having xmm0
912 // available as a fixed scratch register.
913 XMMRegister reg = g.ToDoubleRegister(source);
914 Operand other = g.ToOperand(destination);
915 __ movsd(xmm0, other);
916 __ movsd(other, reg);
917 __ movaps(reg, xmm0);
918 } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
919 // Double-width memory-to-memory.
920 Operand src0 = g.ToOperand(source);
921 Operand src1 = g.HighOperand(source);
922 Operand dst0 = g.ToOperand(destination);
923 Operand dst1 = g.HighOperand(destination);
924 __ movsd(xmm0, dst0); // Save destination in xmm0.
925 __ push(src0); // Then use stack to copy source to destination.
929 __ movsd(src0, xmm0);
931 // No other combinations are possible.
937 void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
943 // Checks whether the code between start_pc and end_pc is a no-op.
944 bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
946 if (start_pc + 1 != end_pc) {
949 return *(code->instruction_start() + start_pc) ==
950 v8::internal::Assembler::kNopByte;
956 } // namespace v8::internal::compiler