// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->

// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Immediate InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }
  Immediate ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    if (constant.type() == Constant::kInt32) {
      return Immediate(constant.ToInt32());
    }
    UNREACHABLE();
    return Immediate(-1);
  }
  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
    return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
  }
  static int NextOffset(int* offset) {
    int i = *offset;
    (*offset)++;
    return i;
  }

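  // ScaleFor maps the distance between a scaled addressing mode and its
  // times_1 variant to a ScaleFactor, e.g. ScaleFor(kMode_MR1, kMode_MR4)
  // yields times_4. This relies on the scaled modes being declared
  // consecutively, which the STATIC_ASSERTs below pin down on the
  // ScaleFactor side.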
  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }
  Operand MemoryOperand(int* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1: case kMode_MR2: case kMode_MR4: case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I: case kMode_MR2I: case kMode_MR4I: case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1: case kMode_M2: case kMode_M4: case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I: case kMode_M2I: case kMode_M4I: case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

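  // Address inputs are consumed in the order base, index, displacement, so
  // for e.g. kMode_MR1I an instruction's inputs are [base, index, disp] and
  // *offset is left pointing at the first non-address input (for stores,
  // the value).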
  Operand MemoryOperand() {
    int first_input = 0;
    return MemoryOperand(&first_input);
  }
};


static bool HasImmediateInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsImmediate();
}


#define ASSEMBLE_UNOP(asm_instr)         \
  do {                                   \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister());  \
    } else {                             \
      __ asm_instr(i.OutputOperand());   \
    }                                    \
  } while (0)


#define ASSEMBLE_BINOP(asm_instr)                              \
  do {                                                         \
    if (HasImmediateInput(instr, 1)) {                         \
      if (instr->InputAt(0)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else {                                                 \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
      }                                                        \
    } else {                                                   \
      if (instr->InputAt(1)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
      } else {                                                 \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
      }                                                        \
    }                                                          \
  } while (0)
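// E.g. ASSEMBLE_BINOP(addl) emits one of "addl reg, imm", "addl [mem], imm",
// "addl reg, reg" or "addl reg, [mem]", depending on where the register
// allocator placed the instruction's two inputs.

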
#define ASSEMBLE_MULT(asm_instr)                              \
  do {                                                        \
    if (HasImmediateInput(instr, 1)) {                        \
      if (instr->InputAt(0)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0),  \
                     i.InputImmediate(1));                    \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0),   \
                     i.InputImmediate(1));                    \
      }                                                       \
    } else {                                                  \
      if (instr->InputAt(1)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1));  \
      }                                                       \
    }                                                         \
  } while (0)


#define ASSEMBLE_SHIFT(asm_instr, width)                                   \
  do {                                                                     \
    if (HasImmediateInput(instr, 1)) {                                     \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
      } else {                                                             \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1)));  \
      }                                                                    \
    } else {                                                               \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr##_cl(i.OutputRegister());                             \
      } else {                                                             \
        __ asm_instr##_cl(i.OutputOperand());                              \
      }                                                                    \
    }                                                                      \
  } while (0)
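// The width argument selects InputInt5 or InputInt6, which truncate an
// immediate shift count to 5 bits (0-31) for the 32-bit forms and 6 bits
// (0-63) for the 64-bit forms, mirroring what the hardware does with a
// count passed in cl.

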
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchJmp:
      __ jmp(code_->GetLabel(i.InputBlock(0)));
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ movq(i.OutputRegister(), rsp);
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kX64Add32:
      ASSEMBLE_BINOP(addl);
      break;
    case kX64Add:
      ASSEMBLE_BINOP(addq);
      break;
    case kX64Sub32:
      ASSEMBLE_BINOP(subl);
      break;
    case kX64Sub:
      ASSEMBLE_BINOP(subq);
      break;
    case kX64And32:
      ASSEMBLE_BINOP(andl);
      break;
    case kX64And:
      ASSEMBLE_BINOP(andq);
      break;
    case kX64Cmp32:
      ASSEMBLE_BINOP(cmpl);
      break;
    case kX64Cmp:
      ASSEMBLE_BINOP(cmpq);
      break;
    case kX64Test32:
      ASSEMBLE_BINOP(testl);
      break;
    case kX64Test:
      ASSEMBLE_BINOP(testq);
      break;
    case kX64Imul32:
      ASSEMBLE_MULT(imull);
      break;
    case kX64Imul:
      ASSEMBLE_MULT(imulq);
      break;
    case kX64Idiv32:
      __ cdq();
      __ idivl(i.InputRegister(1));
      break;
    case kX64Idiv:
      __ cqo();
      __ idivq(i.InputRegister(1));
      break;
    case kX64Udiv32:
      __ xorl(rdx, rdx);
      __ divl(i.InputRegister(1));
      break;
    case kX64Udiv:
      __ xorq(rdx, rdx);
      __ divq(i.InputRegister(1));
      break;
    case kX64Xor32:
      ASSEMBLE_BINOP(xorl);
      break;
    case kX64Xor:
      ASSEMBLE_BINOP(xorq);
      break;
    case kX64Shl32:
      ASSEMBLE_SHIFT(shll, 5);
      break;
    case kX64Shl:
      ASSEMBLE_SHIFT(shlq, 6);
      break;
    case kX64Shr32:
      ASSEMBLE_SHIFT(shrl, 5);
      break;
    case kX64Shr:
      ASSEMBLE_SHIFT(shrq, 6);
      break;
    case kX64Sar32:
      ASSEMBLE_SHIFT(sarl, 5);
      break;
    case kX64Sar:
      ASSEMBLE_SHIFT(sarq, 6);
      break;
    case kX64Ror32:
      ASSEMBLE_SHIFT(rorl, 5);
      break;
    case kX64Ror:
      ASSEMBLE_SHIFT(rorq, 6);
      break;
    case kSSEFloat64Cmp:
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      break;
    case kSSEFloat64Add:
      __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Sub:
      __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Mul:
      __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Div:
      __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
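    // Float64 modulus is computed with x87 fprem rather than SSE: fprem may
    // reduce the exponent only partially per iteration, so it is retried
    // until the FPU's C2 flag (visible as the parity flag after
    // fnstsw/sahf) reports that the reduction is complete.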
    case kSSEFloat64Mod: {
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instructions implicitly use rax.
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF) && masm()->IsEnabled(SAHF)) {
        __ sahf();
      } else {
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Sqrt:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSECvtss2sd:
      __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kSSECvtsd2ss:
      __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToUint32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      }
      __ andl(i.OutputRegister(), i.OutputRegister());  // clear upper bits.
      // TODO(turbofan): generated code should not look at the upper 32 bits
      // of the result, but those bits could escape to the outside world.
      break;
    case kSSEInt32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEUint32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ cvtqsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kX64Movsxbl:
      __ movsxbl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movzxbl:
      __ movzxbl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movb: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
      } else {
        __ movb(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movsxwl:
      __ movsxwl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movzxwl:
      __ movzxwl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movw: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
      } else {
        __ movw(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movl:
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          if (instr->InputAt(0)->IsRegister()) {
            __ movl(i.OutputRegister(), i.InputRegister(0));
          } else {
            __ movl(i.OutputRegister(), i.InputOperand(0));
          }
        } else {
          __ movl(i.OutputRegister(), i.MemoryOperand());
        }
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
        } else {
          __ movl(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movsxlq: {
      if (instr->InputAt(0)->IsRegister()) {
        __ movsxlq(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ movsxlq(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    }
    case kX64Movq:
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
        } else {
          __ movq(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movss:
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Movsd:
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Lea32:
      __ leal(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Lea:
      __ leaq(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Push:
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
      } else {
        if (instr->InputAt(0)->IsRegister()) {
          __ pushq(i.InputRegister(0));
        } else {
          __ pushq(i.InputOperand(0));
        }
      }
      break;
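    // The write barrier below stores value into object at the (signed) byte
    // offset held in index, then turns index into the field's absolute
    // address for RecordWrite. RecordWrite only preserves FP registers if
    // this frame ever allocated any.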
    case kX64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ movsxlq(index, index);
      __ movq(Operand(object, index, times_1, 0), value);
      __ leaq(index, Operand(object, index, times_1, 0));
      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
                                ? kSaveFPRegs
                                : kDontSaveFPRegs;
      __ RecordWrite(object, index, value, mode);
      break;
    }
  }
}


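// On x64, ucomisd sets the parity flag when a comparison is unordered (at
// least one input was NaN), so each kUnordered* condition below dispatches
// on parity before testing the ordinary condition code.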
// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock* tblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 2);
  BasicBlock* fblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnorderedLessThan:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
  }
  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
  __ bind(&done);
}


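// setcc only writes the low byte of its destination, so materialization
// below ends with a movzxbl to zero-extend the 0/1 result; the kUnordered*
// cases short-circuit a constant 0 or 1 when the comparison was unordered.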
// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, static_cast<int>(instr->OutputCount()));
  Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
  Condition cc = no_condition;
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnorderedLessThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnorderedGreaterThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
  }
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}


void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


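// Three prologue shapes are possible: a C entry frame (kCallAddress) that
// pushes rbp and saves callee-saved registers manually, a JS function frame
// built by Prologue(), and a stub frame built by StubPrologue().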
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ pushq(rbp);
    __ movq(rbp, rsp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with
    // the global proxy when called as functions (without an explicit
    // receiver object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      StackArgumentsAccessor args(rbp, info->scope()->num_parameters());
      __ movp(rcx, args.GetReceiverOperand());
      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
      __ j(not_equal, &ok, Label::kNear);
      __ movp(rcx, GlobalObjectOperand());
      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
      __ movp(args.GetReceiverOperand(), rcx);
      __ bind(&ok);
    }
  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
        }
      }
      __ popq(rbp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ ret(pop_count * kPointerSize);
  }
}


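// AssembleMove and AssembleSwap are emitted on behalf of the gap resolver.
// kScratchRegister (r10) and xmm0 are reserved on this backend, so
// memory-to-memory transfers can always go through them without disturbing
// any allocated register.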
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ movq(dst, Immediate(src.ToInt32()));
          break;
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      __ movl(kScratchRegister, Immediate(bit_cast<int32_t>(src.ToFloat32())));
      if (destination->IsDoubleRegister()) {
        XMMRegister dst = g.ToDoubleRegister(destination);
        __ movq(dst, kScratchRegister);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, kScratchRegister);
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      __ movq(kScratchRegister, bit_cast<int64_t>(src.ToFloat64()));
      if (destination->IsDoubleRegister()) {
        __ movq(g.ToDoubleRegister(destination), kScratchRegister);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}


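// xchgq with a memory operand carries an implicit lock prefix on x64, so
// the register-memory and memory-memory swaps below are atomic but
// comparatively expensive; the XMM cases go through xmm0 instead.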
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }


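// Pads the code stream so the patcher for lazy deoptimization always finds
// Deoptimizer::patch_size() bytes after the previous lazy bailout point.
// E.g. (illustrative numbers only) with patch_size() == 13, a previous
// bailout at pc offset 40 and a current pc offset of 45, eight bytes of
// nops are emitted before the next instruction is assembled.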
void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!linkage()->info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  MarkLazyDeoptSite();
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8