// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->

// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Immediate InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }
  Immediate ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    if (constant.type() == Constant::kInt32) {
      return Immediate(constant.ToInt32());
    }
    UNREACHABLE();
    return Immediate(-1);
  }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
    return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
  }
  static int NextOffset(int* offset) {
    int i = *offset;
    (*offset)++;
    return i;
  }

  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }
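  // Decodes the addressing mode encoded in the instruction's opcode and
  // materializes the corresponding x64 Operand, consuming instruction inputs
  // starting at *offset.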
  Operand MemoryOperand(int* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1:
      case kMode_M2:
      case kMode_M4:
      case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  Operand MemoryOperand() {
    int first_input = 0;
    return MemoryOperand(&first_input);
  }
};

static bool HasImmediateInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsImmediate();
}

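// The ASSEMBLE_* macros below factor out the operand-kind dispatch that is
// common to the integer instruction cases in AssembleArchInstruction.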
#define ASSEMBLE_UNOP(asm_instr)         \
  do {                                   \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister());  \
    } else {                             \
      __ asm_instr(i.OutputOperand());   \
    }                                    \
  } while (0)


#define ASSEMBLE_BINOP(asm_instr)                              \
  do {                                                         \
    if (HasImmediateInput(instr, 1)) {                         \
      if (instr->InputAt(0)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else {                                                 \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
      }                                                        \
    } else {                                                   \
      if (instr->InputAt(1)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
      } else {                                                 \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
      }                                                        \
    }                                                          \
  } while (0)


#define ASSEMBLE_MULT(asm_instr)                              \
  do {                                                        \
    if (HasImmediateInput(instr, 1)) {                        \
      if (instr->InputAt(0)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0),  \
                     i.InputImmediate(1));                    \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0),   \
                     i.InputImmediate(1));                    \
      }                                                       \
    } else {                                                  \
      if (instr->InputAt(1)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1));  \
      }                                                       \
    }                                                         \
  } while (0)


#define ASSEMBLE_SHIFT(asm_instr, width)                                 \
  do {                                                                   \
    if (HasImmediateInput(instr, 1)) {                                   \
      __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
    } else {                                                             \
      __ asm_instr##_cl(i.OutputRegister());                             \
    }                                                                    \
  } while (0)

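// For example, ASSEMBLE_BINOP(addl) expands to an addl with
// register/immediate, operand/immediate, register/register, or
// register/operand arguments, depending on the kinds of the instruction's
// inputs.
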
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchJmp:
      __ jmp(code_->GetLabel(i.InputBlock(0)));
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
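    // Integer ALU, multiply, divide, and shift cases. The "l" instruction
    // forms operate on 32-bit values and the "q" forms on 64-bit values; most
    // of them expand through the ASSEMBLE_* macros above.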
    case kX64Add32: ASSEMBLE_BINOP(addl); break;
    case kX64Add: ASSEMBLE_BINOP(addq); break;
    case kX64Sub32: ASSEMBLE_BINOP(subl); break;
    case kX64Sub: ASSEMBLE_BINOP(subq); break;
    case kX64And32: ASSEMBLE_BINOP(andl); break;
    case kX64And: ASSEMBLE_BINOP(andq); break;
    case kX64Cmp32: ASSEMBLE_BINOP(cmpl); break;
    case kX64Cmp: ASSEMBLE_BINOP(cmpq); break;
    case kX64Test32: ASSEMBLE_BINOP(testl); break;
    case kX64Test: ASSEMBLE_BINOP(testq); break;
    case kX64Imul32: ASSEMBLE_MULT(imull); break;
    case kX64Imul: ASSEMBLE_MULT(imulq); break;
    case kX64Idiv32: __ cdq(); __ idivl(i.InputRegister(1)); break;
    case kX64Idiv: __ cqo(); __ idivq(i.InputRegister(1)); break;
    case kX64Udiv32: __ xorl(rdx, rdx); __ divl(i.InputRegister(1)); break;
    case kX64Udiv: __ xorq(rdx, rdx); __ divq(i.InputRegister(1)); break;
    case kX64Xor32: ASSEMBLE_BINOP(xorl); break;
    case kX64Xor: ASSEMBLE_BINOP(xorq); break;
    case kX64Shl32: ASSEMBLE_SHIFT(shll, 5); break;
    case kX64Shl: ASSEMBLE_SHIFT(shlq, 6); break;
    case kX64Shr32: ASSEMBLE_SHIFT(shrl, 5); break;
    case kX64Shr: ASSEMBLE_SHIFT(shrq, 6); break;
    case kX64Sar32: ASSEMBLE_SHIFT(sarl, 5); break;
    case kX64Sar: ASSEMBLE_SHIFT(sarq, 6); break;
    case kX64Ror32: ASSEMBLE_SHIFT(rorl, 5); break;
    case kX64Ror: ASSEMBLE_SHIFT(rorq, 6); break;
    case kSSEFloat64Cmp:
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      break;
    case kSSEFloat64Add:
      __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Sub:
      __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Mul:
      __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Div:
      __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
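    // Float64Mod has no SSE equivalent, so it is implemented with the x87
    // fprem instruction, looping until the partial remainder is complete.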
    case kSSEFloat64Mod: {
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instructions implicitly use rax.
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF) && masm()->IsEnabled(SAHF)) {
        __ sahf();
      } else {
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Sqrt:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
      __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToUint32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      }
      __ andl(i.OutputRegister(), i.OutputRegister());  // clear upper bits.
      // TODO(turbofan): generated code should not look at the upper 32 bits
      // of the result, but those bits could escape to the outside world.
      break;
    case kSSEInt32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEUint32ToFloat64:
      // TODO(turbofan): X64 SSE cvtqsi2sd should support operands.
      __ cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
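    // Memory access cases: loads write the output register, while stores
    // consume the value from the input slot that follows the memory operands.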
    case kX64Movsxbl:
      __ movsxbl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movzxbl:
      __ movzxbl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movb: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
      } else {
        __ movb(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movsxwl:
      __ movsxwl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movzxwl:
      __ movzxwl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movw: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
      } else {
        __ movw(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movl:
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          if (instr->InputAt(0)->IsRegister()) {
            __ movl(i.OutputRegister(), i.InputRegister(0));
          } else {
            __ movl(i.OutputRegister(), i.InputOperand(0));
          }
        } else {
          __ movl(i.OutputRegister(), i.MemoryOperand());
        }
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
        } else {
          __ movl(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movsxlq:
      if (instr->InputAt(0)->IsRegister()) {
        __ movsxlq(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ movsxlq(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kX64Movq:
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
        } else {
          __ movq(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movss:
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Movsd:
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Lea32:
      __ leal(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Lea:
      __ leaq(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Push:
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
      } else {
        if (instr->InputAt(0)->IsRegister()) {
          __ pushq(i.InputRegister(0));
        } else {
          __ pushq(i.InputOperand(0));
        }
      }
      break;
    case kX64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ movsxlq(index, index);
      __ movq(Operand(object, index, times_1, 0), value);
      __ leaq(index, Operand(object, index, times_1, 0));
      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
                                ? kSaveFPRegs
                                : kDontSaveFPRegs;
      __ RecordWrite(object, index, value, mode);
      break;
    }
  }
}

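// For floating-point comparisons, ucomisd sets the parity flag when either
// operand is NaN (unordered), so the kUnordered* cases below test the parity
// flag before the ordinary condition.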
// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock* tblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 2);
  BasicBlock* fblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnorderedLessThan:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
  }
  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
  __ bind(&done);
}

// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, instr->OutputCount());
  Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
  Condition cc = no_condition;
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnorderedLessThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnorderedGreaterThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
  }
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}

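// Emits a call to the lazy deoptimization entry for the given deoptimization
// id.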
void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}

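// The prologue depends on the incoming call descriptor: C entry frames push
// rbp and any callee-saved registers, JS function frames emit the standard
// JS prologue, and other frames use the stub prologue.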
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ pushq(rbp);
    __ movq(rbp, rsp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      StackArgumentsAccessor args(rbp, info->scope()->num_parameters());
      __ movp(rcx, args.GetReceiverOperand());
      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
      __ j(not_equal, &ok, Label::kNear);
      __ movp(rcx, GlobalObjectOperand());
      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
      __ movp(args.GetReceiverOperand(), rcx);
      __ bind(&ok);
    }
  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));
  }
}

void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
        }
      }
      __ popq(rbp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ ret(pop_count * kPointerSize);
  }
}

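// Called by the gap resolver to emit a single parallel move. kScratchRegister
// and xmm0 serve as fixed scratch registers for memory-to-memory and constant
// moves.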
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ movq(dst, Immediate(src.ToInt32()));
          break;
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      __ movl(kScratchRegister, Immediate(bit_cast<int32_t>(src.ToFloat32())));
      if (destination->IsDoubleRegister()) {
        XMMRegister dst = g.ToDoubleRegister(destination);
        __ movq(dst, kScratchRegister);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, kScratchRegister);
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      __ movq(kScratchRegister, bit_cast<int64_t>(src.ToFloat64()));
      if (destination->IsDoubleRegister()) {
        __ movq(g.ToDoubleRegister(destination), kScratchRegister);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}

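// Called by the gap resolver to exchange two operands, e.g. when breaking a
// cycle of parallel moves.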
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}

void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }

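// Pads the instruction stream when necessary so that a lazy-deopt call patched
// at the previous bailout site cannot overwrite the code emitted after it.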
void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!linkage()->info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8