// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->
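
// Throughout this file "__ foo(...)" is shorthand for "masm()->foo(...)":
// every pseudo-assembly line below is emitted through the MacroAssembler into
// the code object being generated.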
// TODO(turbofan): Cleanup these hacks.
enum Immediate64Type { kImm64Value, kImm64Handle, kImm64Reference };

struct Immediate64 {
  uint64_t value;
  Handle<Object> handle;
  ExternalReference reference;
  Immediate64Type type;
};
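
// RegisterOrOperand (below) is a small tagged union describing where a value
// lives after register allocation: |type| selects whether |reg|, |double_reg|,
// or |operand| (a stack slot) is the meaningful member.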
enum RegisterOrOperandType { kRegister, kDoubleRegister, kOperand };

struct RegisterOrOperand {
  RegisterOrOperand() : operand(no_reg, 0) {}
  Register reg;
  DoubleRegister double_reg;
  Operand operand;
  RegisterOrOperandType type;
};
// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}
  RegisterOrOperand InputRegisterOrOperand(int index) {
    return ToRegisterOrOperand(instr_->InputAt(index));
  }

  Immediate InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  RegisterOrOperand OutputRegisterOrOperand() {
    return ToRegisterOrOperand(instr_->Output());
  }

  Immediate64 InputImmediate64(int index) {
    return ToImmediate64(instr_->InputAt(index));
  }
  Immediate64 ToImmediate64(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    Immediate64 immediate;
    immediate.value = 0xbeefdeaddeefbeed;
    immediate.type = kImm64Value;
    switch (constant.type()) {
      case Constant::kInt32:
      case Constant::kInt64:
        immediate.value = constant.ToInt64();
        break;
      case Constant::kFloat64:
        immediate.type = kImm64Handle;
        immediate.handle =
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED);
        break;
      case Constant::kExternalReference:
        immediate.type = kImm64Reference;
        immediate.reference = constant.ToExternalReference();
        break;
      case Constant::kHeapObject:
        immediate.type = kImm64Handle;
        immediate.handle = constant.ToHeapObject();
        break;
    }
    return immediate;
  }
  Immediate ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    switch (constant.type()) {
      case Constant::kInt32:
        return Immediate(constant.ToInt32());
      case Constant::kInt64:
      case Constant::kFloat64:
      case Constant::kExternalReference:
      case Constant::kHeapObject:
        break;
    }
    UNREACHABLE();
    return Immediate(-1);
  }
  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    RegisterOrOperand result = ToRegisterOrOperand(op, extra);
    DCHECK_EQ(kOperand, result.type);
    return result.operand;
  }
  RegisterOrOperand ToRegisterOrOperand(InstructionOperand* op, int extra = 0) {
    RegisterOrOperand result;
    if (op->IsRegister()) {
      result.type = kRegister;
      result.reg = ToRegister(op);
      return result;
    } else if (op->IsDoubleRegister()) {
      result.type = kDoubleRegister;
      result.double_reg = ToDoubleRegister(op);
      return result;
    }
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    result.type = kOperand;
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
    result.operand =
        Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
    return result;
  }
  Operand MemoryOperand(int* first_input) {
    const int offset = *first_input;
    switch (AddressingModeField::decode(instr_->opcode())) {
        Register index = InputRegister(offset + 1);
        return Operand(InputRegister(offset + 0), index, times_1,
                       0);  // TODO(dcarney): K != 0
        return Operand(InputRegister(offset + 0), InputInt32(offset + 1));
      default:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
  }

  Operand MemoryOperand() {
    int first_input = 0;
    return MemoryOperand(&first_input);
  }
};
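
// Typical use inside AssembleArchInstruction (a sketch, not a line quoted
// from below): wrap the current Instruction in a converter and let it turn
// virtual-register inputs into assembler operands, e.g.
//   X64OperandConverter i(this, instr);
//   __ movl(i.OutputRegister(), i.MemoryOperand());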
static bool HasImmediateInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsImmediate();
}
#define ASSEMBLE_BINOP(asm_instr)                              \
  do {                                                         \
    if (HasImmediateInput(instr, 1)) {                         \
      RegisterOrOperand input = i.InputRegisterOrOperand(0);   \
      if (input.type == kRegister) {                           \
        __ asm_instr(input.reg, i.InputImmediate(1));          \
      } else {                                                 \
        __ asm_instr(input.operand, i.InputImmediate(1));      \
      }                                                        \
    } else {                                                   \
      RegisterOrOperand input = i.InputRegisterOrOperand(1);   \
      if (input.type == kRegister) {                           \
        __ asm_instr(i.InputRegister(0), input.reg);           \
      } else {                                                 \
        __ asm_instr(i.InputRegister(0), input.operand);       \
      }                                                        \
    }                                                          \
  } while (0)
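
// For example, ASSEMBLE_BINOP(addl) emits either "addl <reg-or-slot>, imm"
// when the right-hand input is an immediate, or "addl reg, <reg-or-slot>"
// otherwise, picking the register or memory form based on where the register
// allocator placed the non-immediate input.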
#define ASSEMBLE_SHIFT(asm_instr, width)                                   \
  do {                                                                     \
    if (HasImmediateInput(instr, 1)) {                                     \
      __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1)));   \
    } else {                                                               \
      __ asm_instr##_cl(i.OutputRegister());                               \
    }                                                                      \
  } while (0)
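
// The "_cl" variants (e.g. shll_cl) take the shift count from the cl
// register, as x64 requires for non-constant shifts; the instruction selector
// is assumed to have constrained that count input to rcx.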
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);
  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchJmp:
      __ jmp(code_->GetLabel(i.InputBlock(0)));
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
      ASSEMBLE_BINOP(addl);
      ASSEMBLE_BINOP(addq);
      ASSEMBLE_BINOP(subl);
      ASSEMBLE_BINOP(subq);
      ASSEMBLE_BINOP(andl);
      ASSEMBLE_BINOP(andq);
      ASSEMBLE_BINOP(cmpl);
      ASSEMBLE_BINOP(cmpq);
      ASSEMBLE_BINOP(testl);
      ASSEMBLE_BINOP(testq);
      if (HasImmediateInput(instr, 1)) {
        RegisterOrOperand input = i.InputRegisterOrOperand(0);
        if (input.type == kRegister) {
          __ imull(i.OutputRegister(), input.reg, i.InputImmediate(1));
          __ movq(kScratchRegister, input.operand);
          __ imull(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
        RegisterOrOperand input = i.InputRegisterOrOperand(1);
        if (input.type == kRegister) {
          __ imull(i.OutputRegister(), input.reg);
          __ imull(i.OutputRegister(), input.operand);
      if (HasImmediateInput(instr, 1)) {
        RegisterOrOperand input = i.InputRegisterOrOperand(0);
        if (input.type == kRegister) {
          __ imulq(i.OutputRegister(), input.reg, i.InputImmediate(1));
          __ movq(kScratchRegister, input.operand);
          __ imulq(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
        RegisterOrOperand input = i.InputRegisterOrOperand(1);
        if (input.type == kRegister) {
          __ imulq(i.OutputRegister(), input.reg);
          __ imulq(i.OutputRegister(), input.operand);
      __ idivl(i.InputRegister(1));
      __ idivq(i.InputRegister(1));
      __ divl(i.InputRegister(1));
      __ divq(i.InputRegister(1));
      RegisterOrOperand output = i.OutputRegisterOrOperand();
      if (output.type == kRegister) {
        __ notq(output.operand);
      RegisterOrOperand output = i.OutputRegisterOrOperand();
      if (output.type == kRegister) {
        __ notl(output.operand);
      RegisterOrOperand output = i.OutputRegisterOrOperand();
      if (output.type == kRegister) {
        __ negq(output.operand);
      RegisterOrOperand output = i.OutputRegisterOrOperand();
      if (output.type == kRegister) {
        __ negl(output.operand);
      ASSEMBLE_BINOP(xorl);
      ASSEMBLE_BINOP(xorq);
      ASSEMBLE_SHIFT(shll, 5);
      ASSEMBLE_SHIFT(shlq, 6);
      ASSEMBLE_SHIFT(shrl, 5);
      ASSEMBLE_SHIFT(shrq, 6);
      ASSEMBLE_SHIFT(sarl, 5);
      ASSEMBLE_SHIFT(sarq, 6);
      ASSEMBLE_SHIFT(rorl, 5);
      ASSEMBLE_SHIFT(rorq, 6);
    case kSSEFloat64Cmp: {
      RegisterOrOperand input = i.InputRegisterOrOperand(1);
      if (input.type == kDoubleRegister) {
        __ ucomisd(i.InputDoubleRegister(0), input.double_reg);
        __ ucomisd(i.InputDoubleRegister(0), input.operand);
      __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
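    // Float64 modulus: SSE2 has no double-precision remainder instruction, so
    // the inputs are spilled to the stack, loaded onto the x87 stack, and
    // fprem is retried until the FPU status word reports completion (observed
    // below through the parity flag); the result returns to an XMM register
    // via memory.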
    case kSSEFloat64Mod: {
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instructions implicitly use rax.
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF) && masm()->IsEnabled(SAHF)) {
        __ sahf();
      } else {
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
    case kSSEFloat64Sqrt: {
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kDoubleRegister) {
        __ sqrtsd(i.OutputDoubleRegister(), input.double_reg);
        __ sqrtsd(i.OutputDoubleRegister(), input.operand);
      __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kSSEFloat64ToInt32: {
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kDoubleRegister) {
        __ cvttsd2si(i.OutputRegister(), input.double_reg);
        __ cvttsd2si(i.OutputRegister(), input.operand);
    case kSSEFloat64ToUint32: {
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kDoubleRegister) {
        __ cvttsd2siq(i.OutputRegister(), input.double_reg);
        __ cvttsd2siq(i.OutputRegister(), input.operand);
      __ andl(i.OutputRegister(), i.OutputRegister());  // clear upper bits.
      // TODO(turbofan): generated code should not look at the upper 32 bits
      // of the result, but those bits could escape to the outside world.
    case kSSEInt32ToFloat64: {
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kRegister) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), input.reg);
        __ cvtlsi2sd(i.OutputDoubleRegister(), input.operand);
    case kSSEUint32ToFloat64: {
      // TODO(turbofan): X64 SSE cvtqsi2sd should support operands.
      __ cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      __ movsxbl(i.OutputRegister(), i.MemoryOperand());
      __ movzxbl(i.OutputRegister(), i.MemoryOperand());
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
        __ movb(operand, i.InputRegister(index));
      __ movsxwl(i.OutputRegister(), i.MemoryOperand());
      __ movzxwl(i.OutputRegister(), i.MemoryOperand());
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
        __ movw(operand, i.InputRegister(index));
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          RegisterOrOperand input = i.InputRegisterOrOperand(0);
          if (input.type == kRegister) {
            __ movl(i.OutputRegister(), input.reg);
            __ movl(i.OutputRegister(), input.operand);
          __ movl(i.OutputRegister(), i.MemoryOperand());
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
          __ movl(operand, i.InputRegister(index));
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kRegister) {
        __ movsxlq(i.OutputRegister(), input.reg);
        __ movsxlq(i.OutputRegister(), input.operand);
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
          __ movq(operand, i.InputRegister(index));
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
        RegisterOrOperand input = i.InputRegisterOrOperand(0);
        if (input.type == kRegister) {
          __ pushq(input.operand);
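    // The write barrier case below stores the value and then informs the GC
    // about the updated slot: the slot address is recomputed into |index| and
    // passed to RecordWrite so incremental marking and the remembered set
    // stay consistent. XMM registers are only saved around that call if this
    // frame actually allocated any.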
    case kX64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ movsxlq(index, index);
      __ movq(Operand(object, index, times_1, 0), value);
      __ leaq(index, Operand(object, index, times_1, 0));
      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
                                ? kSaveFPRegs
                                : kDontSaveFPRegs;
      __ RecordWrite(object, index, value, mode);
      break;
    }
  }
}
// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;
  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock* tblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 2);
  BasicBlock* fblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
  switch (condition) {
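    // For the kUnordered* conditions the flags were produced by a floating
    // point compare (ucomisd above), which sets the parity flag when either
    // input is NaN. Each unordered case therefore routes the NaN outcome to
    // the appropriate target via parity_even first, then falls through to the
    // ordered test.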
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnorderedLessThan:
      __ j(parity_even, flabel, flabel_distance);
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_even, tlabel);
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_even, flabel, flabel_distance);
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ j(parity_even, tlabel);
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
  }
  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
  __ bind(&done);
}
// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;
  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, instr->OutputCount());
  Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
  Condition cc = no_condition;
  switch (condition) {
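    // Unordered conditions materialize the NaN answer (0 or 1) directly and
    // jump to |done|; every other path falls through to |check| below, where
    // setcc writes the chosen condition into the low byte of |reg| and
    // movzxbl zero-extends it to the full register.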
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnorderedLessThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnorderedGreaterThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
  }
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}
void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
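
// Frame construction depends on how this code will be entered: calls through
// a C entry (kCallAddress) build a C frame and save the callee-saved
// registers listed in the call descriptor, JS function calls emit the
// standard JS frame prologue (and may patch in the global proxy as receiver
// for sloppy-mode calls), and all other code gets a stub frame. Spill slots
// are reserved last by bumping rsp.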
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ pushq(rbp);
    __ movq(rbp, rsp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      StackArgumentsAccessor args(rbp, info->scope()->num_parameters());
      __ movp(rcx, args.GetReceiverOperand());
      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
      __ j(not_equal, &ok, Label::kNear);
      __ movp(rcx, GlobalObjectOperand());
      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
      __ movp(args.GetReceiverOperand(), rcx);
      __ bind(&ok);
    }
  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));
  }
}
void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
        }
      }
      __ popq(rbp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ ret(pop_count * kPointerSize);
  }
}
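
// AssembleMove and AssembleSwap are invoked by the gap resolver to realize
// the parallel moves placed between instructions. Memory-to-memory cases can
// occur here, so kScratchRegister and xmm0 serve as scratch; both are assumed
// to be excluded from the allocatable register sets.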
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      Immediate64 imm = g.ToImmediate64(constant_source);
      switch (imm.type) {
        case kImm64Value:
          __ Set(dst, imm.value);
          break;
        case kImm64Reference:
          __ Move(dst, imm.reference);
          break;
        case kImm64Handle:
          __ Move(dst, imm.handle);
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else {
      __ movq(kScratchRegister,
              bit_cast<uint64_t, double>(g.ToDouble(constant_source)));
      if (destination->IsDoubleRegister()) {
        __ movq(g.ToDoubleRegister(destination), kScratchRegister);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory swap, using kScratchRegister as a temporary.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
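
// Lazy deoptimization later patches a call over the code that follows a call
// site, so consecutive patch sites must be at least Deoptimizer::patch_size()
// bytes apart; if the previous site is too close, padding nops are emitted
// here before recording the new one.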
void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!linkage()->info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  MarkLazyDeoptSite();
}
}  // namespace compiler
}  // namespace internal
}  // namespace v8