// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->
// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Immediate InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Immediate ToImmediate(InstructionOperand* operand) {
    return Immediate(ToConstant(operand).ToInt32());
  }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
    return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
  }
  // Returns the current input position and advances it to the next input.
  static int NextOffset(int* offset) {
    int i = *offset;
    (*offset)++;
    return i;
  }

  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }
  Operand MemoryOperand(int* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1:
      case kMode_M2:
      case kMode_M4:
      case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  Operand MemoryOperand() {
    int first_input = 0;
    return MemoryOperand(&first_input);
  }
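
  // Illustrative example: for an instruction with addressing mode kMode_MR1I
  // and inputs [base, index, displacement, ...], MemoryOperand() consumes the
  // first three inputs and yields Operand(base, index, times_1, displacement),
  // leaving the offset pointing at the first remaining input.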
};


static bool HasImmediateInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsImmediate();
}

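
// The ASSEMBLE_* helpers below expand to small if-chains that pick the
// instruction form matching where the operands live (register, stack slot or
// immediate). They are assumed to be used only inside
// CodeGenerator::AssembleArchInstruction, where `instr` and the operand
// converter `i` are in scope.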
#define ASSEMBLE_UNOP(asm_instr)         \
  do {                                   \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister());  \
    } else {                             \
      __ asm_instr(i.OutputOperand());   \
    }                                    \
  } while (0)

#define ASSEMBLE_BINOP(asm_instr)                              \
  do {                                                         \
    if (HasImmediateInput(instr, 1)) {                         \
      if (instr->InputAt(0)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else {                                                 \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
      }                                                        \
    } else {                                                   \
      if (instr->InputAt(1)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
      } else {                                                 \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
      }                                                        \
    }                                                          \
  } while (0)

#define ASSEMBLE_MULT(asm_instr)                              \
  do {                                                        \
    if (HasImmediateInput(instr, 1)) {                        \
      if (instr->InputAt(0)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0),  \
                     i.InputImmediate(1));                    \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0),   \
                     i.InputImmediate(1));                    \
      }                                                       \
    } else {                                                  \
      if (instr->InputAt(1)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1));  \
      }                                                       \
    }                                                         \
  } while (0)

#define ASSEMBLE_SHIFT(asm_instr, width)                                   \
  do {                                                                     \
    if (HasImmediateInput(instr, 1)) {                                     \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
      } else {                                                             \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1)));  \
      }                                                                    \
    } else {                                                               \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr##_cl(i.OutputRegister());                             \
      } else {                                                             \
        __ asm_instr##_cl(i.OutputOperand());                              \
      }                                                                    \
    }                                                                      \
  } while (0)

#define ASSEMBLE_DOUBLE_BINOP(asm_instr)                                \
  do {                                                                  \
    if (instr->InputAt(1)->IsDoubleRegister()) {                        \
      __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    } else {                                                            \
      __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1));        \
    }                                                                   \
  } while (0)


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
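      // The callee is either a compile-time known code object, passed as an
      // immediate and called via a CODE_TARGET relocation, or a register
      // holding a code object, in which case the call targets the instruction
      // stream just past the Code object header.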
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check that the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchJmp:
      __ jmp(GetLabel(i.InputRpo(0)));
      break;
    case kArchNop:
      // Don't emit code for nops.
      break;
    case kArchStackPointer:
      __ movq(i.OutputRegister(), rsp);
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
      ASSEMBLE_BINOP(addl);
      ASSEMBLE_BINOP(addq);
      ASSEMBLE_BINOP(subl);
      ASSEMBLE_BINOP(subq);
      ASSEMBLE_BINOP(andl);
      ASSEMBLE_BINOP(andq);
      ASSEMBLE_BINOP(cmpl);
      ASSEMBLE_BINOP(cmpq);
      ASSEMBLE_BINOP(testl);
      ASSEMBLE_BINOP(testq);
      ASSEMBLE_MULT(imull);
      ASSEMBLE_MULT(imulq);
      if (instr->InputAt(1)->IsRegister()) {
        __ imull(i.InputRegister(1));
      } else {
        __ imull(i.InputOperand(1));
      }
      if (instr->InputAt(1)->IsRegister()) {
        __ mull(i.InputRegister(1));
      } else {
        __ mull(i.InputOperand(1));
      }
      __ idivl(i.InputRegister(1));
      __ idivq(i.InputRegister(1));
      __ divl(i.InputRegister(1));
      __ divq(i.InputRegister(1));
      ASSEMBLE_BINOP(xorl);
      ASSEMBLE_BINOP(xorq);
      ASSEMBLE_SHIFT(shll, 5);
      ASSEMBLE_SHIFT(shlq, 6);
      ASSEMBLE_SHIFT(shrl, 5);
      ASSEMBLE_SHIFT(shrq, 6);
      ASSEMBLE_SHIFT(sarl, 5);
      ASSEMBLE_SHIFT(sarq, 6);
      ASSEMBLE_SHIFT(rorl, 5);
      ASSEMBLE_SHIFT(rorq, 6);
      ASSEMBLE_DOUBLE_BINOP(ucomisd);
      ASSEMBLE_DOUBLE_BINOP(addsd);
      ASSEMBLE_DOUBLE_BINOP(subsd);
      ASSEMBLE_DOUBLE_BINOP(mulsd);
      ASSEMBLE_DOUBLE_BINOP(divsd);
    case kSSEFloat64Mod: {
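      // SSE2 has no double-precision remainder instruction, so both operands
      // are spilled and pushed onto the x87 stack. fprem only computes a
      // partial remainder and must be repeated until the FPU signals
      // completion via the C2 status flag, which the loop below observes
      // through the parity flag after copying the status word into EFLAGS.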
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instructions implicitly use rax.
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF) && masm()->IsEnabled(SAHF)) {
        __ sahf();
      } else {
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Sqrt:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64Floor: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundDown);
      break;
    }
    case kSSEFloat64Ceil: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundUp);
      break;
    }
    case kSSEFloat64RoundTruncate: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundToZero);
      break;
    }
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
      }
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToUint32: {
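      // SSE2 has no unsigned 32-bit truncating conversion, so the value is
      // converted with the 64-bit signed cvttsd2siq instead; every double
      // that fits in a uint32 converts exactly, and the low 32 bits of the
      // result hold the answer.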
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kSSEInt32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEUint32ToFloat64:
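      // cvtsi2sd only has signed forms, so the uint32 input is first
      // zero-extended to 64 bits (movl implicitly clears the upper half) and
      // then converted as a signed 64-bit integer, which is exact for every
      // uint32 value.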
      if (instr->InputAt(0)->IsRegister()) {
        __ movl(kScratchRegister, i.InputRegister(0));
      } else {
        __ movl(kScratchRegister, i.InputOperand(0));
      }
      __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
      break;
      if (instr->addressing_mode() != kMode_None) {
        __ movsxbl(i.OutputRegister(), i.MemoryOperand());
      } else if (instr->InputAt(0)->IsRegister()) {
        __ movsxbl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ movsxbl(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      __ movzxbl(i.OutputRegister(), i.MemoryOperand());
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
      } else {
        __ movb(operand, i.InputRegister(index));
      }
      if (instr->addressing_mode() != kMode_None) {
        __ movsxwl(i.OutputRegister(), i.MemoryOperand());
      } else if (instr->InputAt(0)->IsRegister()) {
        __ movsxwl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ movsxwl(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      __ movzxwl(i.OutputRegister(), i.MemoryOperand());
      __ AssertZeroExtended(i.OutputRegister());
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
      } else {
        __ movw(operand, i.InputRegister(index));
      }
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          if (instr->InputAt(0)->IsRegister()) {
            __ movl(i.OutputRegister(), i.InputRegister(0));
          } else {
            __ movl(i.OutputRegister(), i.InputOperand(0));
          }
        } else {
          __ movl(i.OutputRegister(), i.MemoryOperand());
        }
        __ AssertZeroExtended(i.OutputRegister());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
        } else {
          __ movl(operand, i.InputRegister(index));
        }
      }
      if (instr->InputAt(0)->IsRegister()) {
        __ movsxlq(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ movsxlq(i.OutputRegister(), i.InputOperand(0));
      }
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
        } else {
          __ movq(operand, i.InputRegister(index));
        }
      }
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      }
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      }
      __ leal(i.OutputRegister(), i.MemoryOperand());
      __ AssertZeroExtended(i.OutputRegister());
      __ leaq(i.OutputRegister(), i.MemoryOperand());
      __ decl(i.OutputRegister());
      __ incl(i.OutputRegister());
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
      } else {
        if (instr->InputAt(0)->IsRegister()) {
          __ pushq(i.InputRegister(0));
        } else {
          __ pushq(i.InputOperand(0));
        }
      }
    case kX64StoreWriteBarrier: {
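      // Write barrier for a pointer store into a heap object: the value is
      // stored at object + index, the slot address is then materialized into
      // index, and RecordWrite notifies the GC of the updated field so that
      // incremental marking and the remembered set stay consistent.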
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ movsxlq(index, index);
      __ movq(Operand(object, index, times_1, 0), value);
      __ leaq(index, Operand(object, index, times_1, 0));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      __ RecordWrite(object, index, value, mode);
      break;
    }
  }
}


// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock::RpoNumber tblock =
      i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
  BasicBlock::RpoNumber fblock =
      i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = GetLabel(tblock);
  Label* flabel = fallthru ? &done : GetLabel(fblock);
  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
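  // Floating-point comparisons (ucomisd) set the parity flag when either
  // operand is NaN, so the kUnordered* conditions branch on parity first and
  // then fall through to the corresponding ordinary condition.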
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnorderedLessThan:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
  }
  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
  __ bind(&done);
}


// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, static_cast<int>(instr->OutputCount()));
  Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
  Condition cc = no_condition;
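  // Ordered conditions pick a condition code and fall through to the setcc
  // sequence at the bottom; the kUnordered* cases handle a NaN comparison
  // (parity flag set) by writing the 0 or 1 result directly and jumping over
  // that sequence.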
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnorderedLessThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnorderedGreaterThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
  }
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}


void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
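

// The prologue shape depends on the incoming call descriptor: C entry frames
// save any callee-saved registers, JS function frames emit the standard
// function prologue, and every frame then reserves its spill slots below the
// frame pointer.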
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ pushq(rbp);
    __ movq(rbp, rsp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
        }
      }
      __ popq(rbp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ ret(pop_count * kPointerSize);
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ Set(dst, src.ToInt32());
          break;
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, Immediate(src_const));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(kScratchRegister, src_const);
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
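

// Lazy deoptimization patches a call sequence of Deoptimizer::patch_size()
// bytes over the code following a lazy bailout point, so consecutive patch
// sites must be at least that far apart; shorter gaps are padded with nops.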
void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  MarkLazyDeoptSite();
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8