// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->
// TODO(turbofan): Cleanup these hacks.
enum Immediate64Type { kImm64Value, kImm64Handle, kImm64Reference };


struct Immediate64 {
  uint64_t value;
  Handle<Object> handle;
  ExternalReference reference;
  Immediate64Type type;
};
enum RegisterOrOperandType { kRegister, kDoubleRegister, kOperand };
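

// A decoded instruction operand: exactly one of |reg|, |double_reg| and
// |operand| is meaningful, as indicated by |type|.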
struct RegisterOrOperand {
  RegisterOrOperand() : operand(no_reg, 0) {}
  Register reg;
  DoubleRegister double_reg;
  Operand operand;
  RegisterOrOperandType type;
};
// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  RegisterOrOperand InputRegisterOrOperand(int index) {
    return ToRegisterOrOperand(instr_->InputAt(index));
  }

  Immediate InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  RegisterOrOperand OutputRegisterOrOperand() {
    return ToRegisterOrOperand(instr_->Output());
  }

  Immediate64 InputImmediate64(int index) {
    return ToImmediate64(instr_->InputAt(index));
  }
  Immediate64 ToImmediate64(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    Immediate64 immediate;
    immediate.value = 0xbeefdeaddeefbeed;
    immediate.type = kImm64Value;
    switch (constant.type()) {
      case Constant::kInt32:
      case Constant::kInt64:
        immediate.value = constant.ToInt64();
        return immediate;
      case Constant::kFloat64:
        immediate.type = kImm64Handle;
        immediate.handle =
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED);
        return immediate;
      case Constant::kExternalReference:
        immediate.type = kImm64Reference;
        immediate.reference = constant.ToExternalReference();
        return immediate;
      case Constant::kHeapObject:
        immediate.type = kImm64Handle;
        immediate.handle = constant.ToHeapObject();
        return immediate;
    }
    UNREACHABLE();
    return immediate;
  }
  Immediate ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    switch (constant.type()) {
      case Constant::kInt32:
        return Immediate(constant.ToInt32());
      case Constant::kInt64:
      case Constant::kFloat64:
      case Constant::kExternalReference:
      case Constant::kHeapObject:
        break;
    }
    UNREACHABLE();
    return Immediate(-1);
  }
  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    RegisterOrOperand result = ToRegisterOrOperand(op, extra);
    DCHECK_EQ(kOperand, result.type);
    return result.operand;
  }
  RegisterOrOperand ToRegisterOrOperand(InstructionOperand* op, int extra = 0) {
    RegisterOrOperand result;
    if (op->IsRegister()) {
      DCHECK(extra == 0);
      result.type = kRegister;
      result.reg = ToRegister(op);
      return result;
    } else if (op->IsDoubleRegister()) {
      DCHECK(extra == 0);
      result.type = kDoubleRegister;
      result.double_reg = ToDoubleRegister(op);
      return result;
    }

    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());

    result.type = kOperand;
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
    result.operand =
        Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
    return result;
  }
  Operand MemoryOperand(int* first_input) {
    const int offset = *first_input;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_MR1I: {
        *first_input += 2;
        Register index = InputRegister(offset + 1);
        return Operand(InputRegister(offset + 0), index, times_1,
                       0);  // TODO(dcarney): K != 0
      }
      case kMode_MRI:
        *first_input += 2;
        return Operand(InputRegister(offset + 0), InputInt32(offset + 1));
      default:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
  }

  Operand MemoryOperand() {
    int first_input = 0;
    return MemoryOperand(&first_input);
  }
};
static bool HasImmediateInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsImmediate();
}
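

// ASSEMBLE_BINOP emits one two-operand ALU instruction, choosing the
// register/immediate, memory/immediate, register/register or register/memory
// form from the shapes of the instruction's inputs.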
#define ASSEMBLE_BINOP(asm_instr)                            \
  do {                                                       \
    if (HasImmediateInput(instr, 1)) {                       \
      RegisterOrOperand input = i.InputRegisterOrOperand(0); \
      if (input.type == kRegister) {                         \
        __ asm_instr(input.reg, i.InputImmediate(1));        \
      } else {                                               \
        __ asm_instr(input.operand, i.InputImmediate(1));    \
      }                                                      \
    } else {                                                 \
      RegisterOrOperand input = i.InputRegisterOrOperand(1); \
      if (input.type == kRegister) {                         \
        __ asm_instr(i.InputRegister(0), input.reg);         \
      } else {                                               \
        __ asm_instr(i.InputRegister(0), input.operand);     \
      }                                                      \
    }                                                        \
  } while (0)
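

// ASSEMBLE_SHIFT emits a shift either by an immediate (masked to |width|
// bits: 5 for 32-bit shifts, 6 for 64-bit shifts) or by the count in cl.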
#define ASSEMBLE_SHIFT(asm_instr, width)                                 \
  do {                                                                   \
    if (HasImmediateInput(instr, 1)) {                                   \
      __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
    } else {                                                             \
      __ asm_instr##_cl(i.OutputRegister());                             \
    }                                                                    \
  } while (0)


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchJmp:
      __ jmp(code_->GetLabel(i.InputBlock(0)));
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchDeoptimize: {
      int deoptimization_id = MiscField::decode(instr->opcode());
      BuildTranslation(instr, deoptimization_id);

      Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
          isolate(), deoptimization_id, Deoptimizer::LAZY);
      __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
      break;
    }
    case kX64Add32:
      ASSEMBLE_BINOP(addl);
      break;
    case kX64Add:
      ASSEMBLE_BINOP(addq);
      break;
    case kX64Sub32:
      ASSEMBLE_BINOP(subl);
      break;
    case kX64Sub:
      ASSEMBLE_BINOP(subq);
      break;
    case kX64And32:
      ASSEMBLE_BINOP(andl);
      break;
    case kX64And:
      ASSEMBLE_BINOP(andq);
      break;
    case kX64Cmp32:
      ASSEMBLE_BINOP(cmpl);
      break;
    case kX64Cmp:
      ASSEMBLE_BINOP(cmpq);
      break;
    case kX64Test32:
      ASSEMBLE_BINOP(testl);
      break;
    case kX64Test:
      ASSEMBLE_BINOP(testq);
      break;
    case kX64Imul32:
      if (HasImmediateInput(instr, 1)) {
        RegisterOrOperand input = i.InputRegisterOrOperand(0);
        if (input.type == kRegister) {
          __ imull(i.OutputRegister(), input.reg, i.InputImmediate(1));
        } else {
          __ movq(kScratchRegister, input.operand);
          __ imull(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
        }
      } else {
        RegisterOrOperand input = i.InputRegisterOrOperand(1);
        if (input.type == kRegister) {
          __ imull(i.OutputRegister(), input.reg);
        } else {
          __ imull(i.OutputRegister(), input.operand);
        }
      }
      break;
    case kX64Imul:
      if (HasImmediateInput(instr, 1)) {
        RegisterOrOperand input = i.InputRegisterOrOperand(0);
        if (input.type == kRegister) {
          __ imulq(i.OutputRegister(), input.reg, i.InputImmediate(1));
        } else {
          __ movq(kScratchRegister, input.operand);
          __ imulq(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
        }
      } else {
        RegisterOrOperand input = i.InputRegisterOrOperand(1);
        if (input.type == kRegister) {
          __ imulq(i.OutputRegister(), input.reg);
        } else {
          __ imulq(i.OutputRegister(), input.operand);
        }
      }
      break;
    case kX64Idiv32:
      __ cdq();  // Sign-extend eax into edx:eax for the 32-bit division.
      __ idivl(i.InputRegister(1));
      break;
    case kX64Idiv:
      __ cqo();  // Sign-extend rax into rdx:rax for the 64-bit division.
      __ idivq(i.InputRegister(1));
      break;
    case kX64Udiv32:
      __ xorl(rdx, rdx);  // Zero the high half of the dividend.
      __ divl(i.InputRegister(1));
      break;
    case kX64Udiv:
      __ xorq(rdx, rdx);  // Zero the high half of the dividend.
      __ divq(i.InputRegister(1));
      break;
    case kX64Not: {
      RegisterOrOperand output = i.OutputRegisterOrOperand();
      if (output.type == kRegister) {
        __ notq(output.reg);
      } else {
        __ notq(output.operand);
      }
      break;
    }
    case kX64Not32: {
      RegisterOrOperand output = i.OutputRegisterOrOperand();
      if (output.type == kRegister) {
        __ notl(output.reg);
      } else {
        __ notl(output.operand);
      }
      break;
    }
    case kX64Neg: {
      RegisterOrOperand output = i.OutputRegisterOrOperand();
      if (output.type == kRegister) {
        __ negq(output.reg);
      } else {
        __ negq(output.operand);
      }
      break;
    }
    case kX64Neg32: {
      RegisterOrOperand output = i.OutputRegisterOrOperand();
      if (output.type == kRegister) {
        __ negl(output.reg);
      } else {
        __ negl(output.operand);
      }
      break;
    }
    case kX64Or32:
      ASSEMBLE_BINOP(orl);
      break;
    case kX64Or:
      ASSEMBLE_BINOP(orq);
      break;
    case kX64Xor32:
      ASSEMBLE_BINOP(xorl);
      break;
    case kX64Xor:
      ASSEMBLE_BINOP(xorq);
      break;
    case kX64Shl32:
      ASSEMBLE_SHIFT(shll, 5);
      break;
    case kX64Shl:
      ASSEMBLE_SHIFT(shlq, 6);
      break;
    case kX64Shr32:
      ASSEMBLE_SHIFT(shrl, 5);
      break;
    case kX64Shr:
      ASSEMBLE_SHIFT(shrq, 6);
      break;
    case kX64Sar32:
      ASSEMBLE_SHIFT(sarl, 5);
      break;
    case kX64Sar:
      ASSEMBLE_SHIFT(sarq, 6);
      break;
    case kX64Push: {
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kRegister) {
        __ pushq(input.reg);
      } else {
        __ pushq(input.operand);
      }
      break;
    }
    case kX64PushI:
      __ pushq(i.InputImmediate(0));
      break;
    case kX64CallCodeObject: {
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      }
      RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                      Safepoint::kNoLazyDeopt);
      bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
      if (lazy_deopt) {
        RecordLazyDeoptimizationEntry(instr);
      }
      AddNopForSmiCodeInlining();
      break;
    }
    case kX64CallAddress:
      if (HasImmediateInput(instr, 0)) {
        Immediate64 imm = i.InputImmediate64(0);
        DCHECK_EQ(kImm64Value, imm.type);
        __ Call(reinterpret_cast<byte*>(imm.value), RelocInfo::NONE64);
      } else {
        __ call(i.InputRegister(0));
      }
      break;
    case kPopStack: {
      int words = MiscField::decode(instr->opcode());
      __ addq(rsp, Immediate(kPointerSize * words));
      break;
    }
    case kX64CallJSFunction: {
      Register func = i.InputRegister(0);

      // TODO(jarin) The load of the context should be separated from the call.
      __ movp(rsi, FieldOperand(func, JSFunction::kContextOffset));
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));

      RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                      Safepoint::kNoLazyDeopt);
      RecordLazyDeoptimizationEntry(instr);
      break;
    }
    case kSSEFloat64Cmp: {
      RegisterOrOperand input = i.InputRegisterOrOperand(1);
      if (input.type == kDoubleRegister) {
        __ ucomisd(i.InputDoubleRegister(0), input.double_reg);
      } else {
        __ ucomisd(i.InputDoubleRegister(0), input.operand);
      }
      break;
    }
    case kSSEFloat64Add:
      __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Sub:
      __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Mul:
      __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Div:
      __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
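    // Float64 modulus has no SSE encoding, so the operands are pushed onto
    // the x87 FPU stack and reduced with fprem.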
    case kSSEFloat64Mod: {
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instructions implicitly use rax.
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF) && masm()->IsEnabled(SAHF)) {
        __ sahf();
      } else {
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
    case kX64Int32ToInt64:
      // Sign-extend the lower 32 bits into the full 64-bit register.
      __ movsxlq(i.OutputRegister(), i.InputRegister(0));
      break;
    case kX64Int64ToInt32:
      __ Move(i.OutputRegister(), i.InputRegister(0));
      break;
    case kSSEFloat64ToInt32: {
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kDoubleRegister) {
        __ cvttsd2si(i.OutputRegister(), input.double_reg);
      } else {
        __ cvttsd2si(i.OutputRegister(), input.operand);
      }
      break;
    }
    case kSSEFloat64ToUint32: {
      // TODO(turbofan): X64 SSE cvttsd2siq should support operands.
      __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      __ andl(i.OutputRegister(), i.OutputRegister());  // clear upper bits.
      // TODO(turbofan): generated code should not look at the upper 32 bits
      // of the result, but those bits could escape to the outside world.
      break;
    }
    case kSSEInt32ToFloat64: {
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kRegister) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), input.reg);
      } else {
        __ cvtlsi2sd(i.OutputDoubleRegister(), input.operand);
      }
      break;
    }
    case kSSEUint32ToFloat64: {
      // TODO(turbofan): X64 SSE cvtqsi2sd should support operands.
      // A zero-extended uint32 fits in the positive range of int64, so the
      // 64-bit signed conversion yields the correct double.
      __ cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kSSELoad:
      __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kSSEStore: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ movsd(operand, i.InputDoubleRegister(index));
      break;
    }
    case kX64LoadWord8:
      __ movzxbl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64StoreWord8: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ movb(operand, i.InputRegister(index));
      break;
    }
    case kX64StoreWord8I: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ movb(operand, Immediate(i.InputInt8(index)));
      break;
    }
    case kX64LoadWord16:
      __ movzxwl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64StoreWord16: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ movw(operand, i.InputRegister(index));
      break;
    }
    case kX64StoreWord16I: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ movw(operand, Immediate(i.InputInt16(index)));
      break;
    }
    case kX64LoadWord32:
      __ movl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64StoreWord32: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ movl(operand, i.InputRegister(index));
      break;
    }
    case kX64StoreWord32I: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ movl(operand, i.InputImmediate(index));
      break;
    }
    case kX64LoadWord64:
      __ movq(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64StoreWord64: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ movq(operand, i.InputRegister(index));
      break;
    }
    case kX64StoreWord64I: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ movq(operand, i.InputImmediate(index));
      break;
    }
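    // Stores of tagged pointers into heap objects go through RecordWrite so
    // the garbage collector is notified of the updated field.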
    case kX64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ movsxlq(index, index);
      __ movq(Operand(object, index, times_1, 0), value);
      __ leaq(index, Operand(object, index, times_1, 0));
      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
                                ? kSaveFPRegs
                                : kDontSaveFPRegs;
      __ RecordWrite(object, index, value, mode);
      break;
    }
  }
}


// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock* tblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 2);
  BasicBlock* fblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
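  // For floating point comparisons, ucomisd sets the parity flag if either
  // operand was NaN, so the kUnordered* cases dispatch on parity before
  // testing the actual condition.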
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnorderedLessThan:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
  }
  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
  __ bind(&done);
}


// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, instr->OutputCount());
  Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
  Condition cc = no_condition;
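  // Each kUnordered* case branches to |check| when the comparison was ordered
  // (parity odd); otherwise it materializes the unordered answer directly and
  // jumps to |done|.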
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnorderedLessThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnorderedGreaterThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
  }
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}
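

// Assembles the function prologue: frame setup, callee-saved register
// spilling, the sloppy-mode receiver fix-up, and spill slot allocation.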
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ pushq(rbp);
    __ movq(rbp, rsp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      StackArgumentsAccessor args(rbp, info->scope()->num_parameters());
      __ movp(rcx, args.GetReceiverOperand());
      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
      __ j(not_equal, &ok, Label::kNear);
      __ movp(rcx, GlobalObjectOperand());
      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
      __ movp(args.GetReceiverOperand(), rcx);
      __ bind(&ok);
    }
  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
        }
      }
      __ popq(rbp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    int pop_count =
        descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
    __ ret(pop_count * kPointerSize);
  }
}
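

// AssembleMove and AssembleSwap below are the GapResolver callbacks that
// resolve parallel moves one elementary move or swap at a time.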
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      Immediate64 imm = g.ToImmediate64(constant_source);
      switch (imm.type) {
        case kImm64Value:
          __ Set(dst, imm.value);
          break;
        case kImm64Reference:
          __ Move(dst, imm.reference);
          break;
        case kImm64Handle:
          __ Move(dst, imm.handle);
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else {
      __ movq(kScratchRegister,
              BitCast<uint64_t, double>(g.ToDouble(constant_source)));
      if (destination->IsDoubleRegister()) {
        __ movq(g.ToDoubleRegister(destination), kScratchRegister);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    // Register-memory.
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
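// The nop emitted above is the byte that IsNopForSmiCodeInlining() below
// recognizes.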

#undef __

#ifdef DEBUG

// Checks whether the code between start_pc and end_pc is a no-op.
bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
                                            int end_pc) {
  if (start_pc + 1 != end_pc) {
    return false;
  }
  return *(code->instruction_start() + start_pc) ==
         v8::internal::Assembler::kNopByte;
}

#endif
}  // namespace compiler
}  // namespace internal
}  // namespace v8