// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->

// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Immediate InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Immediate ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    if (constant.type() == Constant::kInt32) {
      return Immediate(constant.ToInt32());
    }
    UNREACHABLE();
    return Immediate(-1);
  }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
    return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
  }
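  // For example, a spill slot whose FrameOffset is 8 bytes below the frame
  // pointer decodes to Operand(rbp, -8); frames laid out without a frame
  // pointer address the same slot relative to rsp instead.
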
  static int NextOffset(int* offset) {
    int i = *offset;
    (*offset)++;
    return i;
  }

  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }
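  // E.g. ScaleFor(kMode_MR1, kMode_MR4) yields times_4: the scaled addressing
  // modes are declared in scale order, so the distance from the times_1
  // variant is exactly the ScaleFactor.
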
  Operand MemoryOperand(int* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1: case kMode_MR2: case kMode_MR4: case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I: case kMode_MR2I: case kMode_MR4I: case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1: case kMode_M2: case kMode_M4: case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I: case kMode_M2I: case kMode_M4I: case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }
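
  // MemoryOperand consumes the base/index/displacement inputs dictated by the
  // AddressingMode starting at *offset and advances *offset past them, so a
  // store can find its value input at the updated index afterwards.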
  Operand MemoryOperand() {
    int first_input = 0;
    return MemoryOperand(&first_input);
  }
};


static bool HasImmediateInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsImmediate();
}


#define ASSEMBLE_UNOP(asm_instr)         \
  do {                                   \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister());  \
    } else {                             \
      __ asm_instr(i.OutputOperand());   \
    }                                    \
  } while (0)


#define ASSEMBLE_BINOP(asm_instr)                              \
  do {                                                         \
    if (HasImmediateInput(instr, 1)) {                         \
      if (instr->InputAt(0)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else {                                                 \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
      }                                                        \
    } else {                                                   \
      if (instr->InputAt(1)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
      } else {                                                 \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
      }                                                        \
    }                                                          \
  } while (0)
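
// For example, ASSEMBLE_BINOP(addl) on an instruction whose second input was
// allocated as an immediate expands to
//   __ addl(i.InputRegister(0), i.InputImmediate(1));
// the remaining arms pick the matching register/operand forms.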


#define ASSEMBLE_MULT(asm_instr)                              \
  do {                                                        \
    if (HasImmediateInput(instr, 1)) {                        \
      if (instr->InputAt(0)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0),  \
                     i.InputImmediate(1));                    \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0),   \
                     i.InputImmediate(1));                    \
      }                                                       \
    } else {                                                  \
      if (instr->InputAt(1)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1));  \
      }                                                       \
    }                                                         \
  } while (0)


#define ASSEMBLE_SHIFT(asm_instr, width)                                   \
  do {                                                                     \
    if (HasImmediateInput(instr, 1)) {                                     \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
      } else {                                                             \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1)));  \
      }                                                                    \
    } else {                                                               \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr##_cl(i.OutputRegister());                             \
      } else {                                                             \
        __ asm_instr##_cl(i.OutputOperand());                              \
      }                                                                    \
    }                                                                      \
  } while (0)
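
// The width argument selects the immediate decoder that matches the operand
// size: 32-bit shifts encode their count with InputInt5 (count mod 32) and
// 64-bit shifts with InputInt6 (count mod 64), mirroring how the hardware
// masks a count passed in cl.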


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
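        // The entry point is Code::kHeaderSize bytes into the Code object;
        // subtracting kHeapObjectTag folds the pointer untagging into the
        // call's displacement.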
        __ Call(Operand(reg, entry));
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchJmp:
      __ jmp(code_->GetLabel(i.InputBlock(0)));
      break;
    case kArchNop:
      // Don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ movq(i.OutputRegister(), rsp);
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kX64Add32:
      ASSEMBLE_BINOP(addl);
      break;
    case kX64Add:
      ASSEMBLE_BINOP(addq);
      break;
    case kX64Sub32:
      ASSEMBLE_BINOP(subl);
      break;
    case kX64Sub:
      ASSEMBLE_BINOP(subq);
      break;
    case kX64And32:
      ASSEMBLE_BINOP(andl);
      break;
    case kX64And:
      ASSEMBLE_BINOP(andq);
      break;
    case kX64Cmp32:
      ASSEMBLE_BINOP(cmpl);
      break;
    case kX64Cmp:
      ASSEMBLE_BINOP(cmpq);
      break;
    case kX64Test32:
      ASSEMBLE_BINOP(testl);
      break;
    case kX64Test:
      ASSEMBLE_BINOP(testq);
      break;
    case kX64Imul32:
      ASSEMBLE_MULT(imull);
      break;
    case kX64Imul:
      ASSEMBLE_MULT(imulq);
      break;
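    // The divisions below use rax and rdx implicitly: cdq/cqo sign-extend
    // rax into rdx for the signed forms, while the unsigned forms zero rdx;
    // the quotient ends up in rax and the remainder in rdx.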
    case kX64Idiv32:
      __ cdq();
      __ idivl(i.InputRegister(1));
      break;
    case kX64Idiv:
      __ cqo();
      __ idivq(i.InputRegister(1));
      break;
    case kX64Udiv32:
      __ xorl(rdx, rdx);
      __ divl(i.InputRegister(1));
      break;
    case kX64Udiv:
      __ xorq(rdx, rdx);
      __ divq(i.InputRegister(1));
      break;
    case kX64Xor32:
      ASSEMBLE_BINOP(xorl);
      break;
    case kX64Xor:
      ASSEMBLE_BINOP(xorq);
      break;
    case kX64Shl32:
      ASSEMBLE_SHIFT(shll, 5);
      break;
    case kX64Shl:
      ASSEMBLE_SHIFT(shlq, 6);
      break;
    case kX64Shr32:
      ASSEMBLE_SHIFT(shrl, 5);
      break;
    case kX64Shr:
      ASSEMBLE_SHIFT(shrq, 6);
      break;
    case kX64Sar32:
      ASSEMBLE_SHIFT(sarl, 5);
      break;
    case kX64Sar:
      ASSEMBLE_SHIFT(sarq, 6);
      break;
    case kX64Ror32:
      ASSEMBLE_SHIFT(rorl, 5);
      break;
    case kX64Ror:
      ASSEMBLE_SHIFT(rorq, 6);
      break;
    case kSSEFloat64Cmp:
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      break;
    case kSSEFloat64Add:
      __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Sub:
      __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Mul:
      __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Div:
      __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Mod: {
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
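      // SSE2 has no floating-point remainder instruction, so both inputs
      // round-trip through memory onto the x87 stack, where fprem can
      // compute st(0) % st(1).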
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instructions implicitly use rax.
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF) && masm()->IsEnabled(SAHF)) {
        __ sahf();
      } else {
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Sqrt:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSECvtss2sd:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSECvtsd2ss:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToUint32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      }
      __ andl(i.OutputRegister(), i.OutputRegister());  // clear upper bits.
      // TODO(turbofan): generated code should not look at the upper 32 bits
      // of the result, but those bits could escape to the outside world.
      break;
    case kSSEInt32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEUint32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ cvtqsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
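    // The narrow loads below sign-extend (movsx*) or zero-extend (movzx*)
    // into a full 32-bit register; the store variants fetch their value from
    // the input slot that follows the address inputs consumed by
    // MemoryOperand.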
    case kX64Movsxbl:
      __ movsxbl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movzxbl:
      __ movzxbl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movb: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
      } else {
        __ movb(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movsxwl:
      __ movsxwl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movzxwl:
      __ movzxwl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movw: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
      } else {
        __ movw(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movl:
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          if (instr->InputAt(0)->IsRegister()) {
            __ movl(i.OutputRegister(), i.InputRegister(0));
          } else {
            __ movl(i.OutputRegister(), i.InputOperand(0));
          }
        } else {
          __ movl(i.OutputRegister(), i.MemoryOperand());
        }
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
        } else {
          __ movl(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movsxlq:
      if (instr->InputAt(0)->IsRegister()) {
        __ movsxlq(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ movsxlq(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kX64Movq:
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
        } else {
          __ movq(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movss:
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Movsd:
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Lea32:
      __ leal(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Lea:
      __ leaq(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Push:
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
      } else {
        if (instr->InputAt(0)->IsRegister()) {
          __ pushq(i.InputRegister(0));
        } else {
          __ pushq(i.InputOperand(0));
        }
      }
      break;
    case kX64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ movsxlq(index, index);
      __ movq(Operand(object, index, times_1, 0), value);
      __ leaq(index, Operand(object, index, times_1, 0));
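      // RecordWrite wants the address of the updated slot rather than the
      // stored value, hence the leaq above; XMM registers only need saving
      // if this frame actually allocated any.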
      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
                                ? kSaveFPRegs
                                : kDontSaveFPRegs;
      __ RecordWrite(object, index, value, mode);
      break;
    }
  }
}


// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock* tblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 2);
  BasicBlock* fblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
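  // Unordered comparisons set the parity flag when either operand is NaN, so
  // the kUnordered* cases branch on parity_even first and then fall through
  // to the ordinary condition test.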
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnorderedLessThan:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
  }
  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
  __ bind(&done);
}


// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, static_cast<int>(instr->OutputCount()));
  Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
  Condition cc = no_condition;
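  // For the kUnordered* conditions a NaN operand decides the result outright:
  // materialize the 0 or 1 immediately and jump to `done`, skipping the
  // setcc; all other paths set cc and reach `check`, which produces the
  // boolean via setcc plus zero-extension.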
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnorderedLessThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnorderedGreaterThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
  }
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}


void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ pushq(rbp);
    __ movq(rbp, rsp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with
    // the global proxy when called as functions (without an explicit receiver
    // object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      StackArgumentsAccessor args(rbp, info->scope()->num_parameters());
      __ movp(rcx, args.GetReceiverOperand());
      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
      __ j(not_equal, &ok, Label::kNear);
      __ movp(rcx, GlobalObjectOperand());
      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
      __ movp(args.GetReceiverOperand(), rcx);
      __ bind(&ok);
    }
  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));
  }
}
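
// A sketch of the frame this establishes for the JS-call case (stack grows
// down): return address, then the caller's rbp (the new rbp points here),
// then the fixed frame slots pushed by Prologue(), and finally stack_slots
// spill slots, which X64OperandConverter::ToOperand resolves via rbp (or rsp
// for frames without a frame pointer).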


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
        }
      }
      __ popq(rbp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ ret(pop_count * kPointerSize);
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ movq(dst, Immediate(src.ToInt32()));
          break;
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      __ movl(kScratchRegister, Immediate(bit_cast<int32_t>(src.ToFloat32())));
      if (destination->IsDoubleRegister()) {
        XMMRegister dst = g.ToDoubleRegister(destination);
        __ movq(dst, kScratchRegister);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, kScratchRegister);
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      __ movq(kScratchRegister, bit_cast<int64_t>(src.ToFloat64()));
      if (destination->IsDoubleRegister()) {
        __ movq(g.ToDoubleRegister(destination), kScratchRegister);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!linkage()->info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
}
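
// Lazy deoptimization patches the code following a call site; padding with
// Nop above keeps consecutive patch sites at least Deoptimizer::patch_size()
// bytes apart, so that patching one site cannot overwrite the next.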

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8