// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->

// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Immediate InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Immediate ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    if (constant.type() == Constant::kInt32) {
      return Immediate(constant.ToInt32());
    }
    UNREACHABLE();
    return Immediate(-1);
  }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
    return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
  }

  static int NextOffset(int* offset) {
    int i = *offset;
    (*offset)++;
    return i;
  }

  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }

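  // Decodes the addressing mode encoded in the instruction's opcode and builds
  // the matching x64 Operand, consuming the base, index and displacement
  // inputs in order via NextOffset().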
  Operand MemoryOperand(int* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1:
      case kMode_M2:
      case kMode_M4:
      case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  Operand MemoryOperand() {
    int first_input = 0;
    return MemoryOperand(&first_input);
  }
};


static bool HasImmediateInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsImmediate();
}


#define ASSEMBLE_UNOP(asm_instr)         \
  do {                                   \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister());  \
    } else {                             \
      __ asm_instr(i.OutputOperand());   \
    }                                    \
  } while (0)


#define ASSEMBLE_BINOP(asm_instr)                              \
  do {                                                         \
    if (HasImmediateInput(instr, 1)) {                         \
      if (instr->InputAt(0)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else {                                                 \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
      }                                                        \
    } else {                                                   \
      if (instr->InputAt(1)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
      } else {                                                 \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
      }                                                        \
    }                                                          \
  } while (0)


#define ASSEMBLE_MULT(asm_instr)                              \
  do {                                                        \
    if (HasImmediateInput(instr, 1)) {                        \
      if (instr->InputAt(0)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0),  \
                     i.InputImmediate(1));                    \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0),   \
                     i.InputImmediate(1));                    \
      }                                                       \
    } else {                                                  \
      if (instr->InputAt(1)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1));  \
      }                                                       \
    }                                                         \
  } while (0)


#define ASSEMBLE_SHIFT(asm_instr, width)                                   \
  do {                                                                     \
    if (HasImmediateInput(instr, 1)) {                                     \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
      } else {                                                             \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1)));  \
      }                                                                    \
    } else {                                                               \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr##_cl(i.OutputRegister());                             \
      } else {                                                             \
        __ asm_instr##_cl(i.OutputOperand());                              \
      }                                                                    \
    }                                                                      \
  } while (0)

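// For example, ASSEMBLE_BINOP(addl) emits one of "addl reg, imm",
// "addl [mem], imm", "addl reg, reg" or "addl reg, [mem]", depending on the
// operand kinds the register allocator picked for the instruction's inputs;
// the _SHIFT variant falls back to the cl-based form when the shift amount
// is not an immediate.
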
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchJmp:
      __ jmp(code_->GetLabel(i.InputRpo(0)));
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ movq(i.OutputRegister(), rsp);
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kX64Add32:
      ASSEMBLE_BINOP(addl);
      break;
    case kX64Add:
      ASSEMBLE_BINOP(addq);
      break;
    case kX64Sub32:
      ASSEMBLE_BINOP(subl);
      break;
    case kX64Sub:
      ASSEMBLE_BINOP(subq);
      break;
    case kX64And32:
      ASSEMBLE_BINOP(andl);
      break;
    case kX64And:
      ASSEMBLE_BINOP(andq);
      break;
    case kX64Cmp32:
      ASSEMBLE_BINOP(cmpl);
      break;
    case kX64Cmp:
      ASSEMBLE_BINOP(cmpq);
      break;
    case kX64Test32:
      ASSEMBLE_BINOP(testl);
      break;
    case kX64Test:
      ASSEMBLE_BINOP(testq);
      break;
    case kX64Imul32:
      ASSEMBLE_MULT(imull);
      break;
    case kX64Imul:
      ASSEMBLE_MULT(imulq);
      break;
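    // The one-operand imul and the div/idiv family use rax and rdx
    // implicitly: cdq/cqo sign-extend rax into rdx for signed division, and
    // the xor clears rdx for unsigned division.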
    case kX64ImulHigh32:
      __ imull(i.InputRegister(1));
      break;
    case kX64Idiv32:
      __ cdq();
      __ idivl(i.InputRegister(1));
      break;
    case kX64Idiv:
      __ cqo();
      __ idivq(i.InputRegister(1));
      break;
    case kX64Udiv32:
      __ xorl(rdx, rdx);
      __ divl(i.InputRegister(1));
      break;
    case kX64Udiv:
      __ xorq(rdx, rdx);
      __ divq(i.InputRegister(1));
      break;
    case kX64Xor32:
      ASSEMBLE_BINOP(xorl);
      break;
    case kX64Xor:
      ASSEMBLE_BINOP(xorq);
      break;
    case kX64Shl32:
      ASSEMBLE_SHIFT(shll, 5);
      break;
    case kX64Shl:
      ASSEMBLE_SHIFT(shlq, 6);
      break;
    case kX64Shr32:
      ASSEMBLE_SHIFT(shrl, 5);
      break;
    case kX64Shr:
      ASSEMBLE_SHIFT(shrq, 6);
      break;
    case kX64Sar32:
      ASSEMBLE_SHIFT(sarl, 5);
      break;
    case kX64Sar:
      ASSEMBLE_SHIFT(sarq, 6);
      break;
    case kX64Ror32:
      ASSEMBLE_SHIFT(rorl, 5);
      break;
    case kX64Ror:
      ASSEMBLE_SHIFT(rorq, 6);
      break;
    case kSSEFloat64Cmp:
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      break;
    case kSSEFloat64Add:
      __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Sub:
      __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Mul:
      __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Div:
      __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
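    // SSE2 has no double-precision remainder instruction, so Float64Mod is
    // computed with the x87 fprem loop below.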
    case kSSEFloat64Mod: {
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instructions implicitly use rax.
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF) && masm()->IsEnabled(SAHF)) {
        __ sahf();
      } else {
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Sqrt:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSECvtss2sd:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSECvtsd2ss:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToUint32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      }
      __ andl(i.OutputRegister(), i.OutputRegister());  // clear upper bits.
      // TODO(turbofan): generated code should not look at the upper 32 bits
      // of the result, but those bits could escape to the outside world.
      break;
    case kSSEInt32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
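    // Uint32 -> float64: movl zero-extends the input (32-bit moves clear the
    // upper half of the destination), and the value is then converted as a
    // signed 64-bit integer, which is exact for every uint32.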
    case kSSEUint32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ movl(kScratchRegister, i.InputRegister(0));
      } else {
        __ movl(kScratchRegister, i.InputOperand(0));
      }
      __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
      break;
    case kX64Movsxbl:
      __ movsxbl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movzxbl:
      __ movzxbl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movb: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
      } else {
        __ movb(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movsxwl:
      __ movsxwl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movzxwl:
      __ movzxwl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movw: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
      } else {
        __ movw(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movl:
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          if (instr->InputAt(0)->IsRegister()) {
            __ movl(i.OutputRegister(), i.InputRegister(0));
          } else {
            __ movl(i.OutputRegister(), i.InputOperand(0));
          }
        } else {
          __ movl(i.OutputRegister(), i.MemoryOperand());
        }
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
        } else {
          __ movl(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movsxlq: {
      if (instr->InputAt(0)->IsRegister()) {
        __ movsxlq(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ movsxlq(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    }
    case kX64Movq:
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
        } else {
          __ movq(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movss:
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Movsd:
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Lea32:
      __ leal(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Lea:
      __ leaq(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Push:
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
      } else {
        if (instr->InputAt(0)->IsRegister()) {
          __ pushq(i.InputRegister(0));
        } else {
          __ pushq(i.InputOperand(0));
        }
      }
      break;
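    // Stores value into object[index] and then calls RecordWrite so that the
    // GC's incremental marker and store buffer see the new pointer.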
    case kX64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ movsxlq(index, index);
      __ movq(Operand(object, index, times_1, 0), value);
      __ leaq(index, Operand(object, index, times_1, 0));
      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
                                ? kSaveFPRegs
                                : kDontSaveFPRegs;
      __ RecordWrite(object, index, value, mode);
      break;
    }
  }
}


// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock::RpoNumber tblock =
      i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
  BasicBlock::RpoNumber fblock =
      i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
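  // The kUnordered* conditions arise from floating-point comparisons; ucomisd
  // sets the parity flag when either operand is NaN, so those cases test
  // parity before falling through to the ordinary condition.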
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnorderedLessThan:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
  }
  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
  __ bind(&done);
}


// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, static_cast<int>(instr->OutputCount()));
  Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
  Condition cc = no_condition;
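  // cc is consumed by the setcc at the bottom, which writes only the low byte
  // of reg, hence the zero-extension after it. The kUnordered* (NaN) cases
  // materialize their result directly and jump past that code.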
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnorderedLessThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnorderedGreaterThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
  }
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}


void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


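// Builds the frame for the current code kind: code called directly through a
// C address (kCallAddress) pushes rbp and saves callee-saved registers, JS
// functions emit the standard JS frame prologue, and everything else uses a
// stub frame. Spill slots are reserved at the end in all cases.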
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ pushq(rbp);
    __ movq(rbp, rsp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      StackArgumentsAccessor args(rbp, info->scope()->num_parameters());
      __ movp(rcx, args.GetReceiverOperand());
      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
      __ j(not_equal, &ok, Label::kNear);
      __ movp(rcx, GlobalObjectOperand());
      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
      __ movp(args.GetReceiverOperand(), rcx);
      __ bind(&ok);
    }
  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
        }
      }
      __ popq(rbp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ ret(pop_count * kPointerSize);
  }
}


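// AssembleMove and AssembleSwap are invoked by the gap resolver (see
// gap-resolver.h) to realize the parallel moves the register allocator
// inserts between instructions.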
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ Set(dst, src.ToInt32());
          break;
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, Immediate(src_const));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(kScratchRegister, src_const);
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }


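// Lazy deoptimization patches a call into the code that follows a lazy
// bailout point, so there must be at least Deoptimizer::patch_size() bytes of
// instructions between two such sites; pad with nops if needed.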
void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!linkage()->info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  MarkLazyDeoptSite();
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8