// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->
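
// Note: `__` is the usual V8 assembler shorthand; every `__ foo(...)` below
// expands to `masm()->foo(...)` on the x64 MacroAssembler.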

// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Immediate InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Immediate ToImmediate(InstructionOperand* operand) {
    return Immediate(ToConstant(operand).ToInt32());
  }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
    return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
  }

  // Returns the current input offset and advances the cursor by one.
  static int NextOffset(int* offset) {
    int i = *offset;
    (*offset)++;
    return i;
  }

  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }
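
  // The scale is simply the distance of `mode` from the times_1 member of its
  // addressing-mode group, e.g. kMode_MR4 - kMode_MR1 == 2, i.e. times_4 (an
  // index shifted left by 2). This assumes the addressing-mode enum lays the
  // x1/x2/x4/x8 variants out contiguously; the STATIC_ASSERTs above spot-check
  // the corresponding ScaleFactor values.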

  Operand MemoryOperand(int* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1:
      case kMode_M2:
      case kMode_M4:
      case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  Operand MemoryOperand() {
    int first_input = 0;
    return MemoryOperand(&first_input);
  }
};
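
// How MemoryOperand decodes its inputs: the addressing mode encoded in the
// opcode determines how many instruction inputs form the address, and the
// offset cursor is advanced past each one. For example, under kMode_MR1I the
// first input is the base register, the second is the index register and the
// third is a 32-bit displacement; after the call, *offset points at the first
// non-address input (used by the store cases below to find the stored value).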

static bool HasImmediateInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsImmediate();
}

#define ASSEMBLE_UNOP(asm_instr)         \
  do {                                   \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister());  \
    } else {                             \
      __ asm_instr(i.OutputOperand());   \
    }                                    \
  } while (0)

#define ASSEMBLE_BINOP(asm_instr)                              \
  do {                                                         \
    if (HasImmediateInput(instr, 1)) {                         \
      if (instr->InputAt(0)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else {                                                 \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
      }                                                        \
    } else {                                                   \
      if (instr->InputAt(1)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
      } else {                                                 \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
      }                                                        \
    }                                                          \
  } while (0)
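
// ASSEMBLE_BINOP picks the x64 two-operand form that matches the operands it
// was given: reg,imm or mem,imm when the right-hand side is an immediate,
// otherwise reg,reg or reg,mem. For example, ASSEMBLE_BINOP(addl) may end up
// emitting `addl reg, imm`, `addl [mem], imm`, `addl reg, reg`, or
// `addl reg, [mem]`.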

#define ASSEMBLE_MULT(asm_instr)                              \
  do {                                                        \
    if (HasImmediateInput(instr, 1)) {                        \
      if (instr->InputAt(0)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0),  \
                     i.InputImmediate(1));                    \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0),   \
                     i.InputImmediate(1));                    \
      }                                                       \
    } else {                                                  \
      if (instr->InputAt(1)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1));  \
      }                                                       \
    }                                                         \
  } while (0)

#define ASSEMBLE_SHIFT(asm_instr, width)                                   \
  do {                                                                     \
    if (HasImmediateInput(instr, 1)) {                                     \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
      } else {                                                             \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1)));  \
      }                                                                    \
    } else {                                                               \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr##_cl(i.OutputRegister());                             \
      } else {                                                             \
        __ asm_instr##_cl(i.OutputOperand());                              \
      }                                                                    \
    }                                                                      \
  } while (0)
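
// The `width` argument selects InputInt5 or InputInt6, i.e. the immediate
// shift count is taken modulo 32 for the 32-bit variants and modulo 64 for
// the 64-bit variants, matching what the hardware does with shift counts.
// When the count is not an immediate it is expected to live in rcx, and the
// `_cl` form of the instruction (shll_cl, sarq_cl, ...) is emitted, which
// implicitly shifts by cl.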

// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        // Compute the untagged offset of the first instruction in the Code
        // object and call through it.
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      AddSafepointAndDeopt(instr);
      break;
    }
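    // Both call cases above first pad the instruction stream via
    // EnsureSpaceForLazyDeopt() (defined at the bottom of this file) and then
    // record a safepoint plus any deoptimization entry with
    // AddSafepointAndDeopt(), so that lazy deoptimization can later patch the
    // code at these call sites.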
    case kArchJmp:
      __ jmp(code_->GetLabel(i.InputRpo(0)));
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ movq(i.OutputRegister(), rsp);
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kX64Add32:
      ASSEMBLE_BINOP(addl);
      break;
    case kX64Add:
      ASSEMBLE_BINOP(addq);
      break;
    case kX64Sub32:
      ASSEMBLE_BINOP(subl);
      break;
    case kX64Sub:
      ASSEMBLE_BINOP(subq);
      break;
    case kX64And32:
      ASSEMBLE_BINOP(andl);
      break;
    case kX64And:
      ASSEMBLE_BINOP(andq);
      break;
    case kX64Cmp32:
      ASSEMBLE_BINOP(cmpl);
      break;
    case kX64Cmp:
      ASSEMBLE_BINOP(cmpq);
      break;
    case kX64Test32:
      ASSEMBLE_BINOP(testl);
      break;
    case kX64Test:
      ASSEMBLE_BINOP(testq);
      break;
    case kX64Imul32:
      ASSEMBLE_MULT(imull);
      break;
    case kX64Imul:
      ASSEMBLE_MULT(imulq);
      break;
    case kX64ImulHigh32:
      // One-operand imull leaves the high 32 bits of the product in edx.
      __ imull(i.InputRegister(1));
      break;
    case kX64Idiv32:
      __ cdq();  // Sign-extend eax into edx:eax for the 32-bit division.
      __ idivl(i.InputRegister(1));
      break;
    case kX64Idiv:
      __ cqo();  // Sign-extend rax into rdx:rax for the 64-bit division.
      __ idivq(i.InputRegister(1));
      break;
    case kX64Udiv32:
      __ xorl(rdx, rdx);  // Zero the high half of the dividend.
      __ divl(i.InputRegister(1));
      break;
    case kX64Udiv:
      __ xorq(rdx, rdx);  // Zero the high half of the dividend.
      __ divq(i.InputRegister(1));
      break;
    case kX64Xor32:
      ASSEMBLE_BINOP(xorl);
      break;
    case kX64Xor:
      ASSEMBLE_BINOP(xorq);
      break;
    case kX64Shl32:
      ASSEMBLE_SHIFT(shll, 5);
      break;
    case kX64Shl:
      ASSEMBLE_SHIFT(shlq, 6);
      break;
    case kX64Shr32:
      ASSEMBLE_SHIFT(shrl, 5);
      break;
    case kX64Shr:
      ASSEMBLE_SHIFT(shrq, 6);
      break;
    case kX64Sar32:
      ASSEMBLE_SHIFT(sarl, 5);
      break;
    case kX64Sar:
      ASSEMBLE_SHIFT(sarq, 6);
      break;
    case kX64Ror32:
      ASSEMBLE_SHIFT(rorl, 5);
      break;
    case kX64Ror:
      ASSEMBLE_SHIFT(rorq, 6);
      break;
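    // Note on the 32-bit variants above: on x64, the "l" instructions write a
    // 32-bit result and implicitly zero the upper 32 bits of the destination
    // register, so a kX64Add32/kX64Shl32/... result is always zero-extended
    // when later read as a 64-bit value.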
    case kSSEFloat64Cmp:
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      break;
    case kSSEFloat64Add:
      __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Sub:
      __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Mul:
      __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Div:
      __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Mod: {
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instructions implicitly use rax.
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF) && masm()->IsEnabled(SAHF)) {
        __ sahf();
      } else {
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
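    // fprem may deliver only a partial remainder, so the loop above repeats
    // it until the FPU status word reports the reduction as complete:
    // fnstsw_ax copies the status word into ax, sahf (or the
    // shrl/andl/pushq/popfq fallback) moves its C2 bit into the parity flag,
    // and parity_even means "not finished yet".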
    case kSSEFloat64Sqrt:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64Floor: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundDown);
      break;
    }
    case kSSEFloat64Ceil: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundUp);
      break;
    }
    case kSSEFloat64RoundTruncate: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundToZero);
      break;
    }
    case kSSECvtss2sd:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSECvtsd2ss:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToUint32: {
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kSSEInt32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEUint32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ movl(kScratchRegister, i.InputRegister(0));
      } else {
        __ movl(kScratchRegister, i.InputOperand(0));
      }
      __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
      break;
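    // kSSEFloat64ToUint32 and kSSEUint32ToFloat64 both go through 64-bit
    // forms: cvttsd2siq produces a 64-bit integer, so values in [2^31, 2^32)
    // do not overflow, and for the reverse direction the uint32 is first
    // zero-extended by movl into kScratchRegister and then converted with the
    // 64-bit signed cvtqsi2sd, which is exact for every uint32 value.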
    case kX64Movsxbl:
      __ movsxbl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movzxbl:
      __ movzxbl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movb: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
      } else {
        __ movb(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movsxwl:
      __ movsxwl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movzxwl:
      __ movzxwl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movw: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
      } else {
        __ movw(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movl:
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          if (instr->InputAt(0)->IsRegister()) {
            __ movl(i.OutputRegister(), i.InputRegister(0));
          } else {
            __ movl(i.OutputRegister(), i.InputOperand(0));
          }
        } else {
          __ movl(i.OutputRegister(), i.MemoryOperand());
        }
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
        } else {
          __ movl(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movsxlq: {
      if (instr->InputAt(0)->IsRegister()) {
        __ movsxlq(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ movsxlq(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    }
    case kX64Movq:
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
        } else {
          __ movq(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movss:
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Movsd:
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      }
      break;
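    // For all of the kX64Mov* cases, instr->HasOutput() distinguishes a load
    // (output register, address in the inputs) from a store (no output; the
    // address inputs come first, and the MemoryOperand cursor leaves `index`
    // pointing at the value to be stored, which may be an immediate or a
    // register).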
    case kX64Lea32:
      __ leal(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Lea:
      __ leaq(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Push:
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
      } else {
        if (instr->InputAt(0)->IsRegister()) {
          __ pushq(i.InputRegister(0));
        } else {
          __ pushq(i.InputOperand(0));
        }
      }
      break;
    case kX64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ movsxlq(index, index);
      __ movq(Operand(object, index, times_1, 0), value);
      // Compute the written-to address for RecordWrite.
      __ leaq(index, Operand(object, index, times_1, 0));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      __ RecordWrite(object, index, value, mode);
      break;
    }
  }
}

// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock::RpoNumber tblock =
      i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
  BasicBlock::RpoNumber fblock =
      i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnorderedLessThan:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
  }
  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
  __ bind(&done);
}
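
// The kUnordered* cases above exist for floating-point comparisons: ucomisd
// sets the parity flag when either operand is NaN, so each unordered variant
// first dispatches on parity_even (to the true or false target, depending on
// what NaN should mean for that condition) and then falls through to the
// ordinary signed/unsigned test.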

// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, static_cast<int>(instr->OutputCount()));
  Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
  Condition cc = no_condition;
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnorderedLessThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnorderedGreaterThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
  }
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}
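
// setcc only writes the low 8 bits of the result register, which is why the
// materialized value is explicitly zero-extended with movzxbl before the
// register is handed back as a full 0/1 word.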

void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}

void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ pushq(rbp);
    __ movq(rbp, rsp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      StackArgumentsAccessor args(rbp, info->scope()->num_parameters());
      __ movp(rcx, args.GetReceiverOperand());
      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
      __ j(not_equal, &ok, Label::kNear);
      __ movp(rcx, GlobalObjectOperand());
      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
      __ movp(args.GetReceiverOperand(), rcx);
      __ bind(&ok);
    }
  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));
  }
}
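
// Three frame shapes are set up above: kCallAddress frames get an explicit
// rbp-based C frame plus pushes of any callee-saved registers, JS function
// frames go through the standard Prologue() and then patch the receiver for
// sloppy-mode calls, and everything else gets a StubPrologue(). In all cases
// the spill area is reserved at the end by bumping rsp down by
// stack_slots * kPointerSize.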

void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
        }
      }
      __ popq(rbp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ ret(pop_count * kPointerSize);
  }
}
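
// JS calls pop their stack parameters on return, hence
// ret(pop_count * kPointerSize) with pop_count taken from the descriptor's
// JSParameterCount(); kCallAddress frames leave any stack arguments to the
// caller and simply ret(0).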

void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ Set(dst, src.ToInt32());
          break;
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, Immediate(src_const));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(kScratchRegister, src_const);
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}

void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
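
// AssembleMove/AssembleSwap are the hooks the gap resolver uses to emit
// parallel moves between instructions; kScratchRegister and xmm0 are treated
// as fixed scratch registers here precisely so that these memory-to-memory
// and XMM shuffles never need to allocate a temporary.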

void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }

void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  MarkLazyDeoptSite();
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8