// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->

// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Immediate InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Immediate ToImmediate(InstructionOperand* operand) {
    return Immediate(ToConstant(operand).ToInt32());
  }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
    return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
  }

  static int NextOffset(int* offset) {
    int i = *offset;
    (*offset)++;
    return i;
  }

  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }

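  // Note: MemoryOperand() below consumes the instruction's inputs in order.
  // NextOffset() returns the current input index and then advances it, so the
  // base register, index register and displacement are read from consecutive
  // inputs as dictated by the addressing mode encoded in the opcode.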
  Operand MemoryOperand(int* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_M2:
        UNREACHABLE();  // Should use kModeMR with more compact encoding instead
        return Operand(no_reg, 0);
      case kMode_M4:
      case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  Operand MemoryOperand(int first_input = 0) {
    return MemoryOperand(&first_input);
  }
};

bool HasImmediateInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsImmediate();
}

class OutOfLineLoadInteger FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ xorl(result_, result_); }

 private:
  Register const result_;
};

class OutOfLineLoadFloat FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ pcmpeqd(result_, result_); }

 private:
  XMMRegister const result_;
};

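// Note: these out-of-line paths supply the result of a checked load whose
// bounds check failed: integer loads yield 0 (xorl), while float loads yield
// a NaN bit pattern (pcmpeqd sets every bit of the destination register).
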
class OutOfLineTruncateDoubleToI FINAL : public OutOfLineCode {
 public:
  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
                             XMMRegister input)
      : OutOfLineCode(gen), result_(result), input_(input) {}

  void Generate() FINAL {
    __ subp(rsp, Immediate(kDoubleSize));
    __ movsd(MemOperand(rsp, 0), input_);
    __ SlowTruncateToI(result_, rsp, 0);
    __ addp(rsp, Immediate(kDoubleSize));
  }

 private:
  Register const result_;
  XMMRegister const input_;
};

#define ASSEMBLE_UNOP(asm_instr) \
  do { \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister()); \
    } else { \
      __ asm_instr(i.OutputOperand()); \
    } \
  } while (0)

#define ASSEMBLE_BINOP(asm_instr) \
  do { \
    if (HasImmediateInput(instr, 1)) { \
      if (instr->InputAt(0)->IsRegister()) { \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else { \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \
      } \
    } else { \
      if (instr->InputAt(1)->IsRegister()) { \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \
      } else { \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \
      } \
    } \
  } while (0)

#define ASSEMBLE_MULT(asm_instr) \
  do { \
    if (HasImmediateInput(instr, 1)) { \
      if (instr->InputAt(0)->IsRegister()) { \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
                     i.InputImmediate(1)); \
      } else { \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0), \
                     i.InputImmediate(1)); \
      } \
    } else { \
      if (instr->InputAt(1)->IsRegister()) { \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
      } else { \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1)); \
      } \
    } \
  } while (0)

#define ASSEMBLE_SHIFT(asm_instr, width) \
  do { \
    if (HasImmediateInput(instr, 1)) { \
      if (instr->Output()->IsRegister()) { \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
      } else { \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1))); \
      } \
    } else { \
      if (instr->Output()->IsRegister()) { \
        __ asm_instr##_cl(i.OutputRegister()); \
      } else { \
        __ asm_instr##_cl(i.OutputOperand()); \
      } \
    } \
  } while (0)

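// Note on ASSEMBLE_SHIFT: the width argument pastes into InputInt5 or
// InputInt6, which read the immediate shift count for 32-bit (5-bit) and
// 64-bit (6-bit) shifts respectively; the non-immediate forms shift by the
// count held in the cl register.
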
#define ASSEMBLE_DOUBLE_BINOP(asm_instr) \
  do { \
    if (instr->InputAt(1)->IsDoubleRegister()) { \
      __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    } else { \
      __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1)); \
    } \
  } while (0)

#define ASSEMBLE_AVX_DOUBLE_BINOP(asm_instr) \
  do { \
    CpuFeatureScope avx_scope(masm(), AVX); \
    if (instr->InputAt(1)->IsDoubleRegister()) { \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputDoubleRegister(1)); \
    } else { \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputOperand(1)); \
    } \
  } while (0)

#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr) \
  do { \
    auto result = i.OutputDoubleRegister(); \
    auto offset = i.InputRegister(0); \
    if (instr->InputAt(1)->IsRegister()) { \
      __ cmpl(offset, i.InputRegister(1)); \
    } else { \
      __ cmpl(offset, i.InputImmediate(1)); \
    } \
    OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
    __ j(above_equal, ool->entry()); \
    __ asm_instr(result, i.MemoryOperand(2)); \
    __ bind(ool->exit()); \
  } while (0)

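// Note: both checked-load macros emit an unsigned bounds check (cmpl followed
// by an above_equal branch), so out-of-range offsets jump to the out-of-line
// code defined above instead of performing the load.
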
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
  do { \
    auto result = i.OutputRegister(); \
    auto offset = i.InputRegister(0); \
    if (instr->InputAt(1)->IsRegister()) { \
      __ cmpl(offset, i.InputRegister(1)); \
    } else { \
      __ cmpl(offset, i.InputImmediate(1)); \
    } \
    OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
    __ j(above_equal, ool->entry()); \
    __ asm_instr(result, i.MemoryOperand(2)); \
    __ bind(ool->exit()); \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
  do { \
    auto offset = i.InputRegister(0); \
    if (instr->InputAt(1)->IsRegister()) { \
      __ cmpl(offset, i.InputRegister(1)); \
    } else { \
      __ cmpl(offset, i.InputImmediate(1)); \
    } \
    Label done; \
    __ j(above_equal, &done, Label::kNear); \
    __ asm_instr(i.MemoryOperand(3), i.InputDoubleRegister(2)); \
    __ bind(&done); \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
  do { \
    auto offset = i.InputRegister(0); \
    if (instr->InputAt(1)->IsRegister()) { \
      __ cmpl(offset, i.InputRegister(1)); \
    } else { \
      __ cmpl(offset, i.InputImmediate(1)); \
    } \
    Label done; \
    __ j(above_equal, &done, Label::kNear); \
    if (instr->InputAt(2)->IsRegister()) { \
      __ asm_instr(i.MemoryOperand(3), i.InputRegister(2)); \
    } else { \
      __ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
    } \
    __ bind(&done); \
  } while (0)

// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ movq(i.OutputRegister(), rsp);
      break;
    case kArchTruncateDoubleToI: {
      auto result = i.OutputRegister();
      auto input = i.InputDoubleRegister(0);
      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
      __ cvttsd2siq(result, input);
      __ cmpq(result, Immediate(1));
      __ j(overflow, ool->entry());
      __ bind(ool->exit());
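      // Note: on failure cvttsd2siq produces 0x8000000000000000 (INT64_MIN),
      // and "cmpq result, 1" overflows only for that value, so the overflow
      // jump above takes the slow out-of-line path exactly when the fast
      // conversion did not succeed.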
      break;
    }
    case kX64Add32:
      ASSEMBLE_BINOP(addl);
      break;
    case kX64Add:
      ASSEMBLE_BINOP(addq);
      break;
    case kX64Sub32:
      ASSEMBLE_BINOP(subl);
      break;
    case kX64Sub:
      ASSEMBLE_BINOP(subq);
      break;
    case kX64And32:
      ASSEMBLE_BINOP(andl);
      break;
    case kX64And:
      ASSEMBLE_BINOP(andq);
      break;
    case kX64Cmp32:
      ASSEMBLE_BINOP(cmpl);
      break;
    case kX64Cmp:
      ASSEMBLE_BINOP(cmpq);
      break;
    case kX64Test32:
      ASSEMBLE_BINOP(testl);
      break;
    case kX64Test:
      ASSEMBLE_BINOP(testq);
      break;
    case kX64Imul32:
      ASSEMBLE_MULT(imull);
      break;
    case kX64Imul:
      ASSEMBLE_MULT(imulq);
      break;
    case kX64ImulHigh32:
      if (instr->InputAt(1)->IsRegister()) {
        __ imull(i.InputRegister(1));
      } else {
        __ imull(i.InputOperand(1));
      }
      break;
    case kX64UmulHigh32:
      if (instr->InputAt(1)->IsRegister()) {
        __ mull(i.InputRegister(1));
      } else {
        __ mull(i.InputOperand(1));
      }
      break;
    case kX64Idiv32:
      __ cdq();
      __ idivl(i.InputRegister(1));
      break;
    case kX64Idiv:
      __ cqo();
      __ idivq(i.InputRegister(1));
      break;
    case kX64Udiv32:
      __ xorl(rdx, rdx);
      __ divl(i.InputRegister(1));
      break;
    case kX64Udiv:
      __ xorq(rdx, rdx);
      __ divq(i.InputRegister(1));
      break;
    case kX64Xor32:
      ASSEMBLE_BINOP(xorl);
      break;
    case kX64Xor:
      ASSEMBLE_BINOP(xorq);
      break;
    case kX64Shl32:
      ASSEMBLE_SHIFT(shll, 5);
      break;
    case kX64Shl:
      ASSEMBLE_SHIFT(shlq, 6);
      break;
    case kX64Shr32:
      ASSEMBLE_SHIFT(shrl, 5);
      break;
    case kX64Shr:
      ASSEMBLE_SHIFT(shrq, 6);
      break;
    case kX64Sar32:
      ASSEMBLE_SHIFT(sarl, 5);
      break;
    case kX64Sar:
      ASSEMBLE_SHIFT(sarq, 6);
      break;
    case kX64Ror32:
      ASSEMBLE_SHIFT(rorl, 5);
      break;
    case kX64Ror:
      ASSEMBLE_SHIFT(rorq, 6);
      break;
    case kSSEFloat64Cmp:
      ASSEMBLE_DOUBLE_BINOP(ucomisd);
      break;
    case kSSEFloat64Add:
      ASSEMBLE_DOUBLE_BINOP(addsd);
      break;
    case kSSEFloat64Sub:
      ASSEMBLE_DOUBLE_BINOP(subsd);
      break;
    case kSSEFloat64Mul:
      ASSEMBLE_DOUBLE_BINOP(mulsd);
      break;
    case kSSEFloat64Div:
      ASSEMBLE_DOUBLE_BINOP(divsd);
      break;
    case kSSEFloat64Mod: {
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming the
      // floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instructions implicitly use rax.
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF)) {
        CpuFeatureScope sahf_scope(masm(), SAHF);
        __ sahf();
      } else {
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Sqrt:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64Floor: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundDown);
      break;
    }
    case kSSEFloat64Ceil: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundUp);
      break;
    }
    case kSSEFloat64RoundTruncate: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundToZero);
      break;
    }
    case kSSECvtss2sd:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSECvtsd2ss:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToUint32: {
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kSSEInt32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEUint32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ movl(kScratchRegister, i.InputRegister(0));
      } else {
        __ movl(kScratchRegister, i.InputOperand(0));
      }
      __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
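      // Note: movl zero-extends the 32-bit value into kScratchRegister, so the
      // 64-bit signed conversion (cvtqsi2sd) produces the correct result for
      // the full unsigned 32-bit range.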
      break;
    case kAVXFloat64Add:
      ASSEMBLE_AVX_DOUBLE_BINOP(vaddsd);
      break;
    case kAVXFloat64Sub:
      ASSEMBLE_AVX_DOUBLE_BINOP(vsubsd);
      break;
    case kAVXFloat64Mul:
      ASSEMBLE_AVX_DOUBLE_BINOP(vmulsd);
      break;
    case kAVXFloat64Div:
      ASSEMBLE_AVX_DOUBLE_BINOP(vdivsd);
      break;
    case kX64Movsxbl:
      if (instr->addressing_mode() != kMode_None) {
        __ movsxbl(i.OutputRegister(), i.MemoryOperand());
      } else if (instr->InputAt(0)->IsRegister()) {
        __ movsxbl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ movsxbl(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movzxbl:
      __ movzxbl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movb: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
      } else {
        __ movb(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movsxwl:
      if (instr->addressing_mode() != kMode_None) {
        __ movsxwl(i.OutputRegister(), i.MemoryOperand());
      } else if (instr->InputAt(0)->IsRegister()) {
        __ movsxwl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ movsxwl(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movzxwl:
      __ movzxwl(i.OutputRegister(), i.MemoryOperand());
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movw: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
      } else {
        __ movw(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movl:
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          if (instr->InputAt(0)->IsRegister()) {
            __ movl(i.OutputRegister(), i.InputRegister(0));
          } else {
            __ movl(i.OutputRegister(), i.InputOperand(0));
          }
        } else {
          __ movl(i.OutputRegister(), i.MemoryOperand());
        }
        __ AssertZeroExtended(i.OutputRegister());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
        } else {
          __ movl(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movsxlq:
      if (instr->InputAt(0)->IsRegister()) {
        __ movsxlq(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ movsxlq(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kX64Movq:
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
        } else {
          __ movq(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movss:
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Movsd:
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Lea32: {
      AddressingMode mode = AddressingModeField::decode(instr->opcode());
      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
      // and addressing mode just happens to work out. The "addl"/"subl" forms
      // in these cases are faster based on measurements.
      if (i.InputRegister(0).is(i.OutputRegister())) {
        if (mode == kMode_MRI) {
          int32_t constant_summand = i.InputInt32(1);
          if (constant_summand > 0) {
            __ addl(i.OutputRegister(), Immediate(constant_summand));
          } else if (constant_summand < 0) {
            __ subl(i.OutputRegister(), Immediate(-constant_summand));
          }
        } else if (mode == kMode_MR1 || mode == kMode_M2) {
          // Using "addl %r1, %r1" is generally faster than "shll %r1, 1"
          __ addl(i.OutputRegister(), i.InputRegister(1));
        } else if (mode == kMode_M4) {
          __ shll(i.OutputRegister(), Immediate(2));
        } else if (mode == kMode_M8) {
          __ shll(i.OutputRegister(), Immediate(3));
        } else {
          __ leal(i.OutputRegister(), i.MemoryOperand());
        }
      } else {
        __ leal(i.OutputRegister(), i.MemoryOperand());
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kX64Lea:
      __ leaq(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Dec32:
      __ decl(i.OutputRegister());
      break;
    case kX64Inc32:
      __ incl(i.OutputRegister());
      break;
    case kX64Push:
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
      } else {
        if (instr->InputAt(0)->IsRegister()) {
          __ pushq(i.InputRegister(0));
        } else {
          __ pushq(i.InputOperand(0));
        }
      }
      break;
    case kX64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ movsxlq(index, index);
      __ movq(Operand(object, index, times_1, 0), value);
      __ leaq(index, Operand(object, index, times_1, 0));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      __ RecordWrite(object, index, value, mode);
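      // Note: the sign-extended index register is reused above to hold the
      // address of the updated slot, which RecordWrite needs in order to
      // record the store for the generational/incremental write barrier.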
      break;
    }
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(movb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(movw);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(movl);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(movss);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
      break;
  }
}

// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  X64OperandConverter i(this, instr);
  Label::Distance flabel_distance =
      branch->fallthru ? Label::kNear : Label::kFar;
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  switch (branch->condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnorderedLessThan:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
  }
  if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}

void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}
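// Note on AssembleArchJump above: the jump is elided entirely when the target
// block is next in assembly order, so control simply falls through.
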
// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, static_cast<int>(instr->OutputCount()));
  Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
  Condition cc = no_condition;
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnorderedLessThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnorderedGreaterThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
  }
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}

void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}

void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ pushq(rbp);
    __ movq(rbp, rsp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));
  }
}

void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
        }
      }
      __ popq(rbp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ ret(pop_count * kPointerSize);
  }
}

void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ Set(dst, src.ToInt32());
          break;
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): load of labels on x64.
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, Immediate(src_const));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(kScratchRegister, src_const);
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}

void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}

void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }

void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  MarkLazyDeoptSite();
}

void CodeGenerator::EnsureRelocSpaceForLazyDeopt(Handle<Code> code) {}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8