// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


// Adds X64-specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Immediate InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Immediate ToImmediate(InstructionOperand* operand) {
    return Immediate(ToConstant(operand).ToInt32());
  }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
    return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
  }
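
  // NextOffset and ScaleFor are helpers for MemoryOperand below: NextOffset
  // consumes instruction inputs one at a time, and ScaleFor translates the
  // distance between a base addressing mode and the actual mode into an x64
  // SIB scale factor.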
  static int NextOffset(int* offset) {
    int i = *offset;
    (*offset)++;
    return i;
  }

  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }
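
  // MemoryOperand decodes the AddressingMode encoded in the instruction opcode
  // and builds the corresponding x64 Operand, consuming base, index, and
  // displacement from the instruction inputs starting at *offset.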
  Operand MemoryOperand(int* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1:
      case kMode_M2:
      case kMode_M4:
      case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  Operand MemoryOperand(int first_input = 0) {
    return MemoryOperand(&first_input);
  }
};


bool HasImmediateInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsImmediate();
}
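

// Out-of-line code for checked loads that fail the bounds check: integer
// loads materialize 0 (xorl), float loads materialize a NaN (pcmpeqd sets
// all bits of the destination).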
class OutOfLineLoadInteger FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ xorl(result_, result_); }

 private:
  Register const result_;
};
147 class OutOfLineLoadFloat FINAL : public OutOfLineCode {
149 OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result)
150 : OutOfLineCode(gen), result_(result) {}
152 void Generate() FINAL { __ pcmpeqd(result_, result_); }
155 XMMRegister const result_;
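

// The ASSEMBLE_* macros below expand to the common operand-selection patterns
// (register vs. memory vs. immediate inputs and outputs) for whole families
// of instructions, so that each opcode case in AssembleArchInstruction stays
// a one-liner.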
#define ASSEMBLE_UNOP(asm_instr)         \
  do {                                   \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister());  \
    } else {                             \
      __ asm_instr(i.OutputOperand());   \
    }                                    \
  } while (0)


#define ASSEMBLE_BINOP(asm_instr)                              \
  do {                                                         \
    if (HasImmediateInput(instr, 1)) {                         \
      if (instr->InputAt(0)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else {                                                 \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
      }                                                        \
    } else {                                                   \
      if (instr->InputAt(1)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
      } else {                                                 \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
      }                                                        \
    }                                                          \
  } while (0)


#define ASSEMBLE_MULT(asm_instr)                              \
  do {                                                        \
    if (HasImmediateInput(instr, 1)) {                        \
      if (instr->InputAt(0)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0),  \
                     i.InputImmediate(1));                    \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0),   \
                     i.InputImmediate(1));                    \
      }                                                       \
    } else {                                                  \
      if (instr->InputAt(1)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1));  \
      }                                                       \
    }                                                         \
  } while (0)


#define ASSEMBLE_SHIFT(asm_instr, width)                                   \
  do {                                                                     \
    if (HasImmediateInput(instr, 1)) {                                     \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
      } else {                                                             \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1)));  \
      }                                                                    \
    } else {                                                               \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr##_cl(i.OutputRegister());                             \
      } else {                                                             \
        __ asm_instr##_cl(i.OutputOperand());                              \
      }                                                                    \
    }                                                                      \
  } while (0)


#define ASSEMBLE_DOUBLE_BINOP(asm_instr)                                \
  do {                                                                  \
    if (instr->InputAt(1)->IsDoubleRegister()) {                        \
      __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    } else {                                                            \
      __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1));        \
    }                                                                   \
  } while (0)


#define ASSEMBLE_AVX_DOUBLE_BINOP(asm_instr)                           \
  do {                                                                 \
    CpuFeatureScope avx_scope(masm(), AVX);                            \
    if (instr->InputAt(1)->IsDoubleRegister()) {                       \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputDoubleRegister(1));                          \
    } else {                                                           \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputOperand(1));                                 \
    }                                                                  \
  } while (0)
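

// The checked load/store macros emit an unsigned bounds check of the offset
// against the length input. Out-of-range loads branch to out-of-line code
// that produces a default value; out-of-range stores simply skip the store.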
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                          \
  do {                                                                  \
    auto result = i.OutputDoubleRegister();                             \
    auto offset = i.InputRegister(0);                                   \
    if (instr->InputAt(1)->IsRegister()) {                              \
      __ cmpl(offset, i.InputRegister(1));                              \
    } else {                                                            \
      __ cmpl(offset, i.InputImmediate(1));                             \
    }                                                                   \
    OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
    __ j(above_equal, ool->entry());                                    \
    __ asm_instr(result, i.MemoryOperand(2));                           \
    __ bind(ool->exit());                                               \
  } while (false)


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                          \
  do {                                                                    \
    auto result = i.OutputRegister();                                     \
    auto offset = i.InputRegister(0);                                     \
    if (instr->InputAt(1)->IsRegister()) {                                \
      __ cmpl(offset, i.InputRegister(1));                                \
    } else {                                                              \
      __ cmpl(offset, i.InputImmediate(1));                               \
    }                                                                     \
    OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
    __ j(above_equal, ool->entry());                                      \
    __ asm_instr(result, i.MemoryOperand(2));                             \
    __ bind(ool->exit());                                                 \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                 \
  do {                                                          \
    auto offset = i.InputRegister(0);                           \
    if (instr->InputAt(1)->IsRegister()) {                      \
      __ cmpl(offset, i.InputRegister(1));                      \
    } else {                                                    \
      __ cmpl(offset, i.InputImmediate(1));                     \
    }                                                           \
    Label done;                                                 \
    __ j(above_equal, &done, Label::kNear);                     \
    __ asm_instr(i.MemoryOperand(3), i.InputDoubleRegister(2)); \
    __ bind(&done);                                             \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)            \
  do {                                                       \
    auto offset = i.InputRegister(0);                        \
    if (instr->InputAt(1)->IsRegister()) {                   \
      __ cmpl(offset, i.InputRegister(1));                   \
    } else {                                                 \
      __ cmpl(offset, i.InputImmediate(1));                  \
    }                                                        \
    Label done;                                              \
    __ j(above_equal, &done, Label::kNear);                  \
    if (instr->InputAt(2)->IsRegister()) {                   \
      __ asm_instr(i.MemoryOperand(3), i.InputRegister(2));  \
    } else {                                                 \
      __ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
    }                                                        \
    __ bind(&done);                                          \
  } while (false)


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check that the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchNop:
      // Don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ movq(i.OutputRegister(), rsp);
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kX64Add32:
      ASSEMBLE_BINOP(addl);
      break;
    case kX64Add:
      ASSEMBLE_BINOP(addq);
      break;
    case kX64Sub32:
      ASSEMBLE_BINOP(subl);
      break;
    case kX64Sub:
      ASSEMBLE_BINOP(subq);
      break;
    case kX64And32:
      ASSEMBLE_BINOP(andl);
      break;
    case kX64And:
      ASSEMBLE_BINOP(andq);
      break;
    case kX64Cmp32:
      ASSEMBLE_BINOP(cmpl);
      break;
    case kX64Cmp:
      ASSEMBLE_BINOP(cmpq);
      break;
    case kX64Test32:
      ASSEMBLE_BINOP(testl);
      break;
    case kX64Test:
      ASSEMBLE_BINOP(testq);
      break;
    case kX64Imul32:
      ASSEMBLE_MULT(imull);
      break;
    case kX64Imul:
      ASSEMBLE_MULT(imulq);
      break;
    case kX64ImulHigh32:
      if (instr->InputAt(1)->IsRegister()) {
        __ imull(i.InputRegister(1));
      } else {
        __ imull(i.InputOperand(1));
      }
      break;
    case kX64UmulHigh32:
      if (instr->InputAt(1)->IsRegister()) {
        __ mull(i.InputRegister(1));
      } else {
        __ mull(i.InputOperand(1));
      }
      break;
    case kX64Idiv32:
      __ cdq();
      __ idivl(i.InputRegister(1));
      break;
    case kX64Idiv:
      __ cqo();
      __ idivq(i.InputRegister(1));
      break;
    case kX64Udiv32:
      __ xorl(rdx, rdx);
      __ divl(i.InputRegister(1));
      break;
    case kX64Udiv:
      __ xorq(rdx, rdx);
      __ divq(i.InputRegister(1));
      break;
    case kX64Xor32:
      ASSEMBLE_BINOP(xorl);
      break;
    case kX64Xor:
      ASSEMBLE_BINOP(xorq);
      break;
    case kX64Shl32:
      ASSEMBLE_SHIFT(shll, 5);
      break;
    case kX64Shl:
      ASSEMBLE_SHIFT(shlq, 6);
      break;
    case kX64Shr32:
      ASSEMBLE_SHIFT(shrl, 5);
      break;
    case kX64Shr:
      ASSEMBLE_SHIFT(shrq, 6);
      break;
    case kX64Sar32:
      ASSEMBLE_SHIFT(sarl, 5);
      break;
    case kX64Sar:
      ASSEMBLE_SHIFT(sarq, 6);
      break;
    case kX64Ror32:
      ASSEMBLE_SHIFT(rorl, 5);
      break;
    case kX64Ror:
      ASSEMBLE_SHIFT(rorq, 6);
      break;
    case kSSEFloat64Cmp:
      ASSEMBLE_DOUBLE_BINOP(ucomisd);
      break;
    case kSSEFloat64Add:
      ASSEMBLE_DOUBLE_BINOP(addsd);
      break;
    case kSSEFloat64Sub:
      ASSEMBLE_DOUBLE_BINOP(subsd);
      break;
    case kSSEFloat64Mul:
      ASSEMBLE_DOUBLE_BINOP(mulsd);
      break;
    case kSSEFloat64Div:
      ASSEMBLE_DOUBLE_BINOP(divsd);
      break;
    case kSSEFloat64Mod: {
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instructions implicitly use rax.
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF) && masm()->IsEnabled(SAHF)) {
        __ sahf();
      } else {
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Sqrt:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64Floor: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundDown);
      break;
    }
    case kSSEFloat64Ceil: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundUp);
      break;
    }
    case kSSEFloat64RoundTruncate: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundToZero);
      break;
    }
    case kSSECvtss2sd:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSECvtsd2ss:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToUint32: {
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kSSEInt32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEUint32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ movl(kScratchRegister, i.InputRegister(0));
      } else {
        __ movl(kScratchRegister, i.InputOperand(0));
      }
      __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
      break;
    case kAVXFloat64Add:
      ASSEMBLE_AVX_DOUBLE_BINOP(vaddsd);
      break;
    case kAVXFloat64Sub:
      ASSEMBLE_AVX_DOUBLE_BINOP(vsubsd);
      break;
    case kAVXFloat64Mul:
      ASSEMBLE_AVX_DOUBLE_BINOP(vmulsd);
      break;
    case kAVXFloat64Div:
      ASSEMBLE_AVX_DOUBLE_BINOP(vdivsd);
      break;
    case kX64Movsxbl:
      if (instr->addressing_mode() != kMode_None) {
        __ movsxbl(i.OutputRegister(), i.MemoryOperand());
      } else if (instr->InputAt(0)->IsRegister()) {
        __ movsxbl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ movsxbl(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movzxbl:
      __ movzxbl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movb: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
      } else {
        __ movb(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movsxwl:
      if (instr->addressing_mode() != kMode_None) {
        __ movsxwl(i.OutputRegister(), i.MemoryOperand());
      } else if (instr->InputAt(0)->IsRegister()) {
        __ movsxwl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ movsxwl(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movzxwl:
      __ movzxwl(i.OutputRegister(), i.MemoryOperand());
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movw: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
      } else {
        __ movw(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movl:
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          if (instr->InputAt(0)->IsRegister()) {
            __ movl(i.OutputRegister(), i.InputRegister(0));
          } else {
            __ movl(i.OutputRegister(), i.InputOperand(0));
          }
        } else {
          __ movl(i.OutputRegister(), i.MemoryOperand());
        }
        __ AssertZeroExtended(i.OutputRegister());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
        } else {
          __ movl(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movsxlq: {
      if (instr->InputAt(0)->IsRegister()) {
        __ movsxlq(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ movsxlq(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    }
    case kX64Movq:
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
        } else {
          __ movq(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movss:
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Movsd:
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Lea32: {
      AddressingMode mode = AddressingModeField::decode(instr->opcode());
      // Shorten "leal" to "addl" or "subl" if the register allocation just
      // happens to work out for operations with immediate operands where the
      // non-constant input register is the same as the output register. The
      // "addl"/"subl" forms in these cases are faster based on empirical
      // measurements.
      if (mode == kMode_MRI && i.InputRegister(0).is(i.OutputRegister())) {
        int32_t constant_summand = i.InputInt32(1);
        if (constant_summand > 0) {
          __ addl(i.OutputRegister(), Immediate(constant_summand));
        } else if (constant_summand < 0) {
          __ subl(i.OutputRegister(), Immediate(-constant_summand));
        }
      } else {
        __ leal(i.OutputRegister(), i.MemoryOperand());
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kX64Lea:
      __ leaq(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Dec32:
      __ decl(i.OutputRegister());
      break;
    case kX64Inc32:
      __ incl(i.OutputRegister());
      break;
    case kX64Push:
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
      } else {
        if (instr->InputAt(0)->IsRegister()) {
          __ pushq(i.InputRegister(0));
        } else {
          __ pushq(i.InputOperand(0));
        }
      }
      break;
    case kX64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ movsxlq(index, index);
      __ movq(Operand(object, index, times_1, 0), value);
      __ leaq(index, Operand(object, index, times_1, 0));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      __ RecordWrite(object, index, value, mode);
      break;
    }
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(movb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(movw);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(movl);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(movss);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
      break;
  }
}


// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  X64OperandConverter i(this, instr);
  Label::Distance flabel_distance =
      branch->fallthru ? Label::kNear : Label::kFar;
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  switch (branch->condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnorderedLessThan:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
  }
  if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}


void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}


// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, static_cast<int>(instr->OutputCount()));
  Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
  Condition cc = no_condition;
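  // Unordered conditions first test the parity flag (set by ucomisd when an
  // operand is NaN) and materialize the result directly; all other paths fall
  // through to the setcc/movzxbl sequence after the switch.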
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnorderedLessThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnorderedGreaterThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
  }
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}


void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
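

// The prologue depends on the call descriptor kind: C entry frames push rbp
// and save the callee-saved registers, JS function calls emit the standard
// JS frame prologue, and everything else builds a stub frame. All variants
// then reserve the frame's spill slots.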
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ pushq(rbp);
    __ movq(rbp, rsp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
        }
      }
      __ popq(rbp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ ret(pop_count * kPointerSize);
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ Set(dst, src.ToInt32());
          break;
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): load of labels on x64.
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, Immediate(src_const));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(kScratchRegister, src_const);
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}
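

// AssembleSwap exchanges two operands in place (used by the gap resolver to
// break move cycles): register/stack swaps use xchgq, memory-to-memory swaps
// go through kScratchRegister, and swaps involving an XMM register use xmm0
// as the fixed scratch register.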
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  MarkLazyDeoptSite();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8