// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->

// Adds X64-specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Immediate InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Immediate ToImmediate(InstructionOperand* operand) {
    return Immediate(ToConstant(operand).ToInt32());
  }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
    return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
  }

  static int NextOffset(int* offset) {
    int i = *offset;
    (*offset)++;
    return i;
  }

  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }
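
  // Decodes the memory operand of the current instruction. The addressing
  // mode encoded in the opcode determines which of the instruction's inputs
  // are consumed here, starting at *offset: some combination of a base
  // register, an index register scaled by 1, 2, 4 or 8, and a 32-bit
  // displacement.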
  Operand MemoryOperand(int* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_M2:
        UNREACHABLE();  // Should use kModeMR with more compact encoding instead
        return Operand(no_reg, 0);
      case kMode_M4:
      case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  Operand MemoryOperand(int first_input = 0) {
    return MemoryOperand(&first_input);
  }
};


bool HasImmediateInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsImmediate();
}
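

// Out-of-line code is emitted after the main instruction stream. The fast
// path branches to ool->entry() when the unlikely case is hit and resumes at
// ool->exit(); the fragments below produce a fallback value (zero or NaN) or
// run the slow double-to-integer truncation.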
class OutOfLineLoadZero FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ xorl(result_, result_); }

 private:
  Register const result_;
};


class OutOfLineLoadNaN FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ pcmpeqd(result_, result_); }

 private:
  XMMRegister const result_;
};


class OutOfLineTruncateDoubleToI FINAL : public OutOfLineCode {
 public:
  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
                             XMMRegister input)
      : OutOfLineCode(gen), result_(result), input_(input) {}

  void Generate() FINAL {
    __ subp(rsp, Immediate(kDoubleSize));
    __ movsd(MemOperand(rsp, 0), input_);
    __ SlowTruncateToI(result_, rsp, 0);
    __ addp(rsp, Immediate(kDoubleSize));
  }

 private:
  Register const result_;
  XMMRegister const input_;
};
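

// The ASSEMBLE_* macros below expand to the operand-form dispatch that most
// opcodes need: depending on whether an input (or the output) was allocated
// to a register, a stack slot or an immediate, the matching
// register/memory/immediate form of the assembler instruction is emitted.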
#define ASSEMBLE_UNOP(asm_instr) \
  do { \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister()); \
    } else { \
      __ asm_instr(i.OutputOperand()); \
    } \
  } while (0)


#define ASSEMBLE_BINOP(asm_instr) \
  do { \
    if (HasImmediateInput(instr, 1)) { \
      if (instr->InputAt(0)->IsRegister()) { \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else { \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \
      } \
    } else { \
      if (instr->InputAt(1)->IsRegister()) { \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \
      } else { \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \
      } \
    } \
  } while (0)


#define ASSEMBLE_MULT(asm_instr) \
  do { \
    if (HasImmediateInput(instr, 1)) { \
      if (instr->InputAt(0)->IsRegister()) { \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
                     i.InputImmediate(1)); \
      } else { \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0), \
                     i.InputImmediate(1)); \
      } \
    } else { \
      if (instr->InputAt(1)->IsRegister()) { \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
      } else { \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1)); \
      } \
    } \
  } while (0)


#define ASSEMBLE_SHIFT(asm_instr, width) \
  do { \
    if (HasImmediateInput(instr, 1)) { \
      if (instr->Output()->IsRegister()) { \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
      } else { \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1))); \
      } \
    } else { \
      if (instr->Output()->IsRegister()) { \
        __ asm_instr##_cl(i.OutputRegister()); \
      } else { \
        __ asm_instr##_cl(i.OutputOperand()); \
      } \
    } \
  } while (0)


#define ASSEMBLE_DOUBLE_BINOP(asm_instr) \
  do { \
    if (instr->InputAt(1)->IsDoubleRegister()) { \
      __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    } else { \
      __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1)); \
    } \
  } while (0)


#define ASSEMBLE_AVX_DOUBLE_BINOP(asm_instr) \
  do { \
    CpuFeatureScope avx_scope(masm(), AVX); \
    if (instr->InputAt(1)->IsDoubleRegister()) { \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputDoubleRegister(1)); \
    } else { \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputOperand(1)); \
    } \
  } while (0)
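

// Checked loads and stores compare the index against the buffer length
// before accessing the buffer. Out-of-bounds loads produce a neutral value
// (0 for integers, NaN for floats) instead of trapping; out-of-bounds stores
// are skipped. When the index is split into a register part and a constant
// part, the out-of-line class recomputes the combined index with leal and
// re-checks the bound before touching memory.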
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr) \
  do { \
    auto result = i.OutputDoubleRegister(); \
    auto buffer = i.InputRegister(0); \
    auto index1 = i.InputRegister(1); \
    auto index2 = i.InputInt32(2); \
    OutOfLineCode* ool; \
    if (instr->InputAt(3)->IsRegister()) { \
      auto length = i.InputRegister(3); \
      DCHECK_EQ(0, index2); \
      __ cmpl(index1, length); \
      ool = new (zone()) OutOfLineLoadNaN(this, result); \
    } else { \
      auto length = i.InputInt32(3); \
      DCHECK_LE(index2, length); \
      __ cmpq(index1, Immediate(length - index2)); \
      class OutOfLineLoadFloat FINAL : public OutOfLineCode { \
       public: \
        OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result, \
                           Register buffer, Register index1, int32_t index2, \
                           int32_t length) \
            : OutOfLineCode(gen), \
              result_(result), \
              buffer_(buffer), \
              index1_(index1), \
              index2_(index2), \
              length_(length) {} \
        void Generate() FINAL { \
          __ leal(kScratchRegister, Operand(index1_, index2_)); \
          __ pcmpeqd(result_, result_); \
          __ cmpl(kScratchRegister, Immediate(length_)); \
          __ j(above_equal, exit()); \
          __ asm_instr(result_, \
                       Operand(buffer_, kScratchRegister, times_1, 0)); \
        } \
       private: \
        XMMRegister const result_; \
        Register const buffer_; \
        Register const index1_; \
        int32_t const index2_; \
        int32_t const length_; \
      }; \
      ool = new (zone()) \
          OutOfLineLoadFloat(this, result, buffer, index1, index2, length); \
    } \
    __ j(above_equal, ool->entry()); \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
    __ bind(ool->exit()); \
  } while (0)


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
  do { \
    auto result = i.OutputRegister(); \
    auto buffer = i.InputRegister(0); \
    auto index1 = i.InputRegister(1); \
    auto index2 = i.InputInt32(2); \
    OutOfLineCode* ool; \
    if (instr->InputAt(3)->IsRegister()) { \
      auto length = i.InputRegister(3); \
      DCHECK_EQ(0, index2); \
      __ cmpl(index1, length); \
      ool = new (zone()) OutOfLineLoadZero(this, result); \
    } else { \
      auto length = i.InputInt32(3); \
      DCHECK_LE(index2, length); \
      __ cmpq(index1, Immediate(length - index2)); \
      class OutOfLineLoadInteger FINAL : public OutOfLineCode { \
       public: \
        OutOfLineLoadInteger(CodeGenerator* gen, Register result, \
                             Register buffer, Register index1, int32_t index2, \
                             int32_t length) \
            : OutOfLineCode(gen), \
              result_(result), \
              buffer_(buffer), \
              index1_(index1), \
              index2_(index2), \
              length_(length) {} \
        void Generate() FINAL { \
          __ leal(kScratchRegister, Operand(index1_, index2_)); \
          __ xorl(result_, result_); \
          __ cmpl(kScratchRegister, Immediate(length_)); \
          __ j(above_equal, exit()); \
          __ asm_instr(result_, \
                       Operand(buffer_, kScratchRegister, times_1, 0)); \
        } \
       private: \
        Register const result_; \
        Register const buffer_; \
        Register const index1_; \
        int32_t const index2_; \
        int32_t const length_; \
      }; \
      ool = new (zone()) \
          OutOfLineLoadInteger(this, result, buffer, index1, index2, length); \
    } \
    __ j(above_equal, ool->entry()); \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
    __ bind(ool->exit()); \
  } while (0)


#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
  do { \
    auto buffer = i.InputRegister(0); \
    auto index1 = i.InputRegister(1); \
    auto index2 = i.InputInt32(2); \
    auto value = i.InputDoubleRegister(4); \
    if (instr->InputAt(3)->IsRegister()) { \
      auto length = i.InputRegister(3); \
      DCHECK_EQ(0, index2); \
      Label done; \
      __ cmpl(index1, length); \
      __ j(above_equal, &done, Label::kNear); \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
      __ bind(&done); \
    } else { \
      auto length = i.InputInt32(3); \
      DCHECK_LE(index2, length); \
      __ cmpq(index1, Immediate(length - index2)); \
      class OutOfLineStoreFloat FINAL : public OutOfLineCode { \
       public: \
        OutOfLineStoreFloat(CodeGenerator* gen, Register buffer, \
                            Register index1, int32_t index2, int32_t length, \
                            XMMRegister value) \
            : OutOfLineCode(gen), \
              buffer_(buffer), \
              index1_(index1), \
              index2_(index2), \
              length_(length), \
              value_(value) {} \
        void Generate() FINAL { \
          __ leal(kScratchRegister, Operand(index1_, index2_)); \
          __ cmpl(kScratchRegister, Immediate(length_)); \
          __ j(above_equal, exit()); \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0), \
                       value_); \
        } \
       private: \
        Register const buffer_; \
        Register const index1_; \
        int32_t const index2_; \
        int32_t const length_; \
        XMMRegister const value_; \
      }; \
      auto ool = new (zone()) \
          OutOfLineStoreFloat(this, buffer, index1, index2, length, value); \
      __ j(above_equal, ool->entry()); \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
      __ bind(ool->exit()); \
    } \
  } while (0)


#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value) \
  do { \
    auto buffer = i.InputRegister(0); \
    auto index1 = i.InputRegister(1); \
    auto index2 = i.InputInt32(2); \
    if (instr->InputAt(3)->IsRegister()) { \
      auto length = i.InputRegister(3); \
      DCHECK_EQ(0, index2); \
      Label done; \
      __ cmpl(index1, length); \
      __ j(above_equal, &done, Label::kNear); \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
      __ bind(&done); \
    } else { \
      auto length = i.InputInt32(3); \
      DCHECK_LE(index2, length); \
      __ cmpq(index1, Immediate(length - index2)); \
      class OutOfLineStoreInteger FINAL : public OutOfLineCode { \
       public: \
        OutOfLineStoreInteger(CodeGenerator* gen, Register buffer, \
                              Register index1, int32_t index2, int32_t length, \
                              Value value) \
            : OutOfLineCode(gen), \
              buffer_(buffer), \
              index1_(index1), \
              index2_(index2), \
              length_(length), \
              value_(value) {} \
        void Generate() FINAL { \
          __ leal(kScratchRegister, Operand(index1_, index2_)); \
          __ cmpl(kScratchRegister, Immediate(length_)); \
          __ j(above_equal, exit()); \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0), \
                       value_); \
        } \
       private: \
        Register const buffer_; \
        Register const index1_; \
        int32_t const index2_; \
        int32_t const length_; \
        Value const value_; \
      }; \
      auto ool = new (zone()) \
          OutOfLineStoreInteger(this, buffer, index1, index2, length, value); \
      __ j(above_equal, ool->entry()); \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
      __ bind(ool->exit()); \
    } \
  } while (0)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
  do { \
    if (instr->InputAt(4)->IsRegister()) { \
      Register value = i.InputRegister(4); \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register); \
    } else { \
      Immediate value = i.InputImmediate(4); \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
    } \
  } while (0)


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      AddSafepointAndDeopt(instr);
      break;
    }
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchStackPointer:
      __ movq(i.OutputRegister(), rsp);
      break;
    case kArchTruncateDoubleToI: {
      auto result = i.OutputRegister();
      auto input = i.InputDoubleRegister(0);
      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
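      // cvttsd2siq produces 0x8000000000000000 (INT64_MIN) when the input
      // cannot be converted. Comparing the result with 1 overflows for
      // exactly that value, so j(overflow, ...) routes only failed
      // conversions to the out-of-line slow path.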
      __ cvttsd2siq(result, input);
      __ cmpq(result, Immediate(1));
      __ j(overflow, ool->entry());
      __ bind(ool->exit());
      break;
    }
      ASSEMBLE_BINOP(addl);
      ASSEMBLE_BINOP(addq);
      ASSEMBLE_BINOP(subl);
      ASSEMBLE_BINOP(subq);
      ASSEMBLE_BINOP(andl);
      ASSEMBLE_BINOP(andq);
      ASSEMBLE_BINOP(cmpl);
      ASSEMBLE_BINOP(cmpq);
      ASSEMBLE_BINOP(testl);
      ASSEMBLE_BINOP(testq);
      ASSEMBLE_MULT(imull);
      ASSEMBLE_MULT(imulq);
      if (instr->InputAt(1)->IsRegister()) {
        __ imull(i.InputRegister(1));
      } else {
        __ imull(i.InputOperand(1));
      }
      if (instr->InputAt(1)->IsRegister()) {
        __ mull(i.InputRegister(1));
      } else {
        __ mull(i.InputOperand(1));
      }
      __ idivl(i.InputRegister(1));
      __ idivq(i.InputRegister(1));
      __ divl(i.InputRegister(1));
      __ divq(i.InputRegister(1));
      ASSEMBLE_BINOP(xorl);
      ASSEMBLE_BINOP(xorq);
      ASSEMBLE_SHIFT(shll, 5);
      ASSEMBLE_SHIFT(shlq, 6);
      ASSEMBLE_SHIFT(shrl, 5);
      ASSEMBLE_SHIFT(shrq, 6);
      ASSEMBLE_SHIFT(sarl, 5);
      ASSEMBLE_SHIFT(sarq, 6);
      ASSEMBLE_SHIFT(rorl, 5);
      ASSEMBLE_SHIFT(rorq, 6);
      ASSEMBLE_DOUBLE_BINOP(ucomisd);
      ASSEMBLE_DOUBLE_BINOP(addsd);
      ASSEMBLE_DOUBLE_BINOP(subsd);
      ASSEMBLE_DOUBLE_BINOP(mulsd);
      ASSEMBLE_DOUBLE_BINOP(divsd);
    case kSSEFloat64Mod: {
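      // The remainder is computed with the x87 fprem instruction, which only
      // performs a partial reduction per iteration; the FPU status word's C2
      // flag stays set until the reduction is complete. fnstsw_ax/sahf copy
      // that status into the CPU flags, where C2 appears as the parity flag
      // tested by j(parity_even, &mod_loop).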
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instructions implicitly use rax.
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF)) {
        CpuFeatureScope sahf_scope(masm(), SAHF);
        __ sahf();
      } else {
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Sqrt:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64Floor: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundDown);
      break;
    }
    case kSSEFloat64Ceil: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundUp);
      break;
    }
    case kSSEFloat64RoundTruncate: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundToZero);
      break;
    }
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToUint32: {
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kSSEInt32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEUint32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ movl(kScratchRegister, i.InputRegister(0));
      } else {
        __ movl(kScratchRegister, i.InputOperand(0));
      }
      __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
      break;
      ASSEMBLE_AVX_DOUBLE_BINOP(vaddsd);
      ASSEMBLE_AVX_DOUBLE_BINOP(vsubsd);
      ASSEMBLE_AVX_DOUBLE_BINOP(vmulsd);
      ASSEMBLE_AVX_DOUBLE_BINOP(vdivsd);
      if (instr->addressing_mode() != kMode_None) {
        __ movsxbl(i.OutputRegister(), i.MemoryOperand());
      } else if (instr->InputAt(0)->IsRegister()) {
        __ movsxbl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ movsxbl(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      __ movzxbl(i.OutputRegister(), i.MemoryOperand());
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
      } else {
        __ movb(operand, i.InputRegister(index));
      }
      if (instr->addressing_mode() != kMode_None) {
        __ movsxwl(i.OutputRegister(), i.MemoryOperand());
      } else if (instr->InputAt(0)->IsRegister()) {
        __ movsxwl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ movsxwl(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      __ movzxwl(i.OutputRegister(), i.MemoryOperand());
      __ AssertZeroExtended(i.OutputRegister());
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
      } else {
        __ movw(operand, i.InputRegister(index));
      }
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          if (instr->InputAt(0)->IsRegister()) {
            __ movl(i.OutputRegister(), i.InputRegister(0));
          } else {
            __ movl(i.OutputRegister(), i.InputOperand(0));
          }
        } else {
          __ movl(i.OutputRegister(), i.MemoryOperand());
        }
        __ AssertZeroExtended(i.OutputRegister());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
        } else {
          __ movl(operand, i.InputRegister(index));
        }
      }
      if (instr->InputAt(0)->IsRegister()) {
        __ movsxlq(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ movsxlq(i.OutputRegister(), i.InputOperand(0));
      }
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
        } else {
          __ movq(operand, i.InputRegister(index));
        }
      }
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      }
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      }
      AddressingMode mode = AddressingModeField::decode(instr->opcode());
      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
      // and addressing mode just happen to work out. The "addl"/"subl" forms
      // in these cases are faster based on measurements.
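      // Writes to a 32-bit register implicitly zero-extend to the full
      // 64-bit register on x64, so the addl/subl/shll/leal forms used here
      // still leave a properly zero-extended value (checked below by
      // AssertZeroExtended in debug code).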
      if (i.InputRegister(0).is(i.OutputRegister())) {
        if (mode == kMode_MRI) {
          int32_t constant_summand = i.InputInt32(1);
          if (constant_summand > 0) {
            __ addl(i.OutputRegister(), Immediate(constant_summand));
          } else if (constant_summand < 0) {
            __ subl(i.OutputRegister(), Immediate(-constant_summand));
          }
        } else if (mode == kMode_MR1) {
          if (i.InputRegister(1).is(i.OutputRegister())) {
            __ shll(i.OutputRegister(), Immediate(1));
          } else {
            __ leal(i.OutputRegister(), i.MemoryOperand());
          }
        } else if (mode == kMode_M2) {
          __ shll(i.OutputRegister(), Immediate(1));
        } else if (mode == kMode_M4) {
          __ shll(i.OutputRegister(), Immediate(2));
        } else if (mode == kMode_M8) {
          __ shll(i.OutputRegister(), Immediate(3));
        } else {
          __ leal(i.OutputRegister(), i.MemoryOperand());
        }
      } else {
        __ leal(i.OutputRegister(), i.MemoryOperand());
      }
      __ AssertZeroExtended(i.OutputRegister());
      __ leaq(i.OutputRegister(), i.MemoryOperand());
      __ decl(i.OutputRegister());
      __ incl(i.OutputRegister());
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
      } else {
        if (instr->InputAt(0)->IsRegister()) {
          __ pushq(i.InputRegister(0));
        } else {
          __ pushq(i.InputOperand(0));
        }
      }
      break;
    case kX64StoreWriteBarrier: {
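      // The store itself addresses the slot as object + index; leaq then
      // turns index into the absolute slot address that RecordWrite expects.
      // Floating-point registers are only saved around the write barrier if
      // this frame actually allocated any.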
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ movsxlq(index, index);
      __ movq(Operand(object, index, times_1, 0), value);
      __ leaq(index, Operand(object, index, times_1, 0));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      __ RecordWrite(object, index, value, mode);
      break;
    }
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(movb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(movw);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(movl);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(movss);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
      break;
  }
}


// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  X64OperandConverter i(this, instr);
  Label::Distance flabel_distance =
      branch->fallthru ? Label::kNear : Label::kFar;
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
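  // Floating-point comparisons set the parity flag when either operand is
  // NaN (an unordered result). The kUnordered* cases dispatch on that flag
  // first, sending unordered results straight to the true or false label as
  // appropriate, before testing the ordinary condition.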
  switch (branch->condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnorderedLessThan:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
  }
  if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}


void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}


// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, static_cast<int>(instr->OutputCount()));
  Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
  Condition cc = no_condition;
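  // The chosen condition code is materialized below with setcc into the low
  // byte of the result register and then zero-extended with movzxbl. The
  // kUnordered* cases test the parity flag first and load a literal 0 or 1
  // when the comparison was unordered (a NaN operand).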
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnorderedLessThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnorderedGreaterThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = above;
      break;
  }
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}


void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ pushq(rbp);
    __ movq(rbp, rsp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
        }
      }
      __ popq(rbp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ ret(pop_count * kPointerSize);
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
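      // Constants headed for a general register are materialized directly
      // into it; constants headed for a stack slot are materialized into
      // kScratchRegister first and then stored below.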
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ Set(dst, src.ToInt32());
          break;
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): load of labels on x64.
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, Immediate(src_const));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(kScratchRegister, src_const);
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  MarkLazyDeoptSite();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8