// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

#define kScratchDoubleReg xmm0
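
// Note (illustrative): xmm0 doubles as the fixed scratch double register in
// this file; the move and swap code near the end relies on it never holding a
// live value across those sequences.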

// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Immediate InputImmediate(size_t index) {
    return ToImmediate(instr_->InputAt(index));

  Operand InputOperand(size_t index, int extra = 0) {
    return ToOperand(instr_->InputAt(index), extra);

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Immediate ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    if (constant.type() == Constant::kFloat64) {
      DCHECK_EQ(0, bit_cast<int64_t>(constant.ToFloat64()));
    return Immediate(constant.ToInt32());

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    FrameOffset offset =
        linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
    return Operand(offset.from_stack_pointer() ? rsp : rbp,
                   offset.offset() + extra);

  static size_t NextOffset(size_t* offset) {

  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
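
  // Illustrative example (assuming the scaled addressing modes are declared in
  // the usual MR1..MR8 order): for an instruction encoded with kMode_MR4,
  // ScaleFor(kMode_MR1, kMode_MR4) yields 2, i.e. times_4, so MemoryOperand()
  // below scales the index register by four.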

  Operand MemoryOperand(size_t* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
        Register base = InputRegister(NextOffset(offset));
        return Operand(base, disp);
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        return Operand(base, index, scale, disp);
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
        Register base = InputRegister(NextOffset(offset));
        return Operand(base, disp);
        UNREACHABLE();  // Should use kMode_MR with a more compact encoding instead.
        return Operand(no_reg, 0);
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        return Operand(index, scale, disp);
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
        return Operand(no_reg, 0);
    return Operand(no_reg, 0);

  Operand MemoryOperand(size_t first_input = 0) {
    return MemoryOperand(&first_input);
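
  // Illustrative note: MemoryOperand() consumes the instruction's inputs in
  // order, as selected by the addressing mode. For example, a kMode_MRI
  // instruction whose first two inputs are a base register and an int32
  // displacement decodes here to Operand(base, disp), leaving first_input
  // pointing past the consumed inputs.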


bool HasImmediateInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsImmediate();


class OutOfLineLoadZero final : public OutOfLineCode {
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ xorl(result_, result_); }

  Register const result_;


class OutOfLineLoadNaN final : public OutOfLineCode {
  OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ pcmpeqd(result_, result_); }

  XMMRegister const result_;


class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
                             XMMRegister input)
      : OutOfLineCode(gen), result_(result), input_(input) {}

  void Generate() final {
    __ subp(rsp, Immediate(kDoubleSize));
    __ movsd(MemOperand(rsp, 0), input_);
    __ SlowTruncateToI(result_, rsp, 0);
    __ addp(rsp, Immediate(kDoubleSize));

  Register const result_;
  XMMRegister const input_;


#define ASSEMBLE_UNOP(asm_instr) \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister()); \
      __ asm_instr(i.OutputOperand()); \


#define ASSEMBLE_BINOP(asm_instr) \
    if (HasImmediateInput(instr, 1)) { \
      if (instr->InputAt(0)->IsRegister()) { \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \
      if (instr->InputAt(1)->IsRegister()) { \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \
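
// Illustrative expansion (assuming a two-input arithmetic instruction): for
// ASSEMBLE_BINOP(addl), the branches above emit one of
//   addl reg, imm  /  addl [mem], imm  /  addl reg, reg  /  addl reg, [mem]
// depending on whether the inputs are registers, immediates, or stack slots.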


#define ASSEMBLE_MULT(asm_instr) \
    if (HasImmediateInput(instr, 1)) { \
      if (instr->InputAt(0)->IsRegister()) { \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
                     i.InputImmediate(1)); \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0), \
                     i.InputImmediate(1)); \
      if (instr->InputAt(1)->IsRegister()) { \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1)); \


#define ASSEMBLE_SHIFT(asm_instr, width) \
    if (HasImmediateInput(instr, 1)) { \
      if (instr->Output()->IsRegister()) { \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1))); \
      if (instr->Output()->IsRegister()) { \
        __ asm_instr##_cl(i.OutputRegister()); \
        __ asm_instr##_cl(i.OutputOperand()); \
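
// Note (illustrative): the `width` argument selects InputInt5 or InputInt6,
// i.e. the immediate shift count is masked to 0..31 for the 32-bit forms
// (e.g. ASSEMBLE_SHIFT(shll, 5)) and to 0..63 for the 64-bit forms
// (e.g. ASSEMBLE_SHIFT(shlq, 6)); variable counts use the _cl forms via rcx.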


#define ASSEMBLE_MOVX(asm_instr) \
    if (instr->addressing_mode() != kMode_None) { \
      __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
    } else if (instr->InputAt(0)->IsRegister()) { \
      __ asm_instr(i.OutputRegister(), i.InputRegister(0)); \
      __ asm_instr(i.OutputRegister(), i.InputOperand(0)); \


#define ASSEMBLE_SSE_BINOP(asm_instr) \
    if (instr->InputAt(1)->IsDoubleRegister()) { \
      __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
      __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1)); \


#define ASSEMBLE_SSE_UNOP(asm_instr) \
    if (instr->InputAt(0)->IsDoubleRegister()) { \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
      __ asm_instr(i.OutputDoubleRegister(), i.InputOperand(0)); \


#define ASSEMBLE_AVX_BINOP(asm_instr) \
    CpuFeatureScope avx_scope(masm(), AVX); \
    if (instr->InputAt(1)->IsDoubleRegister()) { \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputDoubleRegister(1)); \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputOperand(1)); \


#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr) \
    auto result = i.OutputDoubleRegister(); \
    auto buffer = i.InputRegister(0); \
    auto index1 = i.InputRegister(1); \
    auto index2 = i.InputInt32(2); \
    OutOfLineCode* ool; \
    if (instr->InputAt(3)->IsRegister()) { \
      auto length = i.InputRegister(3); \
      DCHECK_EQ(0, index2); \
      __ cmpl(index1, length); \
      ool = new (zone()) OutOfLineLoadNaN(this, result); \
      auto length = i.InputInt32(3); \
      DCHECK_LE(index2, length); \
      __ cmpq(index1, Immediate(length - index2)); \
      class OutOfLineLoadFloat final : public OutOfLineCode { \
        OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result, \
                           Register buffer, Register index1, int32_t index2, \
            : OutOfLineCode(gen), \
        void Generate() final { \
          __ leal(kScratchRegister, Operand(index1_, index2_)); \
          __ pcmpeqd(result_, result_); \
          __ cmpl(kScratchRegister, Immediate(length_)); \
          __ j(above_equal, exit()); \
          __ asm_instr(result_, \
                       Operand(buffer_, kScratchRegister, times_1, 0)); \
        XMMRegister const result_; \
        Register const buffer_; \
        Register const index1_; \
        int32_t const index2_; \
        int32_t const length_; \
          OutOfLineLoadFloat(this, result, buffer, index1, index2, length); \
      __ j(above_equal, ool->entry()); \
      __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
      __ bind(ool->exit()); \
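
// Note (illustrative): the checked-load pattern above compares the index
// against the length and, when out of bounds, jumps to out-of-line code that
// produces a canonical NaN (pcmpeqd on the result register); the checked
// integer loads below use the same shape but materialize zero instead.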


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
    auto result = i.OutputRegister(); \
    auto buffer = i.InputRegister(0); \
    auto index1 = i.InputRegister(1); \
    auto index2 = i.InputInt32(2); \
    OutOfLineCode* ool; \
    if (instr->InputAt(3)->IsRegister()) { \
      auto length = i.InputRegister(3); \
      DCHECK_EQ(0, index2); \
      __ cmpl(index1, length); \
      ool = new (zone()) OutOfLineLoadZero(this, result); \
      auto length = i.InputInt32(3); \
      DCHECK_LE(index2, length); \
      __ cmpq(index1, Immediate(length - index2)); \
      class OutOfLineLoadInteger final : public OutOfLineCode { \
        OutOfLineLoadInteger(CodeGenerator* gen, Register result, \
                             Register buffer, Register index1, int32_t index2, \
            : OutOfLineCode(gen), \
        void Generate() final { \
          __ leal(kScratchRegister, Operand(index1_, index2_)); \
          __ cmpl(kScratchRegister, Immediate(length_)); \
          __ j(above_equal, &oob, Label::kNear); \
          __ asm_instr(result_, \
                       Operand(buffer_, kScratchRegister, times_1, 0)); \
          __ xorl(result_, result_); \
        Register const result_; \
        Register const buffer_; \
        Register const index1_; \
        int32_t const index2_; \
        int32_t const length_; \
          OutOfLineLoadInteger(this, result, buffer, index1, index2, length); \
      __ j(above_equal, ool->entry()); \
      __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
      __ bind(ool->exit()); \


#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
    auto buffer = i.InputRegister(0); \
    auto index1 = i.InputRegister(1); \
    auto index2 = i.InputInt32(2); \
    auto value = i.InputDoubleRegister(4); \
    if (instr->InputAt(3)->IsRegister()) { \
      auto length = i.InputRegister(3); \
      DCHECK_EQ(0, index2); \
      __ cmpl(index1, length); \
      __ j(above_equal, &done, Label::kNear); \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
      auto length = i.InputInt32(3); \
      DCHECK_LE(index2, length); \
      __ cmpq(index1, Immediate(length - index2)); \
      class OutOfLineStoreFloat final : public OutOfLineCode { \
        OutOfLineStoreFloat(CodeGenerator* gen, Register buffer, \
                            Register index1, int32_t index2, int32_t length, \
            : OutOfLineCode(gen), \
        void Generate() final { \
          __ leal(kScratchRegister, Operand(index1_, index2_)); \
          __ cmpl(kScratchRegister, Immediate(length_)); \
          __ j(above_equal, exit()); \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0), \
        Register const buffer_; \
        Register const index1_; \
        int32_t const index2_; \
        int32_t const length_; \
        XMMRegister const value_; \
      auto ool = new (zone()) \
          OutOfLineStoreFloat(this, buffer, index1, index2, length, value); \
      __ j(above_equal, ool->entry()); \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
      __ bind(ool->exit()); \


#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value) \
    auto buffer = i.InputRegister(0); \
    auto index1 = i.InputRegister(1); \
    auto index2 = i.InputInt32(2); \
    if (instr->InputAt(3)->IsRegister()) { \
      auto length = i.InputRegister(3); \
      DCHECK_EQ(0, index2); \
      __ cmpl(index1, length); \
      __ j(above_equal, &done, Label::kNear); \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
      auto length = i.InputInt32(3); \
      DCHECK_LE(index2, length); \
      __ cmpq(index1, Immediate(length - index2)); \
      class OutOfLineStoreInteger final : public OutOfLineCode { \
        OutOfLineStoreInteger(CodeGenerator* gen, Register buffer, \
                              Register index1, int32_t index2, int32_t length, \
            : OutOfLineCode(gen), \
        void Generate() final { \
          __ leal(kScratchRegister, Operand(index1_, index2_)); \
          __ cmpl(kScratchRegister, Immediate(length_)); \
          __ j(above_equal, exit()); \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0), \
        Register const buffer_; \
        Register const index1_; \
        int32_t const index2_; \
        int32_t const length_; \
        Value const value_; \
      auto ool = new (zone()) \
          OutOfLineStoreInteger(this, buffer, index1, index2, length, value); \
      __ j(above_equal, ool->entry()); \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
      __ bind(ool->exit()); \


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
    if (instr->InputAt(4)->IsRegister()) { \
      Register value = i.InputRegister(4); \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register); \
      Immediate value = i.InputImmediate(4); \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \


void CodeGenerator::AssembleDeconstructActivationRecord() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->IsJSFunctionCall() || stack_slots > 0) {


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);
  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      RecordCallPosition(instr);
    case kArchTailCallCodeObject: {
      AssembleDeconstructActivationRecord();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ jmp(code, RelocInfo::CODE_TARGET);
        Register reg = i.InputRegister(0);
        __ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      RecordCallPosition(instr);
    case kArchTailCallJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      AssembleDeconstructActivationRecord();
      __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
    case kArchPrepareCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters);
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (HasImmediateInput(instr, 0)) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      AssembleArchJump(i.InputRpo(0));
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      // don't emit code for nops.
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
    case kArchStackPointer:
      __ movq(i.OutputRegister(), rsp);
    case kArchFramePointer:
      __ movq(i.OutputRegister(), rbp);
    case kArchTruncateDoubleToI: {
      auto result = i.OutputRegister();
      auto input = i.InputDoubleRegister(0);
      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
      __ cvttsd2siq(result, input);
      __ cmpq(result, Immediate(1));
      __ j(overflow, ool->entry());
      __ bind(ool->exit());
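      // Note (illustrative): cvttsd2siq yields 0x8000000000000000 when the
      // double does not fit in an int64 (or is NaN). Comparing that value with
      // 1 is the only case that sets the overflow flag here, so j(overflow)
      // routes exactly the failed conversions to the slow out-of-line path.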
      ASSEMBLE_BINOP(addl);
      ASSEMBLE_BINOP(addq);
      ASSEMBLE_BINOP(subl);
      ASSEMBLE_BINOP(subq);
      ASSEMBLE_BINOP(andl);
      ASSEMBLE_BINOP(andq);
      ASSEMBLE_BINOP(cmpl);
      ASSEMBLE_BINOP(cmpq);
      ASSEMBLE_BINOP(testl);
      ASSEMBLE_BINOP(testq);
      ASSEMBLE_MULT(imull);
      ASSEMBLE_MULT(imulq);
      if (instr->InputAt(1)->IsRegister()) {
        __ imull(i.InputRegister(1));
        __ imull(i.InputOperand(1));
      if (instr->InputAt(1)->IsRegister()) {
        __ mull(i.InputRegister(1));
        __ mull(i.InputOperand(1));
      __ idivl(i.InputRegister(1));
      __ idivq(i.InputRegister(1));
      __ divl(i.InputRegister(1));
      __ divq(i.InputRegister(1));
      ASSEMBLE_BINOP(xorl);
      ASSEMBLE_BINOP(xorq);
      ASSEMBLE_SHIFT(shll, 5);
      ASSEMBLE_SHIFT(shlq, 6);
      ASSEMBLE_SHIFT(shrl, 5);
      ASSEMBLE_SHIFT(shrq, 6);
      ASSEMBLE_SHIFT(sarl, 5);
      ASSEMBLE_SHIFT(sarq, 6);
      ASSEMBLE_SHIFT(rorl, 5);
      ASSEMBLE_SHIFT(rorq, 6);
      if (instr->InputAt(0)->IsRegister()) {
        __ Lzcntl(i.OutputRegister(), i.InputRegister(0));
        __ Lzcntl(i.OutputRegister(), i.InputOperand(0));
      ASSEMBLE_SSE_BINOP(ucomiss);
      ASSEMBLE_SSE_BINOP(addss);
      ASSEMBLE_SSE_BINOP(subss);
      ASSEMBLE_SSE_BINOP(mulss);
      ASSEMBLE_SSE_BINOP(divss);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulss depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
    case kSSEFloat32Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 33);
      __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
    case kSSEFloat32Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 31);
      __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
    case kSSEFloat32Sqrt:
      ASSEMBLE_SSE_UNOP(sqrtss);
      ASSEMBLE_SSE_BINOP(maxss);
      ASSEMBLE_SSE_BINOP(minss);
    case kSSEFloat32ToFloat64:
      ASSEMBLE_SSE_UNOP(cvtss2sd);
      ASSEMBLE_SSE_BINOP(ucomisd);
      ASSEMBLE_SSE_BINOP(addsd);
      ASSEMBLE_SSE_BINOP(subsd);
      ASSEMBLE_SSE_BINOP(mulsd);
      ASSEMBLE_SSE_BINOP(divsd);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulsd depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
    case kSSEFloat64Mod: {
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      // This instruction traps on all kinds of inputs, but we are assuming the
      // floating-point control word is set to ignore them all.
      // The following two instructions implicitly use rax.
      if (CpuFeatures::IsSupported(SAHF)) {
        CpuFeatureScope sahf_scope(masm(), SAHF);
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      ASSEMBLE_SSE_BINOP(maxsd);
      ASSEMBLE_SSE_BINOP(minsd);
    case kSSEFloat64Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 1);
      __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
    case kSSEFloat64Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 63);
      __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
    case kSSEFloat64Sqrt:
      ASSEMBLE_SSE_UNOP(sqrtsd);
    case kSSEFloat64Round: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      RoundingMode const mode =
          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
    case kSSEFloat64ToFloat32:
      ASSEMBLE_SSE_UNOP(cvtsd2ss);
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
        __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
    case kSSEFloat64ToUint32: {
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
        __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      __ AssertZeroExtended(i.OutputRegister());
    case kSSEInt32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
    case kSSEUint32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ movl(kScratchRegister, i.InputRegister(0));
        __ movl(kScratchRegister, i.InputOperand(0));
      __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
    case kSSEFloat64ExtractLowWord32:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movl(i.OutputRegister(), i.InputOperand(0));
        __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
    case kSSEFloat64ExtractHighWord32:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movl(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
        __ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
    case kSSEFloat64InsertLowWord32:
      if (instr->InputAt(1)->IsRegister()) {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 0);
        __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
    case kSSEFloat64InsertHighWord32:
      if (instr->InputAt(1)->IsRegister()) {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 1);
        __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
    case kSSEFloat64LoadLowWord32:
      if (instr->InputAt(0)->IsRegister()) {
        __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
        __ movd(i.OutputDoubleRegister(), i.InputOperand(0));
    case kAVXFloat32Cmp: {
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
        __ vucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
      ASSEMBLE_AVX_BINOP(vaddss);
      ASSEMBLE_AVX_BINOP(vsubss);
      ASSEMBLE_AVX_BINOP(vmulss);
      ASSEMBLE_AVX_BINOP(vdivss);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulss depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      ASSEMBLE_AVX_BINOP(vmaxss);
      ASSEMBLE_AVX_BINOP(vminss);
    case kAVXFloat64Cmp: {
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
        __ vucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
    case kAVXFloat64Add:
      ASSEMBLE_AVX_BINOP(vaddsd);
    case kAVXFloat64Sub:
      ASSEMBLE_AVX_BINOP(vsubsd);
    case kAVXFloat64Mul:
      ASSEMBLE_AVX_BINOP(vmulsd);
    case kAVXFloat64Div:
      ASSEMBLE_AVX_BINOP(vdivsd);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulsd depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
    case kAVXFloat64Max:
      ASSEMBLE_AVX_BINOP(vmaxsd);
    case kAVXFloat64Min:
      ASSEMBLE_AVX_BINOP(vminsd);
    case kAVXFloat32Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 33);
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputDoubleRegister(0));
        __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
    case kAVXFloat32Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 31);
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputDoubleRegister(0));
        __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
    case kAVXFloat64Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 1);
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputDoubleRegister(0));
        __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
    case kAVXFloat64Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 63);
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputDoubleRegister(0));
        __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
      ASSEMBLE_MOVX(movsxbl);
      __ AssertZeroExtended(i.OutputRegister());
      ASSEMBLE_MOVX(movzxbl);
      __ AssertZeroExtended(i.OutputRegister());
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
        __ movb(operand, i.InputRegister(index));
      ASSEMBLE_MOVX(movsxwl);
      __ AssertZeroExtended(i.OutputRegister());
      ASSEMBLE_MOVX(movzxwl);
      __ AssertZeroExtended(i.OutputRegister());
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
        __ movw(operand, i.InputRegister(index));
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          if (instr->InputAt(0)->IsRegister()) {
            __ movl(i.OutputRegister(), i.InputRegister(0));
            __ movl(i.OutputRegister(), i.InputOperand(0));
          __ movl(i.OutputRegister(), i.MemoryOperand());
        __ AssertZeroExtended(i.OutputRegister());
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
          __ movl(operand, i.InputRegister(index));
      ASSEMBLE_MOVX(movsxlq);
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
          __ movq(operand, i.InputRegister(index));
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movl(i.OutputRegister(), i.InputOperand(0));
        __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movq(i.OutputRegister(), i.InputOperand(0));
        __ movq(i.OutputRegister(), i.InputDoubleRegister(0));
      if (instr->InputAt(0)->IsRegister()) {
        __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
        __ movss(i.OutputDoubleRegister(), i.InputOperand(0));
      if (instr->InputAt(0)->IsRegister()) {
        __ movq(i.OutputDoubleRegister(), i.InputRegister(0));
        __ movsd(i.OutputDoubleRegister(), i.InputOperand(0));
      AddressingMode mode = AddressingModeField::decode(instr->opcode());
      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
      // and addressing mode just happen to work out. The "addl"/"subl" forms
      // in these cases are faster based on measurements.
      if (i.InputRegister(0).is(i.OutputRegister())) {
        if (mode == kMode_MRI) {
          int32_t constant_summand = i.InputInt32(1);
          if (constant_summand > 0) {
            __ addl(i.OutputRegister(), Immediate(constant_summand));
          } else if (constant_summand < 0) {
            __ subl(i.OutputRegister(), Immediate(-constant_summand));
        } else if (mode == kMode_MR1) {
          if (i.InputRegister(1).is(i.OutputRegister())) {
            __ shll(i.OutputRegister(), Immediate(1));
            __ leal(i.OutputRegister(), i.MemoryOperand());
        } else if (mode == kMode_M2) {
          __ shll(i.OutputRegister(), Immediate(1));
        } else if (mode == kMode_M4) {
          __ shll(i.OutputRegister(), Immediate(2));
        } else if (mode == kMode_M8) {
          __ shll(i.OutputRegister(), Immediate(3));
          __ leal(i.OutputRegister(), i.MemoryOperand());
        __ leal(i.OutputRegister(), i.MemoryOperand());
      __ AssertZeroExtended(i.OutputRegister());
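      // Illustrative example: with the output equal to the input register and
      // kMode_MRI, "leal rax, [rax + 4]" is emitted as "addl rax, 4"; with
      // kMode_M2, "leal rax, [rax*2]" becomes "shll rax, 1". All other shapes
      // fall back to a plain leal.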
      __ leaq(i.OutputRegister(), i.MemoryOperand());
      __ decl(i.OutputRegister());
      __ incl(i.OutputRegister());
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
        if (instr->InputAt(0)->IsRegister()) {
          __ pushq(i.InputRegister(0));
        } else if (instr->InputAt(0)->IsDoubleRegister()) {
          // TODO(titzer): use another machine instruction?
          __ subq(rsp, Immediate(kDoubleSize));
          __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
          __ pushq(i.InputOperand(0));
      int const slot = MiscField::decode(instr->opcode());
      if (HasImmediateInput(instr, 0)) {
        __ movq(Operand(rsp, slot * kPointerSize), i.InputImmediate(0));
        __ movq(Operand(rsp, slot * kPointerSize), i.InputRegister(0));
    case kX64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register value = i.InputRegister(2);
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      if (HasImmediateInput(instr, 1)) {
        int index = i.InputInt32(1);
        Register scratch = i.TempRegister(1);
        __ movq(Operand(object, index), value);
        __ RecordWriteContextSlot(object, index, value, scratch, mode);
        Register index = i.InputRegister(1);
        __ movq(Operand(object, index, times_1, 0), value);
        __ leaq(index, Operand(object, index, times_1, 0));
        __ RecordWrite(object, index, value, mode);
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
    case kCheckedLoadWord64:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movq);
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(movb);
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(movw);
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(movl);
    case kCheckedStoreWord64:
      ASSEMBLE_CHECKED_STORE_INTEGER(movq);
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(movss);
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
    case kX64StackCheck:
      __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
}  // NOLINT(readability/fn_size)


// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  X64OperandConverter i(this, instr);
  Label::Distance flabel_distance =
      branch->fallthru ? Label::kNear : Label::kFar;
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  switch (branch->condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
      __ j(equal, tlabel);
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
      __ j(not_equal, tlabel);
    case kSignedLessThan:
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
    case kSignedGreaterThan:
      __ j(greater, tlabel);
    case kUnsignedLessThan:
      __ j(below, tlabel);
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      __ j(overflow, tlabel);
      __ j(no_overflow, tlabel);
  if (!branch->fallthru) __ jmp(flabel, flabel_distance);
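  // Note (illustrative): for the kUnordered* conditions above, a preceding
  // (v)ucomiss/(v)ucomisd sets the parity flag when either operand is NaN, so
  // the parity_even jumps route NaN comparisons to the false label (for
  // equality) or the true label (for inequality) before the ordered check.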


void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));


// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = no_condition;
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    case kSignedLessThan:
    case kSignedGreaterThanOrEqual:
    case kSignedLessThanOrEqual:
    case kSignedGreaterThan:
    case kUnsignedLessThan:
    case kUnsignedGreaterThanOrEqual:
    case kUnsignedLessThanOrEqual:
    case kUnsignedGreaterThan:
  __ movzxbl(reg, reg);
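  // Note (illustrative): the elided cases set `cc` (e.g. kSignedLessThan maps
  // to `less`), and the materialization ends with a setcc into the low byte of
  // `reg`; the movzxbl above then zero-extends that byte so the boolean is a
  // clean 0 or 1 in the full register.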


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  X64OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ cmpl(input, Immediate(i.InputInt32(index + 0)));
    __ j(equal, GetLabel(i.InputRpo(index + 1)));
  AssembleArchJump(i.InputRpo(1));


void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  X64OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
  Label** cases = zone()->NewArray<Label*>(case_count);
  for (int32_t index = 0; index < case_count; ++index) {
    cases[index] = GetLabel(i.InputRpo(index + 2));
  Label* const table = AddJumpTable(cases, case_count);
  __ cmpl(input, Immediate(case_count));
  __ j(above_equal, GetLabel(i.InputRpo(1)));
  __ leaq(kScratchRegister, Operand(table));
  __ jmp(Operand(kScratchRegister, input, times_8, 0));
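
// Note (illustrative): the jump table emitted by AssembleJumpTable below is a
// sequence of 8-byte label addresses (one __ dq per case), which is why the
// dispatch above scales the zero-based input by times_8 before the indirect
// jmp; out-of-range inputs were already routed to the default block.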


void CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);


static const int kQuadWordSize = 16;


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
  } else if (needs_frame_) {
    frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);

  int stack_shrink_slots = frame()->GetSpillSlotCount();
  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    // TODO(titzer): cannot address target function == local #-1
    __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
    stack_shrink_slots -=
        static_cast<int>(OsrHelper(info()).UnoptimizedFrameSlots());

  const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
  if (saves_fp != 0) {
    stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
  if (stack_shrink_slots > 0) {
    __ subq(rsp, Immediate(stack_shrink_slots * kPointerSize));

  if (saves_fp != 0) {  // Save callee-saved XMM registers.
    const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
    const int stack_size = saves_fp_count * kQuadWordSize;
    // Adjust the stack pointer.
    __ subp(rsp, Immediate(stack_size));
    // Store the registers on the stack.
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      if (!((1 << i) & saves_fp)) continue;
      __ movdqu(Operand(rsp, kQuadWordSize * slot_idx),
                XMMRegister::from_code(i));
    frame()->AllocateSavedCalleeRegisterSlots(saves_fp_count *
                                              (kQuadWordSize / kPointerSize));
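    // Note (illustrative): each callee-saved XMM register occupies a full
    // 16-byte (kQuadWordSize) slot saved with movdqu, so the frame accounting
    // above reserves kQuadWordSize / kPointerSize = 2 pointer-sized slots per
    // saved register.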

  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {  // Save callee-saved registers.
    for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
      if (!((1 << i) & saves)) continue;
      __ pushq(Register::from_code(i));
      frame()->AllocateSavedCalleeRegisterSlots(1);


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  // Restore registers.
  const RegList saves = descriptor->CalleeSavedRegisters();
    for (int i = 0; i < Register::kNumRegisters; i++) {
      if (!((1 << i) & saves)) continue;
      __ popq(Register::from_code(i));

  const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
  if (saves_fp != 0) {
    const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
    const int stack_size = saves_fp_count * kQuadWordSize;
    // Load the registers from the stack.
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      if (!((1 << i) & saves_fp)) continue;
      __ movdqu(XMMRegister::from_code(i),
                Operand(rsp, kQuadWordSize * slot_idx));
    // Adjust the stack pointer.
    __ addp(rsp, Immediate(stack_size));

  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
  } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
    // Canonicalize JSFunction return sites for now.
    if (return_label_.is_bound()) {
      __ jmp(&return_label_);
      __ bind(&return_label_);
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
  size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
  // Might need rcx for scratch if pop_size is too big.
  DCHECK_EQ(0, descriptor->CalleeSavedRegisters() & rcx.bit());
  __ Ret(static_cast<int>(pop_size), rcx);


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
      __ movq(g.ToOperand(destination), src);
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ Set(dst, src.ToInt32());
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
        case Constant::kFloat32:
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
        case Constant::kFloat64:
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          Heap::RootListIndex index;
          if (IsMaterializableFromFrame(src_object, &offset)) {
            __ movp(dst, Operand(rbp, offset));
          } else if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
            __ Move(dst, src_object);
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): load of labels on x64.
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, Immediate(src_const));
      DCHECK_EQ(Constant::kFloat64, src.type());
      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(kScratchRegister, src_const);
        __ movq(g.ToOperand(destination), kScratchRegister);
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movaps(dst, src);
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movaps(xmm0, src);
    __ movaps(src, dst);
    __ movaps(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(dst, xmm0);
    // No other combinations are possible.


void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  for (size_t index = 0; index < target_count; ++index) {
    __ dq(targets[index]);


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {

  int space_needed = Deoptimizer::patch_size();
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    __ Nop(padding_size);

}  // namespace compiler
}  // namespace internal
}  // namespace v8