[turbofan] remove dependence of InstructionBlock on BasicBlock
[platform/upstream/v8.git] / src / compiler / x64 / code-generator-x64.cc
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/compiler/code-generator.h"
6
7 #include "src/compiler/code-generator-impl.h"
8 #include "src/compiler/gap-resolver.h"
9 #include "src/compiler/node-matchers.h"
10 #include "src/scopes.h"
11 #include "src/x64/assembler-x64.h"
12 #include "src/x64/macro-assembler-x64.h"
13
14 namespace v8 {
15 namespace internal {
16 namespace compiler {
17
18 #define __ masm()->
19
20
21 // Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  // Returns input |index| as a 32-bit x64 Immediate.
  Immediate InputImmediate(size_t index) {
    return ToImmediate(instr_->InputAt(index));
  }

  // Returns input |index| as a memory Operand (must be a stack slot).
  Operand InputOperand(size_t index) {
    return ToOperand(instr_->InputAt(index));
  }

  // Returns the instruction's output as a memory Operand (stack slot).
  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  // Converts a constant operand into a 32-bit immediate.
  Immediate ToImmediate(InstructionOperand* operand) {
    return Immediate(ToConstant(operand).ToInt32());
  }

  // Converts a spill-slot operand into an [rsp/rbp + offset] Operand.
  // |extra| is an additional byte offset applied to the slot's location.
  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
    return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
  }

  // Post-increments *offset and returns its previous value; used by
  // MemoryOperand() to consume the instruction's inputs left to right.
  static size_t NextOffset(size_t* offset) {
    size_t i = *offset;
    (*offset)++;
    return i;
  }

  // Maps an addressing mode onto the assembler's ScaleFactor by its distance
  // from the corresponding times_1 mode |one|; relies on the scale variants
  // being declared consecutively (checked by the STATIC_ASSERTs).
  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }

  // Decodes the instruction's addressing mode into an x64 memory Operand,
  // consuming inputs starting at *offset. Mode naming (as exercised below):
  // M = memory, R = base register, 1/2/4/8 = index scale, trailing I = an
  // immediate displacement input. *offset is advanced past all inputs used.
  Operand MemoryOperand(size_t* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        // [base]
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        // [base + disp]
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        // [base + index * scale]
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        // [base + index * scale + disp]
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1: {
        // [reg * 1] is just [reg]; encode it as a base register.
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_M2:
        UNREACHABLE();  // Should use kMode_MR with more compact encoding instead
        return Operand(no_reg, 0);
      case kMode_M4:
      case kMode_M8: {
        // [index * scale]
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        // [index * scale + disp]
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  // Convenience overload: decode starting at input |first_input|.
  Operand MemoryOperand(size_t first_input = 0) {
    return MemoryOperand(&first_input);
  }
};
133
134
135 namespace {
136
137 bool HasImmediateInput(Instruction* instr, size_t index) {
138   return instr->InputAt(index)->IsImmediate();
139 }
140
141
142 class OutOfLineLoadZero FINAL : public OutOfLineCode {
143  public:
144   OutOfLineLoadZero(CodeGenerator* gen, Register result)
145       : OutOfLineCode(gen), result_(result) {}
146
147   void Generate() FINAL { __ xorl(result_, result_); }
148
149  private:
150   Register const result_;
151 };
152
153
154 class OutOfLineLoadNaN FINAL : public OutOfLineCode {
155  public:
156   OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
157       : OutOfLineCode(gen), result_(result) {}
158
159   void Generate() FINAL { __ pcmpeqd(result_, result_); }
160
161  private:
162   XMMRegister const result_;
163 };
164
165
// Out-of-line slow path for double-to-integer truncation, used when the
// inline cvttsd2siq fast path overflows.
class OutOfLineTruncateDoubleToI FINAL : public OutOfLineCode {
 public:
  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
                             XMMRegister input)
      : OutOfLineCode(gen), result_(result), input_(input) {}

  // Spills the input double to a fresh stack slot and lets the
  // macro-assembler's SlowTruncateToI helper compute the result from there.
  void Generate() FINAL {
    __ subp(rsp, Immediate(kDoubleSize));
    __ movsd(MemOperand(rsp, 0), input_);
    __ SlowTruncateToI(result_, rsp, 0);
    __ addp(rsp, Immediate(kDoubleSize));
  }

 private:
  Register const result_;
  XMMRegister const input_;
};
183
184 }  // namespace
185
186
// Emits a one-operand instruction whose single operand is the instruction's
// output, using the register or memory form as appropriate.
#define ASSEMBLE_UNOP(asm_instr)         \
  do {                                   \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister());  \
    } else {                             \
      __ asm_instr(i.OutputOperand());   \
    }                                    \
  } while (0)
195
196
// Emits a two-operand instruction: input 0 is the left (destination) operand
// and input 1 the right operand, which may be an immediate, a register, or a
// stack-slot memory operand.
#define ASSEMBLE_BINOP(asm_instr)                              \
  do {                                                         \
    if (HasImmediateInput(instr, 1)) {                         \
      if (instr->InputAt(0)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else {                                                 \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
      }                                                        \
    } else {                                                   \
      if (instr->InputAt(1)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
      } else {                                                 \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
      }                                                        \
    }                                                          \
  } while (0)
213
214
// Emits a multiply. With an immediate input 1 the three-operand form
// (dst, src, imm) is used; otherwise the two-operand form multiplies the
// output register by input 1 directly — NOTE(review): the two-operand path
// presumably relies on the output register already holding input 0
// (a register-allocator constraint); confirm against the instruction
// selector before changing.
#define ASSEMBLE_MULT(asm_instr)                              \
  do {                                                        \
    if (HasImmediateInput(instr, 1)) {                        \
      if (instr->InputAt(0)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0),  \
                     i.InputImmediate(1));                    \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0),   \
                     i.InputImmediate(1));                    \
      }                                                       \
    } else {                                                  \
      if (instr->InputAt(1)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1));  \
      }                                                       \
    }                                                         \
  } while (0)
233
234
// Emits a shift/rotate of the instruction's output. An immediate input 1 is
// taken as an Int<width> shift count; otherwise the _cl variant shifts by
// the count in the cl register.
#define ASSEMBLE_SHIFT(asm_instr, width)                                   \
  do {                                                                     \
    if (HasImmediateInput(instr, 1)) {                                     \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
      } else {                                                             \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1)));  \
      }                                                                    \
    } else {                                                               \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr##_cl(i.OutputRegister());                             \
      } else {                                                             \
        __ asm_instr##_cl(i.OutputOperand());                              \
      }                                                                    \
    }                                                                      \
  } while (0)
251
252
// Emits an extending move into the output register: from a decoded memory
// operand when the instruction carries an addressing mode, otherwise from a
// register or stack-slot input 0.
#define ASSEMBLE_MOVX(asm_instr)                            \
  do {                                                      \
    if (instr->addressing_mode() != kMode_None) {           \
      __ asm_instr(i.OutputRegister(), i.MemoryOperand());  \
    } else if (instr->InputAt(0)->IsRegister()) {           \
      __ asm_instr(i.OutputRegister(), i.InputRegister(0)); \
    } else {                                                \
      __ asm_instr(i.OutputRegister(), i.InputOperand(0));  \
    }                                                       \
  } while (0)
263
264
// Emits an SSE two-operand double op: input 0 is both source and
// destination; input 1 may be a register or a stack-slot operand.
#define ASSEMBLE_DOUBLE_BINOP(asm_instr)                                \
  do {                                                                  \
    if (instr->InputAt(1)->IsDoubleRegister()) {                        \
      __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    } else {                                                            \
      __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1));        \
    }                                                                   \
  } while (0)
273
274
// Emits an AVX three-operand double op (non-destructive: dst, src1, src2)
// inside a CpuFeatureScope guaranteeing AVX is enabled for the emitted code.
#define ASSEMBLE_AVX_DOUBLE_BINOP(asm_instr)                           \
  do {                                                                 \
    CpuFeatureScope avx_scope(masm(), AVX);                            \
    if (instr->InputAt(1)->IsDoubleRegister()) {                       \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputDoubleRegister(1));                          \
    } else {                                                           \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputOperand(1));                                 \
    }                                                                  \
  } while (0)
286
287
// Emits a bounds-checked float load. Inputs: 0 = buffer base register,
// 1 = index register, 2 = constant index addend, 3 = length (register or
// constant). Fast path compares index1 against (length - index2) and loads
// from [buffer + index1 + index2]. On a failed compare the out-of-line code
// re-checks the full 32-bit sum index1 + index2 against length, loading a
// NaN (all-ones) into the result if it is genuinely out of bounds, and
// performing the load otherwise. With a register length, index2 must be 0.
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                               \
  do {                                                                       \
    auto result = i.OutputDoubleRegister();                                  \
    auto buffer = i.InputRegister(0);                                        \
    auto index1 = i.InputRegister(1);                                        \
    auto index2 = i.InputInt32(2);                                           \
    OutOfLineCode* ool;                                                      \
    if (instr->InputAt(3)->IsRegister()) {                                   \
      auto length = i.InputRegister(3);                                      \
      DCHECK_EQ(0, index2);                                                  \
      __ cmpl(index1, length);                                               \
      ool = new (zone()) OutOfLineLoadNaN(this, result);                     \
    } else {                                                                 \
      auto length = i.InputInt32(3);                                         \
      DCHECK_LE(index2, length);                                             \
      __ cmpq(index1, Immediate(length - index2));                           \
      class OutOfLineLoadFloat FINAL : public OutOfLineCode {                \
       public:                                                               \
        OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result,           \
                           Register buffer, Register index1, int32_t index2, \
                           int32_t length)                                   \
            : OutOfLineCode(gen),                                            \
              result_(result),                                               \
              buffer_(buffer),                                               \
              index1_(index1),                                               \
              index2_(index2),                                               \
              length_(length) {}                                             \
                                                                             \
        void Generate() FINAL {                                              \
          __ leal(kScratchRegister, Operand(index1_, index2_));              \
          __ pcmpeqd(result_, result_);                                      \
          __ cmpl(kScratchRegister, Immediate(length_));                     \
          __ j(above_equal, exit());                                         \
          __ asm_instr(result_,                                              \
                       Operand(buffer_, kScratchRegister, times_1, 0));      \
        }                                                                    \
                                                                             \
       private:                                                              \
        XMMRegister const result_;                                           \
        Register const buffer_;                                              \
        Register const index1_;                                              \
        int32_t const index2_;                                               \
        int32_t const length_;                                               \
      };                                                                     \
      ool = new (zone())                                                     \
          OutOfLineLoadFloat(this, result, buffer, index1, index2, length);  \
    }                                                                        \
    __ j(above_equal, ool->entry());                                         \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2));          \
    __ bind(ool->exit());                                                    \
  } while (false)
339
340
// Emits a bounds-checked integer load. Same input layout and fast/slow path
// structure as ASSEMBLE_CHECKED_LOAD_FLOAT, except an out-of-bounds access
// yields zero in the result register instead of NaN.
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                               \
  do {                                                                         \
    auto result = i.OutputRegister();                                          \
    auto buffer = i.InputRegister(0);                                          \
    auto index1 = i.InputRegister(1);                                          \
    auto index2 = i.InputInt32(2);                                             \
    OutOfLineCode* ool;                                                        \
    if (instr->InputAt(3)->IsRegister()) {                                     \
      auto length = i.InputRegister(3);                                        \
      DCHECK_EQ(0, index2);                                                    \
      __ cmpl(index1, length);                                                 \
      ool = new (zone()) OutOfLineLoadZero(this, result);                      \
    } else {                                                                   \
      auto length = i.InputInt32(3);                                           \
      DCHECK_LE(index2, length);                                               \
      __ cmpq(index1, Immediate(length - index2));                             \
      class OutOfLineLoadInteger FINAL : public OutOfLineCode {                \
       public:                                                                 \
        OutOfLineLoadInteger(CodeGenerator* gen, Register result,              \
                             Register buffer, Register index1, int32_t index2, \
                             int32_t length)                                   \
            : OutOfLineCode(gen),                                              \
              result_(result),                                                 \
              buffer_(buffer),                                                 \
              index1_(index1),                                                 \
              index2_(index2),                                                 \
              length_(length) {}                                               \
                                                                               \
        void Generate() FINAL {                                                \
          Label oob;                                                           \
          __ leal(kScratchRegister, Operand(index1_, index2_));                \
          __ cmpl(kScratchRegister, Immediate(length_));                       \
          __ j(above_equal, &oob, Label::kNear);                               \
          __ asm_instr(result_,                                                \
                       Operand(buffer_, kScratchRegister, times_1, 0));        \
          __ jmp(exit());                                                      \
          __ bind(&oob);                                                       \
          __ xorl(result_, result_);                                           \
        }                                                                      \
                                                                               \
       private:                                                                \
        Register const result_;                                                \
        Register const buffer_;                                                \
        Register const index1_;                                                \
        int32_t const index2_;                                                 \
        int32_t const length_;                                                 \
      };                                                                       \
      ool = new (zone())                                                       \
          OutOfLineLoadInteger(this, result, buffer, index1, index2, length);  \
    }                                                                          \
    __ j(above_equal, ool->entry());                                           \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2));            \
    __ bind(ool->exit());                                                      \
  } while (false)
395
396
// Emits a bounds-checked float store. Inputs: 0 = buffer, 1 = index
// register, 2 = constant index addend, 3 = length (register or constant),
// 4 = value (XMM register). Out-of-bounds stores are silently skipped; the
// out-of-line code re-checks the full index1 + index2 sum before storing.
#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                              \
  do {                                                                       \
    auto buffer = i.InputRegister(0);                                        \
    auto index1 = i.InputRegister(1);                                        \
    auto index2 = i.InputInt32(2);                                           \
    auto value = i.InputDoubleRegister(4);                                   \
    if (instr->InputAt(3)->IsRegister()) {                                   \
      auto length = i.InputRegister(3);                                      \
      DCHECK_EQ(0, index2);                                                  \
      Label done;                                                            \
      __ cmpl(index1, length);                                               \
      __ j(above_equal, &done, Label::kNear);                                \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
      __ bind(&done);                                                        \
    } else {                                                                 \
      auto length = i.InputInt32(3);                                         \
      DCHECK_LE(index2, length);                                             \
      __ cmpq(index1, Immediate(length - index2));                           \
      class OutOfLineStoreFloat FINAL : public OutOfLineCode {               \
       public:                                                               \
        OutOfLineStoreFloat(CodeGenerator* gen, Register buffer,             \
                            Register index1, int32_t index2, int32_t length, \
                            XMMRegister value)                               \
            : OutOfLineCode(gen),                                            \
              buffer_(buffer),                                               \
              index1_(index1),                                               \
              index2_(index2),                                               \
              length_(length),                                               \
              value_(value) {}                                               \
                                                                             \
        void Generate() FINAL {                                              \
          __ leal(kScratchRegister, Operand(index1_, index2_));              \
          __ cmpl(kScratchRegister, Immediate(length_));                     \
          __ j(above_equal, exit());                                         \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),       \
                       value_);                                              \
        }                                                                    \
                                                                             \
       private:                                                              \
        Register const buffer_;                                              \
        Register const index1_;                                              \
        int32_t const index2_;                                               \
        int32_t const length_;                                               \
        XMMRegister const value_;                                            \
      };                                                                     \
      auto ool = new (zone())                                                \
          OutOfLineStoreFloat(this, buffer, index1, index2, length, value);  \
      __ j(above_equal, ool->entry());                                       \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
      __ bind(ool->exit());                                                  \
    }                                                                        \
  } while (false)
449
450
// Shared body for bounds-checked integer stores, parameterized by the value
// type |Value| (Register or Immediate). NOTE: deliberately unhygienic — it
// reads a local `value` of type |Value| declared at the expansion site (see
// ASSEMBLE_CHECKED_STORE_INTEGER below). Out-of-bounds stores are skipped.
#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value)                  \
  do {                                                                         \
    auto buffer = i.InputRegister(0);                                          \
    auto index1 = i.InputRegister(1);                                          \
    auto index2 = i.InputInt32(2);                                             \
    if (instr->InputAt(3)->IsRegister()) {                                     \
      auto length = i.InputRegister(3);                                        \
      DCHECK_EQ(0, index2);                                                    \
      Label done;                                                              \
      __ cmpl(index1, length);                                                 \
      __ j(above_equal, &done, Label::kNear);                                  \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
      __ bind(&done);                                                          \
    } else {                                                                   \
      auto length = i.InputInt32(3);                                           \
      DCHECK_LE(index2, length);                                               \
      __ cmpq(index1, Immediate(length - index2));                             \
      class OutOfLineStoreInteger FINAL : public OutOfLineCode {               \
       public:                                                                 \
        OutOfLineStoreInteger(CodeGenerator* gen, Register buffer,             \
                              Register index1, int32_t index2, int32_t length, \
                              Value value)                                     \
            : OutOfLineCode(gen),                                              \
              buffer_(buffer),                                                 \
              index1_(index1),                                                 \
              index2_(index2),                                                 \
              length_(length),                                                 \
              value_(value) {}                                                 \
                                                                               \
        void Generate() FINAL {                                                \
          __ leal(kScratchRegister, Operand(index1_, index2_));                \
          __ cmpl(kScratchRegister, Immediate(length_));                       \
          __ j(above_equal, exit());                                           \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),         \
                       value_);                                                \
        }                                                                      \
                                                                               \
       private:                                                                \
        Register const buffer_;                                                \
        Register const index1_;                                                \
        int32_t const index2_;                                                 \
        int32_t const length_;                                                 \
        Value const value_;                                                    \
      };                                                                       \
      auto ool = new (zone())                                                  \
          OutOfLineStoreInteger(this, buffer, index1, index2, length, value);  \
      __ j(above_equal, ool->entry());                                         \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
      __ bind(ool->exit());                                                    \
    }                                                                          \
  } while (false)
502
503
// Emits a bounds-checked integer store: declares `value` (input 4) as a
// Register or Immediate and delegates to the _IMPL macro, which reads it.
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                \
  do {                                                           \
    if (instr->InputAt(4)->IsRegister()) {                       \
      Register value = i.InputRegister(4);                       \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register);  \
    } else {                                                     \
      Immediate value = i.InputImmediate(4);                     \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
    }                                                            \
  } while (false)
514
515
516 // Assembles an instruction after register allocation, producing machine code.
// Assembles one architecture-specific instruction. Dispatches on the arch
// opcode encoded in the instruction and emits the corresponding x64 machine
// code; operands are decoded via the X64OperandConverter.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      // Call a code object, either directly via an embedded code handle or
      // indirectly through the entry point of a register-held code object.
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        // Skip the Code object header to reach the first instruction.
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      }
      RecordCallPosition(instr);
      break;
    }
    case kArchCallJSFunction: {
      // Call a JSFunction through its code entry field.
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      RecordCallPosition(instr);
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ movq(i.OutputRegister(), rsp);
      break;
    case kArchTruncateDoubleToI: {
      // Fast path: cvttsd2siq; on overflow (result == 0x8000000000000000,
      // detected via the cmpq/overflow check) take the out-of-line slow path.
      auto result = i.OutputRegister();
      auto input = i.InputDoubleRegister(0);
      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
      __ cvttsd2siq(result, input);
      __ cmpq(result, Immediate(1));
      __ j(overflow, ool->entry());
      __ bind(ool->exit());
      break;
    }
    // Integer arithmetic; the ASSEMBLE_* macros handle register/operand/
    // immediate input forms.
    case kX64Add32:
      ASSEMBLE_BINOP(addl);
      break;
    case kX64Add:
      ASSEMBLE_BINOP(addq);
      break;
    case kX64Sub32:
      ASSEMBLE_BINOP(subl);
      break;
    case kX64Sub:
      ASSEMBLE_BINOP(subq);
      break;
    case kX64And32:
      ASSEMBLE_BINOP(andl);
      break;
    case kX64And:
      ASSEMBLE_BINOP(andq);
      break;
    case kX64Cmp32:
      ASSEMBLE_BINOP(cmpl);
      break;
    case kX64Cmp:
      ASSEMBLE_BINOP(cmpq);
      break;
    case kX64Test32:
      ASSEMBLE_BINOP(testl);
      break;
    case kX64Test:
      ASSEMBLE_BINOP(testq);
      break;
    case kX64Imul32:
      ASSEMBLE_MULT(imull);
      break;
    case kX64Imul:
      ASSEMBLE_MULT(imulq);
      break;
    case kX64ImulHigh32:
      // One-operand imull: multiplies by eax implicitly; the high 32 bits of
      // the product end up in edx (register allocation fixes rax/rdx).
      if (instr->InputAt(1)->IsRegister()) {
        __ imull(i.InputRegister(1));
      } else {
        __ imull(i.InputOperand(1));
      }
      break;
    case kX64UmulHigh32:
      // Unsigned counterpart of kX64ImulHigh32; high half lands in edx.
      if (instr->InputAt(1)->IsRegister()) {
        __ mull(i.InputRegister(1));
      } else {
        __ mull(i.InputOperand(1));
      }
      break;
    case kX64Idiv32:
      // Sign-extend eax into edx:eax before the signed divide.
      __ cdq();
      __ idivl(i.InputRegister(1));
      break;
    case kX64Idiv:
      // Sign-extend rax into rdx:rax before the signed divide.
      __ cqo();
      __ idivq(i.InputRegister(1));
      break;
    case kX64Udiv32:
      // Zero the high half of the dividend for the unsigned divide.
      __ xorl(rdx, rdx);
      __ divl(i.InputRegister(1));
      break;
    case kX64Udiv:
      __ xorq(rdx, rdx);
      __ divq(i.InputRegister(1));
      break;
    case kX64Not:
      ASSEMBLE_UNOP(notq);
      break;
    case kX64Not32:
      ASSEMBLE_UNOP(notl);
      break;
    case kX64Neg:
      ASSEMBLE_UNOP(negq);
      break;
    case kX64Neg32:
      ASSEMBLE_UNOP(negl);
      break;
    case kX64Or32:
      ASSEMBLE_BINOP(orl);
      break;
    case kX64Or:
      ASSEMBLE_BINOP(orq);
      break;
    case kX64Xor32:
      ASSEMBLE_BINOP(xorl);
      break;
    case kX64Xor:
      ASSEMBLE_BINOP(xorq);
      break;
    // Shifts; the second macro argument is the bit width of the shift count
    // mask (5 bits for 32-bit forms, 6 bits for 64-bit forms).
    case kX64Shl32:
      ASSEMBLE_SHIFT(shll, 5);
      break;
    case kX64Shl:
      ASSEMBLE_SHIFT(shlq, 6);
      break;
    case kX64Shr32:
      ASSEMBLE_SHIFT(shrl, 5);
      break;
    case kX64Shr:
      ASSEMBLE_SHIFT(shrq, 6);
      break;
    case kX64Sar32:
      ASSEMBLE_SHIFT(sarl, 5);
      break;
    case kX64Sar:
      ASSEMBLE_SHIFT(sarq, 6);
      break;
    case kX64Ror32:
      ASSEMBLE_SHIFT(rorl, 5);
      break;
    case kX64Ror:
      ASSEMBLE_SHIFT(rorq, 6);
      break;
    // Scalar double-precision SSE2 arithmetic.
    case kSSEFloat64Cmp:
      ASSEMBLE_DOUBLE_BINOP(ucomisd);
      break;
    case kSSEFloat64Add:
      ASSEMBLE_DOUBLE_BINOP(addsd);
      break;
    case kSSEFloat64Sub:
      ASSEMBLE_DOUBLE_BINOP(subsd);
      break;
    case kSSEFloat64Mul:
      ASSEMBLE_DOUBLE_BINOP(mulsd);
      break;
    case kSSEFloat64Div:
      ASSEMBLE_DOUBLE_BINOP(divsd);
      break;
    case kSSEFloat64Mod: {
      // fmod has no SSE equivalent; round-trip the operands through the x87
      // FPU via a stack slot and loop on fprem until it reports completion.
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instructions traps on all kinds inputs, but we are assuming the
      // floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instruction implicitly use rax.
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF)) {
        CpuFeatureScope sahf_scope(masm(), SAHF);
        __ sahf();
      } else {
        // No SAHF: route the FPU status flags into EFLAGS via push/popf.
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Sqrt:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    // Rounding via SSE4.1 roundsd; the instruction selector only emits these
    // opcodes when SSE4.1 is available (the scope asserts/enables it here).
    case kSSEFloat64Floor: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundDown);
      break;
    }
    case kSSEFloat64Ceil: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundUp);
      break;
    }
    case kSSEFloat64RoundTruncate: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundToZero);
      break;
    }
    case kSSECvtss2sd:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSECvtsd2ss:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToUint32: {
      // Use the 64-bit truncating conversion so uint32 values above INT32_MAX
      // convert correctly; the low 32 bits of the result are the answer.
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kSSEInt32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEUint32ToFloat64:
      // Zero-extend to 64 bits first (movl clears the high half), then use
      // the signed 64-bit conversion, which is exact for all uint32 values.
      if (instr->InputAt(0)->IsRegister()) {
        __ movl(kScratchRegister, i.InputRegister(0));
      } else {
        __ movl(kScratchRegister, i.InputOperand(0));
      }
      __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
      break;
    // AVX three-operand double arithmetic (non-destructive source forms).
    case kAVXFloat64Add:
      ASSEMBLE_AVX_DOUBLE_BINOP(vaddsd);
      break;
    case kAVXFloat64Sub:
      ASSEMBLE_AVX_DOUBLE_BINOP(vsubsd);
      break;
    case kAVXFloat64Mul:
      ASSEMBLE_AVX_DOUBLE_BINOP(vmulsd);
      break;
    case kAVXFloat64Div:
      ASSEMBLE_AVX_DOUBLE_BINOP(vdivsd);
      break;
    // Loads/stores of various widths. The *x forms sign/zero-extend into a
    // 32-bit result, hence the zero-extension asserts.
    case kX64Movsxbl:
      ASSEMBLE_MOVX(movsxbl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movzxbl:
      ASSEMBLE_MOVX(movzxbl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movb: {
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
      } else {
        __ movb(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movsxwl:
      ASSEMBLE_MOVX(movsxwl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movzxwl:
      ASSEMBLE_MOVX(movzxwl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movw: {
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
      } else {
        __ movw(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movl:
      // With an output this is a load (or register move when there is no
      // addressing mode); otherwise it is a store of an immediate/register.
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          if (instr->InputAt(0)->IsRegister()) {
            __ movl(i.OutputRegister(), i.InputRegister(0));
          } else {
            __ movl(i.OutputRegister(), i.InputOperand(0));
          }
        } else {
          __ movl(i.OutputRegister(), i.MemoryOperand());
        }
        __ AssertZeroExtended(i.OutputRegister());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
        } else {
          __ movl(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movsxlq:
      ASSEMBLE_MOVX(movsxlq);
      break;
    case kX64Movq:
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
        } else {
          __ movq(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movss:
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Movsd:
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Lea32: {
      AddressingMode mode = AddressingModeField::decode(instr->opcode());
      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
      // and addressing mode just happens to work out. The "addl"/"subl" forms
      // in these cases are faster based on measurements.
      if (i.InputRegister(0).is(i.OutputRegister())) {
        if (mode == kMode_MRI) {
          int32_t constant_summand = i.InputInt32(1);
          if (constant_summand > 0) {
            __ addl(i.OutputRegister(), Immediate(constant_summand));
          } else if (constant_summand < 0) {
            __ subl(i.OutputRegister(), Immediate(-constant_summand));
          }
          // A zero summand needs no instruction at all.
        } else if (mode == kMode_MR1) {
          if (i.InputRegister(1).is(i.OutputRegister())) {
            // lea r, [r + r] == shift left by one.
            __ shll(i.OutputRegister(), Immediate(1));
          } else {
            __ leal(i.OutputRegister(), i.MemoryOperand());
          }
        } else if (mode == kMode_M2) {
          __ shll(i.OutputRegister(), Immediate(1));
        } else if (mode == kMode_M4) {
          __ shll(i.OutputRegister(), Immediate(2));
        } else if (mode == kMode_M8) {
          __ shll(i.OutputRegister(), Immediate(3));
        } else {
          __ leal(i.OutputRegister(), i.MemoryOperand());
        }
      } else {
        __ leal(i.OutputRegister(), i.MemoryOperand());
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kX64Lea:
      __ leaq(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Dec32:
      __ decl(i.OutputRegister());
      break;
    case kX64Inc32:
      __ incl(i.OutputRegister());
      break;
    case kX64Push:
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
      } else {
        if (instr->InputAt(0)->IsRegister()) {
          __ pushq(i.InputRegister(0));
        } else {
          __ pushq(i.InputOperand(0));
        }
      }
      break;
    case kX64StoreWriteBarrier: {
      // Store value at object+index, then record the write for the GC.
      // Note: clobbers the index register (reused as the slot address).
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ movq(Operand(object, index, times_1, 0), value);
      __ leaq(index, Operand(object, index, times_1, 0));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      __ RecordWrite(object, index, value, mode);
      break;
    }
    // Bounds-checked loads/stores for typed array accesses.
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(movb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(movw);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(movl);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(movss);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
      break;
  }
}
1012
1013
// Assembles branches after this instruction.
// Emits a conditional jump to the true block; the false block is reached
// either by fall-through or by an explicit unconditional jump at the end.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  X64OperandConverter i(this, instr);
  // A near jump is sufficient when the false block immediately follows.
  Label::Distance flabel_distance =
      branch->fallthru ? Label::kNear : Label::kFar;
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  switch (branch->condition) {
    case kUnorderedEqual:
      // Unordered (NaN) compares set the parity flag; unordered != equal,
      // so branch to the false block first.
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      // Unordered (NaN) counts as not-equal, so it goes to the true block.
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
  }
  // Jump to the false block unless it is the fall-through successor.
  if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}
1067
1068
1069 void CodeGenerator::AssembleArchJump(RpoNumber target) {
1070   if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
1071 }
1072
1073
// Assembles boolean materializations after this instruction.
// Converts the current condition flags into a 0/1 value in the result
// register via setcc + zero-extension. The unordered cases pre-materialize
// the NaN answer and jump straight to |done|, then fall through (in the
// C++ switch) to pick the setcc condition for the ordered path.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = no_condition;
  switch (condition) {
    case kUnorderedEqual:
      // Ordered (parity odd): take the normal setcc(equal) path at |check|.
      // Unordered (NaN): the answer is 0; skip the setcc entirely.
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      // Ordered: setcc(not_equal) at |check|. Unordered: the answer is 1.
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
  }
  // setcc writes only the low byte, so zero-extend to get a clean 0/1.
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}
1139
1140
1141 void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
1142   X64OperandConverter i(this, instr);
1143   Register input = i.InputRegister(0);
1144   for (size_t index = 2; index < instr->InputCount(); index += 2) {
1145     __ cmpl(input, Immediate(i.InputInt32(index + 0)));
1146     __ j(equal, GetLabel(i.InputRpo(index + 1)));
1147   }
1148   AssembleArchJump(i.InputRpo(1));
1149 }
1150
1151
1152 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
1153   X64OperandConverter i(this, instr);
1154   Register input = i.InputRegister(0);
1155   int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
1156   Label** cases = zone()->NewArray<Label*>(case_count);
1157   for (int32_t index = 0; index < case_count; ++index) {
1158     cases[index] = GetLabel(i.InputRpo(index + 2));
1159   }
1160   Label* const table = AddJumpTable(cases, case_count);
1161   __ cmpl(input, Immediate(case_count));
1162   __ j(above_equal, GetLabel(i.InputRpo(1)));
1163   __ leaq(kScratchRegister, Operand(table));
1164   __ jmp(Operand(kScratchRegister, input, times_8, 0));
1165 }
1166
1167
1168 void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
1169   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
1170       isolate(), deoptimization_id, Deoptimizer::LAZY);
1171   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
1172 }
1173
1174
// Assembles the function prologue: builds the appropriate frame for the
// call kind (C entry / JS function / stub), saves callee-saved registers
// where required, and allocates spill slots.
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    // C entry: build a standard frame manually and push the callee-saved
    // registers the descriptor requires.
    __ pushq(rbp);
    __ movq(rbp, rsp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      // Push in descending code order; AssembleReturn pops in ascending
      // order to match.
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    // JS function: use the standard JS prologue (handles code pre-aging).
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else if (stack_slots > 0) {
    // Stub with spill slots: a stub frame is sufficient.
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
    stack_slots -= frame()->GetOsrStackSlotCount();
  }

  // Allocate the (remaining) spill slot area.
  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));
  }
}
1220
1221
// Assembles the function epilogue: tears down the frame built by
// AssemblePrologue (restoring callee-saved registers for C entry frames)
// and returns, popping JS parameters where applicable.
void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.
      // Pop in ascending code order, mirroring the descending push order
      // used in AssemblePrologue.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
        }
      }
      __ popq(rbp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    // JS calls also pop the receiver/arguments pushed by the caller.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ ret(pop_count * kPointerSize);
  } else {
    __ ret(0);
  }
}
1258
1259
// Assembles a parallel-move element: copies |source| to |destination|.
// Dispatches on the operand kinds (register / stack slot / constant /
// double register / double stack slot). kScratchRegister and xmm0 are
// reserved as scratch for memory-to-memory transfers.
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      // Materialize into the destination register, or into the scratch
      // register followed by a store for stack-slot destinations.
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ Set(dst, src.ToInt32());
          break;
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
          break;
        case Constant::kFloat32:
          // Float constants heading into GP locations are boxed as numbers.
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): load of labels on x64.
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      // Move the raw float bits; a 32-bit store suffices for a stack slot.
      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, Immediate(src_const));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      // Move the raw double bits, via the scratch register for a stack slot.
      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(kScratchRegister, src_const);
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}
1369
1370
// Assembles a parallel-move swap: exchanges the contents of |source| and
// |destination|. Like AssembleMove, kScratchRegister and xmm0 serve as
// scratch for the combinations x64 cannot exchange directly.
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    // Register-memory: xchg handles this form directly.
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    // (Double slots are also 8 bytes, so the same GP sequence works.)
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap.  We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
1414
1415
1416 void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
1417   for (size_t index = 0; index < target_count; ++index) {
1418     __ dq(targets[index]);
1419   }
1420 }
1421
1422
1423 void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
1424
1425
1426 void CodeGenerator::EnsureSpaceForLazyDeopt() {
1427   int space_needed = Deoptimizer::patch_size();
1428   if (!info()->IsStub()) {
1429     // Ensure that we have enough space after the previous lazy-bailout
1430     // instruction for patching the code here.
1431     int current_pc = masm()->pc_offset();
1432     if (current_pc < last_lazy_deopt_pc_ + space_needed) {
1433       int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
1434       __ Nop(padding_size);
1435     }
1436   }
1437   MarkLazyDeoptSite();
1438 }
1439
1440 #undef __
1441
1442 }  // namespace internal
1443 }  // namespace compiler
1444 }  // namespace v8