// Commit: [x64] Generate test reg,reg instead of cmp reg,0.
// File: src/compiler/x64/code-generator-x64.cc (upstream V8)
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/compiler/code-generator.h"
6
7 #include "src/compiler/code-generator-impl.h"
8 #include "src/compiler/gap-resolver.h"
9 #include "src/compiler/node-matchers.h"
10 #include "src/scopes.h"
11 #include "src/x64/assembler-x64.h"
12 #include "src/x64/macro-assembler-x64.h"
13
14 namespace v8 {
15 namespace internal {
16 namespace compiler {
17
18 #define __ masm()->
19
20
21 // Adds X64 specific methods for decoding operands.
// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  // Returns input |index| as a 32-bit immediate; the input must be a
  // constant operand.
  Immediate InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  // Returns input |index| as a stack-slot memory operand.
  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }

  // Returns the instruction's output as a stack-slot memory operand.
  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  // Converts a constant operand to a 32-bit immediate.
  Immediate ToImmediate(InstructionOperand* operand) {
    return Immediate(ToConstant(operand).ToInt32());
  }

  // Converts a (double) stack slot to an rsp/rbp-relative memory operand.
  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
    return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
  }

  // Returns *offset, then bumps it.  Used to consume the instruction's
  // inputs one by one, in order, while decoding an addressing mode.
  static int NextOffset(int* offset) {
    int i = *offset;
    (*offset)++;
    return i;
  }

  // Maps an addressing mode to its SIB scale factor.  Relies on the modes
  // with scales 1/2/4/8 being declared consecutively starting at |one|
  // (checked by the STATIC_ASSERTs below).
  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }

  // Decodes the addressing mode encoded in the instruction's opcode into a
  // memory Operand, consuming instruction inputs starting at *offset.
  // Mode mnemonics: M = memory, R = base register, 1/2/4/8 = index scale,
  // trailing I = constant displacement input.
  Operand MemoryOperand(int* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1: {
        // [index*1] is just [index], so use the shorter base-only encoding.
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_M2:
        UNREACHABLE();  // Should use kMode_MR with more compact encoding instead
        return Operand(no_reg, 0);
      case kMode_M4:
      case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  // Convenience overload: decode starting at input |first_input|.
  Operand MemoryOperand(int first_input = 0) {
    return MemoryOperand(&first_input);
  }
};
131
132
133 namespace {
134
135 bool HasImmediateInput(Instruction* instr, int index) {
136   return instr->InputAt(index)->IsImmediate();
137 }
138
139
140 class OutOfLineLoadZero FINAL : public OutOfLineCode {
141  public:
142   OutOfLineLoadZero(CodeGenerator* gen, Register result)
143       : OutOfLineCode(gen), result_(result) {}
144
145   void Generate() FINAL { __ xorl(result_, result_); }
146
147  private:
148   Register const result_;
149 };
150
151
152 class OutOfLineLoadNaN FINAL : public OutOfLineCode {
153  public:
154   OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
155       : OutOfLineCode(gen), result_(result) {}
156
157   void Generate() FINAL { __ pcmpeqd(result_, result_); }
158
159  private:
160   XMMRegister const result_;
161 };
162
163
// Out-of-line slow path for double-to-integer truncation, taken when the
// fast inline cvttsd2siq overflowed (see kArchTruncateDoubleToI below):
// spills the input double to the stack and calls the macro-assembler's
// SlowTruncateToI helper on it.
class OutOfLineTruncateDoubleToI FINAL : public OutOfLineCode {
 public:
  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
                             XMMRegister input)
      : OutOfLineCode(gen), result_(result), input_(input) {}

  void Generate() FINAL {
    // Pass the input via a temporary stack slot; the slot is released
    // before returning to the fast path.
    __ subp(rsp, Immediate(kDoubleSize));
    __ movsd(MemOperand(rsp, 0), input_);
    __ SlowTruncateToI(result_, rsp, 0);
    __ addp(rsp, Immediate(kDoubleSize));
  }

 private:
  Register const result_;
  XMMRegister const input_;
};
181
182 }  // namespace
183
184
// Emits a single-operand instruction (e.g. neg, not) on the instruction's
// output, which is either a register or a stack slot.  Comments must stay
// outside the macro body: every body line ends in a '\' continuation.
#define ASSEMBLE_UNOP(asm_instr)         \
  do {                                   \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister());  \
    } else {                             \
      __ asm_instr(i.OutputOperand());   \
    }                                    \
  } while (0)
193
194
// Emits a two-operand ALU instruction combining input 0 (register or stack
// slot) with input 1 (immediate, register, or stack slot).  The result is
// written in place into the first operand, x86-style.
#define ASSEMBLE_BINOP(asm_instr)                              \
  do {                                                         \
    if (HasImmediateInput(instr, 1)) {                         \
      if (instr->InputAt(0)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else {                                                 \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
      }                                                        \
    } else {                                                   \
      if (instr->InputAt(1)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
      } else {                                                 \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
      }                                                        \
    }                                                          \
  } while (0)
211
212
// Emits a comparison of input 0 against input 1.  When comparing a register
// against the immediate 0, emits "test reg, reg" instead of "cmp reg, 0":
// both produce the same flag state (ZF/SF/PF set from the value, CF/OF
// cleared) but test has a shorter encoding.
#define ASSEMBLE_CMP(cmp_instr, test_instr)                      \
  do {                                                           \
    if (HasImmediateInput(instr, 1)) {                           \
      if (instr->InputAt(0)->IsRegister()) {                     \
        if (i.InputInt32(1) == 0) {                              \
          __ test_instr(i.InputRegister(0), i.InputRegister(0)); \
        } else {                                                 \
          __ cmp_instr(i.InputRegister(0), i.InputImmediate(1)); \
        }                                                        \
      } else {                                                   \
        __ cmp_instr(i.InputOperand(0), i.InputImmediate(1));    \
      }                                                          \
    } else {                                                     \
      if (instr->InputAt(1)->IsRegister()) {                     \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1));    \
      } else {                                                   \
        __ cmp_instr(i.InputRegister(0), i.InputOperand(1));     \
      }                                                          \
    }                                                            \
  } while (0)
233
// Emits a multiply.  With an immediate input 1, uses the three-operand
// imul form (dst = src * imm).  Otherwise uses the two-operand form, which
// multiplies in place — presumably the instruction selector constrains the
// output register to alias input 0 in that case (not visible here).
#define ASSEMBLE_MULT(asm_instr)                              \
  do {                                                        \
    if (HasImmediateInput(instr, 1)) {                        \
      if (instr->InputAt(0)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0),  \
                     i.InputImmediate(1));                    \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0),   \
                     i.InputImmediate(1));                    \
      }                                                       \
    } else {                                                  \
      if (instr->InputAt(1)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1));  \
      }                                                       \
    }                                                         \
  } while (0)
252
253
// Emits a shift of the output operand.  A constant count is taken from
// input 1 as an Int##width immediate; a variable count uses the assembler's
// *_cl variant, which shifts by the count in the cl register (x64 variable
// shifts always use cl).
#define ASSEMBLE_SHIFT(asm_instr, width)                                   \
  do {                                                                     \
    if (HasImmediateInput(instr, 1)) {                                     \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
      } else {                                                             \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1)));  \
      }                                                                    \
    } else {                                                               \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr##_cl(i.OutputRegister());                             \
      } else {                                                             \
        __ asm_instr##_cl(i.OutputOperand());                              \
      }                                                                    \
    }                                                                      \
  } while (0)
270
271
// Emits an extending move (movsx/movzx family) into the output register.
// Reads from a decoded memory operand when the instruction encodes an
// addressing mode, otherwise from input 0 (register or stack slot).
#define ASSEMBLE_MOVX(asm_instr)                            \
  do {                                                      \
    if (instr->addressing_mode() != kMode_None) {           \
      __ asm_instr(i.OutputRegister(), i.MemoryOperand());  \
    } else if (instr->InputAt(0)->IsRegister()) {           \
      __ asm_instr(i.OutputRegister(), i.InputRegister(0)); \
    } else {                                                \
      __ asm_instr(i.OutputRegister(), i.InputOperand(0));  \
    }                                                       \
  } while (0)
282
283
// Emits a two-operand SSE double binop; input 0 is both source and
// destination (the result overwrites it, x86-style).
#define ASSEMBLE_DOUBLE_BINOP(asm_instr)                                \
  do {                                                                  \
    if (instr->InputAt(1)->IsDoubleRegister()) {                        \
      __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    } else {                                                            \
      __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1));        \
    }                                                                   \
  } while (0)
292
293
// Emits a three-operand (non-destructive) AVX double binop:
// output = input 0 op input 1.  Guarded by a CpuFeatureScope for AVX.
#define ASSEMBLE_AVX_DOUBLE_BINOP(asm_instr)                           \
  do {                                                                 \
    CpuFeatureScope avx_scope(masm(), AVX);                            \
    if (instr->InputAt(1)->IsDoubleRegister()) {                       \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputDoubleRegister(1));                          \
    } else {                                                           \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputOperand(1));                                 \
    }                                                                  \
  } while (0)
305
306
// Emits a bounds-checked float load: loads buffer[index1 + index2] into the
// output double register when in bounds, otherwise produces NaN via the
// out-of-line path.  Input 3 is the length, either a register (then index2
// must be 0) or a constant; the constant path compares index1 against
// length - index2 inline and re-checks index1 + index2 out of line before
// retrying the load.
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                               \
  do {                                                                       \
    auto result = i.OutputDoubleRegister();                                  \
    auto buffer = i.InputRegister(0);                                        \
    auto index1 = i.InputRegister(1);                                        \
    auto index2 = i.InputInt32(2);                                           \
    OutOfLineCode* ool;                                                      \
    if (instr->InputAt(3)->IsRegister()) {                                   \
      auto length = i.InputRegister(3);                                      \
      DCHECK_EQ(0, index2);                                                  \
      __ cmpl(index1, length);                                               \
      ool = new (zone()) OutOfLineLoadNaN(this, result);                     \
    } else {                                                                 \
      auto length = i.InputInt32(3);                                         \
      DCHECK_LE(index2, length);                                             \
      __ cmpq(index1, Immediate(length - index2));                           \
      class OutOfLineLoadFloat FINAL : public OutOfLineCode {                \
       public:                                                               \
        OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result,           \
                           Register buffer, Register index1, int32_t index2, \
                           int32_t length)                                   \
            : OutOfLineCode(gen),                                            \
              result_(result),                                               \
              buffer_(buffer),                                               \
              index1_(index1),                                               \
              index2_(index2),                                               \
              length_(length) {}                                             \
                                                                             \
        void Generate() FINAL {                                              \
          __ leal(kScratchRegister, Operand(index1_, index2_));              \
          __ pcmpeqd(result_, result_);                                      \
          __ cmpl(kScratchRegister, Immediate(length_));                     \
          __ j(above_equal, exit());                                         \
          __ asm_instr(result_,                                              \
                       Operand(buffer_, kScratchRegister, times_1, 0));      \
        }                                                                    \
                                                                             \
       private:                                                              \
        XMMRegister const result_;                                           \
        Register const buffer_;                                              \
        Register const index1_;                                              \
        int32_t const index2_;                                               \
        int32_t const length_;                                               \
      };                                                                     \
      ool = new (zone())                                                     \
          OutOfLineLoadFloat(this, result, buffer, index1, index2, length);  \
    }                                                                        \
    __ j(above_equal, ool->entry());                                         \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2));          \
    __ bind(ool->exit());                                                    \
  } while (false)
358
359
// Emits a bounds-checked integer load: loads buffer[index1 + index2] into
// the output register when in bounds, otherwise produces zero.  Mirrors
// ASSEMBLE_CHECKED_LOAD_FLOAT, but the out-of-line path zeroes the result
// (xorl) on failure instead of producing NaN.
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                               \
  do {                                                                         \
    auto result = i.OutputRegister();                                          \
    auto buffer = i.InputRegister(0);                                          \
    auto index1 = i.InputRegister(1);                                          \
    auto index2 = i.InputInt32(2);                                             \
    OutOfLineCode* ool;                                                        \
    if (instr->InputAt(3)->IsRegister()) {                                     \
      auto length = i.InputRegister(3);                                        \
      DCHECK_EQ(0, index2);                                                    \
      __ cmpl(index1, length);                                                 \
      ool = new (zone()) OutOfLineLoadZero(this, result);                      \
    } else {                                                                   \
      auto length = i.InputInt32(3);                                           \
      DCHECK_LE(index2, length);                                               \
      __ cmpq(index1, Immediate(length - index2));                             \
      class OutOfLineLoadInteger FINAL : public OutOfLineCode {                \
       public:                                                                 \
        OutOfLineLoadInteger(CodeGenerator* gen, Register result,              \
                             Register buffer, Register index1, int32_t index2, \
                             int32_t length)                                   \
            : OutOfLineCode(gen),                                              \
              result_(result),                                                 \
              buffer_(buffer),                                                 \
              index1_(index1),                                                 \
              index2_(index2),                                                 \
              length_(length) {}                                               \
                                                                               \
        void Generate() FINAL {                                                \
          Label oob;                                                           \
          __ leal(kScratchRegister, Operand(index1_, index2_));                \
          __ cmpl(kScratchRegister, Immediate(length_));                       \
          __ j(above_equal, &oob, Label::kNear);                               \
          __ asm_instr(result_,                                                \
                       Operand(buffer_, kScratchRegister, times_1, 0));        \
          __ jmp(exit());                                                      \
          __ bind(&oob);                                                       \
          __ xorl(result_, result_);                                           \
        }                                                                      \
                                                                               \
       private:                                                                \
        Register const result_;                                                \
        Register const buffer_;                                                \
        Register const index1_;                                                \
        int32_t const index2_;                                                 \
        int32_t const length_;                                                 \
      };                                                                       \
      ool = new (zone())                                                       \
          OutOfLineLoadInteger(this, result, buffer, index1, index2, length);  \
    }                                                                          \
    __ j(above_equal, ool->entry());                                           \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2));            \
    __ bind(ool->exit());                                                      \
  } while (false)
414
415
// Emits a bounds-checked float store: stores the double in input 4 to
// buffer[index1 + index2] when in bounds; an out-of-bounds store is simply
// skipped.  As in the checked loads, a register length (input 3) requires
// index2 == 0; a constant length gets an out-of-line re-check.
#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                              \
  do {                                                                       \
    auto buffer = i.InputRegister(0);                                        \
    auto index1 = i.InputRegister(1);                                        \
    auto index2 = i.InputInt32(2);                                           \
    auto value = i.InputDoubleRegister(4);                                   \
    if (instr->InputAt(3)->IsRegister()) {                                   \
      auto length = i.InputRegister(3);                                      \
      DCHECK_EQ(0, index2);                                                  \
      Label done;                                                            \
      __ cmpl(index1, length);                                               \
      __ j(above_equal, &done, Label::kNear);                                \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
      __ bind(&done);                                                        \
    } else {                                                                 \
      auto length = i.InputInt32(3);                                         \
      DCHECK_LE(index2, length);                                             \
      __ cmpq(index1, Immediate(length - index2));                           \
      class OutOfLineStoreFloat FINAL : public OutOfLineCode {               \
       public:                                                               \
        OutOfLineStoreFloat(CodeGenerator* gen, Register buffer,             \
                            Register index1, int32_t index2, int32_t length, \
                            XMMRegister value)                               \
            : OutOfLineCode(gen),                                            \
              buffer_(buffer),                                               \
              index1_(index1),                                               \
              index2_(index2),                                               \
              length_(length),                                               \
              value_(value) {}                                               \
                                                                             \
        void Generate() FINAL {                                              \
          __ leal(kScratchRegister, Operand(index1_, index2_));              \
          __ cmpl(kScratchRegister, Immediate(length_));                     \
          __ j(above_equal, exit());                                         \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),       \
                       value_);                                              \
        }                                                                    \
                                                                             \
       private:                                                              \
        Register const buffer_;                                              \
        Register const index1_;                                              \
        int32_t const index2_;                                               \
        int32_t const length_;                                               \
        XMMRegister const value_;                                            \
      };                                                                     \
      auto ool = new (zone())                                                \
          OutOfLineStoreFloat(this, buffer, index1, index2, length, value);  \
      __ j(above_equal, ool->entry());                                       \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
      __ bind(ool->exit());                                                  \
    }                                                                        \
  } while (false)
468
469
// Shared body for bounds-checked integer stores.  |Value| is the C++ type
// of the stored value (Register or Immediate).  NOTE: this macro reads a
// local named `value` that must be declared at the expansion site — see
// ASSEMBLE_CHECKED_STORE_INTEGER, which declares it before expanding this.
#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value)                  \
  do {                                                                         \
    auto buffer = i.InputRegister(0);                                          \
    auto index1 = i.InputRegister(1);                                          \
    auto index2 = i.InputInt32(2);                                             \
    if (instr->InputAt(3)->IsRegister()) {                                     \
      auto length = i.InputRegister(3);                                        \
      DCHECK_EQ(0, index2);                                                    \
      Label done;                                                              \
      __ cmpl(index1, length);                                                 \
      __ j(above_equal, &done, Label::kNear);                                  \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
      __ bind(&done);                                                          \
    } else {                                                                   \
      auto length = i.InputInt32(3);                                           \
      DCHECK_LE(index2, length);                                               \
      __ cmpq(index1, Immediate(length - index2));                             \
      class OutOfLineStoreInteger FINAL : public OutOfLineCode {               \
       public:                                                                 \
        OutOfLineStoreInteger(CodeGenerator* gen, Register buffer,             \
                              Register index1, int32_t index2, int32_t length, \
                              Value value)                                     \
            : OutOfLineCode(gen),                                              \
              buffer_(buffer),                                                 \
              index1_(index1),                                                 \
              index2_(index2),                                                 \
              length_(length),                                                 \
              value_(value) {}                                                 \
                                                                               \
        void Generate() FINAL {                                                \
          __ leal(kScratchRegister, Operand(index1_, index2_));                \
          __ cmpl(kScratchRegister, Immediate(length_));                       \
          __ j(above_equal, exit());                                           \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),         \
                       value_);                                                \
        }                                                                      \
                                                                               \
       private:                                                                \
        Register const buffer_;                                                \
        Register const index1_;                                                \
        int32_t const index2_;                                                 \
        int32_t const length_;                                                 \
        Value const value_;                                                    \
      };                                                                       \
      auto ool = new (zone())                                                  \
          OutOfLineStoreInteger(this, buffer, index1, index2, length, value);  \
      __ j(above_equal, ool->entry());                                         \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
      __ bind(ool->exit());                                                    \
    }                                                                          \
  } while (false)
521
522
// Dispatches a bounds-checked integer store on the kind of the stored
// value (input 4): declares the `value` local that the _IMPL macro reads,
// as either a Register or an Immediate.
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                \
  do {                                                           \
    if (instr->InputAt(4)->IsRegister()) {                       \
      Register value = i.InputRegister(4);                       \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register);  \
    } else {                                                     \
      Immediate value = i.InputImmediate(4);                     \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
    }                                                            \
  } while (false)
533
534
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        // Direct call to a known code object.
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        // Indirect call: skip past the Code object header to reach the
        // first instruction.
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ movq(i.OutputRegister(), rsp);
      break;
    case kArchTruncateDoubleToI: {
      auto result = i.OutputRegister();
      auto input = i.InputDoubleRegister(0);
      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
      __ cvttsd2siq(result, input);
      // cvttsd2siq yields 0x8000000000000000 (INT64_MIN) for NaN and
      // out-of-range inputs; comparing that value against 1 sets the overflow
      // flag exactly in that case, sending us to the out-of-line slow path.
      __ cmpq(result, Immediate(1));
      __ j(overflow, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kX64Add32:
      ASSEMBLE_BINOP(addl);
      break;
    case kX64Add:
      ASSEMBLE_BINOP(addq);
      break;
    case kX64Sub32:
      ASSEMBLE_BINOP(subl);
      break;
    case kX64Sub:
      ASSEMBLE_BINOP(subq);
      break;
    case kX64And32:
      ASSEMBLE_BINOP(andl);
      break;
    case kX64And:
      ASSEMBLE_BINOP(andq);
      break;
    case kX64Cmp32:
      ASSEMBLE_CMP(cmpl, testl);
      break;
    case kX64Cmp:
      ASSEMBLE_CMP(cmpq, testq);
      break;
    case kX64Test32:
      ASSEMBLE_BINOP(testl);
      break;
    case kX64Test:
      ASSEMBLE_BINOP(testq);
      break;
    case kX64Imul32:
      ASSEMBLE_MULT(imull);
      break;
    case kX64Imul:
      ASSEMBLE_MULT(imulq);
      break;
    // One-operand imull/mull leave the high half of the product in rdx
    // (register use is fixed by the instruction encoding).
    case kX64ImulHigh32:
      if (instr->InputAt(1)->IsRegister()) {
        __ imull(i.InputRegister(1));
      } else {
        __ imull(i.InputOperand(1));
      }
      break;
    case kX64UmulHigh32:
      if (instr->InputAt(1)->IsRegister()) {
        __ mull(i.InputRegister(1));
      } else {
        __ mull(i.InputOperand(1));
      }
      break;
    case kX64Idiv32:
      // cdq sign-extends eax into edx:eax before the signed division.
      __ cdq();
      __ idivl(i.InputRegister(1));
      break;
    case kX64Idiv:
      // cqo sign-extends rax into rdx:rax before the signed division.
      __ cqo();
      __ idivq(i.InputRegister(1));
      break;
    case kX64Udiv32:
      // Zero the high half of the dividend for unsigned division.
      __ xorl(rdx, rdx);
      __ divl(i.InputRegister(1));
      break;
    case kX64Udiv:
      __ xorq(rdx, rdx);
      __ divq(i.InputRegister(1));
      break;
    case kX64Not:
      ASSEMBLE_UNOP(notq);
      break;
    case kX64Not32:
      ASSEMBLE_UNOP(notl);
      break;
    case kX64Neg:
      ASSEMBLE_UNOP(negq);
      break;
    case kX64Neg32:
      ASSEMBLE_UNOP(negl);
      break;
    case kX64Or32:
      ASSEMBLE_BINOP(orl);
      break;
    case kX64Or:
      ASSEMBLE_BINOP(orq);
      break;
    case kX64Xor32:
      ASSEMBLE_BINOP(xorl);
      break;
    case kX64Xor:
      ASSEMBLE_BINOP(xorq);
      break;
    // Shifts: the second macro argument is the number of significant bits in
    // an immediate shift count (5 for 32-bit, 6 for 64-bit operations).
    case kX64Shl32:
      ASSEMBLE_SHIFT(shll, 5);
      break;
    case kX64Shl:
      ASSEMBLE_SHIFT(shlq, 6);
      break;
    case kX64Shr32:
      ASSEMBLE_SHIFT(shrl, 5);
      break;
    case kX64Shr:
      ASSEMBLE_SHIFT(shrq, 6);
      break;
    case kX64Sar32:
      ASSEMBLE_SHIFT(sarl, 5);
      break;
    case kX64Sar:
      ASSEMBLE_SHIFT(sarq, 6);
      break;
    case kX64Ror32:
      ASSEMBLE_SHIFT(rorl, 5);
      break;
    case kX64Ror:
      ASSEMBLE_SHIFT(rorq, 6);
      break;
    case kSSEFloat64Cmp:
      ASSEMBLE_DOUBLE_BINOP(ucomisd);
      break;
    case kSSEFloat64Add:
      ASSEMBLE_DOUBLE_BINOP(addsd);
      break;
    case kSSEFloat64Sub:
      ASSEMBLE_DOUBLE_BINOP(subsd);
      break;
    case kSSEFloat64Mul:
      ASSEMBLE_DOUBLE_BINOP(mulsd);
      break;
    case kSSEFloat64Div:
      ASSEMBLE_DOUBLE_BINOP(divsd);
      break;
    case kSSEFloat64Mod: {
      // Float64 modulus has no SSE equivalent; go through the x87 fprem
      // instruction, shuttling values via a stack slot.
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instructions implicitly use rax.
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF)) {
        CpuFeatureScope sahf_scope(masm(), SAHF);
        __ sahf();
      } else {
        // No SAHF: move the x87 status bits into EFLAGS via push/popf.
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
      // fprem sets C2 (parity after the transfer above) while incomplete.
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Sqrt:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    // The rounding cases below require SSE4.1 (roundsd).
    case kSSEFloat64Floor: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundDown);
      break;
    }
    case kSSEFloat64Ceil: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundUp);
      break;
    }
    case kSSEFloat64RoundTruncate: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundToZero);
      break;
    }
    case kSSECvtss2sd:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSECvtsd2ss:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToUint32: {
      // Convert via the 64-bit instruction so uint32 values above INT32_MAX
      // still fit; the result is expected to be zero-extended.
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kSSEInt32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEUint32ToFloat64:
      // movl zero-extends into the scratch register, so the 64-bit signed
      // conversion below yields the correct unsigned 32-bit value.
      if (instr->InputAt(0)->IsRegister()) {
        __ movl(kScratchRegister, i.InputRegister(0));
      } else {
        __ movl(kScratchRegister, i.InputOperand(0));
      }
      __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
      break;
    case kAVXFloat64Add:
      ASSEMBLE_AVX_DOUBLE_BINOP(vaddsd);
      break;
    case kAVXFloat64Sub:
      ASSEMBLE_AVX_DOUBLE_BINOP(vsubsd);
      break;
    case kAVXFloat64Mul:
      ASSEMBLE_AVX_DOUBLE_BINOP(vmulsd);
      break;
    case kAVXFloat64Div:
      ASSEMBLE_AVX_DOUBLE_BINOP(vdivsd);
      break;
    case kX64Movsxbl:
      ASSEMBLE_MOVX(movsxbl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movzxbl:
      ASSEMBLE_MOVX(movzxbl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movb: {
      // Byte store; the value may be an immediate or a register.
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
      } else {
        __ movb(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movsxwl:
      ASSEMBLE_MOVX(movsxwl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movzxwl:
      ASSEMBLE_MOVX(movzxwl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movw: {
      // 16-bit store; the value may be an immediate or a register.
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
      } else {
        __ movw(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movl:
      // 32-bit move: load (with an output), register-to-register move, or
      // store, depending on the instruction shape.
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          if (instr->InputAt(0)->IsRegister()) {
            __ movl(i.OutputRegister(), i.InputRegister(0));
          } else {
            __ movl(i.OutputRegister(), i.InputOperand(0));
          }
        } else {
          __ movl(i.OutputRegister(), i.MemoryOperand());
        }
        __ AssertZeroExtended(i.OutputRegister());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
        } else {
          __ movl(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movsxlq:
      ASSEMBLE_MOVX(movsxlq);
      break;
    case kX64Movq:
      // 64-bit load or store.
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
        } else {
          __ movq(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movss:
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Movsd:
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Lea32: {
      AddressingMode mode = AddressingModeField::decode(instr->opcode());
      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
      // and addressing mode just happens to work out. The "addl"/"subl" forms
      // in these cases are faster based on measurements.
      if (i.InputRegister(0).is(i.OutputRegister())) {
        if (mode == kMode_MRI) {
          int32_t constant_summand = i.InputInt32(1);
          if (constant_summand > 0) {
            __ addl(i.OutputRegister(), Immediate(constant_summand));
          } else if (constant_summand < 0) {
            __ subl(i.OutputRegister(), Immediate(-constant_summand));
          }
        } else if (mode == kMode_MR1) {
          if (i.InputRegister(1).is(i.OutputRegister())) {
            // lea r, [r + r] is just a doubling: use a shift by 1.
            __ shll(i.OutputRegister(), Immediate(1));
          } else {
            __ leal(i.OutputRegister(), i.MemoryOperand());
          }
        } else if (mode == kMode_M2) {
          __ shll(i.OutputRegister(), Immediate(1));
        } else if (mode == kMode_M4) {
          __ shll(i.OutputRegister(), Immediate(2));
        } else if (mode == kMode_M8) {
          __ shll(i.OutputRegister(), Immediate(3));
        } else {
          __ leal(i.OutputRegister(), i.MemoryOperand());
        }
      } else {
        __ leal(i.OutputRegister(), i.MemoryOperand());
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kX64Lea:
      __ leaq(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Dec32:
      __ decl(i.OutputRegister());
      break;
    case kX64Inc32:
      __ incl(i.OutputRegister());
      break;
    case kX64Push:
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
      } else {
        if (instr->InputAt(0)->IsRegister()) {
          __ pushq(i.InputRegister(0));
        } else {
          __ pushq(i.InputOperand(0));
        }
      }
      break;
    case kX64StoreWriteBarrier: {
      // Store the value, then record the write for the GC. The index
      // register is clobbered below: it is turned into the absolute slot
      // address that RecordWrite expects.
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ movq(Operand(object, index, times_1, 0), value);
      __ leaq(index, Operand(object, index, times_1, 0));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      __ RecordWrite(object, index, value, mode);
      break;
    }
    // Checked loads/stores: bounds-checked accesses that branch to an
    // out-of-line path when the index is outside the buffer (see the
    // ASSEMBLE_CHECKED_* macros above).
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(movb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(movw);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(movl);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(movss);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
      break;
  }
}
1031
1032
// Assembles branches after this instruction. Maps the high-level flags
// condition onto x64 condition codes; the flags themselves were set by the
// preceding compare/test instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  X64OperandConverter i(this, instr);
  Label::Distance flabel_distance =
      branch->fallthru ? Label::kNear : Label::kFar;
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  switch (branch->condition) {
    case kUnorderedEqual:
      // ucomisd sets the parity flag for NaN operands; unordered "equal"
      // is false, so branch to the false label first.
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      // Unordered "not equal" is true, so NaN operands take the true label.
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
  }
  // Jump to the false block explicitly unless it is the fallthrough block.
  if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}
1086
1087
1088 void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
1089   if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
1090 }
1091
1092
// Assembles boolean materializations after this instruction: turns the
// current flags state into a 0 or 1 value in the result register.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, static_cast<int>(instr->OutputCount()));
  Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
  Condition cc = no_condition;
  switch (condition) {
    case kUnorderedEqual:
      // NaN operands set the parity flag; unordered "equal" is false, so
      // materialize 0 directly and skip the setcc path in that case.
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      // Unordered "not equal" is true: materialize 1 for NaN operands.
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
  }
  __ bind(&check);
  // Set the low byte from the condition, then zero-extend to the full
  // register (movzxbl with a 32-bit destination also clears the upper bits).
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}
1158
1159
1160 void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
1161   X64OperandConverter i(this, instr);
1162   Register input = i.InputRegister(0);
1163   for (size_t index = 2; index < instr->InputCount(); index += 2) {
1164     __ cmpl(input, Immediate(i.InputInt32(static_cast<int>(index + 0))));
1165     __ j(equal, GetLabel(i.InputRpo(static_cast<int>(index + 1))));
1166   }
1167   AssembleArchJump(i.InputRpo(1));
1168 }
1169
1170
1171 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
1172   X64OperandConverter i(this, instr);
1173   Register input = i.InputRegister(0);
1174   int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
1175   Label** cases = zone()->NewArray<Label*>(case_count);
1176   for (int32_t index = 0; index < case_count; ++index) {
1177     cases[index] = GetLabel(i.InputRpo(index + 2));
1178   }
1179   Label* const table = AddJumpTable(cases, case_count);
1180   __ cmpl(input, Immediate(case_count));
1181   __ j(above_equal, GetLabel(i.InputRpo(1)));
1182   __ leaq(kScratchRegister, Operand(table));
1183   __ jmp(Operand(kScratchRegister, input, times_8, 0));
1184 }
1185
1186
1187 void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
1188   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
1189       isolate(), deoptimization_id, Deoptimizer::LAZY);
1190   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
1191 }
1192
1193
// Assembles the function prologue: frame setup, callee-saved register spills
// (C calls only), optional OSR entry handling, and stack-slot allocation.
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    // C entry frame: push rbp manually and save callee-saved registers.
    __ pushq(rbp);
    __ movq(rbp, rsp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      // Push in descending register order; AssembleReturn pops in
      // ascending order to match.
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    // Standard JS frame, possibly with code-aging support.
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else if (stack_slots > 0) {
    // Stub frame; only needed when there are spill slots to allocate.
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
    stack_slots -= frame()->GetOsrStackSlotCount();
  }

  // Reserve space for the spill slots in one adjustment.
  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));
  }
}
1239
1240
// Assembles the function epilogue, mirroring AssemblePrologue: restores
// callee-saved registers (C calls), tears down the frame and returns,
// popping JS arguments where required.
void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.
      if (saves != 0) {
        // Pop in ascending order, the reverse of the prologue's pushes.
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
        }
      }
      __ popq(rbp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    // JS calls pop their receiver+arguments from the stack on return.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ ret(pop_count * kPointerSize);
  } else {
    // No frame was constructed; a plain return suffices.
    __ ret(0);
  }
}
1277
1278
// Assembles a single parallel-move step: copies `source` to `destination`,
// using kScratchRegister / xmm0 as fixed scratch where a direct move is not
// encodable (memory-to-memory, constant-to-stack).
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      // Materialize into the destination register, or into the scratch
      // register followed by a store for stack-slot destinations.
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ Set(dst, src.ToInt32());
          break;
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
          break;
        case Constant::kFloat32:
          // Float constants headed for a GP register/slot are boxed as
          // heap numbers.
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): load of labels on x64.
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, Immediate(src_const));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      // Move the raw 64-bit bit pattern of the double.
      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(kScratchRegister, src_const);
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}
1388
1389
// Assembles a swap of two operands, used by the gap resolver to break move
// cycles. Uses xchgq where possible and kScratchRegister / xmm0 otherwise.
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    // Register-memory: xchg handles this directly.
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap.  We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
1433
1434
1435 void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
1436   for (size_t index = 0; index < target_count; ++index) {
1437     __ dq(targets[index]);
1438   }
1439 }
1440
1441
// Emits a single nop; presumably padding for smi-code inlining patching
// (per the method name) — confirm against callers.
void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
1443
1444
1445 void CodeGenerator::EnsureSpaceForLazyDeopt() {
1446   int space_needed = Deoptimizer::patch_size();
1447   if (!info()->IsStub()) {
1448     // Ensure that we have enough space after the previous lazy-bailout
1449     // instruction for patching the code here.
1450     int current_pc = masm()->pc_offset();
1451     if (current_pc < last_lazy_deopt_pc_ + space_needed) {
1452       int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
1453       __ Nop(padding_size);
1454     }
1455   }
1456   MarkLazyDeoptSite();
1457 }
1458
1459 #undef __
1460
1461 }  // namespace internal
1462 }  // namespace compiler
1463 }  // namespace v8