[turbofan] Improve code generation for unordered comparisons.
[platform/upstream/v8.git] / src / compiler / x64 / code-generator-x64.cc
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/compiler/code-generator.h"
6
7 #include "src/compiler/code-generator-impl.h"
8 #include "src/compiler/gap-resolver.h"
9 #include "src/compiler/node-matchers.h"
10 #include "src/compiler/node-properties-inl.h"
11 #include "src/compiler/osr.h"
12 #include "src/scopes.h"
13 #include "src/x64/assembler-x64.h"
14 #include "src/x64/macro-assembler-x64.h"
15
16 namespace v8 {
17 namespace internal {
18 namespace compiler {
19
// Shorthand so assembler-emitting code reads as "__ movq(...)" etc.
#define __ masm()->
21
22
// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  // Decodes input |index| as an immediate; the operand must hold a
  // constant representable as an int32 (see ToImmediate below).
  Immediate InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  // Decodes input |index| as a stack-slot memory operand.
  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }

  // Decodes the instruction's output as a stack-slot memory operand.
  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Immediate ToImmediate(InstructionOperand* operand) {
    return Immediate(ToConstant(operand).ToInt32());
  }

  // Translates a spill-slot operand into an rsp/rbp-relative Operand,
  // using the frame offset computed by the linkage.  |extra| is an
  // additional byte offset applied to the slot's index.
  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
    return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
  }

  // Returns the current value of *offset and post-increments it.
  // MemoryOperand uses this to consume instruction inputs left to right,
  // so the ORDER of NextOffset/InputRegister calls below is significant.
  static int NextOffset(int* offset) {
    int i = *offset;
    (*offset)++;
    return i;
  }

  // Maps an addressing |mode| onto its ScaleFactor, where |one| is the
  // mode of the same family with scale times_1.  Relies on the scaled
  // modes being declared consecutively, mirroring the ScaleFactor enum.
  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }

  // Builds the x64 memory Operand encoded by this instruction's addressing
  // mode, consuming inputs starting at *offset.  Mode mnemonics: M = memory,
  // R = base register, 1/2/4/8 = index register with that scale,
  // I = immediate displacement.
  Operand MemoryOperand(int* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {  // [base]
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {  // [base + disp]
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {  // [base + index * scale]
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {  // [base + index * scale + disp]
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1: {  // [index * 1] — same encoding as a plain base.
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_M2:
        UNREACHABLE();  // Should use kModeMR with more compact encoding instead
        return Operand(no_reg, 0);
      case kMode_M4:
      case kMode_M8: {  // [index * scale]
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {  // [index * scale + disp]
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  // Convenience overload: decode starting at input |first_input|.
  Operand MemoryOperand(int first_input = 0) {
    return MemoryOperand(&first_input);
  }
};
133
134
135 namespace {
136
137 bool HasImmediateInput(Instruction* instr, int index) {
138   return instr->InputAt(index)->IsImmediate();
139 }
140
141
// Out-of-line code that materializes zero into |result|; used as the
// out-of-bounds path of checked integer loads.
class OutOfLineLoadZero FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  // xor of a register with itself yields zero.
  void Generate() FINAL { __ xorl(result_, result_); }

 private:
  Register const result_;
};
152
153
// Out-of-line code that materializes a NaN into |result|; used as the
// out-of-bounds path of checked floating-point loads.
class OutOfLineLoadNaN FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
      : OutOfLineCode(gen), result_(result) {}

  // pcmpeqd of a register with itself sets every bit; the all-ones
  // pattern is a (quiet) NaN when read as a double.
  void Generate() FINAL { __ pcmpeqd(result_, result_); }

 private:
  XMMRegister const result_;
};
164
165
// Out-of-line slow path for double-to-int truncation: spills |input| to a
// temporary stack slot and calls the MacroAssembler's SlowTruncateToI
// helper to produce |result|.
class OutOfLineTruncateDoubleToI FINAL : public OutOfLineCode {
 public:
  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
                             XMMRegister input)
      : OutOfLineCode(gen), result_(result), input_(input) {}

  void Generate() FINAL {
    // Reserve a double-sized slot, store the input, convert from memory,
    // then release the slot again.
    __ subp(rsp, Immediate(kDoubleSize));
    __ movsd(MemOperand(rsp, 0), input_);
    __ SlowTruncateToI(result_, rsp, 0);
    __ addp(rsp, Immediate(kDoubleSize));
  }

 private:
  Register const result_;
  XMMRegister const input_;
};
183
184 }  // namespace
185
186
// Emits a unary instruction whose single operand is the instruction's
// output, which may live in a register or in a stack slot.
#define ASSEMBLE_UNOP(asm_instr)         \
  do {                                   \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister());  \
    } else {                             \
      __ asm_instr(i.OutputOperand());   \
    }                                    \
  } while (0)
195
196
// Emits a two-operand instruction: input 0 is the left operand (register
// or stack slot), input 1 the right operand (immediate, register, or
// stack slot).
#define ASSEMBLE_BINOP(asm_instr)                              \
  do {                                                         \
    if (HasImmediateInput(instr, 1)) {                         \
      if (instr->InputAt(0)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else {                                                 \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
      }                                                        \
    } else {                                                   \
      if (instr->InputAt(1)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
      } else {                                                 \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
      }                                                        \
    }                                                          \
  } while (0)
213
214
// Emits a multiply.  With an immediate input 1 it uses the three-operand
// form (dst, src, imm); otherwise the two-operand form that multiplies
// input 1 into the output register (input 0 is presumably constrained to
// equal the output by the instruction selector — verify there).
#define ASSEMBLE_MULT(asm_instr)                              \
  do {                                                        \
    if (HasImmediateInput(instr, 1)) {                        \
      if (instr->InputAt(0)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0),  \
                     i.InputImmediate(1));                    \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0),   \
                     i.InputImmediate(1));                    \
      }                                                       \
    } else {                                                  \
      if (instr->InputAt(1)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1));  \
      }                                                       \
    }                                                         \
  } while (0)
233
234
// Emits a shift of the output operand.  An immediate count is read via
// InputInt##width (width 5 for 32-bit shifts, 6 for 64-bit, per the call
// sites below); a variable count uses the instruction's _cl variant,
// which takes the count in the cl register.
#define ASSEMBLE_SHIFT(asm_instr, width)                                   \
  do {                                                                     \
    if (HasImmediateInput(instr, 1)) {                                     \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
      } else {                                                             \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1)));  \
      }                                                                    \
    } else {                                                               \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr##_cl(i.OutputRegister());                             \
      } else {                                                             \
        __ asm_instr##_cl(i.OutputOperand());                              \
      }                                                                    \
    }                                                                      \
  } while (0)
251
252
// Emits a two-operand SSE double instruction: input 0 is both source and
// destination register; input 1 may be a register or a memory operand.
#define ASSEMBLE_DOUBLE_BINOP(asm_instr)                                \
  do {                                                                  \
    if (instr->InputAt(1)->IsDoubleRegister()) {                        \
      __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    } else {                                                            \
      __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1));        \
    }                                                                   \
  } while (0)
261
262
// Emits a three-operand AVX double instruction (separate destination),
// guarded by a CpuFeatureScope for AVX.
#define ASSEMBLE_AVX_DOUBLE_BINOP(asm_instr)                           \
  do {                                                                 \
    CpuFeatureScope avx_scope(masm(), AVX);                            \
    if (instr->InputAt(1)->IsDoubleRegister()) {                       \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputDoubleRegister(1));                          \
    } else {                                                           \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputOperand(1));                                 \
    }                                                                  \
  } while (0)
274
275
// Emits a bounds-checked floating-point load: result = buffer[index1 +
// index2] when in bounds, NaN otherwise.  The length (input 3) is either
// a register (then index2 must be 0) or a constant; in the constant case
// the out-of-line path defaults the result to NaN, re-checks index1 +
// index2 against the full length, and loads only if in bounds.
// NOTE(review): the inline check uses cmpq while the out-of-line re-check
// uses cmpl — confirm index1 is always a zero-extended 32-bit value.
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                               \
  do {                                                                       \
    auto result = i.OutputDoubleRegister();                                  \
    auto buffer = i.InputRegister(0);                                        \
    auto index1 = i.InputRegister(1);                                        \
    auto index2 = i.InputInt32(2);                                           \
    OutOfLineCode* ool;                                                      \
    if (instr->InputAt(3)->IsRegister()) {                                   \
      auto length = i.InputRegister(3);                                      \
      DCHECK_EQ(0, index2);                                                  \
      __ cmpl(index1, length);                                               \
      ool = new (zone()) OutOfLineLoadNaN(this, result);                     \
    } else {                                                                 \
      auto length = i.InputInt32(3);                                         \
      DCHECK_LE(index2, length);                                             \
      __ cmpq(index1, Immediate(length - index2));                           \
      class OutOfLineLoadFloat FINAL : public OutOfLineCode {                \
       public:                                                               \
        OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result,           \
                           Register buffer, Register index1, int32_t index2, \
                           int32_t length)                                   \
            : OutOfLineCode(gen),                                            \
              result_(result),                                               \
              buffer_(buffer),                                               \
              index1_(index1),                                               \
              index2_(index2),                                               \
              length_(length) {}                                             \
                                                                             \
        void Generate() FINAL {                                              \
          __ leal(kScratchRegister, Operand(index1_, index2_));              \
          __ pcmpeqd(result_, result_);                                      \
          __ cmpl(kScratchRegister, Immediate(length_));                     \
          __ j(above_equal, exit());                                         \
          __ asm_instr(result_,                                              \
                       Operand(buffer_, kScratchRegister, times_1, 0));      \
        }                                                                    \
                                                                             \
       private:                                                              \
        XMMRegister const result_;                                           \
        Register const buffer_;                                              \
        Register const index1_;                                              \
        int32_t const index2_;                                               \
        int32_t const length_;                                               \
      };                                                                     \
      ool = new (zone())                                                     \
          OutOfLineLoadFloat(this, result, buffer, index1, index2, length);  \
    }                                                                        \
    __ j(above_equal, ool->entry());                                         \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2));          \
    __ bind(ool->exit());                                                    \
  } while (false)
327
328
// Emits a bounds-checked integer load: result = buffer[index1 + index2]
// when in bounds, zero otherwise.  Mirrors ASSEMBLE_CHECKED_LOAD_FLOAT:
// register length (index2 == 0) falls back to OutOfLineLoadZero, while a
// constant length gets an out-of-line re-check of index1 + index2 that
// loads on success and zeroes the result on failure.
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                               \
  do {                                                                         \
    auto result = i.OutputRegister();                                          \
    auto buffer = i.InputRegister(0);                                          \
    auto index1 = i.InputRegister(1);                                          \
    auto index2 = i.InputInt32(2);                                             \
    OutOfLineCode* ool;                                                        \
    if (instr->InputAt(3)->IsRegister()) {                                     \
      auto length = i.InputRegister(3);                                        \
      DCHECK_EQ(0, index2);                                                    \
      __ cmpl(index1, length);                                                 \
      ool = new (zone()) OutOfLineLoadZero(this, result);                      \
    } else {                                                                   \
      auto length = i.InputInt32(3);                                           \
      DCHECK_LE(index2, length);                                               \
      __ cmpq(index1, Immediate(length - index2));                             \
      class OutOfLineLoadInteger FINAL : public OutOfLineCode {                \
       public:                                                                 \
        OutOfLineLoadInteger(CodeGenerator* gen, Register result,              \
                             Register buffer, Register index1, int32_t index2, \
                             int32_t length)                                   \
            : OutOfLineCode(gen),                                              \
              result_(result),                                                 \
              buffer_(buffer),                                                 \
              index1_(index1),                                                 \
              index2_(index2),                                                 \
              length_(length) {}                                               \
                                                                               \
        void Generate() FINAL {                                                \
          Label oob;                                                           \
          __ leal(kScratchRegister, Operand(index1_, index2_));                \
          __ cmpl(kScratchRegister, Immediate(length_));                       \
          __ j(above_equal, &oob, Label::kNear);                               \
          __ asm_instr(result_,                                                \
                       Operand(buffer_, kScratchRegister, times_1, 0));        \
          __ jmp(exit());                                                      \
          __ bind(&oob);                                                       \
          __ xorl(result_, result_);                                           \
        }                                                                      \
                                                                               \
       private:                                                                \
        Register const result_;                                                \
        Register const buffer_;                                                \
        Register const index1_;                                                \
        int32_t const index2_;                                                 \
        int32_t const length_;                                                 \
      };                                                                       \
      ool = new (zone())                                                       \
          OutOfLineLoadInteger(this, result, buffer, index1, index2, length);  \
    }                                                                          \
    __ j(above_equal, ool->entry());                                           \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2));            \
    __ bind(ool->exit());                                                      \
  } while (false)
383
384
// Emits a bounds-checked floating-point store of input 4 to
// buffer[index1 + index2]; out-of-bounds stores are silently skipped.
// Register length (index2 == 0) uses a simple skip label; constant
// length defers an index1 + index2 re-check to an out-of-line class.
#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                              \
  do {                                                                       \
    auto buffer = i.InputRegister(0);                                        \
    auto index1 = i.InputRegister(1);                                        \
    auto index2 = i.InputInt32(2);                                           \
    auto value = i.InputDoubleRegister(4);                                   \
    if (instr->InputAt(3)->IsRegister()) {                                   \
      auto length = i.InputRegister(3);                                      \
      DCHECK_EQ(0, index2);                                                  \
      Label done;                                                            \
      __ cmpl(index1, length);                                               \
      __ j(above_equal, &done, Label::kNear);                                \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
      __ bind(&done);                                                        \
    } else {                                                                 \
      auto length = i.InputInt32(3);                                         \
      DCHECK_LE(index2, length);                                             \
      __ cmpq(index1, Immediate(length - index2));                           \
      class OutOfLineStoreFloat FINAL : public OutOfLineCode {               \
       public:                                                               \
        OutOfLineStoreFloat(CodeGenerator* gen, Register buffer,             \
                            Register index1, int32_t index2, int32_t length, \
                            XMMRegister value)                               \
            : OutOfLineCode(gen),                                            \
              buffer_(buffer),                                               \
              index1_(index1),                                               \
              index2_(index2),                                               \
              length_(length),                                               \
              value_(value) {}                                               \
                                                                             \
        void Generate() FINAL {                                              \
          __ leal(kScratchRegister, Operand(index1_, index2_));              \
          __ cmpl(kScratchRegister, Immediate(length_));                     \
          __ j(above_equal, exit());                                         \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),       \
                       value_);                                              \
        }                                                                    \
                                                                             \
       private:                                                              \
        Register const buffer_;                                              \
        Register const index1_;                                              \
        int32_t const index2_;                                               \
        int32_t const length_;                                               \
        XMMRegister const value_;                                            \
      };                                                                     \
      auto ool = new (zone())                                                \
          OutOfLineStoreFloat(this, buffer, index1, index2, length, value);  \
      __ j(above_equal, ool->entry());                                       \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
      __ bind(ool->exit());                                                  \
    }                                                                        \
  } while (false)
437
438
// Implementation body shared by checked integer stores.  |Value| is the
// C++ type of the stored value (Register or Immediate).  NOTE: this macro
// deliberately references a local named |value| of type Value that must
// already be declared at the expansion site — see
// ASSEMBLE_CHECKED_STORE_INTEGER below.  Out-of-bounds stores are
// silently skipped, as in ASSEMBLE_CHECKED_STORE_FLOAT.
#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value)                  \
  do {                                                                         \
    auto buffer = i.InputRegister(0);                                          \
    auto index1 = i.InputRegister(1);                                          \
    auto index2 = i.InputInt32(2);                                             \
    if (instr->InputAt(3)->IsRegister()) {                                     \
      auto length = i.InputRegister(3);                                        \
      DCHECK_EQ(0, index2);                                                    \
      Label done;                                                              \
      __ cmpl(index1, length);                                                 \
      __ j(above_equal, &done, Label::kNear);                                  \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
      __ bind(&done);                                                          \
    } else {                                                                   \
      auto length = i.InputInt32(3);                                           \
      DCHECK_LE(index2, length);                                               \
      __ cmpq(index1, Immediate(length - index2));                             \
      class OutOfLineStoreInteger FINAL : public OutOfLineCode {               \
       public:                                                                 \
        OutOfLineStoreInteger(CodeGenerator* gen, Register buffer,             \
                              Register index1, int32_t index2, int32_t length, \
                              Value value)                                     \
            : OutOfLineCode(gen),                                              \
              buffer_(buffer),                                                 \
              index1_(index1),                                                 \
              index2_(index2),                                                 \
              length_(length),                                                 \
              value_(value) {}                                                 \
                                                                               \
        void Generate() FINAL {                                                \
          __ leal(kScratchRegister, Operand(index1_, index2_));                \
          __ cmpl(kScratchRegister, Immediate(length_));                       \
          __ j(above_equal, exit());                                           \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),         \
                       value_);                                                \
        }                                                                      \
                                                                               \
       private:                                                                \
        Register const buffer_;                                                \
        Register const index1_;                                                \
        int32_t const index2_;                                                 \
        int32_t const length_;                                                 \
        Value const value_;                                                    \
      };                                                                       \
      auto ool = new (zone())                                                  \
          OutOfLineStoreInteger(this, buffer, index1, index2, length, value);  \
      __ j(above_equal, ool->entry());                                         \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
      __ bind(ool->exit());                                                    \
    }                                                                          \
  } while (false)
490
491
// Dispatches a checked integer store on the type of the value operand
// (input 4): declares the |value| local consumed by
// ASSEMBLE_CHECKED_STORE_INTEGER_IMPL as either a Register or an
// Immediate, then expands the shared implementation.
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                \
  do {                                                           \
    if (instr->InputAt(4)->IsRegister()) {                       \
      Register value = i.InputRegister(4);                       \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register);  \
    } else {                                                     \
      Immediate value = i.InputImmediate(4);                     \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
    }                                                            \
  } while (false)
502
503
504 // Assembles an instruction after register allocation, producing machine code.
505 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
506   X64OperandConverter i(this, instr);
507
508   switch (ArchOpcodeField::decode(instr->opcode())) {
509     case kArchCallCodeObject: {
510       EnsureSpaceForLazyDeopt();
511       if (HasImmediateInput(instr, 0)) {
512         Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
513         __ Call(code, RelocInfo::CODE_TARGET);
514       } else {
515         Register reg = i.InputRegister(0);
516         int entry = Code::kHeaderSize - kHeapObjectTag;
517         __ Call(Operand(reg, entry));
518       }
519       AddSafepointAndDeopt(instr);
520       break;
521     }
522     case kArchCallJSFunction: {
523       EnsureSpaceForLazyDeopt();
524       Register func = i.InputRegister(0);
525       if (FLAG_debug_code) {
526         // Check the function's context matches the context argument.
527         __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
528         __ Assert(equal, kWrongFunctionContext);
529       }
530       __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
531       AddSafepointAndDeopt(instr);
532       break;
533     }
534     case kArchJmp:
535       AssembleArchJump(i.InputRpo(0));
536       break;
537     case kArchNop:
538       // don't emit code for nops.
539       break;
540     case kArchRet:
541       AssembleReturn();
542       break;
543     case kArchStackPointer:
544       __ movq(i.OutputRegister(), rsp);
545       break;
546     case kArchTruncateDoubleToI: {
547       auto result = i.OutputRegister();
548       auto input = i.InputDoubleRegister(0);
549       auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
550       __ cvttsd2siq(result, input);
551       __ cmpq(result, Immediate(1));
552       __ j(overflow, ool->entry());
553       __ bind(ool->exit());
554       break;
555     }
556     case kX64Add32:
557       ASSEMBLE_BINOP(addl);
558       break;
559     case kX64Add:
560       ASSEMBLE_BINOP(addq);
561       break;
562     case kX64Sub32:
563       ASSEMBLE_BINOP(subl);
564       break;
565     case kX64Sub:
566       ASSEMBLE_BINOP(subq);
567       break;
568     case kX64And32:
569       ASSEMBLE_BINOP(andl);
570       break;
571     case kX64And:
572       ASSEMBLE_BINOP(andq);
573       break;
574     case kX64Cmp32:
575       ASSEMBLE_BINOP(cmpl);
576       break;
577     case kX64Cmp:
578       ASSEMBLE_BINOP(cmpq);
579       break;
580     case kX64Test32:
581       ASSEMBLE_BINOP(testl);
582       break;
583     case kX64Test:
584       ASSEMBLE_BINOP(testq);
585       break;
586     case kX64Imul32:
587       ASSEMBLE_MULT(imull);
588       break;
589     case kX64Imul:
590       ASSEMBLE_MULT(imulq);
591       break;
592     case kX64ImulHigh32:
593       if (instr->InputAt(1)->IsRegister()) {
594         __ imull(i.InputRegister(1));
595       } else {
596         __ imull(i.InputOperand(1));
597       }
598       break;
599     case kX64UmulHigh32:
600       if (instr->InputAt(1)->IsRegister()) {
601         __ mull(i.InputRegister(1));
602       } else {
603         __ mull(i.InputOperand(1));
604       }
605       break;
606     case kX64Idiv32:
607       __ cdq();
608       __ idivl(i.InputRegister(1));
609       break;
610     case kX64Idiv:
611       __ cqo();
612       __ idivq(i.InputRegister(1));
613       break;
614     case kX64Udiv32:
615       __ xorl(rdx, rdx);
616       __ divl(i.InputRegister(1));
617       break;
618     case kX64Udiv:
619       __ xorq(rdx, rdx);
620       __ divq(i.InputRegister(1));
621       break;
622     case kX64Not:
623       ASSEMBLE_UNOP(notq);
624       break;
625     case kX64Not32:
626       ASSEMBLE_UNOP(notl);
627       break;
628     case kX64Neg:
629       ASSEMBLE_UNOP(negq);
630       break;
631     case kX64Neg32:
632       ASSEMBLE_UNOP(negl);
633       break;
634     case kX64Or32:
635       ASSEMBLE_BINOP(orl);
636       break;
637     case kX64Or:
638       ASSEMBLE_BINOP(orq);
639       break;
640     case kX64Xor32:
641       ASSEMBLE_BINOP(xorl);
642       break;
643     case kX64Xor:
644       ASSEMBLE_BINOP(xorq);
645       break;
646     case kX64Shl32:
647       ASSEMBLE_SHIFT(shll, 5);
648       break;
649     case kX64Shl:
650       ASSEMBLE_SHIFT(shlq, 6);
651       break;
652     case kX64Shr32:
653       ASSEMBLE_SHIFT(shrl, 5);
654       break;
655     case kX64Shr:
656       ASSEMBLE_SHIFT(shrq, 6);
657       break;
658     case kX64Sar32:
659       ASSEMBLE_SHIFT(sarl, 5);
660       break;
661     case kX64Sar:
662       ASSEMBLE_SHIFT(sarq, 6);
663       break;
664     case kX64Ror32:
665       ASSEMBLE_SHIFT(rorl, 5);
666       break;
667     case kX64Ror:
668       ASSEMBLE_SHIFT(rorq, 6);
669       break;
670     case kSSEFloat64Cmp:
671       ASSEMBLE_DOUBLE_BINOP(ucomisd);
672       break;
673     case kSSEFloat64Add:
674       ASSEMBLE_DOUBLE_BINOP(addsd);
675       break;
676     case kSSEFloat64Sub:
677       ASSEMBLE_DOUBLE_BINOP(subsd);
678       break;
679     case kSSEFloat64Mul:
680       ASSEMBLE_DOUBLE_BINOP(mulsd);
681       break;
682     case kSSEFloat64Div:
683       ASSEMBLE_DOUBLE_BINOP(divsd);
684       break;
685     case kSSEFloat64Mod: {
686       __ subq(rsp, Immediate(kDoubleSize));
687       // Move values to st(0) and st(1).
688       __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
689       __ fld_d(Operand(rsp, 0));
690       __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
691       __ fld_d(Operand(rsp, 0));
692       // Loop while fprem isn't done.
693       Label mod_loop;
694       __ bind(&mod_loop);
695       // This instructions traps on all kinds inputs, but we are assuming the
696       // floating point control word is set to ignore them all.
697       __ fprem();
698       // The following 2 instruction implicitly use rax.
699       __ fnstsw_ax();
700       if (CpuFeatures::IsSupported(SAHF)) {
701         CpuFeatureScope sahf_scope(masm(), SAHF);
702         __ sahf();
703       } else {
704         __ shrl(rax, Immediate(8));
705         __ andl(rax, Immediate(0xFF));
706         __ pushq(rax);
707         __ popfq();
708       }
709       __ j(parity_even, &mod_loop);
710       // Move output to stack and clean up.
711       __ fstp(1);
712       __ fstp_d(Operand(rsp, 0));
713       __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
714       __ addq(rsp, Immediate(kDoubleSize));
715       break;
716     }
717     case kSSEFloat64Sqrt:
718       if (instr->InputAt(0)->IsDoubleRegister()) {
719         __ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
720       } else {
721         __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
722       }
723       break;
724     case kSSEFloat64Floor: {
725       CpuFeatureScope sse_scope(masm(), SSE4_1);
726       __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
727                  v8::internal::Assembler::kRoundDown);
728       break;
729     }
730     case kSSEFloat64Ceil: {
731       CpuFeatureScope sse_scope(masm(), SSE4_1);
732       __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
733                  v8::internal::Assembler::kRoundUp);
734       break;
735     }
736     case kSSEFloat64RoundTruncate: {
737       CpuFeatureScope sse_scope(masm(), SSE4_1);
738       __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
739                  v8::internal::Assembler::kRoundToZero);
740       break;
741     }
742     case kSSECvtss2sd:
743       if (instr->InputAt(0)->IsDoubleRegister()) {
744         __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
745       } else {
746         __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
747       }
748       break;
749     case kSSECvtsd2ss:
750       if (instr->InputAt(0)->IsDoubleRegister()) {
751         __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
752       } else {
753         __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
754       }
755       break;
756     case kSSEFloat64ToInt32:
757       if (instr->InputAt(0)->IsDoubleRegister()) {
758         __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
759       } else {
760         __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
761       }
762       break;
763     case kSSEFloat64ToUint32: {
764       if (instr->InputAt(0)->IsDoubleRegister()) {
765         __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
766       } else {
767         __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
768       }
769       __ AssertZeroExtended(i.OutputRegister());
770       break;
771     }
772     case kSSEInt32ToFloat64:
773       if (instr->InputAt(0)->IsRegister()) {
774         __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
775       } else {
776         __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
777       }
778       break;
779     case kSSEUint32ToFloat64:
780       if (instr->InputAt(0)->IsRegister()) {
781         __ movl(kScratchRegister, i.InputRegister(0));
782       } else {
783         __ movl(kScratchRegister, i.InputOperand(0));
784       }
785       __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
786       break;
787     case kAVXFloat64Add:
788       ASSEMBLE_AVX_DOUBLE_BINOP(vaddsd);
789       break;
790     case kAVXFloat64Sub:
791       ASSEMBLE_AVX_DOUBLE_BINOP(vsubsd);
792       break;
793     case kAVXFloat64Mul:
794       ASSEMBLE_AVX_DOUBLE_BINOP(vmulsd);
795       break;
796     case kAVXFloat64Div:
797       ASSEMBLE_AVX_DOUBLE_BINOP(vdivsd);
798       break;
799     case kX64Movsxbl:
800       if (instr->addressing_mode() != kMode_None) {
801         __ movsxbl(i.OutputRegister(), i.MemoryOperand());
802       } else if (instr->InputAt(0)->IsRegister()) {
803         __ movsxbl(i.OutputRegister(), i.InputRegister(0));
804       } else {
805         __ movsxbl(i.OutputRegister(), i.InputOperand(0));
806       }
807       __ AssertZeroExtended(i.OutputRegister());
808       break;
809     case kX64Movzxbl:
810       __ movzxbl(i.OutputRegister(), i.MemoryOperand());
811       break;
812     case kX64Movb: {
813       int index = 0;
814       Operand operand = i.MemoryOperand(&index);
815       if (HasImmediateInput(instr, index)) {
816         __ movb(operand, Immediate(i.InputInt8(index)));
817       } else {
818         __ movb(operand, i.InputRegister(index));
819       }
820       break;
821     }
822     case kX64Movsxwl:
823       if (instr->addressing_mode() != kMode_None) {
824         __ movsxwl(i.OutputRegister(), i.MemoryOperand());
825       } else if (instr->InputAt(0)->IsRegister()) {
826         __ movsxwl(i.OutputRegister(), i.InputRegister(0));
827       } else {
828         __ movsxwl(i.OutputRegister(), i.InputOperand(0));
829       }
830       __ AssertZeroExtended(i.OutputRegister());
831       break;
832     case kX64Movzxwl:
833       __ movzxwl(i.OutputRegister(), i.MemoryOperand());
834       __ AssertZeroExtended(i.OutputRegister());
835       break;
836     case kX64Movw: {
837       int index = 0;
838       Operand operand = i.MemoryOperand(&index);
839       if (HasImmediateInput(instr, index)) {
840         __ movw(operand, Immediate(i.InputInt16(index)));
841       } else {
842         __ movw(operand, i.InputRegister(index));
843       }
844       break;
845     }
846     case kX64Movl:
847       if (instr->HasOutput()) {
848         if (instr->addressing_mode() == kMode_None) {
849           if (instr->InputAt(0)->IsRegister()) {
850             __ movl(i.OutputRegister(), i.InputRegister(0));
851           } else {
852             __ movl(i.OutputRegister(), i.InputOperand(0));
853           }
854         } else {
855           __ movl(i.OutputRegister(), i.MemoryOperand());
856         }
857         __ AssertZeroExtended(i.OutputRegister());
858       } else {
859         int index = 0;
860         Operand operand = i.MemoryOperand(&index);
861         if (HasImmediateInput(instr, index)) {
862           __ movl(operand, i.InputImmediate(index));
863         } else {
864           __ movl(operand, i.InputRegister(index));
865         }
866       }
867       break;
868     case kX64Movsxlq: {
869       if (instr->InputAt(0)->IsRegister()) {
870         __ movsxlq(i.OutputRegister(), i.InputRegister(0));
871       } else {
872         __ movsxlq(i.OutputRegister(), i.InputOperand(0));
873       }
874       break;
875     }
876     case kX64Movq:
877       if (instr->HasOutput()) {
878         __ movq(i.OutputRegister(), i.MemoryOperand());
879       } else {
880         int index = 0;
881         Operand operand = i.MemoryOperand(&index);
882         if (HasImmediateInput(instr, index)) {
883           __ movq(operand, i.InputImmediate(index));
884         } else {
885           __ movq(operand, i.InputRegister(index));
886         }
887       }
888       break;
889     case kX64Movss:
890       if (instr->HasOutput()) {
891         __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
892       } else {
893         int index = 0;
894         Operand operand = i.MemoryOperand(&index);
895         __ movss(operand, i.InputDoubleRegister(index));
896       }
897       break;
898     case kX64Movsd:
899       if (instr->HasOutput()) {
900         __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
901       } else {
902         int index = 0;
903         Operand operand = i.MemoryOperand(&index);
904         __ movsd(operand, i.InputDoubleRegister(index));
905       }
906       break;
907     case kX64Lea32: {
908       AddressingMode mode = AddressingModeField::decode(instr->opcode());
909       // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
910       // and addressing mode just happens to work out. The "addl"/"subl" forms
911       // in these cases are faster based on measurements.
912       if (i.InputRegister(0).is(i.OutputRegister())) {
913         if (mode == kMode_MRI) {
914           int32_t constant_summand = i.InputInt32(1);
915           if (constant_summand > 0) {
916             __ addl(i.OutputRegister(), Immediate(constant_summand));
917           } else if (constant_summand < 0) {
918             __ subl(i.OutputRegister(), Immediate(-constant_summand));
919           }
920         } else if (mode == kMode_MR1) {
921           if (i.InputRegister(1).is(i.OutputRegister())) {
922             __ shll(i.OutputRegister(), Immediate(1));
923           } else {
924             __ leal(i.OutputRegister(), i.MemoryOperand());
925           }
926         } else if (mode == kMode_M2) {
927           __ shll(i.OutputRegister(), Immediate(1));
928         } else if (mode == kMode_M4) {
929           __ shll(i.OutputRegister(), Immediate(2));
930         } else if (mode == kMode_M8) {
931           __ shll(i.OutputRegister(), Immediate(3));
932         } else {
933           __ leal(i.OutputRegister(), i.MemoryOperand());
934         }
935       } else {
936         __ leal(i.OutputRegister(), i.MemoryOperand());
937       }
938       __ AssertZeroExtended(i.OutputRegister());
939       break;
940     }
941     case kX64Lea:
942       __ leaq(i.OutputRegister(), i.MemoryOperand());
943       break;
944     case kX64Dec32:
945       __ decl(i.OutputRegister());
946       break;
947     case kX64Inc32:
948       __ incl(i.OutputRegister());
949       break;
950     case kX64Push:
951       if (HasImmediateInput(instr, 0)) {
952         __ pushq(i.InputImmediate(0));
953       } else {
954         if (instr->InputAt(0)->IsRegister()) {
955           __ pushq(i.InputRegister(0));
956         } else {
957           __ pushq(i.InputOperand(0));
958         }
959       }
960       break;
961     case kX64StoreWriteBarrier: {
962       Register object = i.InputRegister(0);
963       Register index = i.InputRegister(1);
964       Register value = i.InputRegister(2);
965       __ movsxlq(index, index);
966       __ movq(Operand(object, index, times_1, 0), value);
967       __ leaq(index, Operand(object, index, times_1, 0));
968       SaveFPRegsMode mode =
969           frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
970       __ RecordWrite(object, index, value, mode);
971       break;
972     }
973     case kCheckedLoadInt8:
974       ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
975       break;
976     case kCheckedLoadUint8:
977       ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
978       break;
979     case kCheckedLoadInt16:
980       ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
981       break;
982     case kCheckedLoadUint16:
983       ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
984       break;
985     case kCheckedLoadWord32:
986       ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
987       break;
988     case kCheckedLoadFloat32:
989       ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
990       break;
991     case kCheckedLoadFloat64:
992       ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
993       break;
994     case kCheckedStoreWord8:
995       ASSEMBLE_CHECKED_STORE_INTEGER(movb);
996       break;
997     case kCheckedStoreWord16:
998       ASSEMBLE_CHECKED_STORE_INTEGER(movw);
999       break;
1000     case kCheckedStoreWord32:
1001       ASSEMBLE_CHECKED_STORE_INTEGER(movl);
1002       break;
1003     case kCheckedStoreFloat32:
1004       ASSEMBLE_CHECKED_STORE_FLOAT(movss);
1005       break;
1006     case kCheckedStoreFloat64:
1007       ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
1008       break;
1009   }
1010 }
1011
1012
1013 // Assembles branches after this instruction.
1014 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
1015   X64OperandConverter i(this, instr);
1016   Label::Distance flabel_distance =
1017       branch->fallthru ? Label::kNear : Label::kFar;
1018   Label* tlabel = branch->true_label;
1019   Label* flabel = branch->false_label;
1020   switch (branch->condition) {
1021     case kUnorderedEqual:
1022       __ j(parity_even, flabel, flabel_distance);
1023     // Fall through.
1024     case kEqual:
1025       __ j(equal, tlabel);
1026       break;
1027     case kUnorderedNotEqual:
1028       __ j(parity_even, tlabel);
1029     // Fall through.
1030     case kNotEqual:
1031       __ j(not_equal, tlabel);
1032       break;
1033     case kSignedLessThan:
1034       __ j(less, tlabel);
1035       break;
1036     case kSignedGreaterThanOrEqual:
1037       __ j(greater_equal, tlabel);
1038       break;
1039     case kSignedLessThanOrEqual:
1040       __ j(less_equal, tlabel);
1041       break;
1042     case kSignedGreaterThan:
1043       __ j(greater, tlabel);
1044       break;
1045     case kUnsignedLessThan:
1046       __ j(below, tlabel);
1047       break;
1048     case kUnsignedGreaterThanOrEqual:
1049       __ j(above_equal, tlabel);
1050       break;
1051     case kUnsignedLessThanOrEqual:
1052       __ j(below_equal, tlabel);
1053       break;
1054     case kUnsignedGreaterThan:
1055       __ j(above, tlabel);
1056       break;
1057     case kOverflow:
1058       __ j(overflow, tlabel);
1059       break;
1060     case kNotOverflow:
1061       __ j(no_overflow, tlabel);
1062       break;
1063   }
1064   if (!branch->fallthru) __ jmp(flabel, flabel_distance);
1065 }
1066
1067
1068 void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
1069   if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
1070 }
1071
1072
1073 // Assembles boolean materializations after this instruction.
// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, static_cast<int>(instr->OutputCount()));
  Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
  Condition cc = no_condition;
  switch (condition) {
    case kUnorderedEqual:
      // ucomisd signals an unordered result (NaN operand) via the parity
      // flag.  NaN == anything is false, so materialize 0 directly and skip
      // the setcc sequence; ordered results fall through to kEqual.
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      // Unordered operands compare "not equal", so materialize 1 on NaN.
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
  }
  __ bind(&check);
  // setcc writes only the low byte; zero-extend to get a clean 0/1 result.
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}
1138
1139
1140 void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
1141   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
1142       isolate(), deoptimization_id, Deoptimizer::LAZY);
1143   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
1144 }
1145
1146
// Builds the stack frame on function entry.  The frame shape depends on the
// incoming call descriptor: C entry points get a plain rbp frame plus any
// callee-saved registers, JS functions use the standard JS prologue, and
// everything else gets a stub frame.
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ pushq(rbp);
    __ movq(rbp, rsp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      // Push in descending register-code order so the save area layout is
      // deterministic; AssembleReturn pops in the opposite (ascending) order.
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    // Standard JS function prologue (may emit the code pre-aging sequence).
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    int unoptimized_slots =
        static_cast<int>(OsrHelper(info()).UnoptimizedFrameSlots());
    DCHECK(stack_slots >= unoptimized_slots);
    stack_slots -= unoptimized_slots;
  }

  // Allocate the spill slot area (whatever remains after any OSR adjustment).
  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));
  }
}
1194
1195
// Tears down the stack frame and returns to the caller.  Mirrors the frame
// shapes built by AssemblePrologue.
void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.  Pop in ascending register-code order, the reverse
      // of the descending push order used in the prologue.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
        }
      }
      __ popq(rbp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    // JS functions additionally pop their receiver/arguments off the caller's
    // stack; stubs pop nothing.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ ret(pop_count * kPointerSize);
  }
}
1230
1231
// Emits code for a single parallel-move element, dispatching on the kinds of
// the source and destination operands (register, stack slot, constant, and
// their double/XMM counterparts).  Memory-to-memory moves go through
// kScratchRegister (general-purpose) or xmm0 (double), which are reserved as
// fixed scratch registers and never allocated.
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      // Materialize into the destination register, or into the scratch
      // register followed by a store when the destination is a stack slot.
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ Set(dst, src.ToInt32());
          break;
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
          break;
        case Constant::kFloat32:
          // Float constants headed for a general register/slot are boxed as
          // heap numbers.
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): load of labels on x64.
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      // Load the raw 32-bit float pattern into an XMM register or slot.
      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, Immediate(src_const));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      // Load the raw 64-bit double pattern, via the scratch register when the
      // destination is a stack slot (no 64-bit immediate store on x64).
      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(kScratchRegister, src_const);
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}
1341
1342
// Emits code that exchanges the contents of two operands, dispatching on their
// kinds.  Like AssembleMove, memory-to-memory and XMM swaps go through the
// fixed scratch registers kScratchRegister and xmm0.
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    // Register-memory: a single xchg does the exchange.
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    // Load dst into the scratch register, exchange it with src in memory,
    // then store the old src value back to dst.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap.  We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);   // Save the register value.
    __ movsd(src, dst);    // Load the memory value into the register.
    __ movsd(dst, xmm0);   // Store the old register value to memory.
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
1386
1387
1388 void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
1389
1390
1391 void CodeGenerator::EnsureSpaceForLazyDeopt() {
1392   int space_needed = Deoptimizer::patch_size();
1393   if (!info()->IsStub()) {
1394     // Ensure that we have enough space after the previous lazy-bailout
1395     // instruction for patching the code here.
1396     int current_pc = masm()->pc_offset();
1397     if (current_pc < last_lazy_deopt_pc_ + space_needed) {
1398       int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
1399       __ Nop(padding_size);
1400     }
1401   }
1402   MarkLazyDeoptSite();
1403 }
1404
1405 #undef __
1406
}  // namespace compiler
}  // namespace internal
1409 }  // namespace v8