// [turbofan] Fix unsafe out-of-bounds check for checked loads/stores.
// File: src/compiler/x64/code-generator-x64.cc
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

5 #include "src/compiler/code-generator.h"
6
7 #include "src/compiler/code-generator-impl.h"
8 #include "src/compiler/gap-resolver.h"
9 #include "src/compiler/node-matchers.h"
10 #include "src/compiler/node-properties-inl.h"
11 #include "src/scopes.h"
12 #include "src/x64/assembler-x64.h"
13 #include "src/x64/macro-assembler-x64.h"
14
15 namespace v8 {
16 namespace internal {
17 namespace compiler {
18
19 #define __ masm()->
20
21
22 // Adds X64 specific methods for decoding operands.
23 class X64OperandConverter : public InstructionOperandConverter {
24  public:
25   X64OperandConverter(CodeGenerator* gen, Instruction* instr)
26       : InstructionOperandConverter(gen, instr) {}
27
28   Immediate InputImmediate(int index) {
29     return ToImmediate(instr_->InputAt(index));
30   }
31
32   Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
33
34   Operand OutputOperand() { return ToOperand(instr_->Output()); }
35
36   Immediate ToImmediate(InstructionOperand* operand) {
37     return Immediate(ToConstant(operand).ToInt32());
38   }
39
40   Operand ToOperand(InstructionOperand* op, int extra = 0) {
41     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
42     // The linkage computes where all spill slots are located.
43     FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
44     return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
45   }
46
47   static int NextOffset(int* offset) {
48     int i = *offset;
49     (*offset)++;
50     return i;
51   }
52
53   static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
54     STATIC_ASSERT(0 == static_cast<int>(times_1));
55     STATIC_ASSERT(1 == static_cast<int>(times_2));
56     STATIC_ASSERT(2 == static_cast<int>(times_4));
57     STATIC_ASSERT(3 == static_cast<int>(times_8));
58     int scale = static_cast<int>(mode - one);
59     DCHECK(scale >= 0 && scale < 4);
60     return static_cast<ScaleFactor>(scale);
61   }
62
63   Operand MemoryOperand(int* offset) {
64     AddressingMode mode = AddressingModeField::decode(instr_->opcode());
65     switch (mode) {
66       case kMode_MR: {
67         Register base = InputRegister(NextOffset(offset));
68         int32_t disp = 0;
69         return Operand(base, disp);
70       }
71       case kMode_MRI: {
72         Register base = InputRegister(NextOffset(offset));
73         int32_t disp = InputInt32(NextOffset(offset));
74         return Operand(base, disp);
75       }
76       case kMode_MR1:
77       case kMode_MR2:
78       case kMode_MR4:
79       case kMode_MR8: {
80         Register base = InputRegister(NextOffset(offset));
81         Register index = InputRegister(NextOffset(offset));
82         ScaleFactor scale = ScaleFor(kMode_MR1, mode);
83         int32_t disp = 0;
84         return Operand(base, index, scale, disp);
85       }
86       case kMode_MR1I:
87       case kMode_MR2I:
88       case kMode_MR4I:
89       case kMode_MR8I: {
90         Register base = InputRegister(NextOffset(offset));
91         Register index = InputRegister(NextOffset(offset));
92         ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
93         int32_t disp = InputInt32(NextOffset(offset));
94         return Operand(base, index, scale, disp);
95       }
96       case kMode_M1: {
97         Register base = InputRegister(NextOffset(offset));
98         int32_t disp = 0;
99         return Operand(base, disp);
100       }
101       case kMode_M2:
102         UNREACHABLE();  // Should use kModeMR with more compact encoding instead
103         return Operand(no_reg, 0);
104       case kMode_M4:
105       case kMode_M8: {
106         Register index = InputRegister(NextOffset(offset));
107         ScaleFactor scale = ScaleFor(kMode_M1, mode);
108         int32_t disp = 0;
109         return Operand(index, scale, disp);
110       }
111       case kMode_M1I:
112       case kMode_M2I:
113       case kMode_M4I:
114       case kMode_M8I: {
115         Register index = InputRegister(NextOffset(offset));
116         ScaleFactor scale = ScaleFor(kMode_M1I, mode);
117         int32_t disp = InputInt32(NextOffset(offset));
118         return Operand(index, scale, disp);
119       }
120       case kMode_None:
121         UNREACHABLE();
122         return Operand(no_reg, 0);
123     }
124     UNREACHABLE();
125     return Operand(no_reg, 0);
126   }
127
128   Operand MemoryOperand(int first_input = 0) {
129     return MemoryOperand(&first_input);
130   }
131 };
132
133
134 namespace {
135
136 bool HasImmediateInput(Instruction* instr, int index) {
137   return instr->InputAt(index)->IsImmediate();
138 }
139
140
141 class OutOfLineLoadZero FINAL : public OutOfLineCode {
142  public:
143   OutOfLineLoadZero(CodeGenerator* gen, Register result)
144       : OutOfLineCode(gen), result_(result) {}
145
146   void Generate() FINAL { __ xorl(result_, result_); }
147
148  private:
149   Register const result_;
150 };
151
152
153 class OutOfLineLoadNaN FINAL : public OutOfLineCode {
154  public:
155   OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
156       : OutOfLineCode(gen), result_(result) {}
157
158   void Generate() FINAL { __ pcmpeqd(result_, result_); }
159
160  private:
161   XMMRegister const result_;
162 };
163
164
165 class OutOfLineTruncateDoubleToI FINAL : public OutOfLineCode {
166  public:
167   OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
168                              XMMRegister input)
169       : OutOfLineCode(gen), result_(result), input_(input) {}
170
171   void Generate() FINAL {
172     __ subp(rsp, Immediate(kDoubleSize));
173     __ movsd(MemOperand(rsp, 0), input_);
174     __ SlowTruncateToI(result_, rsp, 0);
175     __ addp(rsp, Immediate(kDoubleSize));
176   }
177
178  private:
179   Register const result_;
180   XMMRegister const input_;
181 };
182
183 }  // namespace
184
185
186 #define ASSEMBLE_UNOP(asm_instr)         \
187   do {                                   \
188     if (instr->Output()->IsRegister()) { \
189       __ asm_instr(i.OutputRegister());  \
190     } else {                             \
191       __ asm_instr(i.OutputOperand());   \
192     }                                    \
193   } while (0)
194
195
196 #define ASSEMBLE_BINOP(asm_instr)                              \
197   do {                                                         \
198     if (HasImmediateInput(instr, 1)) {                         \
199       if (instr->InputAt(0)->IsRegister()) {                   \
200         __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
201       } else {                                                 \
202         __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
203       }                                                        \
204     } else {                                                   \
205       if (instr->InputAt(1)->IsRegister()) {                   \
206         __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
207       } else {                                                 \
208         __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
209       }                                                        \
210     }                                                          \
211   } while (0)
212
213
214 #define ASSEMBLE_MULT(asm_instr)                              \
215   do {                                                        \
216     if (HasImmediateInput(instr, 1)) {                        \
217       if (instr->InputAt(0)->IsRegister()) {                  \
218         __ asm_instr(i.OutputRegister(), i.InputRegister(0),  \
219                      i.InputImmediate(1));                    \
220       } else {                                                \
221         __ asm_instr(i.OutputRegister(), i.InputOperand(0),   \
222                      i.InputImmediate(1));                    \
223       }                                                       \
224     } else {                                                  \
225       if (instr->InputAt(1)->IsRegister()) {                  \
226         __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
227       } else {                                                \
228         __ asm_instr(i.OutputRegister(), i.InputOperand(1));  \
229       }                                                       \
230     }                                                         \
231   } while (0)
232
233
234 #define ASSEMBLE_SHIFT(asm_instr, width)                                   \
235   do {                                                                     \
236     if (HasImmediateInput(instr, 1)) {                                     \
237       if (instr->Output()->IsRegister()) {                                 \
238         __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
239       } else {                                                             \
240         __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1)));  \
241       }                                                                    \
242     } else {                                                               \
243       if (instr->Output()->IsRegister()) {                                 \
244         __ asm_instr##_cl(i.OutputRegister());                             \
245       } else {                                                             \
246         __ asm_instr##_cl(i.OutputOperand());                              \
247       }                                                                    \
248     }                                                                      \
249   } while (0)
250
251
252 #define ASSEMBLE_DOUBLE_BINOP(asm_instr)                                \
253   do {                                                                  \
254     if (instr->InputAt(1)->IsDoubleRegister()) {                        \
255       __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
256     } else {                                                            \
257       __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1));        \
258     }                                                                   \
259   } while (0)
260
261
262 #define ASSEMBLE_AVX_DOUBLE_BINOP(asm_instr)                           \
263   do {                                                                 \
264     CpuFeatureScope avx_scope(masm(), AVX);                            \
265     if (instr->InputAt(1)->IsDoubleRegister()) {                       \
266       __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
267                    i.InputDoubleRegister(1));                          \
268     } else {                                                           \
269       __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
270                    i.InputOperand(1));                                 \
271     }                                                                  \
272   } while (0)
273
274
275 #define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                               \
276   do {                                                                       \
277     auto result = i.OutputDoubleRegister();                                  \
278     auto buffer = i.InputRegister(0);                                        \
279     auto index1 = i.InputRegister(1);                                        \
280     auto index2 = i.InputInt32(2);                                           \
281     OutOfLineCode* ool;                                                      \
282     if (instr->InputAt(3)->IsRegister()) {                                   \
283       auto length = i.InputRegister(3);                                      \
284       DCHECK_EQ(0, index2);                                                  \
285       __ cmpl(index1, length);                                               \
286       ool = new (zone()) OutOfLineLoadNaN(this, result);                     \
287     } else {                                                                 \
288       auto length = i.InputInt32(3);                                         \
289       DCHECK_LE(index2, length);                                             \
290       __ cmpq(index1, Immediate(length - index2));                           \
291       class OutOfLineLoadFloat FINAL : public OutOfLineCode {                \
292        public:                                                               \
293         OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result,           \
294                            Register buffer, Register index1, int32_t index2, \
295                            int32_t length)                                   \
296             : OutOfLineCode(gen),                                            \
297               result_(result),                                               \
298               buffer_(buffer),                                               \
299               index1_(index1),                                               \
300               index2_(index2),                                               \
301               length_(length) {}                                             \
302                                                                              \
303         void Generate() FINAL {                                              \
304           __ leal(kScratchRegister, Operand(index1_, index2_));              \
305           __ pcmpeqd(result_, result_);                                      \
306           __ cmpl(kScratchRegister, Immediate(length_));                     \
307           __ j(above_equal, exit());                                         \
308           __ asm_instr(result_,                                              \
309                        Operand(buffer_, kScratchRegister, times_1, 0));      \
310         }                                                                    \
311                                                                              \
312        private:                                                              \
313         XMMRegister const result_;                                           \
314         Register const buffer_;                                              \
315         Register const index1_;                                              \
316         int32_t const index2_;                                               \
317         int32_t const length_;                                               \
318       };                                                                     \
319       ool = new (zone())                                                     \
320           OutOfLineLoadFloat(this, result, buffer, index1, index2, length);  \
321     }                                                                        \
322     __ j(above_equal, ool->entry());                                         \
323     __ asm_instr(result, Operand(buffer, index1, times_1, index2));          \
324     __ bind(ool->exit());                                                    \
325   } while (false)
326
327
328 #define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                               \
329   do {                                                                         \
330     auto result = i.OutputRegister();                                          \
331     auto buffer = i.InputRegister(0);                                          \
332     auto index1 = i.InputRegister(1);                                          \
333     auto index2 = i.InputInt32(2);                                             \
334     OutOfLineCode* ool;                                                        \
335     if (instr->InputAt(3)->IsRegister()) {                                     \
336       auto length = i.InputRegister(3);                                        \
337       DCHECK_EQ(0, index2);                                                    \
338       __ cmpl(index1, length);                                                 \
339       ool = new (zone()) OutOfLineLoadZero(this, result);                      \
340     } else {                                                                   \
341       auto length = i.InputInt32(3);                                           \
342       DCHECK_LE(index2, length);                                               \
343       __ cmpq(index1, Immediate(length - index2));                             \
344       class OutOfLineLoadInteger FINAL : public OutOfLineCode {                \
345        public:                                                                 \
346         OutOfLineLoadInteger(CodeGenerator* gen, Register result,              \
347                              Register buffer, Register index1, int32_t index2, \
348                              int32_t length)                                   \
349             : OutOfLineCode(gen),                                              \
350               result_(result),                                                 \
351               buffer_(buffer),                                                 \
352               index1_(index1),                                                 \
353               index2_(index2),                                                 \
354               length_(length) {}                                               \
355                                                                                \
356         void Generate() FINAL {                                                \
357           __ leal(kScratchRegister, Operand(index1_, index2_));                \
358           __ xorl(result_, result_);                                           \
359           __ cmpl(kScratchRegister, Immediate(length_));                       \
360           __ j(above_equal, exit());                                           \
361           __ asm_instr(result_,                                                \
362                        Operand(buffer_, kScratchRegister, times_1, 0));        \
363         }                                                                      \
364                                                                                \
365        private:                                                                \
366         Register const result_;                                                \
367         Register const buffer_;                                                \
368         Register const index1_;                                                \
369         int32_t const index2_;                                                 \
370         int32_t const length_;                                                 \
371       };                                                                       \
372       ool = new (zone())                                                       \
373           OutOfLineLoadInteger(this, result, buffer, index1, index2, length);  \
374     }                                                                          \
375     __ j(above_equal, ool->entry());                                           \
376     __ asm_instr(result, Operand(buffer, index1, times_1, index2));            \
377     __ bind(ool->exit());                                                      \
378   } while (false)
379
380
381 #define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                              \
382   do {                                                                       \
383     auto buffer = i.InputRegister(0);                                        \
384     auto index1 = i.InputRegister(1);                                        \
385     auto index2 = i.InputInt32(2);                                           \
386     auto value = i.InputDoubleRegister(4);                                   \
387     if (instr->InputAt(3)->IsRegister()) {                                   \
388       auto length = i.InputRegister(3);                                      \
389       DCHECK_EQ(0, index2);                                                  \
390       Label done;                                                            \
391       __ cmpl(index1, length);                                               \
392       __ j(above_equal, &done, Label::kNear);                                \
393       __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
394       __ bind(&done);                                                        \
395     } else {                                                                 \
396       auto length = i.InputInt32(3);                                         \
397       DCHECK_LE(index2, length);                                             \
398       __ cmpq(index1, Immediate(length - index2));                           \
399       class OutOfLineStoreFloat FINAL : public OutOfLineCode {               \
400        public:                                                               \
401         OutOfLineStoreFloat(CodeGenerator* gen, Register buffer,             \
402                             Register index1, int32_t index2, int32_t length, \
403                             XMMRegister value)                               \
404             : OutOfLineCode(gen),                                            \
405               buffer_(buffer),                                               \
406               index1_(index1),                                               \
407               index2_(index2),                                               \
408               length_(length),                                               \
409               value_(value) {}                                               \
410                                                                              \
411         void Generate() FINAL {                                              \
412           __ leal(kScratchRegister, Operand(index1_, index2_));              \
413           __ cmpl(kScratchRegister, Immediate(length_));                     \
414           __ j(above_equal, exit());                                         \
415           __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),       \
416                        value_);                                              \
417         }                                                                    \
418                                                                              \
419        private:                                                              \
420         Register const buffer_;                                              \
421         Register const index1_;                                              \
422         int32_t const index2_;                                               \
423         int32_t const length_;                                               \
424         XMMRegister const value_;                                            \
425       };                                                                     \
426       auto ool = new (zone())                                                \
427           OutOfLineStoreFloat(this, buffer, index1, index2, length, value);  \
428       __ j(above_equal, ool->entry());                                       \
429       __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
430       __ bind(ool->exit());                                                  \
431     }                                                                        \
432   } while (false)
433
434
435 #define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value)                  \
436   do {                                                                         \
437     auto buffer = i.InputRegister(0);                                          \
438     auto index1 = i.InputRegister(1);                                          \
439     auto index2 = i.InputInt32(2);                                             \
440     if (instr->InputAt(3)->IsRegister()) {                                     \
441       auto length = i.InputRegister(3);                                        \
442       DCHECK_EQ(0, index2);                                                    \
443       Label done;                                                              \
444       __ cmpl(index1, length);                                                 \
445       __ j(above_equal, &done, Label::kNear);                                  \
446       __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
447       __ bind(&done);                                                          \
448     } else {                                                                   \
449       auto length = i.InputInt32(3);                                           \
450       DCHECK_LE(index2, length);                                               \
451       __ cmpq(index1, Immediate(length - index2));                             \
452       class OutOfLineStoreInteger FINAL : public OutOfLineCode {               \
453        public:                                                                 \
454         OutOfLineStoreInteger(CodeGenerator* gen, Register buffer,             \
455                               Register index1, int32_t index2, int32_t length, \
456                               Value value)                                     \
457             : OutOfLineCode(gen),                                              \
458               buffer_(buffer),                                                 \
459               index1_(index1),                                                 \
460               index2_(index2),                                                 \
461               length_(length),                                                 \
462               value_(value) {}                                                 \
463                                                                                \
464         void Generate() FINAL {                                                \
465           __ leal(kScratchRegister, Operand(index1_, index2_));                \
466           __ cmpl(kScratchRegister, Immediate(length_));                       \
467           __ j(above_equal, exit());                                           \
468           __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),         \
469                        value_);                                                \
470         }                                                                      \
471                                                                                \
472        private:                                                                \
473         Register const buffer_;                                                \
474         Register const index1_;                                                \
475         int32_t const index2_;                                                 \
476         int32_t const length_;                                                 \
477         Value const value_;                                                    \
478       };                                                                       \
479       auto ool = new (zone())                                                  \
480           OutOfLineStoreInteger(this, buffer, index1, index2, length, value);  \
481       __ j(above_equal, ool->entry());                                         \
482       __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
483       __ bind(ool->exit());                                                    \
484     }                                                                          \
485   } while (false)
486
487
488 #define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                \
489   do {                                                           \
490     if (instr->InputAt(4)->IsRegister()) {                       \
491       Register value = i.InputRegister(4);                       \
492       ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register);  \
493     } else {                                                     \
494       Immediate value = i.InputImmediate(4);                     \
495       ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
496     }                                                            \
497   } while (false)
498
499
500 // Assembles an instruction after register allocation, producing machine code.
501 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
502   X64OperandConverter i(this, instr);
503
504   switch (ArchOpcodeField::decode(instr->opcode())) {
505     case kArchCallCodeObject: {
506       EnsureSpaceForLazyDeopt();
507       if (HasImmediateInput(instr, 0)) {
508         Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
509         __ Call(code, RelocInfo::CODE_TARGET);
510       } else {
511         Register reg = i.InputRegister(0);
512         int entry = Code::kHeaderSize - kHeapObjectTag;
513         __ Call(Operand(reg, entry));
514       }
515       AddSafepointAndDeopt(instr);
516       break;
517     }
518     case kArchCallJSFunction: {
519       EnsureSpaceForLazyDeopt();
520       Register func = i.InputRegister(0);
521       if (FLAG_debug_code) {
522         // Check the function's context matches the context argument.
523         __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
524         __ Assert(equal, kWrongFunctionContext);
525       }
526       __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
527       AddSafepointAndDeopt(instr);
528       break;
529     }
530     case kArchJmp:
531       AssembleArchJump(i.InputRpo(0));
532       break;
533     case kArchNop:
534       // don't emit code for nops.
535       break;
536     case kArchRet:
537       AssembleReturn();
538       break;
539     case kArchStackPointer:
540       __ movq(i.OutputRegister(), rsp);
541       break;
542     case kArchTruncateDoubleToI: {
543       auto result = i.OutputRegister();
544       auto input = i.InputDoubleRegister(0);
545       auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
546       __ cvttsd2siq(result, input);
547       __ cmpq(result, Immediate(1));
548       __ j(overflow, ool->entry());
549       __ bind(ool->exit());
550       break;
551     }
552     case kX64Add32:
553       ASSEMBLE_BINOP(addl);
554       break;
555     case kX64Add:
556       ASSEMBLE_BINOP(addq);
557       break;
558     case kX64Sub32:
559       ASSEMBLE_BINOP(subl);
560       break;
561     case kX64Sub:
562       ASSEMBLE_BINOP(subq);
563       break;
564     case kX64And32:
565       ASSEMBLE_BINOP(andl);
566       break;
567     case kX64And:
568       ASSEMBLE_BINOP(andq);
569       break;
570     case kX64Cmp32:
571       ASSEMBLE_BINOP(cmpl);
572       break;
573     case kX64Cmp:
574       ASSEMBLE_BINOP(cmpq);
575       break;
576     case kX64Test32:
577       ASSEMBLE_BINOP(testl);
578       break;
579     case kX64Test:
580       ASSEMBLE_BINOP(testq);
581       break;
582     case kX64Imul32:
583       ASSEMBLE_MULT(imull);
584       break;
585     case kX64Imul:
586       ASSEMBLE_MULT(imulq);
587       break;
588     case kX64ImulHigh32:
589       if (instr->InputAt(1)->IsRegister()) {
590         __ imull(i.InputRegister(1));
591       } else {
592         __ imull(i.InputOperand(1));
593       }
594       break;
595     case kX64UmulHigh32:
596       if (instr->InputAt(1)->IsRegister()) {
597         __ mull(i.InputRegister(1));
598       } else {
599         __ mull(i.InputOperand(1));
600       }
601       break;
602     case kX64Idiv32:
603       __ cdq();
604       __ idivl(i.InputRegister(1));
605       break;
606     case kX64Idiv:
607       __ cqo();
608       __ idivq(i.InputRegister(1));
609       break;
610     case kX64Udiv32:
611       __ xorl(rdx, rdx);
612       __ divl(i.InputRegister(1));
613       break;
614     case kX64Udiv:
615       __ xorq(rdx, rdx);
616       __ divq(i.InputRegister(1));
617       break;
618     case kX64Not:
619       ASSEMBLE_UNOP(notq);
620       break;
621     case kX64Not32:
622       ASSEMBLE_UNOP(notl);
623       break;
624     case kX64Neg:
625       ASSEMBLE_UNOP(negq);
626       break;
627     case kX64Neg32:
628       ASSEMBLE_UNOP(negl);
629       break;
630     case kX64Or32:
631       ASSEMBLE_BINOP(orl);
632       break;
633     case kX64Or:
634       ASSEMBLE_BINOP(orq);
635       break;
636     case kX64Xor32:
637       ASSEMBLE_BINOP(xorl);
638       break;
639     case kX64Xor:
640       ASSEMBLE_BINOP(xorq);
641       break;
642     case kX64Shl32:
643       ASSEMBLE_SHIFT(shll, 5);
644       break;
645     case kX64Shl:
646       ASSEMBLE_SHIFT(shlq, 6);
647       break;
648     case kX64Shr32:
649       ASSEMBLE_SHIFT(shrl, 5);
650       break;
651     case kX64Shr:
652       ASSEMBLE_SHIFT(shrq, 6);
653       break;
654     case kX64Sar32:
655       ASSEMBLE_SHIFT(sarl, 5);
656       break;
657     case kX64Sar:
658       ASSEMBLE_SHIFT(sarq, 6);
659       break;
660     case kX64Ror32:
661       ASSEMBLE_SHIFT(rorl, 5);
662       break;
663     case kX64Ror:
664       ASSEMBLE_SHIFT(rorq, 6);
665       break;
666     case kSSEFloat64Cmp:
667       ASSEMBLE_DOUBLE_BINOP(ucomisd);
668       break;
669     case kSSEFloat64Add:
670       ASSEMBLE_DOUBLE_BINOP(addsd);
671       break;
672     case kSSEFloat64Sub:
673       ASSEMBLE_DOUBLE_BINOP(subsd);
674       break;
675     case kSSEFloat64Mul:
676       ASSEMBLE_DOUBLE_BINOP(mulsd);
677       break;
678     case kSSEFloat64Div:
679       ASSEMBLE_DOUBLE_BINOP(divsd);
680       break;
681     case kSSEFloat64Mod: {
682       __ subq(rsp, Immediate(kDoubleSize));
683       // Move values to st(0) and st(1).
684       __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
685       __ fld_d(Operand(rsp, 0));
686       __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
687       __ fld_d(Operand(rsp, 0));
688       // Loop while fprem isn't done.
689       Label mod_loop;
690       __ bind(&mod_loop);
691       // This instructions traps on all kinds inputs, but we are assuming the
692       // floating point control word is set to ignore them all.
693       __ fprem();
694       // The following 2 instruction implicitly use rax.
695       __ fnstsw_ax();
696       if (CpuFeatures::IsSupported(SAHF)) {
697         CpuFeatureScope sahf_scope(masm(), SAHF);
698         __ sahf();
699       } else {
700         __ shrl(rax, Immediate(8));
701         __ andl(rax, Immediate(0xFF));
702         __ pushq(rax);
703         __ popfq();
704       }
705       __ j(parity_even, &mod_loop);
706       // Move output to stack and clean up.
707       __ fstp(1);
708       __ fstp_d(Operand(rsp, 0));
709       __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
710       __ addq(rsp, Immediate(kDoubleSize));
711       break;
712     }
713     case kSSEFloat64Sqrt:
714       if (instr->InputAt(0)->IsDoubleRegister()) {
715         __ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
716       } else {
717         __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
718       }
719       break;
720     case kSSEFloat64Floor: {
721       CpuFeatureScope sse_scope(masm(), SSE4_1);
722       __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
723                  v8::internal::Assembler::kRoundDown);
724       break;
725     }
726     case kSSEFloat64Ceil: {
727       CpuFeatureScope sse_scope(masm(), SSE4_1);
728       __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
729                  v8::internal::Assembler::kRoundUp);
730       break;
731     }
732     case kSSEFloat64RoundTruncate: {
733       CpuFeatureScope sse_scope(masm(), SSE4_1);
734       __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
735                  v8::internal::Assembler::kRoundToZero);
736       break;
737     }
738     case kSSECvtss2sd:
739       if (instr->InputAt(0)->IsDoubleRegister()) {
740         __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
741       } else {
742         __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
743       }
744       break;
745     case kSSECvtsd2ss:
746       if (instr->InputAt(0)->IsDoubleRegister()) {
747         __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
748       } else {
749         __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
750       }
751       break;
752     case kSSEFloat64ToInt32:
753       if (instr->InputAt(0)->IsDoubleRegister()) {
754         __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
755       } else {
756         __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
757       }
758       break;
759     case kSSEFloat64ToUint32: {
760       if (instr->InputAt(0)->IsDoubleRegister()) {
761         __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
762       } else {
763         __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
764       }
765       __ AssertZeroExtended(i.OutputRegister());
766       break;
767     }
768     case kSSEInt32ToFloat64:
769       if (instr->InputAt(0)->IsRegister()) {
770         __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
771       } else {
772         __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
773       }
774       break;
775     case kSSEUint32ToFloat64:
776       if (instr->InputAt(0)->IsRegister()) {
777         __ movl(kScratchRegister, i.InputRegister(0));
778       } else {
779         __ movl(kScratchRegister, i.InputOperand(0));
780       }
781       __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
782       break;
783     case kAVXFloat64Add:
784       ASSEMBLE_AVX_DOUBLE_BINOP(vaddsd);
785       break;
786     case kAVXFloat64Sub:
787       ASSEMBLE_AVX_DOUBLE_BINOP(vsubsd);
788       break;
789     case kAVXFloat64Mul:
790       ASSEMBLE_AVX_DOUBLE_BINOP(vmulsd);
791       break;
792     case kAVXFloat64Div:
793       ASSEMBLE_AVX_DOUBLE_BINOP(vdivsd);
794       break;
795     case kX64Movsxbl:
796       if (instr->addressing_mode() != kMode_None) {
797         __ movsxbl(i.OutputRegister(), i.MemoryOperand());
798       } else if (instr->InputAt(0)->IsRegister()) {
799         __ movsxbl(i.OutputRegister(), i.InputRegister(0));
800       } else {
801         __ movsxbl(i.OutputRegister(), i.InputOperand(0));
802       }
803       __ AssertZeroExtended(i.OutputRegister());
804       break;
805     case kX64Movzxbl:
806       __ movzxbl(i.OutputRegister(), i.MemoryOperand());
807       break;
808     case kX64Movb: {
809       int index = 0;
810       Operand operand = i.MemoryOperand(&index);
811       if (HasImmediateInput(instr, index)) {
812         __ movb(operand, Immediate(i.InputInt8(index)));
813       } else {
814         __ movb(operand, i.InputRegister(index));
815       }
816       break;
817     }
818     case kX64Movsxwl:
819       if (instr->addressing_mode() != kMode_None) {
820         __ movsxwl(i.OutputRegister(), i.MemoryOperand());
821       } else if (instr->InputAt(0)->IsRegister()) {
822         __ movsxwl(i.OutputRegister(), i.InputRegister(0));
823       } else {
824         __ movsxwl(i.OutputRegister(), i.InputOperand(0));
825       }
826       __ AssertZeroExtended(i.OutputRegister());
827       break;
828     case kX64Movzxwl:
829       __ movzxwl(i.OutputRegister(), i.MemoryOperand());
830       __ AssertZeroExtended(i.OutputRegister());
831       break;
832     case kX64Movw: {
833       int index = 0;
834       Operand operand = i.MemoryOperand(&index);
835       if (HasImmediateInput(instr, index)) {
836         __ movw(operand, Immediate(i.InputInt16(index)));
837       } else {
838         __ movw(operand, i.InputRegister(index));
839       }
840       break;
841     }
842     case kX64Movl:
843       if (instr->HasOutput()) {
844         if (instr->addressing_mode() == kMode_None) {
845           if (instr->InputAt(0)->IsRegister()) {
846             __ movl(i.OutputRegister(), i.InputRegister(0));
847           } else {
848             __ movl(i.OutputRegister(), i.InputOperand(0));
849           }
850         } else {
851           __ movl(i.OutputRegister(), i.MemoryOperand());
852         }
853         __ AssertZeroExtended(i.OutputRegister());
854       } else {
855         int index = 0;
856         Operand operand = i.MemoryOperand(&index);
857         if (HasImmediateInput(instr, index)) {
858           __ movl(operand, i.InputImmediate(index));
859         } else {
860           __ movl(operand, i.InputRegister(index));
861         }
862       }
863       break;
864     case kX64Movsxlq: {
865       if (instr->InputAt(0)->IsRegister()) {
866         __ movsxlq(i.OutputRegister(), i.InputRegister(0));
867       } else {
868         __ movsxlq(i.OutputRegister(), i.InputOperand(0));
869       }
870       break;
871     }
872     case kX64Movq:
873       if (instr->HasOutput()) {
874         __ movq(i.OutputRegister(), i.MemoryOperand());
875       } else {
876         int index = 0;
877         Operand operand = i.MemoryOperand(&index);
878         if (HasImmediateInput(instr, index)) {
879           __ movq(operand, i.InputImmediate(index));
880         } else {
881           __ movq(operand, i.InputRegister(index));
882         }
883       }
884       break;
885     case kX64Movss:
886       if (instr->HasOutput()) {
887         __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
888       } else {
889         int index = 0;
890         Operand operand = i.MemoryOperand(&index);
891         __ movss(operand, i.InputDoubleRegister(index));
892       }
893       break;
894     case kX64Movsd:
895       if (instr->HasOutput()) {
896         __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
897       } else {
898         int index = 0;
899         Operand operand = i.MemoryOperand(&index);
900         __ movsd(operand, i.InputDoubleRegister(index));
901       }
902       break;
903     case kX64Lea32: {
904       AddressingMode mode = AddressingModeField::decode(instr->opcode());
905       // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
906       // and addressing mode just happens to work out. The "addl"/"subl" forms
907       // in these cases are faster based on measurements.
908       if (i.InputRegister(0).is(i.OutputRegister())) {
909         if (mode == kMode_MRI) {
910           int32_t constant_summand = i.InputInt32(1);
911           if (constant_summand > 0) {
912             __ addl(i.OutputRegister(), Immediate(constant_summand));
913           } else if (constant_summand < 0) {
914             __ subl(i.OutputRegister(), Immediate(-constant_summand));
915           }
916         } else if (mode == kMode_MR1) {
917           if (i.InputRegister(1).is(i.OutputRegister())) {
918             __ shll(i.OutputRegister(), Immediate(1));
919           } else {
920             __ leal(i.OutputRegister(), i.MemoryOperand());
921           }
922         } else if (mode == kMode_M2) {
923           __ shll(i.OutputRegister(), Immediate(1));
924         } else if (mode == kMode_M4) {
925           __ shll(i.OutputRegister(), Immediate(2));
926         } else if (mode == kMode_M8) {
927           __ shll(i.OutputRegister(), Immediate(3));
928         } else {
929           __ leal(i.OutputRegister(), i.MemoryOperand());
930         }
931       } else {
932         __ leal(i.OutputRegister(), i.MemoryOperand());
933       }
934       __ AssertZeroExtended(i.OutputRegister());
935       break;
936     }
937     case kX64Lea:
938       __ leaq(i.OutputRegister(), i.MemoryOperand());
939       break;
940     case kX64Dec32:
941       __ decl(i.OutputRegister());
942       break;
943     case kX64Inc32:
944       __ incl(i.OutputRegister());
945       break;
946     case kX64Push:
947       if (HasImmediateInput(instr, 0)) {
948         __ pushq(i.InputImmediate(0));
949       } else {
950         if (instr->InputAt(0)->IsRegister()) {
951           __ pushq(i.InputRegister(0));
952         } else {
953           __ pushq(i.InputOperand(0));
954         }
955       }
956       break;
957     case kX64StoreWriteBarrier: {
958       Register object = i.InputRegister(0);
959       Register index = i.InputRegister(1);
960       Register value = i.InputRegister(2);
961       __ movsxlq(index, index);
962       __ movq(Operand(object, index, times_1, 0), value);
963       __ leaq(index, Operand(object, index, times_1, 0));
964       SaveFPRegsMode mode =
965           frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
966       __ RecordWrite(object, index, value, mode);
967       break;
968     }
969     case kCheckedLoadInt8:
970       ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
971       break;
972     case kCheckedLoadUint8:
973       ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
974       break;
975     case kCheckedLoadInt16:
976       ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
977       break;
978     case kCheckedLoadUint16:
979       ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
980       break;
981     case kCheckedLoadWord32:
982       ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
983       break;
984     case kCheckedLoadFloat32:
985       ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
986       break;
987     case kCheckedLoadFloat64:
988       ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
989       break;
990     case kCheckedStoreWord8:
991       ASSEMBLE_CHECKED_STORE_INTEGER(movb);
992       break;
993     case kCheckedStoreWord16:
994       ASSEMBLE_CHECKED_STORE_INTEGER(movw);
995       break;
996     case kCheckedStoreWord32:
997       ASSEMBLE_CHECKED_STORE_INTEGER(movl);
998       break;
999     case kCheckedStoreFloat32:
1000       ASSEMBLE_CHECKED_STORE_FLOAT(movss);
1001       break;
1002     case kCheckedStoreFloat64:
1003       ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
1004       break;
1005   }
1006 }
1007
1008
1009 // Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  X64OperandConverter i(this, instr);
  // If the false block is the fall-through successor, the final jump to it
  // is elided (see bottom) and any pre-branch to it can be encoded near.
  Label::Distance flabel_distance =
      branch->fallthru ? Label::kNear : Label::kFar;
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  // The kUnordered* conditions follow a floating-point compare (ucomisd),
  // which sets the parity flag when either operand is NaN. Those cases
  // branch on parity first and then fall through to the jump for the
  // corresponding ordered condition.
  switch (branch->condition) {
    case kUnorderedEqual:
      // NaN compares "not equal", so an unordered result goes false.
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      // NaN compares "not equal", so an unordered result goes true.
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnorderedLessThan:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
  }
  // Jump to the false block unless it directly follows in assembly order.
  if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}
1074
1075
1076 void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
1077   if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
1078 }
1079
1080
1081 // Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, static_cast<int>(instr->OutputCount()));
  Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
  Condition cc = no_condition;
  // The kUnordered* conditions follow a floating-point compare, which sets
  // the parity flag for NaN operands. Each such case jumps on parity_odd
  // (operands ordered) to the generic setcc path at `check`; otherwise it
  // stores the fixed NaN result (0 or 1) and skips to `done`.
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnorderedLessThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnorderedGreaterThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
  }
  // Ordered/integer path: set the low byte from the condition, then
  // zero-extend to 32 bits (which also clears the upper 32 bits on x64).
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}
1166
1167
1168 void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
1169   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
1170       isolate(), deoptimization_id, Deoptimizer::LAZY);
1171   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
1172 }
1173
1174
// Builds the stack frame for the current code object. The frame layout
// depends on the incoming call descriptor: C entry, JS function, or stub.
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    // C entry frame: link rbp manually and save callee-saved registers.
    __ pushq(rbp);
    __ movq(rbp, rsp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      // Push from the highest register code down so that AssembleReturn can
      // pop them back in ascending order.
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    // Standard JS function frame, emitted by the macro-assembler.
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else {
    // Stub frame.
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  // Reserve space for spill slots below any saved registers.
  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));
  }
}
1205
1206
// Tears down the frame built by AssemblePrologue and returns to the caller.
void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.
      // Pop in ascending register-code order, mirroring the descending push
      // order used in AssemblePrologue.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
        }
      }
      __ popq(rbp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    // JS calls additionally pop their parameter slots from the stack; other
    // (stub) calls leave the caller's stack untouched.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ ret(pop_count * kPointerSize);
  }
}
1241
1242
// Emits a single parallel-move element: copies `source` into `destination`.
// Uses kScratchRegister for GP staging and xmm0 for FP staging, both of
// which are reserved as fixed scratch registers by this backend.
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      // For a stack-slot destination, stage the constant in the scratch
      // register and store it after the switch.
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ Set(dst, src.ToInt32());
          break;
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
          break;
        case Constant::kFloat32:
          // Float constants headed for a GP register/slot are materialized
          // as heap numbers allocated via the factory.
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): load of labels on x64.
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      // Reinterpret the float's bits as an integer so it can be moved with
      // plain integer instructions.
      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, Immediate(src_const));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        // There is no 64-bit immediate store; stage through the scratch
        // register instead.
        __ movq(kScratchRegister, src_const);
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}
1352
1353
// Emits a swap of the two operands, used by the gap resolver to break
// cycles in parallel moves. Relies on the same fixed scratch registers as
// AssembleMove (kScratchRegister for GP values, xmm0 for FP values).
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    // Register-memory: xchg can operate directly on the stack slot.
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    // Load dst into the scratch register, exchange it with src in memory,
    // then store the old src value back into dst.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap.  We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
1397
1398
1399 void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
1400
1401
1402 void CodeGenerator::EnsureSpaceForLazyDeopt() {
1403   int space_needed = Deoptimizer::patch_size();
1404   if (!info()->IsStub()) {
1405     // Ensure that we have enough space after the previous lazy-bailout
1406     // instruction for patching the code here.
1407     int current_pc = masm()->pc_offset();
1408     if (current_pc < last_lazy_deopt_pc_ + space_needed) {
1409       int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
1410       __ Nop(padding_size);
1411     }
1412   }
1413   MarkLazyDeoptSite();
1414 }
1415
1416 #undef __
1417
1418 }  // namespace internal
1419 }  // namespace compiler
1420 }  // namespace v8