[turbofan] Simplifying handling of callee-cleanup stack area.
src/compiler/x64/code-generator-x64.cc
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


#define kScratchDoubleReg xmm0


// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Immediate InputImmediate(size_t index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(size_t index, int extra = 0) {
    return ToOperand(instr_->InputAt(index), extra);
  }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

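  // Note: as the DCHECK below documents, the only float64 constant that may
  // be encoded as an immediate is +0.0 (all zero bits); any other double must
  // be materialized by other means.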
  Immediate ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    if (constant.type() == Constant::kFloat64) {
      DCHECK_EQ(0, bit_cast<int64_t>(constant.ToFloat64()));
      return Immediate(0);
    }
    return Immediate(constant.ToInt32());
  }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(
        AllocatedOperand::cast(op)->index(), frame(), extra);
    return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
  }

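  // Returns the current input offset and post-increments it, consuming
  // instruction inputs in order while a memory operand is decoded.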
  static size_t NextOffset(size_t* offset) {
    size_t i = *offset;
    (*offset)++;
    return i;
  }

  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }

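  // Decodes the addressing mode encoded in the instruction and consumes the
  // corresponding inputs. For example, kMode_MR4I consumes a base register,
  // an index register and a 32-bit displacement, and denotes the x64 address
  // [base + index * 4 + disp].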
  Operand MemoryOperand(size_t* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_M2:
        UNREACHABLE();  // Should use kMode_MR with a more compact encoding instead.
        return Operand(no_reg, 0);
      case kMode_M4:
      case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  Operand MemoryOperand(size_t first_input = 0) {
    return MemoryOperand(&first_input);
  }
};


namespace {

bool HasImmediateInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsImmediate();
}


class OutOfLineLoadZero final : public OutOfLineCode {
 public:
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ xorl(result_, result_); }

 private:
  Register const result_;
};


class OutOfLineLoadNaN final : public OutOfLineCode {
 public:
  OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ pcmpeqd(result_, result_); }

 private:
  XMMRegister const result_;
};

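// Out-of-line path for kArchTruncateDoubleToI: spills the input to a stack
// slot and lets SlowTruncateToI handle values the fast conversion rejects.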
class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
 public:
  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
                             XMMRegister input)
      : OutOfLineCode(gen), result_(result), input_(input) {}

  void Generate() final {
    __ subp(rsp, Immediate(kDoubleSize));
    __ movsd(MemOperand(rsp, 0), input_);
    __ SlowTruncateToI(result_, rsp, 0);
    __ addp(rsp, Immediate(kDoubleSize));
  }

 private:
  Register const result_;
  XMMRegister const input_;
};

}  // namespace


#define ASSEMBLE_UNOP(asm_instr)         \
  do {                                   \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister());  \
    } else {                             \
      __ asm_instr(i.OutputOperand());   \
    }                                    \
  } while (0)


#define ASSEMBLE_BINOP(asm_instr)                              \
  do {                                                         \
    if (HasImmediateInput(instr, 1)) {                         \
      if (instr->InputAt(0)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else {                                                 \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
      }                                                        \
    } else {                                                   \
      if (instr->InputAt(1)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
      } else {                                                 \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
      }                                                        \
    }                                                          \
  } while (0)


#define ASSEMBLE_MULT(asm_instr)                              \
  do {                                                        \
    if (HasImmediateInput(instr, 1)) {                        \
      if (instr->InputAt(0)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0),  \
                     i.InputImmediate(1));                    \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0),   \
                     i.InputImmediate(1));                    \
      }                                                       \
    } else {                                                  \
      if (instr->InputAt(1)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1));  \
      }                                                       \
    }                                                         \
  } while (0)


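// The |width| argument selects InputInt5/InputInt6 below, matching the
// hardware behavior of masking shift counts to 5 bits for 32-bit operands
// and 6 bits for 64-bit operands.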
#define ASSEMBLE_SHIFT(asm_instr, width)                                   \
  do {                                                                     \
    if (HasImmediateInput(instr, 1)) {                                     \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
      } else {                                                             \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1)));  \
      }                                                                    \
    } else {                                                               \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr##_cl(i.OutputRegister());                             \
      } else {                                                             \
        __ asm_instr##_cl(i.OutputOperand());                              \
      }                                                                    \
    }                                                                      \
  } while (0)


#define ASSEMBLE_MOVX(asm_instr)                            \
  do {                                                      \
    if (instr->addressing_mode() != kMode_None) {           \
      __ asm_instr(i.OutputRegister(), i.MemoryOperand());  \
    } else if (instr->InputAt(0)->IsRegister()) {           \
      __ asm_instr(i.OutputRegister(), i.InputRegister(0)); \
    } else {                                                \
      __ asm_instr(i.OutputRegister(), i.InputOperand(0));  \
    }                                                       \
  } while (0)


#define ASSEMBLE_SSE_BINOP(asm_instr)                                   \
  do {                                                                  \
    if (instr->InputAt(1)->IsDoubleRegister()) {                        \
      __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    } else {                                                            \
      __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1));        \
    }                                                                   \
  } while (0)


#define ASSEMBLE_SSE_UNOP(asm_instr)                                    \
  do {                                                                  \
    if (instr->InputAt(0)->IsDoubleRegister()) {                        \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
    } else {                                                            \
      __ asm_instr(i.OutputDoubleRegister(), i.InputOperand(0));        \
    }                                                                   \
  } while (0)


#define ASSEMBLE_AVX_BINOP(asm_instr)                                  \
  do {                                                                 \
    CpuFeatureScope avx_scope(masm(), AVX);                            \
    if (instr->InputAt(1)->IsDoubleRegister()) {                       \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputDoubleRegister(1));                          \
    } else {                                                           \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputOperand(1));                                 \
    }                                                                  \
  } while (0)


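// Checked accesses guard |index1 + index2| against |length|. The fast path
// compares index1 with (length - index2) directly; the out-of-line code
// recomputes the sum with 32-bit wrap-around semantics (leal) before either
// performing the access or producing the out-of-bounds result.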
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                               \
  do {                                                                       \
    auto result = i.OutputDoubleRegister();                                  \
    auto buffer = i.InputRegister(0);                                        \
    auto index1 = i.InputRegister(1);                                        \
    auto index2 = i.InputInt32(2);                                           \
    OutOfLineCode* ool;                                                      \
    if (instr->InputAt(3)->IsRegister()) {                                   \
      auto length = i.InputRegister(3);                                      \
      DCHECK_EQ(0, index2);                                                  \
      __ cmpl(index1, length);                                               \
      ool = new (zone()) OutOfLineLoadNaN(this, result);                     \
    } else {                                                                 \
      auto length = i.InputInt32(3);                                         \
      DCHECK_LE(index2, length);                                             \
      __ cmpq(index1, Immediate(length - index2));                           \
      class OutOfLineLoadFloat final : public OutOfLineCode {                \
       public:                                                               \
        OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result,           \
                           Register buffer, Register index1, int32_t index2, \
                           int32_t length)                                   \
            : OutOfLineCode(gen),                                            \
              result_(result),                                               \
              buffer_(buffer),                                               \
              index1_(index1),                                               \
              index2_(index2),                                               \
              length_(length) {}                                             \
                                                                             \
        void Generate() final {                                              \
          __ leal(kScratchRegister, Operand(index1_, index2_));              \
          __ pcmpeqd(result_, result_);                                      \
          __ cmpl(kScratchRegister, Immediate(length_));                     \
          __ j(above_equal, exit());                                         \
          __ asm_instr(result_,                                              \
                       Operand(buffer_, kScratchRegister, times_1, 0));      \
        }                                                                    \
                                                                             \
       private:                                                              \
        XMMRegister const result_;                                           \
        Register const buffer_;                                              \
        Register const index1_;                                              \
        int32_t const index2_;                                               \
        int32_t const length_;                                               \
      };                                                                     \
      ool = new (zone())                                                     \
          OutOfLineLoadFloat(this, result, buffer, index1, index2, length);  \
    }                                                                        \
    __ j(above_equal, ool->entry());                                         \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2));          \
    __ bind(ool->exit());                                                    \
  } while (false)


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                               \
  do {                                                                         \
    auto result = i.OutputRegister();                                          \
    auto buffer = i.InputRegister(0);                                          \
    auto index1 = i.InputRegister(1);                                          \
    auto index2 = i.InputInt32(2);                                             \
    OutOfLineCode* ool;                                                        \
    if (instr->InputAt(3)->IsRegister()) {                                     \
      auto length = i.InputRegister(3);                                        \
      DCHECK_EQ(0, index2);                                                    \
      __ cmpl(index1, length);                                                 \
      ool = new (zone()) OutOfLineLoadZero(this, result);                      \
    } else {                                                                   \
      auto length = i.InputInt32(3);                                           \
      DCHECK_LE(index2, length);                                               \
      __ cmpq(index1, Immediate(length - index2));                             \
      class OutOfLineLoadInteger final : public OutOfLineCode {                \
       public:                                                                 \
        OutOfLineLoadInteger(CodeGenerator* gen, Register result,              \
                             Register buffer, Register index1, int32_t index2, \
                             int32_t length)                                   \
            : OutOfLineCode(gen),                                              \
              result_(result),                                                 \
              buffer_(buffer),                                                 \
              index1_(index1),                                                 \
              index2_(index2),                                                 \
              length_(length) {}                                               \
                                                                               \
        void Generate() final {                                                \
          Label oob;                                                           \
          __ leal(kScratchRegister, Operand(index1_, index2_));                \
          __ cmpl(kScratchRegister, Immediate(length_));                       \
          __ j(above_equal, &oob, Label::kNear);                               \
          __ asm_instr(result_,                                                \
                       Operand(buffer_, kScratchRegister, times_1, 0));        \
          __ jmp(exit());                                                      \
          __ bind(&oob);                                                       \
          __ xorl(result_, result_);                                           \
        }                                                                      \
                                                                               \
       private:                                                                \
        Register const result_;                                                \
        Register const buffer_;                                                \
        Register const index1_;                                                \
        int32_t const index2_;                                                 \
        int32_t const length_;                                                 \
      };                                                                       \
      ool = new (zone())                                                       \
          OutOfLineLoadInteger(this, result, buffer, index1, index2, length);  \
    }                                                                          \
    __ j(above_equal, ool->entry());                                           \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2));            \
    __ bind(ool->exit());                                                      \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                              \
  do {                                                                       \
    auto buffer = i.InputRegister(0);                                        \
    auto index1 = i.InputRegister(1);                                        \
    auto index2 = i.InputInt32(2);                                           \
    auto value = i.InputDoubleRegister(4);                                   \
    if (instr->InputAt(3)->IsRegister()) {                                   \
      auto length = i.InputRegister(3);                                      \
      DCHECK_EQ(0, index2);                                                  \
      Label done;                                                            \
      __ cmpl(index1, length);                                               \
      __ j(above_equal, &done, Label::kNear);                                \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
      __ bind(&done);                                                        \
    } else {                                                                 \
      auto length = i.InputInt32(3);                                         \
      DCHECK_LE(index2, length);                                             \
      __ cmpq(index1, Immediate(length - index2));                           \
      class OutOfLineStoreFloat final : public OutOfLineCode {               \
       public:                                                               \
        OutOfLineStoreFloat(CodeGenerator* gen, Register buffer,             \
                            Register index1, int32_t index2, int32_t length, \
                            XMMRegister value)                               \
            : OutOfLineCode(gen),                                            \
              buffer_(buffer),                                               \
              index1_(index1),                                               \
              index2_(index2),                                               \
              length_(length),                                               \
              value_(value) {}                                               \
                                                                             \
        void Generate() final {                                              \
          __ leal(kScratchRegister, Operand(index1_, index2_));              \
          __ cmpl(kScratchRegister, Immediate(length_));                     \
          __ j(above_equal, exit());                                         \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),       \
                       value_);                                              \
        }                                                                    \
                                                                             \
       private:                                                              \
        Register const buffer_;                                              \
        Register const index1_;                                              \
        int32_t const index2_;                                               \
        int32_t const length_;                                               \
        XMMRegister const value_;                                            \
      };                                                                     \
      auto ool = new (zone())                                                \
          OutOfLineStoreFloat(this, buffer, index1, index2, length, value);  \
      __ j(above_equal, ool->entry());                                       \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
      __ bind(ool->exit());                                                  \
    }                                                                        \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value)                  \
  do {                                                                         \
    auto buffer = i.InputRegister(0);                                          \
    auto index1 = i.InputRegister(1);                                          \
    auto index2 = i.InputInt32(2);                                             \
    if (instr->InputAt(3)->IsRegister()) {                                     \
      auto length = i.InputRegister(3);                                        \
      DCHECK_EQ(0, index2);                                                    \
      Label done;                                                              \
      __ cmpl(index1, length);                                                 \
      __ j(above_equal, &done, Label::kNear);                                  \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
      __ bind(&done);                                                          \
    } else {                                                                   \
      auto length = i.InputInt32(3);                                           \
      DCHECK_LE(index2, length);                                               \
      __ cmpq(index1, Immediate(length - index2));                             \
      class OutOfLineStoreInteger final : public OutOfLineCode {               \
       public:                                                                 \
        OutOfLineStoreInteger(CodeGenerator* gen, Register buffer,             \
                              Register index1, int32_t index2, int32_t length, \
                              Value value)                                     \
            : OutOfLineCode(gen),                                              \
              buffer_(buffer),                                                 \
              index1_(index1),                                                 \
              index2_(index2),                                                 \
              length_(length),                                                 \
              value_(value) {}                                                 \
                                                                               \
        void Generate() final {                                                \
          __ leal(kScratchRegister, Operand(index1_, index2_));                \
          __ cmpl(kScratchRegister, Immediate(length_));                       \
          __ j(above_equal, exit());                                           \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),         \
                       value_);                                                \
        }                                                                      \
                                                                               \
       private:                                                                \
        Register const buffer_;                                                \
        Register const index1_;                                                \
        int32_t const index2_;                                                 \
        int32_t const length_;                                                 \
        Value const value_;                                                    \
      };                                                                       \
      auto ool = new (zone())                                                  \
          OutOfLineStoreInteger(this, buffer, index1, index2, length, value);  \
      __ j(above_equal, ool->entry());                                         \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
      __ bind(ool->exit());                                                    \
    }                                                                          \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                \
  do {                                                           \
    if (instr->InputAt(4)->IsRegister()) {                       \
      Register value = i.InputRegister(4);                       \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register);  \
    } else {                                                     \
      Immediate value = i.InputImmediate(4);                     \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
    }                                                            \
  } while (false)


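// Tears down the rbp-based activation record before a tail call, but only if
// a frame was actually constructed, i.e. for JS function calls or when spill
// slots were allocated.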
void CodeGenerator::AssembleDeconstructActivationRecord() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
    __ movq(rsp, rbp);
    __ popq(rbp);
  }
}


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
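        // Skip the Code object header to reach the first instruction;
        // subtracting kHeapObjectTag compensates for the tagged pointer.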
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      }
      RecordCallPosition(instr);
      break;
    }
    case kArchTailCallCodeObject: {
      AssembleDeconstructActivationRecord();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ jmp(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        __ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
        __ jmp(reg);
      }
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      RecordCallPosition(instr);
      break;
    }
    case kArchTailCallJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      AssembleDeconstructActivationRecord();
      __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
      break;
    }
    case kArchPrepareCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters);
      break;
    }
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (HasImmediateInput(instr, 0)) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
      break;
    }
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ movq(i.OutputRegister(), rsp);
      break;
    case kArchFramePointer:
      __ movq(i.OutputRegister(), rbp);
      break;
    case kArchTruncateDoubleToI: {
      auto result = i.OutputRegister();
      auto input = i.InputDoubleRegister(0);
      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
      __ cvttsd2siq(result, input);
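      // cvttsd2siq produces 0x8000000000000000 on overflow or NaN; comparing
      // that sentinel with 1 sets the overflow flag, sending such inputs to
      // the out-of-line slow path.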
      __ cmpq(result, Immediate(1));
      __ j(overflow, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kX64Add32:
      ASSEMBLE_BINOP(addl);
      break;
    case kX64Add:
      ASSEMBLE_BINOP(addq);
      break;
    case kX64Sub32:
      ASSEMBLE_BINOP(subl);
      break;
    case kX64Sub:
      ASSEMBLE_BINOP(subq);
      break;
    case kX64And32:
      ASSEMBLE_BINOP(andl);
      break;
    case kX64And:
      ASSEMBLE_BINOP(andq);
      break;
    case kX64Cmp32:
      ASSEMBLE_BINOP(cmpl);
      break;
    case kX64Cmp:
      ASSEMBLE_BINOP(cmpq);
      break;
    case kX64Test32:
      ASSEMBLE_BINOP(testl);
      break;
    case kX64Test:
      ASSEMBLE_BINOP(testq);
      break;
    case kX64Imul32:
      ASSEMBLE_MULT(imull);
      break;
    case kX64Imul:
      ASSEMBLE_MULT(imulq);
      break;
    case kX64ImulHigh32:
      if (instr->InputAt(1)->IsRegister()) {
        __ imull(i.InputRegister(1));
      } else {
        __ imull(i.InputOperand(1));
      }
      break;
    case kX64UmulHigh32:
      if (instr->InputAt(1)->IsRegister()) {
        __ mull(i.InputRegister(1));
      } else {
        __ mull(i.InputOperand(1));
      }
      break;
    case kX64Idiv32:
      __ cdq();
      __ idivl(i.InputRegister(1));
      break;
    case kX64Idiv:
      __ cqo();
      __ idivq(i.InputRegister(1));
      break;
    case kX64Udiv32:
      __ xorl(rdx, rdx);
      __ divl(i.InputRegister(1));
      break;
    case kX64Udiv:
      __ xorq(rdx, rdx);
      __ divq(i.InputRegister(1));
      break;
    case kX64Not:
      ASSEMBLE_UNOP(notq);
      break;
    case kX64Not32:
      ASSEMBLE_UNOP(notl);
      break;
    case kX64Neg:
      ASSEMBLE_UNOP(negq);
      break;
    case kX64Neg32:
      ASSEMBLE_UNOP(negl);
      break;
    case kX64Or32:
      ASSEMBLE_BINOP(orl);
      break;
    case kX64Or:
      ASSEMBLE_BINOP(orq);
      break;
    case kX64Xor32:
      ASSEMBLE_BINOP(xorl);
      break;
    case kX64Xor:
      ASSEMBLE_BINOP(xorq);
      break;
    case kX64Shl32:
      ASSEMBLE_SHIFT(shll, 5);
      break;
    case kX64Shl:
      ASSEMBLE_SHIFT(shlq, 6);
      break;
    case kX64Shr32:
      ASSEMBLE_SHIFT(shrl, 5);
      break;
    case kX64Shr:
      ASSEMBLE_SHIFT(shrq, 6);
      break;
    case kX64Sar32:
      ASSEMBLE_SHIFT(sarl, 5);
      break;
    case kX64Sar:
      ASSEMBLE_SHIFT(sarq, 6);
      break;
    case kX64Ror32:
      ASSEMBLE_SHIFT(rorl, 5);
      break;
    case kX64Ror:
      ASSEMBLE_SHIFT(rorq, 6);
      break;
    case kX64Lzcnt32:
      if (instr->InputAt(0)->IsRegister()) {
        __ Lzcntl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Lzcntl(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat32Cmp:
      ASSEMBLE_SSE_BINOP(ucomiss);
      break;
    case kSSEFloat32Add:
      ASSEMBLE_SSE_BINOP(addss);
      break;
    case kSSEFloat32Sub:
      ASSEMBLE_SSE_BINOP(subss);
      break;
    case kSSEFloat32Mul:
      ASSEMBLE_SSE_BINOP(mulss);
      break;
    case kSSEFloat32Div:
      ASSEMBLE_SSE_BINOP(divss);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulss depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kSSEFloat32Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
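      // pcmpeqd yields all-ones; shifting each 64-bit lane right by 33 leaves
      // 0x7FFFFFFF in the low word, so the andps below clears just the sign
      // bit of the float32 value.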
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 33);
      __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat32Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 31);
      __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat32Sqrt:
      ASSEMBLE_SSE_UNOP(sqrtss);
      break;
    case kSSEFloat32Max:
      ASSEMBLE_SSE_BINOP(maxss);
      break;
    case kSSEFloat32Min:
      ASSEMBLE_SSE_BINOP(minss);
      break;
    case kSSEFloat32ToFloat64:
      ASSEMBLE_SSE_UNOP(cvtss2sd);
      break;
    case kSSEFloat64Cmp:
      ASSEMBLE_SSE_BINOP(ucomisd);
      break;
    case kSSEFloat64Add:
      ASSEMBLE_SSE_BINOP(addsd);
      break;
    case kSSEFloat64Sub:
      ASSEMBLE_SSE_BINOP(subsd);
      break;
    case kSSEFloat64Mul:
      ASSEMBLE_SSE_BINOP(mulsd);
      break;
    case kSSEFloat64Div:
      ASSEMBLE_SSE_BINOP(divsd);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulsd depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kSSEFloat64Mod: {
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instructions implicitly use rax.
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF)) {
        CpuFeatureScope sahf_scope(masm(), SAHF);
        __ sahf();
      } else {
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
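      // fprem sets the C2 status bit while the reduction is incomplete; after
      // fnstsw_ax/sahf, C2 lands in the parity flag, hence the loop condition.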
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Max:
      ASSEMBLE_SSE_BINOP(maxsd);
      break;
    case kSSEFloat64Min:
      ASSEMBLE_SSE_BINOP(minsd);
      break;
    case kSSEFloat64Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 1);
      __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat64Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 63);
      __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat64Sqrt:
      ASSEMBLE_SSE_UNOP(sqrtsd);
      break;
    case kSSEFloat64Round: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      RoundingMode const mode =
          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
      break;
    }
    case kSSEFloat64ToFloat32:
      ASSEMBLE_SSE_UNOP(cvtsd2ss);
      break;
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToUint32: {
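      // Any double in uint32 range fits in an int64, so the 64-bit truncation
      // captures it exactly; the upper half of the result must then be zero.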
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kSSEInt32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEUint32ToFloat64:
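      // movl zero-extends the 32-bit input to 64 bits, so the signed 64-bit
      // conversion below yields the correct unsigned 32-bit value.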
      if (instr->InputAt(0)->IsRegister()) {
        __ movl(kScratchRegister, i.InputRegister(0));
      } else {
        __ movl(kScratchRegister, i.InputOperand(0));
      }
      __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
      break;
    case kSSEFloat64ExtractLowWord32:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movl(i.OutputRegister(), i.InputOperand(0));
      } else {
        __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
      }
      break;
    case kSSEFloat64ExtractHighWord32:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movl(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
      } else {
        __ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
      }
      break;
    case kSSEFloat64InsertLowWord32:
      if (instr->InputAt(1)->IsRegister()) {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 0);
      } else {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
      }
      break;
    case kSSEFloat64InsertHighWord32:
      if (instr->InputAt(1)->IsRegister()) {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 1);
      } else {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
      }
      break;
    case kSSEFloat64LoadLowWord32:
      if (instr->InputAt(0)->IsRegister()) {
        __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ movd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kAVXFloat32Cmp: {
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ vucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      break;
    }
    case kAVXFloat32Add:
      ASSEMBLE_AVX_BINOP(vaddss);
      break;
    case kAVXFloat32Sub:
      ASSEMBLE_AVX_BINOP(vsubss);
      break;
    case kAVXFloat32Mul:
      ASSEMBLE_AVX_BINOP(vmulss);
      break;
    case kAVXFloat32Div:
      ASSEMBLE_AVX_BINOP(vdivss);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulss depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kAVXFloat32Max:
      ASSEMBLE_AVX_BINOP(vmaxss);
      break;
    case kAVXFloat32Min:
      ASSEMBLE_AVX_BINOP(vminss);
      break;
    case kAVXFloat64Cmp: {
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ vucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      break;
    }
    case kAVXFloat64Add:
      ASSEMBLE_AVX_BINOP(vaddsd);
      break;
    case kAVXFloat64Sub:
      ASSEMBLE_AVX_BINOP(vsubsd);
      break;
    case kAVXFloat64Mul:
      ASSEMBLE_AVX_BINOP(vmulsd);
      break;
    case kAVXFloat64Div:
      ASSEMBLE_AVX_BINOP(vdivsd);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulsd depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kAVXFloat64Max:
      ASSEMBLE_AVX_BINOP(vmaxsd);
      break;
    case kAVXFloat64Min:
      ASSEMBLE_AVX_BINOP(vminsd);
      break;
    case kAVXFloat32Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 33);
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputDoubleRegister(0));
      } else {
        __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputOperand(0));
      }
      break;
    }
    case kAVXFloat32Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 31);
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputDoubleRegister(0));
      } else {
        __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputOperand(0));
      }
      break;
    }
    case kAVXFloat64Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 1);
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputDoubleRegister(0));
      } else {
        __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputOperand(0));
      }
      break;
    }
    case kAVXFloat64Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 63);
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputDoubleRegister(0));
      } else {
        __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputOperand(0));
      }
      break;
    }
    case kX64Movsxbl:
      ASSEMBLE_MOVX(movsxbl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movzxbl:
      ASSEMBLE_MOVX(movzxbl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movb: {
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
      } else {
        __ movb(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movsxwl:
      ASSEMBLE_MOVX(movsxwl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movzxwl:
      ASSEMBLE_MOVX(movzxwl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movw: {
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
      } else {
        __ movw(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movl:
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          if (instr->InputAt(0)->IsRegister()) {
            __ movl(i.OutputRegister(), i.InputRegister(0));
          } else {
            __ movl(i.OutputRegister(), i.InputOperand(0));
          }
        } else {
          __ movl(i.OutputRegister(), i.MemoryOperand());
        }
        __ AssertZeroExtended(i.OutputRegister());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
        } else {
          __ movl(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movsxlq:
      ASSEMBLE_MOVX(movsxlq);
      break;
    case kX64Movq:
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
        } else {
          __ movq(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movss:
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Movsd:
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Lea32: {
      AddressingMode mode = AddressingModeField::decode(instr->opcode());
      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
      // and addressing mode just happens to work out. The "addl"/"subl" forms
      // in these cases are faster based on measurements.
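      // e.g. leal(rax, Operand(rax, 4)) becomes addl(rax, Immediate(4)).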
      if (i.InputRegister(0).is(i.OutputRegister())) {
        if (mode == kMode_MRI) {
          int32_t constant_summand = i.InputInt32(1);
          if (constant_summand > 0) {
            __ addl(i.OutputRegister(), Immediate(constant_summand));
          } else if (constant_summand < 0) {
            __ subl(i.OutputRegister(), Immediate(-constant_summand));
          }
        } else if (mode == kMode_MR1) {
          if (i.InputRegister(1).is(i.OutputRegister())) {
            __ shll(i.OutputRegister(), Immediate(1));
          } else {
            __ leal(i.OutputRegister(), i.MemoryOperand());
          }
        } else if (mode == kMode_M2) {
          __ shll(i.OutputRegister(), Immediate(1));
        } else if (mode == kMode_M4) {
          __ shll(i.OutputRegister(), Immediate(2));
        } else if (mode == kMode_M8) {
          __ shll(i.OutputRegister(), Immediate(3));
        } else {
          __ leal(i.OutputRegister(), i.MemoryOperand());
        }
      } else {
        __ leal(i.OutputRegister(), i.MemoryOperand());
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kX64Lea:
      __ leaq(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Dec32:
      __ decl(i.OutputRegister());
      break;
    case kX64Inc32:
      __ incl(i.OutputRegister());
      break;
    case kX64Push:
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
      } else {
        if (instr->InputAt(0)->IsRegister()) {
          __ pushq(i.InputRegister(0));
        } else {
          __ pushq(i.InputOperand(0));
        }
      }
      break;
    case kX64Poke: {
      int const slot = MiscField::decode(instr->opcode());
      if (HasImmediateInput(instr, 0)) {
        __ movq(Operand(rsp, slot * kPointerSize), i.InputImmediate(0));
      } else {
        __ movq(Operand(rsp, slot * kPointerSize), i.InputRegister(0));
      }
      break;
    }
    case kX64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register value = i.InputRegister(2);
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      if (HasImmediateInput(instr, 1)) {
        int index = i.InputInt32(1);
        Register scratch = i.TempRegister(1);
        __ movq(Operand(object, index), value);
        __ RecordWriteContextSlot(object, index, value, scratch, mode);
      } else {
        Register index = i.InputRegister(1);
        __ movq(Operand(object, index, times_1, 0), value);
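        // Reuse |index| to hold the absolute slot address that RecordWrite
        // expects.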
1249         __ leaq(index, Operand(object, index, times_1, 0));
1250         __ RecordWrite(object, index, value, mode);
1251       }
1252       break;
1253     }
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(movb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(movw);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(movl);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(movss);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
      break;
    case kX64StackCheck:
      __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
      break;
  }
}  // NOLINT(readability/fn_size)


// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  X64OperandConverter i(this, instr);
  Label::Distance flabel_distance =
      branch->fallthru ? Label::kNear : Label::kFar;
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
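  // The kUnordered* conditions arise from floating-point comparisons; on x64
  // an unordered comparison result (a NaN operand) sets the parity flag.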
  switch (branch->condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
  }
  if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}


void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}


// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = no_condition;
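  // For the kUnordered* cases the NaN outcome is handled up front: if the
  // parity flag is set, the result is written directly (0 for equal, 1 for
  // not-equal) and control skips to |done|.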
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
  }
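  // setcc writes only the low byte; movzxbl zero-extends it, and a 32-bit
  // register write implicitly clears the upper 32 bits, yielding the full
  // 64-bit 0 or 1.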
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  X64OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
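  // Input 1 is the default target; inputs 2, 3, 4, ... hold (case value,
  // target) pairs that are compared one by one.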
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ cmpl(input, Immediate(i.InputInt32(index + 0)));
    __ j(equal, GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}


void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  X64OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
  Label** cases = zone()->NewArray<Label*>(case_count);
  for (int32_t index = 0; index < case_count; ++index) {
    cases[index] = GetLabel(i.InputRpo(index + 2));
  }
  Label* const table = AddJumpTable(cases, case_count);
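  // Bounds-check against the case count (the comparison is unsigned, so
  // negative inputs also take the default target at input 1), then jump
  // indirectly through the table of 8-byte entries emitted by
  // AssembleJumpTable.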
  __ cmpl(input, Immediate(case_count));
  __ j(above_equal, GetLabel(i.InputRpo(1)));
  __ leaq(kScratchRegister, Operand(table));
  __ jmp(Operand(kScratchRegister, input, times_8, 0));
}


void CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
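  // Call the pre-generated deoptimizer entry for this bailout type; the
  // deoptimization id selects the entry within the deoptimizer's table.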
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
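  // Three frame shapes are possible here: C entry frames (kCallAddress)
  // build the frame and save callee-saved registers manually, JS function
  // frames use the standard JS prologue, and stubs that need a frame use the
  // stub prologue.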
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ pushq(rbp);
    __ movq(rbp, rsp);
    int register_save_area_size = 0;
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
    }
    const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
    if (saves_fp != 0) {  // Save callee-saved XMM registers.
      const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
      const int stack_size = saves_fp_count * 16;
      // Adjust the stack pointer.
      __ subp(rsp, Immediate(stack_size));
      // Store the registers on the stack.
      int slot_idx = 0;
      for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
        if (!((1 << i) & saves_fp)) continue;
        __ movdqu(Operand(rsp, 16 * slot_idx), XMMRegister::from_code(i));
        slot_idx++;
      }
      register_save_area_size += stack_size;
    }
    if (register_save_area_size > 0) {
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else if (needs_frame_) {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    // TODO(titzer): cannot address target function == local #-1
    __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
    DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
    stack_slots -= frame()->GetOsrStackSlotCount();
  }

  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      }
      // Restore registers.
      const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
      if (saves_fp != 0) {
        const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
        const int stack_size = saves_fp_count * 16;
        // Load the registers from the stack.
        int slot_idx = 0;
        for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
          if (!((1 << i) & saves_fp)) continue;
          __ movdqu(XMMRegister::from_code(i), Operand(rsp, 16 * slot_idx));
          slot_idx++;
        }
        // Adjust the stack pointer.
        __ addp(rsp, Immediate(stack_size));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
        }
      }
      __ popq(rbp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
    // Canonicalize JSFunction return sites for now.
    if (return_label_.is_bound()) {
      __ jmp(&return_label_);
    } else {
      __ bind(&return_label_);
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      int pop_count = static_cast<int>(descriptor->StackParameterCount());
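      // Drop any callee-cleanup stack parameters as part of the return; rbx
      // serves as a scratch register to preserve the return address while
      // rsp is adjusted.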
      if (pop_count == 0) {
        __ Ret();
      } else {
        __ Ret(pop_count * kPointerSize, rbx);
      }
    }
  } else {
    __ Ret();
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
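      // For a stack-slot destination the constant is first materialized in
      // the scratch register and spilled after the switch below.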
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ Set(dst, src.ToInt32());
          break;
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          Heap::RootListIndex index;
          int offset;
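          // Prefer rematerializing the object from the current frame or the
          // root list over embedding the handle in the generated code.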
          if (IsMaterializableFromFrame(src_object, &offset)) {
            __ movp(dst, Operand(rbp, offset));
          } else if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ Move(dst, src_object);
          }
          break;
        }
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): load of labels on x64.
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, Immediate(src_const));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(kScratchRegister, src_const);
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movaps(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
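    // Swap via the scratch register: load dst, exchange with src in memory,
    // then store the old src value back into dst.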
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movaps(xmm0, src);
    __ movaps(src, dst);
    __ movaps(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap.  We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
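  // Emit one 8-byte (dq) entry per target; AssembleArchTableSwitch indexes
  // this table with times_8 scaling.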
  for (size_t index = 0; index < target_count; ++index) {
    __ dq(targets[index]);
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8