Add %GetCallerJSFunction intrinsic
[platform/upstream/v8.git] src/compiler/x64/code-generator-x64.cc
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


#define kScratchDoubleReg xmm0


// Adds X64-specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Immediate InputImmediate(size_t index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(size_t index, int extra = 0) {
    return ToOperand(instr_->InputAt(index), extra);
  }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Immediate ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
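    // The only double constant that can be encoded as an immediate here is
    // +0.0, whose bit pattern coincides with the int32 immediate 0; the
    // DCHECK below guards exactly that.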
    if (constant.type() == Constant::kFloat64) {
      DCHECK_EQ(0, bit_cast<int64_t>(constant.ToFloat64()));
      return Immediate(0);
    }
    return Immediate(constant.ToInt32());
  }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(
        AllocatedOperand::cast(op)->index(), frame(), extra);
    return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
  }

  static size_t NextOffset(size_t* offset) {
    size_t i = *offset;
    (*offset)++;
    return i;
  }

  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }

  Operand MemoryOperand(size_t* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_M2:
        UNREACHABLE();  // Should use kMode_MR with a more compact encoding instead.
        return Operand(no_reg, 0);
      case kMode_M4:
      case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  Operand MemoryOperand(size_t first_input = 0) {
    return MemoryOperand(&first_input);
  }
};


namespace {

bool HasImmediateInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsImmediate();
}


class OutOfLineLoadZero final : public OutOfLineCode {
 public:
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ xorl(result_, result_); }

 private:
  Register const result_;
};


class OutOfLineLoadNaN final : public OutOfLineCode {
 public:
  OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
      : OutOfLineCode(gen), result_(result) {}

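  // Comparing a register with itself for equality sets every bit, and an
  // all-ones pattern is one of the canonical NaN encodings.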
  void Generate() final { __ pcmpeqd(result_, result_); }

 private:
  XMMRegister const result_;
};


class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
 public:
  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
                             XMMRegister input)
      : OutOfLineCode(gen), result_(result), input_(input) {}

  void Generate() final {
    __ subp(rsp, Immediate(kDoubleSize));
    __ movsd(MemOperand(rsp, 0), input_);
    __ SlowTruncateToI(result_, rsp, 0);
    __ addp(rsp, Immediate(kDoubleSize));
  }

 private:
  Register const result_;
  XMMRegister const input_;
};

}  // namespace


#define ASSEMBLE_UNOP(asm_instr)         \
  do {                                   \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister());  \
    } else {                             \
      __ asm_instr(i.OutputOperand());   \
    }                                    \
  } while (0)


#define ASSEMBLE_BINOP(asm_instr)                              \
  do {                                                         \
    if (HasImmediateInput(instr, 1)) {                         \
      if (instr->InputAt(0)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else {                                                 \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
      }                                                        \
    } else {                                                   \
      if (instr->InputAt(1)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
      } else {                                                 \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
      }                                                        \
    }                                                          \
  } while (0)


#define ASSEMBLE_MULT(asm_instr)                              \
  do {                                                        \
    if (HasImmediateInput(instr, 1)) {                        \
      if (instr->InputAt(0)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0),  \
                     i.InputImmediate(1));                    \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0),   \
                     i.InputImmediate(1));                    \
      }                                                       \
    } else {                                                  \
      if (instr->InputAt(1)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1));  \
      }                                                       \
    }                                                         \
  } while (0)

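// The |width| argument is 5 for 32-bit shifts and 6 for 64-bit ones,
// mirroring how the hardware masks the shift count; i.InputInt##width
// truncates the immediate the same way. Non-immediate counts use the _cl
// instruction forms, which read the count from the cl register.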
#define ASSEMBLE_SHIFT(asm_instr, width)                                   \
  do {                                                                     \
    if (HasImmediateInput(instr, 1)) {                                     \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
      } else {                                                             \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1)));  \
      }                                                                    \
    } else {                                                               \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr##_cl(i.OutputRegister());                             \
      } else {                                                             \
        __ asm_instr##_cl(i.OutputOperand());                              \
      }                                                                    \
    }                                                                      \
  } while (0)


#define ASSEMBLE_MOVX(asm_instr)                            \
  do {                                                      \
    if (instr->addressing_mode() != kMode_None) {           \
      __ asm_instr(i.OutputRegister(), i.MemoryOperand());  \
    } else if (instr->InputAt(0)->IsRegister()) {           \
      __ asm_instr(i.OutputRegister(), i.InputRegister(0)); \
    } else {                                                \
      __ asm_instr(i.OutputRegister(), i.InputOperand(0));  \
    }                                                       \
  } while (0)


#define ASSEMBLE_SSE_BINOP(asm_instr)                                   \
  do {                                                                  \
    if (instr->InputAt(1)->IsDoubleRegister()) {                        \
      __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    } else {                                                            \
      __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1));        \
    }                                                                   \
  } while (0)


#define ASSEMBLE_SSE_UNOP(asm_instr)                                    \
  do {                                                                  \
    if (instr->InputAt(0)->IsDoubleRegister()) {                        \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
    } else {                                                            \
      __ asm_instr(i.OutputDoubleRegister(), i.InputOperand(0));        \
    }                                                                   \
  } while (0)


#define ASSEMBLE_AVX_BINOP(asm_instr)                                  \
  do {                                                                 \
    CpuFeatureScope avx_scope(masm(), AVX);                            \
    if (instr->InputAt(1)->IsDoubleRegister()) {                       \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputDoubleRegister(1));                          \
    } else {                                                           \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputOperand(1));                                 \
    }                                                                  \
  } while (0)

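// The checked memory operations below split the index into a register part
// (index1) and a constant part (index2). With a register length, index2 must
// be zero and a single unsigned compare does the bounds check; with a
// constant length, the inline fast path compares index1 against
// length - index2, and the out-of-line path re-validates the full
// index1 + index2 sum before touching memory.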
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                               \
  do {                                                                       \
    auto result = i.OutputDoubleRegister();                                  \
    auto buffer = i.InputRegister(0);                                        \
    auto index1 = i.InputRegister(1);                                        \
    auto index2 = i.InputInt32(2);                                           \
    OutOfLineCode* ool;                                                      \
    if (instr->InputAt(3)->IsRegister()) {                                   \
      auto length = i.InputRegister(3);                                      \
      DCHECK_EQ(0, index2);                                                  \
      __ cmpl(index1, length);                                               \
      ool = new (zone()) OutOfLineLoadNaN(this, result);                     \
    } else {                                                                 \
      auto length = i.InputInt32(3);                                         \
      DCHECK_LE(index2, length);                                             \
      __ cmpq(index1, Immediate(length - index2));                           \
      class OutOfLineLoadFloat final : public OutOfLineCode {                \
       public:                                                               \
        OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result,           \
                           Register buffer, Register index1, int32_t index2, \
                           int32_t length)                                   \
            : OutOfLineCode(gen),                                            \
              result_(result),                                               \
              buffer_(buffer),                                               \
              index1_(index1),                                               \
              index2_(index2),                                               \
              length_(length) {}                                             \
                                                                             \
        void Generate() final {                                              \
          __ leal(kScratchRegister, Operand(index1_, index2_));              \
          __ pcmpeqd(result_, result_);                                      \
          __ cmpl(kScratchRegister, Immediate(length_));                     \
          __ j(above_equal, exit());                                         \
          __ asm_instr(result_,                                              \
                       Operand(buffer_, kScratchRegister, times_1, 0));      \
        }                                                                    \
                                                                             \
       private:                                                              \
        XMMRegister const result_;                                           \
        Register const buffer_;                                              \
        Register const index1_;                                              \
        int32_t const index2_;                                               \
        int32_t const length_;                                               \
      };                                                                     \
      ool = new (zone())                                                     \
          OutOfLineLoadFloat(this, result, buffer, index1, index2, length);  \
    }                                                                        \
    __ j(above_equal, ool->entry());                                         \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2));          \
    __ bind(ool->exit());                                                    \
  } while (false)


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                               \
  do {                                                                         \
    auto result = i.OutputRegister();                                          \
    auto buffer = i.InputRegister(0);                                          \
    auto index1 = i.InputRegister(1);                                          \
    auto index2 = i.InputInt32(2);                                             \
    OutOfLineCode* ool;                                                        \
    if (instr->InputAt(3)->IsRegister()) {                                     \
      auto length = i.InputRegister(3);                                        \
      DCHECK_EQ(0, index2);                                                    \
      __ cmpl(index1, length);                                                 \
      ool = new (zone()) OutOfLineLoadZero(this, result);                      \
    } else {                                                                   \
      auto length = i.InputInt32(3);                                           \
      DCHECK_LE(index2, length);                                               \
      __ cmpq(index1, Immediate(length - index2));                             \
      class OutOfLineLoadInteger final : public OutOfLineCode {                \
       public:                                                                 \
        OutOfLineLoadInteger(CodeGenerator* gen, Register result,              \
                             Register buffer, Register index1, int32_t index2, \
                             int32_t length)                                   \
            : OutOfLineCode(gen),                                              \
              result_(result),                                                 \
              buffer_(buffer),                                                 \
              index1_(index1),                                                 \
              index2_(index2),                                                 \
              length_(length) {}                                               \
                                                                               \
        void Generate() final {                                                \
          Label oob;                                                           \
          __ leal(kScratchRegister, Operand(index1_, index2_));                \
          __ cmpl(kScratchRegister, Immediate(length_));                       \
          __ j(above_equal, &oob, Label::kNear);                               \
          __ asm_instr(result_,                                                \
                       Operand(buffer_, kScratchRegister, times_1, 0));        \
          __ jmp(exit());                                                      \
          __ bind(&oob);                                                       \
          __ xorl(result_, result_);                                           \
        }                                                                      \
                                                                               \
       private:                                                                \
        Register const result_;                                                \
        Register const buffer_;                                                \
        Register const index1_;                                                \
        int32_t const index2_;                                                 \
        int32_t const length_;                                                 \
      };                                                                       \
      ool = new (zone())                                                       \
          OutOfLineLoadInteger(this, result, buffer, index1, index2, length);  \
    }                                                                          \
    __ j(above_equal, ool->entry());                                           \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2));            \
    __ bind(ool->exit());                                                      \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                              \
  do {                                                                       \
    auto buffer = i.InputRegister(0);                                        \
    auto index1 = i.InputRegister(1);                                        \
    auto index2 = i.InputInt32(2);                                           \
    auto value = i.InputDoubleRegister(4);                                   \
    if (instr->InputAt(3)->IsRegister()) {                                   \
      auto length = i.InputRegister(3);                                      \
      DCHECK_EQ(0, index2);                                                  \
      Label done;                                                            \
      __ cmpl(index1, length);                                               \
      __ j(above_equal, &done, Label::kNear);                                \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
      __ bind(&done);                                                        \
    } else {                                                                 \
      auto length = i.InputInt32(3);                                         \
      DCHECK_LE(index2, length);                                             \
      __ cmpq(index1, Immediate(length - index2));                           \
      class OutOfLineStoreFloat final : public OutOfLineCode {               \
       public:                                                               \
        OutOfLineStoreFloat(CodeGenerator* gen, Register buffer,             \
                            Register index1, int32_t index2, int32_t length, \
                            XMMRegister value)                               \
            : OutOfLineCode(gen),                                            \
              buffer_(buffer),                                               \
              index1_(index1),                                               \
              index2_(index2),                                               \
              length_(length),                                               \
              value_(value) {}                                               \
                                                                             \
        void Generate() final {                                              \
          __ leal(kScratchRegister, Operand(index1_, index2_));              \
          __ cmpl(kScratchRegister, Immediate(length_));                     \
          __ j(above_equal, exit());                                         \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),       \
                       value_);                                              \
        }                                                                    \
                                                                             \
       private:                                                              \
        Register const buffer_;                                              \
        Register const index1_;                                              \
        int32_t const index2_;                                               \
        int32_t const length_;                                               \
        XMMRegister const value_;                                            \
      };                                                                     \
      auto ool = new (zone())                                                \
          OutOfLineStoreFloat(this, buffer, index1, index2, length, value);  \
      __ j(above_equal, ool->entry());                                       \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
      __ bind(ool->exit());                                                  \
    }                                                                        \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value)                  \
  do {                                                                         \
    auto buffer = i.InputRegister(0);                                          \
    auto index1 = i.InputRegister(1);                                          \
    auto index2 = i.InputInt32(2);                                             \
    if (instr->InputAt(3)->IsRegister()) {                                     \
      auto length = i.InputRegister(3);                                        \
      DCHECK_EQ(0, index2);                                                    \
      Label done;                                                              \
      __ cmpl(index1, length);                                                 \
      __ j(above_equal, &done, Label::kNear);                                  \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
      __ bind(&done);                                                          \
    } else {                                                                   \
      auto length = i.InputInt32(3);                                           \
      DCHECK_LE(index2, length);                                               \
      __ cmpq(index1, Immediate(length - index2));                             \
      class OutOfLineStoreInteger final : public OutOfLineCode {               \
       public:                                                                 \
        OutOfLineStoreInteger(CodeGenerator* gen, Register buffer,             \
                              Register index1, int32_t index2, int32_t length, \
                              Value value)                                     \
            : OutOfLineCode(gen),                                              \
              buffer_(buffer),                                                 \
              index1_(index1),                                                 \
              index2_(index2),                                                 \
              length_(length),                                                 \
              value_(value) {}                                                 \
                                                                               \
        void Generate() final {                                                \
          __ leal(kScratchRegister, Operand(index1_, index2_));                \
          __ cmpl(kScratchRegister, Immediate(length_));                       \
          __ j(above_equal, exit());                                           \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),         \
                       value_);                                                \
        }                                                                      \
                                                                               \
       private:                                                                \
        Register const buffer_;                                                \
        Register const index1_;                                                \
        int32_t const index2_;                                                 \
        int32_t const length_;                                                 \
        Value const value_;                                                    \
      };                                                                       \
      auto ool = new (zone())                                                  \
          OutOfLineStoreInteger(this, buffer, index1, index2, length, value);  \
      __ j(above_equal, ool->entry());                                         \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
      __ bind(ool->exit());                                                    \
    }                                                                          \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                \
  do {                                                           \
    if (instr->InputAt(4)->IsRegister()) {                       \
      Register value = i.InputRegister(4);                       \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register);  \
    } else {                                                     \
      Immediate value = i.InputImmediate(4);                     \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
    }                                                            \
  } while (false)


void CodeGenerator::AssembleDeconstructActivationRecord() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
    __ movq(rsp, rbp);
    __ popq(rbp);
    int32_t bytes_to_pop =
        descriptor->IsJSFunctionCall()
            ? static_cast<int32_t>(descriptor->JSParameterCount() *
                                   kPointerSize)
            : 0;
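    // Relocate the return address past the popped arguments: pop it into the
    // slot just above them, then point rsp at that slot.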
    __ popq(Operand(rsp, bytes_to_pop));
    __ addq(rsp, Immediate(bytes_to_pop));
  }
}


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      }
      RecordCallPosition(instr);
      break;
    }
    case kArchTailCallCodeObject: {
      AssembleDeconstructActivationRecord();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ jmp(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ jmp(Operand(reg, entry));
      }
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      RecordCallPosition(instr);
      break;
    }
    case kArchTailCallJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      AssembleDeconstructActivationRecord();
      __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchNop:
      // Don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
      break;
    }
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ movq(i.OutputRegister(), rsp);
      break;
    case kArchFramePointer:
      __ movq(i.OutputRegister(), rbp);
      break;
    case kArchTruncateDoubleToI: {
      auto result = i.OutputRegister();
      auto input = i.InputDoubleRegister(0);
      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
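      // cvttsd2siq produces 0x8000000000000000 when the input is NaN or out
      // of int64 range; cmpq with 1 overflows only for that value, so the
      // overflow flag routes exactly those cases to the slow path.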
      __ cvttsd2siq(result, input);
      __ cmpq(result, Immediate(1));
      __ j(overflow, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kX64Add32:
      ASSEMBLE_BINOP(addl);
      break;
    case kX64Add:
      ASSEMBLE_BINOP(addq);
      break;
    case kX64Sub32:
      ASSEMBLE_BINOP(subl);
      break;
    case kX64Sub:
      ASSEMBLE_BINOP(subq);
      break;
    case kX64And32:
      ASSEMBLE_BINOP(andl);
      break;
    case kX64And:
      ASSEMBLE_BINOP(andq);
      break;
    case kX64Cmp32:
      ASSEMBLE_BINOP(cmpl);
      break;
    case kX64Cmp:
      ASSEMBLE_BINOP(cmpq);
      break;
    case kX64Test32:
      ASSEMBLE_BINOP(testl);
      break;
    case kX64Test:
      ASSEMBLE_BINOP(testq);
      break;
    case kX64Imul32:
      ASSEMBLE_MULT(imull);
      break;
    case kX64Imul:
      ASSEMBLE_MULT(imulq);
      break;
    case kX64ImulHigh32:
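      // One-operand imull multiplies eax by the operand and leaves the high
      // 32 bits of the product in edx; instruction selection fixes the input
      // and output to those registers.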
      if (instr->InputAt(1)->IsRegister()) {
        __ imull(i.InputRegister(1));
      } else {
        __ imull(i.InputOperand(1));
      }
      break;
    case kX64UmulHigh32:
      if (instr->InputAt(1)->IsRegister()) {
        __ mull(i.InputRegister(1));
      } else {
        __ mull(i.InputOperand(1));
      }
      break;
    case kX64Idiv32:
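      // The dividend is implicitly in eax; cdq sign-extends it into edx:eax
      // before the signed division.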
      __ cdq();
      __ idivl(i.InputRegister(1));
      break;
    case kX64Idiv:
      __ cqo();
      __ idivq(i.InputRegister(1));
      break;
    case kX64Udiv32:
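      // Unsigned division also divides edx:eax, so clear the high half of
      // the dividend first.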
      __ xorl(rdx, rdx);
      __ divl(i.InputRegister(1));
      break;
    case kX64Udiv:
      __ xorq(rdx, rdx);
      __ divq(i.InputRegister(1));
      break;
    case kX64Not:
      ASSEMBLE_UNOP(notq);
      break;
    case kX64Not32:
      ASSEMBLE_UNOP(notl);
      break;
    case kX64Neg:
      ASSEMBLE_UNOP(negq);
      break;
    case kX64Neg32:
      ASSEMBLE_UNOP(negl);
      break;
    case kX64Or32:
      ASSEMBLE_BINOP(orl);
      break;
    case kX64Or:
      ASSEMBLE_BINOP(orq);
      break;
    case kX64Xor32:
      ASSEMBLE_BINOP(xorl);
      break;
    case kX64Xor:
      ASSEMBLE_BINOP(xorq);
      break;
    case kX64Shl32:
      ASSEMBLE_SHIFT(shll, 5);
      break;
    case kX64Shl:
      ASSEMBLE_SHIFT(shlq, 6);
      break;
    case kX64Shr32:
      ASSEMBLE_SHIFT(shrl, 5);
      break;
    case kX64Shr:
      ASSEMBLE_SHIFT(shrq, 6);
      break;
    case kX64Sar32:
      ASSEMBLE_SHIFT(sarl, 5);
      break;
    case kX64Sar:
      ASSEMBLE_SHIFT(sarq, 6);
      break;
    case kX64Ror32:
      ASSEMBLE_SHIFT(rorl, 5);
      break;
    case kX64Ror:
      ASSEMBLE_SHIFT(rorq, 6);
      break;
    case kX64Lzcnt32:
      if (instr->InputAt(0)->IsRegister()) {
        __ Lzcntl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Lzcntl(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat32Cmp:
      ASSEMBLE_SSE_BINOP(ucomiss);
      break;
    case kSSEFloat32Add:
      ASSEMBLE_SSE_BINOP(addss);
      break;
    case kSSEFloat32Sub:
      ASSEMBLE_SSE_BINOP(subss);
      break;
    case kSSEFloat32Mul:
      ASSEMBLE_SSE_BINOP(mulss);
      break;
    case kSSEFloat32Div:
      ASSEMBLE_SSE_BINOP(divss);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulss depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kSSEFloat32Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
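      // All-ones shifted right by 33 leaves 0x7FFFFFFF in the low word of
      // each 64-bit lane; the andps then clears the float's sign bit.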
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 33);
      __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat32Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 31);
      __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat32Sqrt:
      ASSEMBLE_SSE_UNOP(sqrtss);
      break;
    case kSSEFloat32Max:
      ASSEMBLE_SSE_BINOP(maxss);
      break;
    case kSSEFloat32Min:
      ASSEMBLE_SSE_BINOP(minss);
      break;
    case kSSEFloat32ToFloat64:
      ASSEMBLE_SSE_UNOP(cvtss2sd);
      break;
    case kSSEFloat64Cmp:
      ASSEMBLE_SSE_BINOP(ucomisd);
      break;
    case kSSEFloat64Add:
      ASSEMBLE_SSE_BINOP(addsd);
      break;
    case kSSEFloat64Sub:
      ASSEMBLE_SSE_BINOP(subsd);
      break;
    case kSSEFloat64Mul:
      ASSEMBLE_SSE_BINOP(mulsd);
      break;
    case kSSEFloat64Div:
      ASSEMBLE_SSE_BINOP(divsd);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulsd depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kSSEFloat64Mod: {
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating-point control word is set to ignore them all.
      __ fprem();
      // The following two instructions implicitly use rax.
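      // fnstsw_ax copies the FPU status word into ax; sahf (or the manual
      // shift-and-popfq sequence below) moves its C2 bit into the parity
      // flag, which stays set while fprem still has reduction work to do.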
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF)) {
        CpuFeatureScope sahf_scope(masm(), SAHF);
        __ sahf();
      } else {
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Max:
      ASSEMBLE_SSE_BINOP(maxsd);
      break;
    case kSSEFloat64Min:
      ASSEMBLE_SSE_BINOP(minsd);
      break;
    case kSSEFloat64Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 1);
      __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat64Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 63);
      __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat64Sqrt:
      ASSEMBLE_SSE_UNOP(sqrtsd);
      break;
    case kSSEFloat64Round: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      RoundingMode const mode =
          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
      break;
    }
    case kSSEFloat64ToFloat32:
      ASSEMBLE_SSE_UNOP(cvtsd2ss);
      break;
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToUint32: {
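      // Truncate via the 64-bit conversion so that inputs in [2^31, 2^32)
      // don't overflow; the result register then holds the unsigned value
      // zero-extended to 64 bits.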
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kSSEInt32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEUint32ToFloat64:
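      // movl zero-extends into the scratch register, so the signed 64-bit
      // conversion below is exact for every uint32 input.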
      if (instr->InputAt(0)->IsRegister()) {
        __ movl(kScratchRegister, i.InputRegister(0));
      } else {
        __ movl(kScratchRegister, i.InputOperand(0));
      }
      __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
      break;
    case kSSEFloat64ExtractLowWord32:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movl(i.OutputRegister(), i.InputOperand(0));
      } else {
        __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
      }
      break;
    case kSSEFloat64ExtractHighWord32:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movl(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
      } else {
        __ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
      }
      break;
    case kSSEFloat64InsertLowWord32:
      if (instr->InputAt(1)->IsRegister()) {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 0);
      } else {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
      }
      break;
    case kSSEFloat64InsertHighWord32:
      if (instr->InputAt(1)->IsRegister()) {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 1);
      } else {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
      }
      break;
    case kSSEFloat64LoadLowWord32:
      if (instr->InputAt(0)->IsRegister()) {
        __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ movd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kAVXFloat32Cmp: {
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ vucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      break;
    }
    case kAVXFloat32Add:
      ASSEMBLE_AVX_BINOP(vaddss);
      break;
    case kAVXFloat32Sub:
      ASSEMBLE_AVX_BINOP(vsubss);
      break;
    case kAVXFloat32Mul:
      ASSEMBLE_AVX_BINOP(vmulss);
      break;
    case kAVXFloat32Div:
      ASSEMBLE_AVX_BINOP(vdivss);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulss depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kAVXFloat32Max:
      ASSEMBLE_AVX_BINOP(vmaxss);
      break;
    case kAVXFloat32Min:
      ASSEMBLE_AVX_BINOP(vminss);
      break;
    case kAVXFloat64Cmp: {
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ vucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      break;
    }
    case kAVXFloat64Add:
      ASSEMBLE_AVX_BINOP(vaddsd);
      break;
    case kAVXFloat64Sub:
      ASSEMBLE_AVX_BINOP(vsubsd);
      break;
    case kAVXFloat64Mul:
      ASSEMBLE_AVX_BINOP(vmulsd);
      break;
    case kAVXFloat64Div:
      ASSEMBLE_AVX_BINOP(vdivsd);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulsd depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kAVXFloat64Max:
      ASSEMBLE_AVX_BINOP(vmaxsd);
      break;
    case kAVXFloat64Min:
      ASSEMBLE_AVX_BINOP(vminsd);
      break;
    case kAVXFloat32Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 33);
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputDoubleRegister(0));
      } else {
        __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputOperand(0));
      }
      break;
    }
    case kAVXFloat32Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 31);
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputDoubleRegister(0));
      } else {
        __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputOperand(0));
      }
      break;
    }
    case kAVXFloat64Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 1);
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputDoubleRegister(0));
      } else {
        __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputOperand(0));
      }
      break;
    }
    case kAVXFloat64Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 63);
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputDoubleRegister(0));
      } else {
        __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputOperand(0));
      }
      break;
    }
    case kX64Movsxbl:
      ASSEMBLE_MOVX(movsxbl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movzxbl:
      ASSEMBLE_MOVX(movzxbl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movb: {
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
      } else {
        __ movb(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movsxwl:
      ASSEMBLE_MOVX(movsxwl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movzxwl:
      ASSEMBLE_MOVX(movzxwl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movw: {
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
      } else {
        __ movw(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movl:
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          if (instr->InputAt(0)->IsRegister()) {
            __ movl(i.OutputRegister(), i.InputRegister(0));
          } else {
            __ movl(i.OutputRegister(), i.InputOperand(0));
          }
        } else {
          __ movl(i.OutputRegister(), i.MemoryOperand());
        }
        __ AssertZeroExtended(i.OutputRegister());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
        } else {
          __ movl(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movsxlq:
      ASSEMBLE_MOVX(movsxlq);
      break;
    case kX64Movq:
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
        } else {
          __ movq(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movss:
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Movsd:
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Lea32: {
      AddressingMode mode = AddressingModeField::decode(instr->opcode());
      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
      // and addressing mode just happen to work out. The "addl"/"subl" forms
      // in these cases are faster based on measurements.
      if (i.InputRegister(0).is(i.OutputRegister())) {
        if (mode == kMode_MRI) {
          int32_t constant_summand = i.InputInt32(1);
          if (constant_summand > 0) {
            __ addl(i.OutputRegister(), Immediate(constant_summand));
          } else if (constant_summand < 0) {
            __ subl(i.OutputRegister(), Immediate(-constant_summand));
          }
        } else if (mode == kMode_MR1) {
          if (i.InputRegister(1).is(i.OutputRegister())) {
            __ shll(i.OutputRegister(), Immediate(1));
          } else {
            __ leal(i.OutputRegister(), i.MemoryOperand());
          }
        } else if (mode == kMode_M2) {
          __ shll(i.OutputRegister(), Immediate(1));
        } else if (mode == kMode_M4) {
          __ shll(i.OutputRegister(), Immediate(2));
        } else if (mode == kMode_M8) {
          __ shll(i.OutputRegister(), Immediate(3));
        } else {
          __ leal(i.OutputRegister(), i.MemoryOperand());
        }
      } else {
        __ leal(i.OutputRegister(), i.MemoryOperand());
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kX64Lea:
      __ leaq(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Dec32:
      __ decl(i.OutputRegister());
      break;
    case kX64Inc32:
      __ incl(i.OutputRegister());
      break;
    case kX64Push:
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
      } else {
        if (instr->InputAt(0)->IsRegister()) {
          __ pushq(i.InputRegister(0));
        } else {
          __ pushq(i.InputOperand(0));
        }
      }
      break;
    case kX64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register value = i.InputRegister(2);
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      if (HasImmediateInput(instr, 1)) {
        int index = i.InputInt32(1);
        Register scratch = i.TempRegister(1);
        __ movq(Operand(object, index), value);
        __ RecordWriteContextSlot(object, index, value, scratch, mode);
      } else {
        Register index = i.InputRegister(1);
        __ movq(Operand(object, index, times_1, 0), value);
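        // RecordWrite wants the absolute slot address, so fold object + index
        // into the index register before calling it.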
1231         __ leaq(index, Operand(object, index, times_1, 0));
1232         __ RecordWrite(object, index, value, mode);
1233       }
1234       break;
1235     }
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(movb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(movw);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(movl);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(movss);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
      break;
    case kX64StackCheck:
      __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
      break;
  }
}  // NOLINT(readability/fn_size)


// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  X64OperandConverter i(this, instr);
  Label::Distance flabel_distance =
      branch->fallthru ? Label::kNear : Label::kFar;
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  switch (branch->condition) {
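    // For floating-point comparisons an unordered result (a NaN operand)
    // sets the parity flag, so the kUnordered* cases dispatch on parity_even
    // before falling through to the corresponding ordered check.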
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
  }
  if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}


void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}


// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = no_condition;
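  // For the unordered conditions, parity_odd (an ordered result) skips ahead
  // to the common setcc path at check; a NaN operand instead stores the
  // fixed answer (0 for equal, 1 for not-equal) and jumps to done.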
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
  }
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}

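// Lookup switch: input 0 holds the value to switch on, input 1 names the
// default block, and the remaining inputs form (case value, target block)
// pairs that compile to the linear compare-and-branch chain below.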
void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  X64OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ cmpl(input, Immediate(i.InputInt32(index + 0)));
    __ j(equal, GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}

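// Table switch: bounds-check the input against the case count, then jump
// indirectly through a table of 64-bit label addresses emitted by
// AssembleJumpTable. The emitted sequence is roughly (registers
// illustrative):
//   cmpl input, case_count
//   jae  default_block
//   leaq scratch, [table]
//   jmp  [scratch + input * 8]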
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  X64OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
  Label** cases = zone()->NewArray<Label*>(case_count);
  for (int32_t index = 0; index < case_count; ++index) {
    cases[index] = GetLabel(i.InputRpo(index + 2));
  }
  Label* const table = AddJumpTable(cases, case_count);
  __ cmpl(input, Immediate(case_count));
  __ j(above_equal, GetLabel(i.InputRpo(1)));
  __ leaq(kScratchRegister, Operand(table));
  __ jmp(Operand(kScratchRegister, input, times_8, 0));
}

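// Emits a call to the pre-built deoptimization entry for the given deopt id
// and bailout type (see Deoptimizer::GetDeoptimizationEntry).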
void CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}

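// Builds the frame for the incoming call kind: a plain C frame (rbp plus any
// callee-saved registers) for kCallAddress, the standard JS function
// prologue for JS calls, and a stub frame when one is required. Afterwards
// the spill slot area is reserved, shrunk for OSR entries whose slots are
// inherited from the unoptimized frame.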
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ pushq(rbp);
    __ movq(rbp, rsp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else if (needs_frame_) {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    // TODO(titzer): cannot address target function == local #-1
    __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
    DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
    stack_slots -= frame()->GetOsrStackSlotCount();
  }

  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
        }
      }
      __ popq(rbp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ Ret(pop_count * kPointerSize, rbx);
  } else {
    __ ret(0);
  }
}

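// Realizes a single move for the gap resolver. kScratchRegister and xmm0
// serve as fixed scratch registers here, so memory-to-memory moves can
// always be bounced through a register without spilling.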
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ Set(dst, src.ToInt32());
          break;
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          Heap::RootListIndex index;
          int offset;
          if (IsMaterializableFromFrame(src_object, &offset)) {
            __ movp(dst, Operand(rbp, offset));
          } else if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ Move(dst, src_object);
          }
          break;
        }
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): load of labels on x64.
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, Immediate(src_const));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(kScratchRegister, src_const);
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movaps(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}

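// Realizes a single swap for the gap resolver. Note that xchgq with a memory
// operand carries an implicit LOCK prefix on x86, so the stack-slot cases
// below are comparatively expensive.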
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movaps(xmm0, src);
    __ movaps(src, dst);
    __ movaps(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap.  We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}

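// Emits the body of a jump table: one 64-bit absolute label address per
// target, matching the "times_8" indexed jump in AssembleArchTableSwitch.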
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  for (size_t index = 0; index < target_count; ++index) {
    __ dq(targets[index]);
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }

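// Guarantees at least Deoptimizer::patch_size() bytes of instructions
// between consecutive lazy-deopt sites, so the deoptimizer can later patch
// a call sequence over this site without clobbering the previous one.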
void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  MarkLazyDeoptSite();
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8