src/compiler/x64/code-generator-x64.cc
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

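// Note: the "__" shorthand below is the usual V8 convention for emitting
// code through the macro assembler; "__ movq(...)" expands to
// "masm()->movq(...)".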
#define __ masm()->


#define kScratchDoubleReg xmm0


// Adds X64-specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Immediate InputImmediate(size_t index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(size_t index, int extra = 0) {
    return ToOperand(instr_->InputAt(index), extra);
  }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Immediate ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    if (constant.type() == Constant::kFloat64) {
      DCHECK_EQ(0, bit_cast<int64_t>(constant.ToFloat64()));
      return Immediate(0);
    }
    return Immediate(constant.ToInt32());
  }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    FrameOffset offset =
        linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
    return Operand(offset.from_stack_pointer() ? rsp : rbp,
                   offset.offset() + extra);
  }

  static size_t NextOffset(size_t* offset) {
    size_t i = *offset;
    (*offset)++;
    return i;
  }

  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }

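  // Decodes this instruction's addressing mode into an x64 Operand. The mode
  // names describe the operand shape: "M" is a memory access, "R" a base
  // register, a digit the index-register scale, and a trailing "I" an
  // immediate displacement; e.g. kMode_MR4I is base + index * 4 + immediate.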
  Operand MemoryOperand(size_t* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_M2:
        UNREACHABLE();  // Should use kMode_MR with a more compact encoding instead.
        return Operand(no_reg, 0);
      case kMode_M4:
      case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  Operand MemoryOperand(size_t first_input = 0) {
    return MemoryOperand(&first_input);
  }
};


namespace {

bool HasImmediateInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsImmediate();
}


class OutOfLineLoadZero final : public OutOfLineCode {
 public:
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ xorl(result_, result_); }

 private:
  Register const result_;
};


class OutOfLineLoadNaN final : public OutOfLineCode {
 public:
  OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ pcmpeqd(result_, result_); }

 private:
  XMMRegister const result_;
};


class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
 public:
  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
                             XMMRegister input)
      : OutOfLineCode(gen), result_(result), input_(input) {}

  void Generate() final {
    __ subp(rsp, Immediate(kDoubleSize));
    __ movsd(MemOperand(rsp, 0), input_);
    __ SlowTruncateToI(result_, rsp, 0);
    __ addp(rsp, Immediate(kDoubleSize));
  }

 private:
  Register const result_;
  XMMRegister const input_;
};

}  // namespace


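// The ASSEMBLE_* macros below expand a single architecture-independent
// instruction into the matching masm() call, dispatching on whether each
// input/output ended up in a register, a stack slot (Operand), or an
// immediate after register allocation.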
#define ASSEMBLE_UNOP(asm_instr)         \
  do {                                   \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister());  \
    } else {                             \
      __ asm_instr(i.OutputOperand());   \
    }                                    \
  } while (0)


#define ASSEMBLE_BINOP(asm_instr)                              \
  do {                                                         \
    if (HasImmediateInput(instr, 1)) {                         \
      if (instr->InputAt(0)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else {                                                 \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
      }                                                        \
    } else {                                                   \
      if (instr->InputAt(1)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
      } else {                                                 \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
      }                                                        \
    }                                                          \
  } while (0)


#define ASSEMBLE_MULT(asm_instr)                              \
  do {                                                        \
    if (HasImmediateInput(instr, 1)) {                        \
      if (instr->InputAt(0)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0),  \
                     i.InputImmediate(1));                    \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0),   \
                     i.InputImmediate(1));                    \
      }                                                       \
    } else {                                                  \
      if (instr->InputAt(1)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1));  \
      }                                                       \
    }                                                         \
  } while (0)


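// "width" is the bit width of the immediate shift count: 5 bits for 32-bit
// shifts, 6 bits for 64-bit shifts, matching the hardware's masking of the
// count. Non-immediate shift counts are taken from cl, as the ISA requires.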
#define ASSEMBLE_SHIFT(asm_instr, width)                                   \
  do {                                                                     \
    if (HasImmediateInput(instr, 1)) {                                     \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
      } else {                                                             \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1)));  \
      }                                                                    \
    } else {                                                               \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr##_cl(i.OutputRegister());                             \
      } else {                                                             \
        __ asm_instr##_cl(i.OutputOperand());                              \
      }                                                                    \
    }                                                                      \
  } while (0)


#define ASSEMBLE_MOVX(asm_instr)                            \
  do {                                                      \
    if (instr->addressing_mode() != kMode_None) {           \
      __ asm_instr(i.OutputRegister(), i.MemoryOperand());  \
    } else if (instr->InputAt(0)->IsRegister()) {           \
      __ asm_instr(i.OutputRegister(), i.InputRegister(0)); \
    } else {                                                \
      __ asm_instr(i.OutputRegister(), i.InputOperand(0));  \
    }                                                       \
  } while (0)


#define ASSEMBLE_SSE_BINOP(asm_instr)                                   \
  do {                                                                  \
    if (instr->InputAt(1)->IsDoubleRegister()) {                        \
      __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    } else {                                                            \
      __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1));        \
    }                                                                   \
  } while (0)


#define ASSEMBLE_SSE_UNOP(asm_instr)                                    \
  do {                                                                  \
    if (instr->InputAt(0)->IsDoubleRegister()) {                        \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
    } else {                                                            \
      __ asm_instr(i.OutputDoubleRegister(), i.InputOperand(0));        \
    }                                                                   \
  } while (0)


#define ASSEMBLE_AVX_BINOP(asm_instr)                                  \
  do {                                                                 \
    CpuFeatureScope avx_scope(masm(), AVX);                            \
    if (instr->InputAt(1)->IsDoubleRegister()) {                       \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputDoubleRegister(1));                          \
    } else {                                                           \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputOperand(1));                                 \
    }                                                                  \
  } while (0)


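// The checked load/store macros below guard accesses with a bounds check
// against the buffer length: out-of-bounds loads yield NaN (floats) or zero
// (integers) via out-of-line code, and out-of-bounds stores are skipped.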
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                               \
  do {                                                                       \
    auto result = i.OutputDoubleRegister();                                  \
    auto buffer = i.InputRegister(0);                                        \
    auto index1 = i.InputRegister(1);                                        \
    auto index2 = i.InputInt32(2);                                           \
    OutOfLineCode* ool;                                                      \
    if (instr->InputAt(3)->IsRegister()) {                                   \
      auto length = i.InputRegister(3);                                      \
      DCHECK_EQ(0, index2);                                                  \
      __ cmpl(index1, length);                                               \
      ool = new (zone()) OutOfLineLoadNaN(this, result);                     \
    } else {                                                                 \
      auto length = i.InputInt32(3);                                         \
      DCHECK_LE(index2, length);                                             \
      __ cmpq(index1, Immediate(length - index2));                           \
      class OutOfLineLoadFloat final : public OutOfLineCode {                \
       public:                                                               \
        OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result,           \
                           Register buffer, Register index1, int32_t index2, \
                           int32_t length)                                   \
            : OutOfLineCode(gen),                                            \
              result_(result),                                               \
              buffer_(buffer),                                               \
              index1_(index1),                                               \
              index2_(index2),                                               \
              length_(length) {}                                             \
                                                                             \
        void Generate() final {                                              \
          __ leal(kScratchRegister, Operand(index1_, index2_));              \
          __ pcmpeqd(result_, result_);                                      \
          __ cmpl(kScratchRegister, Immediate(length_));                     \
          __ j(above_equal, exit());                                         \
          __ asm_instr(result_,                                              \
                       Operand(buffer_, kScratchRegister, times_1, 0));      \
        }                                                                    \
                                                                             \
       private:                                                              \
        XMMRegister const result_;                                           \
        Register const buffer_;                                              \
        Register const index1_;                                              \
        int32_t const index2_;                                               \
        int32_t const length_;                                               \
      };                                                                     \
      ool = new (zone())                                                     \
          OutOfLineLoadFloat(this, result, buffer, index1, index2, length);  \
    }                                                                        \
    __ j(above_equal, ool->entry());                                         \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2));          \
    __ bind(ool->exit());                                                    \
  } while (false)


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                               \
  do {                                                                         \
    auto result = i.OutputRegister();                                          \
    auto buffer = i.InputRegister(0);                                          \
    auto index1 = i.InputRegister(1);                                          \
    auto index2 = i.InputInt32(2);                                             \
    OutOfLineCode* ool;                                                        \
    if (instr->InputAt(3)->IsRegister()) {                                     \
      auto length = i.InputRegister(3);                                        \
      DCHECK_EQ(0, index2);                                                    \
      __ cmpl(index1, length);                                                 \
      ool = new (zone()) OutOfLineLoadZero(this, result);                      \
    } else {                                                                   \
      auto length = i.InputInt32(3);                                           \
      DCHECK_LE(index2, length);                                               \
      __ cmpq(index1, Immediate(length - index2));                             \
      class OutOfLineLoadInteger final : public OutOfLineCode {                \
       public:                                                                 \
        OutOfLineLoadInteger(CodeGenerator* gen, Register result,              \
                             Register buffer, Register index1, int32_t index2, \
                             int32_t length)                                   \
            : OutOfLineCode(gen),                                              \
              result_(result),                                                 \
              buffer_(buffer),                                                 \
              index1_(index1),                                                 \
              index2_(index2),                                                 \
              length_(length) {}                                               \
                                                                               \
        void Generate() final {                                                \
          Label oob;                                                           \
          __ leal(kScratchRegister, Operand(index1_, index2_));                \
          __ cmpl(kScratchRegister, Immediate(length_));                       \
          __ j(above_equal, &oob, Label::kNear);                               \
          __ asm_instr(result_,                                                \
                       Operand(buffer_, kScratchRegister, times_1, 0));        \
          __ jmp(exit());                                                      \
          __ bind(&oob);                                                       \
          __ xorl(result_, result_);                                           \
        }                                                                      \
                                                                               \
       private:                                                                \
        Register const result_;                                                \
        Register const buffer_;                                                \
        Register const index1_;                                                \
        int32_t const index2_;                                                 \
        int32_t const length_;                                                 \
      };                                                                       \
      ool = new (zone())                                                       \
          OutOfLineLoadInteger(this, result, buffer, index1, index2, length);  \
    }                                                                          \
    __ j(above_equal, ool->entry());                                           \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2));            \
    __ bind(ool->exit());                                                      \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                              \
  do {                                                                       \
    auto buffer = i.InputRegister(0);                                        \
    auto index1 = i.InputRegister(1);                                        \
    auto index2 = i.InputInt32(2);                                           \
    auto value = i.InputDoubleRegister(4);                                   \
    if (instr->InputAt(3)->IsRegister()) {                                   \
      auto length = i.InputRegister(3);                                      \
      DCHECK_EQ(0, index2);                                                  \
      Label done;                                                            \
      __ cmpl(index1, length);                                               \
      __ j(above_equal, &done, Label::kNear);                                \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
      __ bind(&done);                                                        \
    } else {                                                                 \
      auto length = i.InputInt32(3);                                         \
      DCHECK_LE(index2, length);                                             \
      __ cmpq(index1, Immediate(length - index2));                           \
      class OutOfLineStoreFloat final : public OutOfLineCode {               \
       public:                                                               \
        OutOfLineStoreFloat(CodeGenerator* gen, Register buffer,             \
                            Register index1, int32_t index2, int32_t length, \
                            XMMRegister value)                               \
            : OutOfLineCode(gen),                                            \
              buffer_(buffer),                                               \
              index1_(index1),                                               \
              index2_(index2),                                               \
              length_(length),                                               \
              value_(value) {}                                               \
                                                                             \
        void Generate() final {                                              \
          __ leal(kScratchRegister, Operand(index1_, index2_));              \
          __ cmpl(kScratchRegister, Immediate(length_));                     \
          __ j(above_equal, exit());                                         \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),       \
                       value_);                                              \
        }                                                                    \
                                                                             \
       private:                                                              \
        Register const buffer_;                                              \
        Register const index1_;                                              \
        int32_t const index2_;                                               \
        int32_t const length_;                                               \
        XMMRegister const value_;                                            \
      };                                                                     \
      auto ool = new (zone())                                                \
          OutOfLineStoreFloat(this, buffer, index1, index2, length, value);  \
      __ j(above_equal, ool->entry());                                       \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
      __ bind(ool->exit());                                                  \
    }                                                                        \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value)                  \
  do {                                                                         \
    auto buffer = i.InputRegister(0);                                          \
    auto index1 = i.InputRegister(1);                                          \
    auto index2 = i.InputInt32(2);                                             \
    if (instr->InputAt(3)->IsRegister()) {                                     \
      auto length = i.InputRegister(3);                                        \
      DCHECK_EQ(0, index2);                                                    \
      Label done;                                                              \
      __ cmpl(index1, length);                                                 \
      __ j(above_equal, &done, Label::kNear);                                  \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
      __ bind(&done);                                                          \
    } else {                                                                   \
      auto length = i.InputInt32(3);                                           \
      DCHECK_LE(index2, length);                                               \
      __ cmpq(index1, Immediate(length - index2));                             \
      class OutOfLineStoreInteger final : public OutOfLineCode {               \
       public:                                                                 \
        OutOfLineStoreInteger(CodeGenerator* gen, Register buffer,             \
                              Register index1, int32_t index2, int32_t length, \
                              Value value)                                     \
            : OutOfLineCode(gen),                                              \
              buffer_(buffer),                                                 \
              index1_(index1),                                                 \
              index2_(index2),                                                 \
              length_(length),                                                 \
              value_(value) {}                                                 \
                                                                               \
        void Generate() final {                                                \
          __ leal(kScratchRegister, Operand(index1_, index2_));                \
          __ cmpl(kScratchRegister, Immediate(length_));                       \
          __ j(above_equal, exit());                                           \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),         \
                       value_);                                                \
        }                                                                      \
                                                                               \
       private:                                                                \
        Register const buffer_;                                                \
        Register const index1_;                                                \
        int32_t const index2_;                                                 \
        int32_t const length_;                                                 \
        Value const value_;                                                    \
      };                                                                       \
      auto ool = new (zone())                                                  \
          OutOfLineStoreInteger(this, buffer, index1, index2, length, value);  \
      __ j(above_equal, ool->entry());                                         \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
      __ bind(ool->exit());                                                    \
    }                                                                          \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                \
  do {                                                           \
    if (instr->InputAt(4)->IsRegister()) {                       \
      Register value = i.InputRegister(4);                       \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register);  \
    } else {                                                     \
      Immediate value = i.InputImmediate(4);                     \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
    }                                                            \
  } while (false)


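// Tears down the current activation record (restoring rsp and popping the
// saved rbp) if a frame was built, so that the tail calls below run with the
// caller's frame on top of the stack.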
void CodeGenerator::AssembleDeconstructActivationRecord() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
    __ movq(rsp, rbp);
    __ popq(rbp);
  }
}


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      }
      RecordCallPosition(instr);
      break;
    }
    case kArchTailCallCodeObject: {
      AssembleDeconstructActivationRecord();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ jmp(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        __ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
        __ jmp(reg);
      }
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      RecordCallPosition(instr);
      break;
    }
    case kArchTailCallJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      AssembleDeconstructActivationRecord();
      __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
      break;
    }
    case kArchPrepareCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters);
      break;
    }
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (HasImmediateInput(instr, 0)) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
      break;
    }
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ movq(i.OutputRegister(), rsp);
      break;
    case kArchFramePointer:
      __ movq(i.OutputRegister(), rbp);
      break;
    case kArchTruncateDoubleToI: {
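      // cvttsd2siq produces 0x8000000000000000 when the input cannot be
      // converted (NaN or out of int64 range). That is the only value for
      // which subtracting 1 overflows, so cmpq/j(overflow) dispatches exactly
      // the failure case to the out-of-line slow path.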
      auto result = i.OutputRegister();
      auto input = i.InputDoubleRegister(0);
      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
      __ cvttsd2siq(result, input);
      __ cmpq(result, Immediate(1));
      __ j(overflow, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kX64Add32:
      ASSEMBLE_BINOP(addl);
      break;
    case kX64Add:
      ASSEMBLE_BINOP(addq);
      break;
    case kX64Sub32:
      ASSEMBLE_BINOP(subl);
      break;
    case kX64Sub:
      ASSEMBLE_BINOP(subq);
      break;
    case kX64And32:
      ASSEMBLE_BINOP(andl);
      break;
    case kX64And:
      ASSEMBLE_BINOP(andq);
      break;
    case kX64Cmp32:
      ASSEMBLE_BINOP(cmpl);
      break;
    case kX64Cmp:
      ASSEMBLE_BINOP(cmpq);
      break;
    case kX64Test32:
      ASSEMBLE_BINOP(testl);
      break;
    case kX64Test:
      ASSEMBLE_BINOP(testq);
      break;
    case kX64Imul32:
      ASSEMBLE_MULT(imull);
      break;
    case kX64Imul:
      ASSEMBLE_MULT(imulq);
      break;
    case kX64ImulHigh32:
      if (instr->InputAt(1)->IsRegister()) {
        __ imull(i.InputRegister(1));
      } else {
        __ imull(i.InputOperand(1));
      }
      break;
    case kX64UmulHigh32:
      if (instr->InputAt(1)->IsRegister()) {
        __ mull(i.InputRegister(1));
      } else {
        __ mull(i.InputOperand(1));
      }
      break;
    case kX64Idiv32:
      __ cdq();
      __ idivl(i.InputRegister(1));
      break;
    case kX64Idiv:
      __ cqo();
      __ idivq(i.InputRegister(1));
      break;
    case kX64Udiv32:
      __ xorl(rdx, rdx);
      __ divl(i.InputRegister(1));
      break;
    case kX64Udiv:
      __ xorq(rdx, rdx);
      __ divq(i.InputRegister(1));
      break;
    case kX64Not:
      ASSEMBLE_UNOP(notq);
      break;
    case kX64Not32:
      ASSEMBLE_UNOP(notl);
      break;
    case kX64Neg:
      ASSEMBLE_UNOP(negq);
      break;
    case kX64Neg32:
      ASSEMBLE_UNOP(negl);
      break;
    case kX64Or32:
      ASSEMBLE_BINOP(orl);
      break;
    case kX64Or:
      ASSEMBLE_BINOP(orq);
      break;
    case kX64Xor32:
      ASSEMBLE_BINOP(xorl);
      break;
    case kX64Xor:
      ASSEMBLE_BINOP(xorq);
      break;
    case kX64Shl32:
      ASSEMBLE_SHIFT(shll, 5);
      break;
    case kX64Shl:
      ASSEMBLE_SHIFT(shlq, 6);
      break;
    case kX64Shr32:
      ASSEMBLE_SHIFT(shrl, 5);
      break;
    case kX64Shr:
      ASSEMBLE_SHIFT(shrq, 6);
      break;
    case kX64Sar32:
      ASSEMBLE_SHIFT(sarl, 5);
      break;
    case kX64Sar:
      ASSEMBLE_SHIFT(sarq, 6);
      break;
    case kX64Ror32:
      ASSEMBLE_SHIFT(rorl, 5);
      break;
    case kX64Ror:
      ASSEMBLE_SHIFT(rorq, 6);
      break;
    case kX64Lzcnt32:
      if (instr->InputAt(0)->IsRegister()) {
        __ Lzcntl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Lzcntl(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat32Cmp:
      ASSEMBLE_SSE_BINOP(ucomiss);
      break;
    case kSSEFloat32Add:
      ASSEMBLE_SSE_BINOP(addss);
      break;
    case kSSEFloat32Sub:
      ASSEMBLE_SSE_BINOP(subss);
      break;
    case kSSEFloat32Mul:
      ASSEMBLE_SSE_BINOP(mulss);
      break;
    case kSSEFloat32Div:
      ASSEMBLE_SSE_BINOP(divss);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulss depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kSSEFloat32Abs: {
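      // Build a 0x7fffffff mask in the scratch register (all ones, then
      // shift the sign bit out of the low 32-bit lane) and use it to clear
      // the sign bit of the input.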
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 33);
      __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat32Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 31);
      __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat32Sqrt:
      ASSEMBLE_SSE_UNOP(sqrtss);
      break;
    case kSSEFloat32Max:
      ASSEMBLE_SSE_BINOP(maxss);
      break;
    case kSSEFloat32Min:
      ASSEMBLE_SSE_BINOP(minss);
      break;
    case kSSEFloat32ToFloat64:
      ASSEMBLE_SSE_UNOP(cvtss2sd);
      break;
    case kSSEFloat64Cmp:
      ASSEMBLE_SSE_BINOP(ucomisd);
      break;
    case kSSEFloat64Add:
      ASSEMBLE_SSE_BINOP(addsd);
      break;
    case kSSEFloat64Sub:
      ASSEMBLE_SSE_BINOP(subsd);
      break;
    case kSSEFloat64Mul:
      ASSEMBLE_SSE_BINOP(mulsd);
      break;
    case kSSEFloat64Div:
      ASSEMBLE_SSE_BINOP(divsd);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulsd depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kSSEFloat64Mod: {
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming the
      // floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instructions implicitly use rax.
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF)) {
        CpuFeatureScope sahf_scope(masm(), SAHF);
        __ sahf();
      } else {
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
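      // The FPU's C2 flag (set while the remainder reduction is incomplete)
      // now sits in the parity bit of EFLAGS, so loop while it is set.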
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Max:
      ASSEMBLE_SSE_BINOP(maxsd);
      break;
    case kSSEFloat64Min:
      ASSEMBLE_SSE_BINOP(minsd);
      break;
    case kSSEFloat64Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 1);
      __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat64Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 63);
      __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat64Sqrt:
      ASSEMBLE_SSE_UNOP(sqrtsd);
      break;
    case kSSEFloat64Round: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      RoundingMode const mode =
          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
      break;
    }
    case kSSEFloat64ToFloat32:
      ASSEMBLE_SSE_UNOP(cvtsd2ss);
      break;
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToUint32: {
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kSSEInt32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEUint32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ movl(kScratchRegister, i.InputRegister(0));
      } else {
        __ movl(kScratchRegister, i.InputOperand(0));
      }
      __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
      break;
    case kSSEFloat64ExtractLowWord32:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movl(i.OutputRegister(), i.InputOperand(0));
      } else {
        __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
      }
      break;
    case kSSEFloat64ExtractHighWord32:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movl(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
      } else {
        __ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
      }
      break;
    case kSSEFloat64InsertLowWord32:
      if (instr->InputAt(1)->IsRegister()) {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 0);
      } else {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
      }
      break;
    case kSSEFloat64InsertHighWord32:
      if (instr->InputAt(1)->IsRegister()) {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 1);
      } else {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
      }
      break;
    case kSSEFloat64LoadLowWord32:
      if (instr->InputAt(0)->IsRegister()) {
        __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ movd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kAVXFloat32Cmp: {
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ vucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      break;
    }
    case kAVXFloat32Add:
      ASSEMBLE_AVX_BINOP(vaddss);
      break;
    case kAVXFloat32Sub:
      ASSEMBLE_AVX_BINOP(vsubss);
      break;
    case kAVXFloat32Mul:
      ASSEMBLE_AVX_BINOP(vmulss);
      break;
    case kAVXFloat32Div:
      ASSEMBLE_AVX_BINOP(vdivss);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulss depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kAVXFloat32Max:
      ASSEMBLE_AVX_BINOP(vmaxss);
      break;
    case kAVXFloat32Min:
      ASSEMBLE_AVX_BINOP(vminss);
      break;
    case kAVXFloat64Cmp: {
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ vucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      break;
    }
    case kAVXFloat64Add:
      ASSEMBLE_AVX_BINOP(vaddsd);
      break;
    case kAVXFloat64Sub:
      ASSEMBLE_AVX_BINOP(vsubsd);
      break;
    case kAVXFloat64Mul:
      ASSEMBLE_AVX_BINOP(vmulsd);
      break;
    case kAVXFloat64Div:
      ASSEMBLE_AVX_BINOP(vdivsd);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulsd depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kAVXFloat64Max:
      ASSEMBLE_AVX_BINOP(vmaxsd);
      break;
    case kAVXFloat64Min:
      ASSEMBLE_AVX_BINOP(vminsd);
      break;
    case kAVXFloat32Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 33);
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputDoubleRegister(0));
      } else {
        __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputOperand(0));
      }
      break;
    }
    case kAVXFloat32Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 31);
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputDoubleRegister(0));
      } else {
        __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputOperand(0));
      }
      break;
    }
    case kAVXFloat64Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 1);
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputDoubleRegister(0));
      } else {
        __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputOperand(0));
      }
      break;
    }
    case kAVXFloat64Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 63);
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputDoubleRegister(0));
      } else {
        __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputOperand(0));
      }
      break;
    }
    case kX64Movsxbl:
      ASSEMBLE_MOVX(movsxbl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movzxbl:
      ASSEMBLE_MOVX(movzxbl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movb: {
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
      } else {
        __ movb(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movsxwl:
      ASSEMBLE_MOVX(movsxwl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movzxwl:
      ASSEMBLE_MOVX(movzxwl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movw: {
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
      } else {
        __ movw(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movl:
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          if (instr->InputAt(0)->IsRegister()) {
            __ movl(i.OutputRegister(), i.InputRegister(0));
          } else {
            __ movl(i.OutputRegister(), i.InputOperand(0));
          }
        } else {
          __ movl(i.OutputRegister(), i.MemoryOperand());
        }
        __ AssertZeroExtended(i.OutputRegister());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
        } else {
          __ movl(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movsxlq:
      ASSEMBLE_MOVX(movsxlq);
      break;
    case kX64Movq:
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
        } else {
          __ movq(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movss:
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Movsd:
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64BitcastFI:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movl(i.OutputRegister(), i.InputOperand(0));
      } else {
        __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
      }
      break;
    case kX64BitcastDL:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movq(i.OutputRegister(), i.InputOperand(0));
      } else {
        __ movq(i.OutputRegister(), i.InputDoubleRegister(0));
      }
      break;
    case kX64BitcastIF:
      if (instr->InputAt(0)->IsRegister()) {
        __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ movss(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kX64BitcastLD:
      if (instr->InputAt(0)->IsRegister()) {
        __ movq(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ movsd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kX64Lea32: {
      AddressingMode mode = AddressingModeField::decode(instr->opcode());
      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
      // and addressing mode just happen to work out. The "addl"/"subl" forms
      // in these cases are faster based on measurements.
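      // For example, "leal rax, [rax + 4]" is emitted as "addl rax, 4" and
      // "leal rax, [rax * 2]" as "shll rax, 1".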
1207       if (i.InputRegister(0).is(i.OutputRegister())) {
1208         if (mode == kMode_MRI) {
1209           int32_t constant_summand = i.InputInt32(1);
1210           if (constant_summand > 0) {
1211             __ addl(i.OutputRegister(), Immediate(constant_summand));
1212           } else if (constant_summand < 0) {
1213             __ subl(i.OutputRegister(), Immediate(-constant_summand));
1214           }
1215         } else if (mode == kMode_MR1) {
1216           if (i.InputRegister(1).is(i.OutputRegister())) {
1217             __ shll(i.OutputRegister(), Immediate(1));
1218           } else {
1219             __ leal(i.OutputRegister(), i.MemoryOperand());
1220           }
1221         } else if (mode == kMode_M2) {
1222           __ shll(i.OutputRegister(), Immediate(1));
1223         } else if (mode == kMode_M4) {
1224           __ shll(i.OutputRegister(), Immediate(2));
1225         } else if (mode == kMode_M8) {
1226           __ shll(i.OutputRegister(), Immediate(3));
1227         } else {
1228           __ leal(i.OutputRegister(), i.MemoryOperand());
1229         }
1230       } else {
1231         __ leal(i.OutputRegister(), i.MemoryOperand());
1232       }
1233       __ AssertZeroExtended(i.OutputRegister());
1234       break;
1235     }
1236     case kX64Lea:
1237       __ leaq(i.OutputRegister(), i.MemoryOperand());
1238       break;
1239     case kX64Dec32:
1240       __ decl(i.OutputRegister());
1241       break;
1242     case kX64Inc32:
1243       __ incl(i.OutputRegister());
1244       break;
1245     case kX64Push:
1246       if (HasImmediateInput(instr, 0)) {
1247         __ pushq(i.InputImmediate(0));
1248       } else {
1249         if (instr->InputAt(0)->IsRegister()) {
1250           __ pushq(i.InputRegister(0));
1251         } else if (instr->InputAt(0)->IsDoubleRegister()) {
1252           // TODO(titzer): use another machine instruction?
          __ subq(rsp, Immediate(kDoubleSize));
          __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
        } else {
          __ pushq(i.InputOperand(0));
        }
      }
      break;
    case kX64Poke: {
      int const slot = MiscField::decode(instr->opcode());
      if (HasImmediateInput(instr, 0)) {
        __ movq(Operand(rsp, slot * kPointerSize), i.InputImmediate(0));
      } else {
        __ movq(Operand(rsp, slot * kPointerSize), i.InputRegister(0));
      }
      break;
    }
    case kX64StoreWriteBarrier: {
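      // Store the value and then record the write so that the GC's
      // incremental marker and store buffer see the new pointer.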
      Register object = i.InputRegister(0);
      Register value = i.InputRegister(2);
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      if (HasImmediateInput(instr, 1)) {
        int index = i.InputInt32(1);
        Register scratch = i.TempRegister(1);
        __ movq(Operand(object, index), value);
        __ RecordWriteContextSlot(object, index, value, scratch, mode);
      } else {
        Register index = i.InputRegister(1);
        __ movq(Operand(object, index, times_1, 0), value);
        __ leaq(index, Operand(object, index, times_1, 0));
        __ RecordWrite(object, index, value, mode);
      }
      break;
    }
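    // The checked load/store opcodes bounds-check the access out of line:
    // out-of-range loads produce a default value (zero or NaN) and
    // out-of-range stores are ignored; see the ASSEMBLE_CHECKED_* macros.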
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
      break;
    case kCheckedLoadWord64:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movq);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(movb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(movw);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(movl);
      break;
    case kCheckedStoreWord64:
      ASSEMBLE_CHECKED_STORE_INTEGER(movq);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(movss);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
      break;
    case kX64StackCheck:
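      // Only sets the flags; the consuming branch or materialization is
      // emitted separately (see AssembleArchBranch below).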
      __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
      break;
  }
}  // NOLINT(readability/fn_size)


// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  X64OperandConverter i(this, instr);
  Label::Distance flabel_distance =
      branch->fallthru ? Label::kNear : Label::kFar;
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  switch (branch->condition) {
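    // Floating-point comparisons set the parity flag when either operand is
    // NaN, so the unordered cases dispatch on parity first.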
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
    default:
      UNREACHABLE();
      break;
  }
  if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}


void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}


// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = no_condition;
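  // The unordered cases materialize the NaN outcome eagerly and otherwise
  // fall through to the corresponding ordered condition.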
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
    default:
      UNREACHABLE();
      break;
  }
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  X64OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ cmpl(input, Immediate(i.InputInt32(index + 0)));
    __ j(equal, GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}


void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  X64OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
  Label** cases = zone()->NewArray<Label*>(case_count);
  for (int32_t index = 0; index < case_count; ++index) {
    cases[index] = GetLabel(i.InputRpo(index + 2));
  }
  Label* const table = AddJumpTable(cases, case_count);
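  // Bounds-check the input and jump through the table of 64-bit label
  // addresses emitted by AssembleJumpTable.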
  __ cmpl(input, Immediate(case_count));
  __ j(above_equal, GetLabel(i.InputRpo(1)));
  __ leaq(kScratchRegister, Operand(table));
  __ jmp(Operand(kScratchRegister, input, times_8, 0));
}


void CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


namespace {

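// Width of an XMM register spill slot in bytes.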
const int kQuadWordSize = 16;

}  // namespace


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ pushq(rbp);
    __ movq(rbp, rsp);
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
  } else if (needs_frame_) {
    __ StubPrologue();
  } else {
    frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
  }

  int stack_shrink_slots = frame()->GetSpillSlotCount();
  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    // TODO(titzer): cannot address target function == local #-1
    __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
    stack_shrink_slots -=
        static_cast<int>(OsrHelper(info()).UnoptimizedFrameSlots());
  }

  const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
  if (saves_fp != 0) {
    stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
  }
  if (stack_shrink_slots > 0) {
    __ subq(rsp, Immediate(stack_shrink_slots * kPointerSize));
  }

  if (saves_fp != 0) {  // Save callee-saved XMM registers.
    const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
    const int stack_size = saves_fp_count * kQuadWordSize;
    // Adjust the stack pointer.
    __ subp(rsp, Immediate(stack_size));
    // Store the registers on the stack.
    int slot_idx = 0;
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      if (!((1 << i) & saves_fp)) continue;
      __ movdqu(Operand(rsp, kQuadWordSize * slot_idx),
                XMMRegister::from_code(i));
      slot_idx++;
    }
    frame()->AllocateSavedCalleeRegisterSlots(saves_fp_count *
                                              (kQuadWordSize / kPointerSize));
  }

  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {  // Save callee-saved registers.
    for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
      if (!((1 << i) & saves)) continue;
      __ pushq(Register::from_code(i));
      frame()->AllocateSavedCalleeRegisterSlots(1);
    }
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  // Restore registers.
  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    for (int i = 0; i < Register::kNumRegisters; i++) {
      if (!((1 << i) & saves)) continue;
      __ popq(Register::from_code(i));
    }
  }
  const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
  if (saves_fp != 0) {
    const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
    const int stack_size = saves_fp_count * kQuadWordSize;
    // Load the registers from the stack.
    int slot_idx = 0;
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      if (!((1 << i) & saves_fp)) continue;
      __ movdqu(XMMRegister::from_code(i),
                Operand(rsp, kQuadWordSize * slot_idx));
      slot_idx++;
    }
    // Adjust the stack pointer.
    __ addp(rsp, Immediate(stack_size));
  }

  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
  } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
    // Canonicalize JSFunction return sites for now.
    if (return_label_.is_bound()) {
      __ jmp(&return_label_);
      return;
    } else {
      __ bind(&return_label_);
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
    }
  }
  size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
  // Might need rcx for scratch if pop_size is too big.
  DCHECK_EQ(0, descriptor->CalleeSavedRegisters() & rcx.bit());
  __ Ret(static_cast<int>(pop_size), rcx);
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ Set(dst, src.ToInt32());
          break;
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kHeapObject: {
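          // Prefer re-materializing the object from the current frame or the
          // root list over embedding the handle in the code.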
          Handle<HeapObject> src_object = src.ToHeapObject();
          Heap::RootListIndex index;
          int offset;
          if (IsMaterializableFromFrame(src_object, &offset)) {
            __ movp(dst, Operand(rbp, offset));
          } else if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ Move(dst, src_object);
          }
          break;
        }
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): load of labels on x64.
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, Immediate(src_const));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(kScratchRegister, src_const);
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movaps(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movaps(xmm0, src);
    __ movaps(src, dst);
    __ movaps(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap.  We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}

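// Emits one 64-bit absolute label address per entry; the resulting table is
// consumed by AssembleArchTableSwitch.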
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  for (size_t index = 0; index < target_count; ++index) {
    __ dq(targets[index]);
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
    return;
  }

  int space_needed = Deoptimizer::patch_size();
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    __ Nop(padding_size);
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8