// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->

#define kScratchDoubleReg xmm0
// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Immediate InputImmediate(size_t index) {
    return ToImmediate(instr_->InputAt(index));

  Operand InputOperand(size_t index, int extra = 0) {
    return ToOperand(instr_->InputAt(index), extra);

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Immediate ToImmediate(InstructionOperand* operand) {
    return Immediate(ToConstant(operand).ToInt32());

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
    return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
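  // Consumes one instruction input: callers pass the running input offset and
  // each call advances it to the next input.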
  static size_t NextOffset(size_t* offset) {

  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
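  // Decodes the addressing mode encoded in the instruction's opcode and
  // consumes the base/index/displacement inputs starting at *offset.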
  Operand MemoryOperand(size_t* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());

        Register base = InputRegister(NextOffset(offset));
        return Operand(base, disp);

        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);

        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        return Operand(base, index, scale, disp);

        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);

        Register base = InputRegister(NextOffset(offset));
        return Operand(base, disp);

        UNREACHABLE();  // Should use kModeMR with more compact encoding instead
        return Operand(no_reg, 0);

        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        return Operand(index, scale, disp);

        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);

        return Operand(no_reg, 0);

        return Operand(no_reg, 0);

  Operand MemoryOperand(size_t first_input = 0) {
    return MemoryOperand(&first_input);

bool HasImmediateInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsImmediate();
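// Out-of-line code stubs emitted after the main instruction stream. Each one
// materializes a fallback result (zero or NaN) or runs a slow path before
// control returns to the inline code at the exit label.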
class OutOfLineLoadZero FINAL : public OutOfLineCode {
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ xorl(result_, result_); }

  Register const result_;

class OutOfLineLoadNaN FINAL : public OutOfLineCode {
  OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ pcmpeqd(result_, result_); }

  XMMRegister const result_;

class OutOfLineTruncateDoubleToI FINAL : public OutOfLineCode {
  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
      : OutOfLineCode(gen), result_(result), input_(input) {}

  void Generate() FINAL {
    __ subp(rsp, Immediate(kDoubleSize));
    __ movsd(MemOperand(rsp, 0), input_);
    __ SlowTruncateToI(result_, rsp, 0);
    __ addp(rsp, Immediate(kDoubleSize));

  Register const result_;
  XMMRegister const input_;
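// The ASSEMBLE_* macros below expand into the common operand-dispatch
// boilerplate: they pick the register, immediate, or memory form of an
// instruction depending on the shapes of the instruction's operands. The
// ASSEMBLE_CHECKED_LOAD/STORE macros additionally emit a bounds check against
// the length operand; out-of-bounds loads produce zero or NaN via the
// out-of-line stubs above, and out-of-bounds stores are skipped.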
#define ASSEMBLE_UNOP(asm_instr) \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister()); \
      __ asm_instr(i.OutputOperand()); \

#define ASSEMBLE_BINOP(asm_instr) \
    if (HasImmediateInput(instr, 1)) { \
      if (instr->InputAt(0)->IsRegister()) { \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \
      if (instr->InputAt(1)->IsRegister()) { \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \

#define ASSEMBLE_MULT(asm_instr) \
    if (HasImmediateInput(instr, 1)) { \
      if (instr->InputAt(0)->IsRegister()) { \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
                     i.InputImmediate(1)); \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0), \
                     i.InputImmediate(1)); \
      if (instr->InputAt(1)->IsRegister()) { \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1)); \

#define ASSEMBLE_SHIFT(asm_instr, width) \
    if (HasImmediateInput(instr, 1)) { \
      if (instr->Output()->IsRegister()) { \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1))); \
      if (instr->Output()->IsRegister()) { \
        __ asm_instr##_cl(i.OutputRegister()); \
        __ asm_instr##_cl(i.OutputOperand()); \

#define ASSEMBLE_MOVX(asm_instr) \
    if (instr->addressing_mode() != kMode_None) { \
      __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
    } else if (instr->InputAt(0)->IsRegister()) { \
      __ asm_instr(i.OutputRegister(), i.InputRegister(0)); \
      __ asm_instr(i.OutputRegister(), i.InputOperand(0)); \

#define ASSEMBLE_SSE_BINOP(asm_instr) \
    if (instr->InputAt(1)->IsDoubleRegister()) { \
      __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
      __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1)); \

#define ASSEMBLE_SSE_UNOP(asm_instr) \
    if (instr->InputAt(0)->IsDoubleRegister()) { \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
      __ asm_instr(i.OutputDoubleRegister(), i.InputOperand(0)); \

#define ASSEMBLE_AVX_BINOP(asm_instr) \
    CpuFeatureScope avx_scope(masm(), AVX); \
    if (instr->InputAt(1)->IsDoubleRegister()) { \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputDoubleRegister(1)); \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputOperand(1)); \
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr) \
    auto result = i.OutputDoubleRegister(); \
    auto buffer = i.InputRegister(0); \
    auto index1 = i.InputRegister(1); \
    auto index2 = i.InputInt32(2); \
    OutOfLineCode* ool; \
    if (instr->InputAt(3)->IsRegister()) { \
      auto length = i.InputRegister(3); \
      DCHECK_EQ(0, index2); \
      __ cmpl(index1, length); \
      ool = new (zone()) OutOfLineLoadNaN(this, result); \
      auto length = i.InputInt32(3); \
      DCHECK_LE(index2, length); \
      __ cmpq(index1, Immediate(length - index2)); \
      class OutOfLineLoadFloat FINAL : public OutOfLineCode { \
        OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result, \
                           Register buffer, Register index1, int32_t index2, \
            : OutOfLineCode(gen), \
        void Generate() FINAL { \
          __ leal(kScratchRegister, Operand(index1_, index2_)); \
          __ pcmpeqd(result_, result_); \
          __ cmpl(kScratchRegister, Immediate(length_)); \
          __ j(above_equal, exit()); \
          __ asm_instr(result_, \
                       Operand(buffer_, kScratchRegister, times_1, 0)); \
        XMMRegister const result_; \
        Register const buffer_; \
        Register const index1_; \
        int32_t const index2_; \
        int32_t const length_; \
          OutOfLineLoadFloat(this, result, buffer, index1, index2, length); \
    __ j(above_equal, ool->entry()); \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
    __ bind(ool->exit()); \

#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
    auto result = i.OutputRegister(); \
    auto buffer = i.InputRegister(0); \
    auto index1 = i.InputRegister(1); \
    auto index2 = i.InputInt32(2); \
    OutOfLineCode* ool; \
    if (instr->InputAt(3)->IsRegister()) { \
      auto length = i.InputRegister(3); \
      DCHECK_EQ(0, index2); \
      __ cmpl(index1, length); \
      ool = new (zone()) OutOfLineLoadZero(this, result); \
      auto length = i.InputInt32(3); \
      DCHECK_LE(index2, length); \
      __ cmpq(index1, Immediate(length - index2)); \
      class OutOfLineLoadInteger FINAL : public OutOfLineCode { \
        OutOfLineLoadInteger(CodeGenerator* gen, Register result, \
                             Register buffer, Register index1, int32_t index2, \
            : OutOfLineCode(gen), \
        void Generate() FINAL { \
          __ leal(kScratchRegister, Operand(index1_, index2_)); \
          __ cmpl(kScratchRegister, Immediate(length_)); \
          __ j(above_equal, &oob, Label::kNear); \
          __ asm_instr(result_, \
                       Operand(buffer_, kScratchRegister, times_1, 0)); \
          __ xorl(result_, result_); \
        Register const result_; \
        Register const buffer_; \
        Register const index1_; \
        int32_t const index2_; \
        int32_t const length_; \
          OutOfLineLoadInteger(this, result, buffer, index1, index2, length); \
    __ j(above_equal, ool->entry()); \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
    __ bind(ool->exit()); \
#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
    auto buffer = i.InputRegister(0); \
    auto index1 = i.InputRegister(1); \
    auto index2 = i.InputInt32(2); \
    auto value = i.InputDoubleRegister(4); \
    if (instr->InputAt(3)->IsRegister()) { \
      auto length = i.InputRegister(3); \
      DCHECK_EQ(0, index2); \
      __ cmpl(index1, length); \
      __ j(above_equal, &done, Label::kNear); \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
      auto length = i.InputInt32(3); \
      DCHECK_LE(index2, length); \
      __ cmpq(index1, Immediate(length - index2)); \
      class OutOfLineStoreFloat FINAL : public OutOfLineCode { \
        OutOfLineStoreFloat(CodeGenerator* gen, Register buffer, \
                            Register index1, int32_t index2, int32_t length, \
            : OutOfLineCode(gen), \
        void Generate() FINAL { \
          __ leal(kScratchRegister, Operand(index1_, index2_)); \
          __ cmpl(kScratchRegister, Immediate(length_)); \
          __ j(above_equal, exit()); \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0), \
        Register const buffer_; \
        Register const index1_; \
        int32_t const index2_; \
        int32_t const length_; \
        XMMRegister const value_; \
      auto ool = new (zone()) \
          OutOfLineStoreFloat(this, buffer, index1, index2, length, value); \
      __ j(above_equal, ool->entry()); \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
      __ bind(ool->exit()); \

#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value) \
    auto buffer = i.InputRegister(0); \
    auto index1 = i.InputRegister(1); \
    auto index2 = i.InputInt32(2); \
    if (instr->InputAt(3)->IsRegister()) { \
      auto length = i.InputRegister(3); \
      DCHECK_EQ(0, index2); \
      __ cmpl(index1, length); \
      __ j(above_equal, &done, Label::kNear); \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
      auto length = i.InputInt32(3); \
      DCHECK_LE(index2, length); \
      __ cmpq(index1, Immediate(length - index2)); \
      class OutOfLineStoreInteger FINAL : public OutOfLineCode { \
        OutOfLineStoreInteger(CodeGenerator* gen, Register buffer, \
                              Register index1, int32_t index2, int32_t length, \
            : OutOfLineCode(gen), \
        void Generate() FINAL { \
          __ leal(kScratchRegister, Operand(index1_, index2_)); \
          __ cmpl(kScratchRegister, Immediate(length_)); \
          __ j(above_equal, exit()); \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0), \
        Register const buffer_; \
        Register const index1_; \
        int32_t const index2_; \
        int32_t const length_; \
        Value const value_; \
      auto ool = new (zone()) \
          OutOfLineStoreInteger(this, buffer, index1, index2, length, value); \
      __ j(above_equal, ool->entry()); \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
      __ bind(ool->exit()); \

#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
    if (instr->InputAt(4)->IsRegister()) { \
      Register value = i.InputRegister(4); \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register); \
      Immediate value = i.InputImmediate(4); \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      RecordCallPosition(instr);

    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      RecordCallPosition(instr);

      AssembleArchJump(i.InputRpo(0));
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      // don't emit code for nops.
    case kArchDeoptimize: {
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
    case kArchStackPointer:
      __ movq(i.OutputRegister(), rsp);
    case kArchTruncateDoubleToI: {
      auto result = i.OutputRegister();
      auto input = i.InputDoubleRegister(0);
      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
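      // cvttsd2siq produces INT64_MIN (0x8000000000000000) when the input is
      // NaN or does not fit; the cmpq with 1 sets the overflow flag only for
      // that value, so the out-of-line slow path runs exactly when the fast
      // conversion failed.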
      __ cvttsd2siq(result, input);
      __ cmpq(result, Immediate(1));
      __ j(overflow, ool->entry());
      __ bind(ool->exit());

      ASSEMBLE_BINOP(addl);
      ASSEMBLE_BINOP(addq);
      ASSEMBLE_BINOP(subl);
      ASSEMBLE_BINOP(subq);
      ASSEMBLE_BINOP(andl);
      ASSEMBLE_BINOP(andq);
      ASSEMBLE_BINOP(cmpl);
      ASSEMBLE_BINOP(cmpq);
      ASSEMBLE_BINOP(testl);
      ASSEMBLE_BINOP(testq);
      ASSEMBLE_MULT(imull);
      ASSEMBLE_MULT(imulq);
      if (instr->InputAt(1)->IsRegister()) {
        __ imull(i.InputRegister(1));
        __ imull(i.InputOperand(1));
      if (instr->InputAt(1)->IsRegister()) {
        __ mull(i.InputRegister(1));
        __ mull(i.InputOperand(1));
      __ idivl(i.InputRegister(1));
      __ idivq(i.InputRegister(1));
      __ divl(i.InputRegister(1));
      __ divq(i.InputRegister(1));
      ASSEMBLE_BINOP(xorl);
      ASSEMBLE_BINOP(xorq);
      ASSEMBLE_SHIFT(shll, 5);
      ASSEMBLE_SHIFT(shlq, 6);
      ASSEMBLE_SHIFT(shrl, 5);
      ASSEMBLE_SHIFT(shrq, 6);
      ASSEMBLE_SHIFT(sarl, 5);
      ASSEMBLE_SHIFT(sarq, 6);
      ASSEMBLE_SHIFT(rorl, 5);
      ASSEMBLE_SHIFT(rorq, 6);
      if (instr->InputAt(0)->IsRegister()) {
        __ Lzcntl(i.OutputRegister(), i.InputRegister(0));
        __ Lzcntl(i.OutputRegister(), i.InputOperand(0));
      ASSEMBLE_SSE_BINOP(ucomiss);
      ASSEMBLE_SSE_BINOP(addss);
      ASSEMBLE_SSE_BINOP(subss);
      ASSEMBLE_SSE_BINOP(mulss);
      ASSEMBLE_SSE_BINOP(divss);
    case kSSEFloat32Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      // TODO(turbofan): Add AVX version with relaxed register constraints.
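      // pcmpeqd sets kScratchDoubleReg to all ones; shifting each quadword
      // right by 33 leaves 0x7FFFFFFF in its low 32 bits, which clears the
      // float32 sign bit when and-ed with the value.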
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 33);
      __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);

    case kSSEFloat32Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      // TODO(turbofan): Add AVX version with relaxed register constraints.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 31);
      __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);

    case kSSEFloat32Sqrt:
      ASSEMBLE_SSE_UNOP(sqrtss);
      ASSEMBLE_SSE_BINOP(maxss);
      ASSEMBLE_SSE_BINOP(minss);
    case kSSEFloat32ToFloat64:
      ASSEMBLE_SSE_UNOP(cvtss2sd);
      ASSEMBLE_SSE_BINOP(ucomisd);
      ASSEMBLE_SSE_BINOP(addsd);
      ASSEMBLE_SSE_BINOP(subsd);
      ASSEMBLE_SSE_BINOP(mulsd);
      ASSEMBLE_SSE_BINOP(divsd);
    case kSSEFloat64Mod: {
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      // The following 2 instructions implicitly use rax.
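      // fnstsw_ax stores the x87 status word in ax; sahf (or the manual
      // shift-and-mask fallback below) transfers it into EFLAGS so that
      // parity_even reflects the C2 "operation incomplete" bit and the loop
      // repeats fprem until the reduction is finished.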
      if (CpuFeatures::IsSupported(SAHF)) {
        CpuFeatureScope sahf_scope(masm(), SAHF);
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));

      ASSEMBLE_SSE_BINOP(maxsd);
      ASSEMBLE_SSE_BINOP(minsd);
    case kSSEFloat64Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      // TODO(turbofan): Add AVX version with relaxed register constraints.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 1);
      __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);

    case kSSEFloat64Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      // TODO(turbofan): Add AVX version with relaxed register constraints.
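      // Shifting the all-ones mask left by 63 leaves only the sign bit set
      // (0x8000000000000000); xor-ing with it flips the sign of the double.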
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 63);
      __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);

    case kSSEFloat64Sqrt:
      ASSEMBLE_SSE_UNOP(sqrtsd);
    case kSSEFloat64Round: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      RoundingMode const mode =
          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);

    case kSSEFloat64ToFloat32:
      ASSEMBLE_SSE_UNOP(cvtsd2ss);
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
        __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
    case kSSEFloat64ToUint32: {
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
        __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      __ AssertZeroExtended(i.OutputRegister());

    case kSSEInt32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
    case kSSEUint32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ movl(kScratchRegister, i.InputRegister(0));
        __ movl(kScratchRegister, i.InputOperand(0));
      __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
    case kSSEFloat64ExtractLowWord32:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movl(i.OutputRegister(), i.InputOperand(0));
        __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
    case kSSEFloat64ExtractHighWord32:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movl(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
        __ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
    case kSSEFloat64InsertLowWord32:
      if (instr->InputAt(1)->IsRegister()) {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 0);
        __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
    case kSSEFloat64InsertHighWord32:
      if (instr->InputAt(1)->IsRegister()) {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 1);
        __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
    case kSSEFloat64LoadLowWord32:
      if (instr->InputAt(0)->IsRegister()) {
        __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
        __ movd(i.OutputDoubleRegister(), i.InputOperand(0));
    case kAVXFloat32Cmp: {
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
        __ vucomiss(i.InputDoubleRegister(0), i.InputOperand(1));

      ASSEMBLE_AVX_BINOP(vaddss);
      ASSEMBLE_AVX_BINOP(vsubss);
      ASSEMBLE_AVX_BINOP(vmulss);
      ASSEMBLE_AVX_BINOP(vdivss);
      ASSEMBLE_AVX_BINOP(vmaxss);
      ASSEMBLE_AVX_BINOP(vminss);
    case kAVXFloat64Cmp: {
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
        __ vucomisd(i.InputDoubleRegister(0), i.InputOperand(1));

      ASSEMBLE_AVX_BINOP(vaddsd);
      ASSEMBLE_AVX_BINOP(vsubsd);
      ASSEMBLE_AVX_BINOP(vmulsd);
      ASSEMBLE_AVX_BINOP(vdivsd);
      ASSEMBLE_AVX_BINOP(vmaxsd);
      ASSEMBLE_AVX_BINOP(vminsd);
      ASSEMBLE_MOVX(movsxbl);
      __ AssertZeroExtended(i.OutputRegister());
      ASSEMBLE_MOVX(movzxbl);
      __ AssertZeroExtended(i.OutputRegister());
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
        __ movb(operand, i.InputRegister(index));
      ASSEMBLE_MOVX(movsxwl);
      __ AssertZeroExtended(i.OutputRegister());
      ASSEMBLE_MOVX(movzxwl);
      __ AssertZeroExtended(i.OutputRegister());
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
        __ movw(operand, i.InputRegister(index));
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          if (instr->InputAt(0)->IsRegister()) {
            __ movl(i.OutputRegister(), i.InputRegister(0));
            __ movl(i.OutputRegister(), i.InputOperand(0));
          __ movl(i.OutputRegister(), i.MemoryOperand());
        __ AssertZeroExtended(i.OutputRegister());
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
          __ movl(operand, i.InputRegister(index));
      ASSEMBLE_MOVX(movsxlq);
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
          __ movq(operand, i.InputRegister(index));
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      AddressingMode mode = AddressingModeField::decode(instr->opcode());
      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
      // and addressing mode just happens to work out. The "addl"/"subl" forms
      // in these cases are faster based on measurements.
      if (i.InputRegister(0).is(i.OutputRegister())) {
        if (mode == kMode_MRI) {
          int32_t constant_summand = i.InputInt32(1);
          if (constant_summand > 0) {
            __ addl(i.OutputRegister(), Immediate(constant_summand));
          } else if (constant_summand < 0) {
            __ subl(i.OutputRegister(), Immediate(-constant_summand));
        } else if (mode == kMode_MR1) {
          if (i.InputRegister(1).is(i.OutputRegister())) {
            __ shll(i.OutputRegister(), Immediate(1));
            __ leal(i.OutputRegister(), i.MemoryOperand());
        } else if (mode == kMode_M2) {
          __ shll(i.OutputRegister(), Immediate(1));
        } else if (mode == kMode_M4) {
          __ shll(i.OutputRegister(), Immediate(2));
        } else if (mode == kMode_M8) {
          __ shll(i.OutputRegister(), Immediate(3));
          __ leal(i.OutputRegister(), i.MemoryOperand());
        __ leal(i.OutputRegister(), i.MemoryOperand());
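      // All of the 32-bit operations above implicitly zero-extend into the
      // upper 32 bits of the destination on x64, so the result is already a
      // valid zero-extended value.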
      __ AssertZeroExtended(i.OutputRegister());

      __ leaq(i.OutputRegister(), i.MemoryOperand());
      __ decl(i.OutputRegister());
      __ incl(i.OutputRegister());
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
        if (instr->InputAt(0)->IsRegister()) {
          __ pushq(i.InputRegister(0));
          __ pushq(i.InputOperand(0));
    case kX64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
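      // Store the value, then compute the address of the updated slot into
      // 'index' and call RecordWrite so the GC's write barrier sees the new
      // pointer.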
      __ movq(Operand(object, index, times_1, 0), value);
      __ leaq(index, Operand(object, index, times_1, 0));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      __ RecordWrite(object, index, value, mode);

    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(movb);
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(movw);
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(movl);
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(movss);
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
    case kX64StackCheck:
      __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
}  // NOLINT(readability/fn_size)
// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  X64OperandConverter i(this, instr);
  Label::Distance flabel_distance =
      branch->fallthru ? Label::kNear : Label::kFar;
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  switch (branch->condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
      __ j(equal, tlabel);
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
      __ j(not_equal, tlabel);
    case kSignedLessThan:
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
    case kSignedGreaterThan:
      __ j(greater, tlabel);
    case kUnsignedLessThan:
      __ j(below, tlabel);
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      __ j(overflow, tlabel);
      __ j(no_overflow, tlabel);
  if (!branch->fallthru) __ jmp(flabel, flabel_distance);

void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));

// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = no_condition;
  switch (condition) {
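    // For the unordered conditions a NaN operand sets the parity flag; equal
    // must then materialize false and not-equal true, so those cases write
    // the result directly and skip the generic setcc path below.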
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    case kSignedLessThan:
    case kSignedGreaterThanOrEqual:
    case kSignedLessThanOrEqual:
    case kSignedGreaterThan:
    case kUnsignedLessThan:
    case kUnsignedGreaterThanOrEqual:
    case kUnsignedLessThanOrEqual:
    case kUnsignedGreaterThan:
  __ movzxbl(reg, reg);

void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  X64OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ cmpl(input, Immediate(i.InputInt32(index + 0)));
    __ j(equal, GetLabel(i.InputRpo(index + 1)));
  AssembleArchJump(i.InputRpo(1));

void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  X64OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
  Label** cases = zone()->NewArray<Label*>(case_count);
  for (int32_t index = 0; index < case_count; ++index) {
    cases[index] = GetLabel(i.InputRpo(index + 2));
  Label* const table = AddJumpTable(cases, case_count);
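  // Bounds-check the input against the number of cases; out-of-range values
  // jump to the default block (input 1), everything else dispatches through
  // the emitted jump table.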
  __ cmpl(input, Immediate(case_count));
  __ j(above_equal, GetLabel(i.InputRpo(1)));
  __ leaq(kScratchRegister, Operand(table));
  __ jmp(Operand(kScratchRegister, input, times_8, 0));

void CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
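  // Calls the pre-generated deoptimization entry stub for this bailout type.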
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);

void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else if (stack_slots > 0) {
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    // TODO(titzer): cannot address target function == local #-1
    __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
    DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
    stack_slots -= frame()->GetOsrStackSlotCount();
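  // Allocate the remaining spill slot area for this frame.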
  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));

void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.
      for (int i = 0; i < Register::kNumRegisters; i++) {
        if (!((1 << i) & saves)) continue;
        __ popq(Register::from_code(i));
      __ popq(rbp);  // Pop caller's frame pointer.
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
  } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
    __ ret(pop_count * kPointerSize);

void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
      __ movq(g.ToOperand(destination), src);
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ Set(dst, src.ToInt32());
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
        case Constant::kFloat32:
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
        case Constant::kFloat64:
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          if (info()->IsOptimizing() &&
              src_object.is_identical_to(info()->context())) {
            // Loading the context from the frame is way cheaper than
            // materializing the actual context heap object address.
            __ movp(dst, Operand(rbp, StandardFrameConstants::kContextOffset));
          } else if (info()->IsOptimizing() &&
                     src_object.is_identical_to(info()->closure())) {
            // Loading the JSFunction from the frame is way cheaper than
            // materializing the actual JSFunction heap object address.
                    Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
            __ Move(dst, src_object);
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): load of labels on x64.
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, Immediate(src_const));
      DCHECK_EQ(Constant::kFloat64, src.type());
      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(kScratchRegister, src_const);
        __ movq(g.ToOperand(destination), kScratchRegister);
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movaps(dst, src);
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);

void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
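    // Memory-to-memory swap, staged through the scratch register.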
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movaps(xmm0, src);
    __ movaps(src, dst);
    __ movaps(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(dst, xmm0);
    // No other combinations are possible.
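// The jump table is emitted as a sequence of 64-bit label addresses that the
// table-switch code above indexes into.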
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  for (size_t index = 0; index < target_count; ++index) {
    __ dq(targets[index]);

void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }

void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
  MarkLazyDeoptSite();
}  // namespace compiler
}  // namespace internal
}  // namespace v8