// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

#define kScratchDoubleReg xmm0
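
// Note: xmm0 doubles as a fixed scratch double register in this backend (see
// the double moves and swaps below, which state that they "rely on having
// xmm0 available"), so live values should not be kept in xmm0 across the
// helpers that use kScratchDoubleReg.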

// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Immediate InputImmediate(size_t index) {
    return ToImmediate(instr_->InputAt(index));

  Operand InputOperand(size_t index, int extra = 0) {
    return ToOperand(instr_->InputAt(index), extra);

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Immediate ToImmediate(InstructionOperand* operand) {
    return Immediate(ToConstant(operand).ToInt32());

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(
        AllocatedOperand::cast(op)->index(), frame(), extra);
    return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());

  static size_t NextOffset(size_t* offset) {

  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
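
  // ScaleFor() relies on the scaled addressing modes being declared in the
  // same order as the ScaleFactor enum (the STATIC_ASSERTs above), so the
  // distance from the base mode gives the scale directly; e.g.
  // ScaleFor(kMode_MR1, kMode_MR4) is expected to yield times_4.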

  Operand MemoryOperand(size_t* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
        Register base = InputRegister(NextOffset(offset));
        return Operand(base, disp);
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        return Operand(base, index, scale, disp);
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
        Register base = InputRegister(NextOffset(offset));
        return Operand(base, disp);
        UNREACHABLE();  // Should use kMode_MR with more compact encoding instead
        return Operand(no_reg, 0);
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        return Operand(index, scale, disp);
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
        return Operand(no_reg, 0);
    return Operand(no_reg, 0);

  Operand MemoryOperand(size_t first_input = 0) {
    return MemoryOperand(&first_input);

bool HasImmediateInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsImmediate();


class OutOfLineLoadZero FINAL : public OutOfLineCode {
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ xorl(result_, result_); }

  Register const result_;


class OutOfLineLoadNaN FINAL : public OutOfLineCode {
  OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ pcmpeqd(result_, result_); }

  XMMRegister const result_;


class OutOfLineTruncateDoubleToI FINAL : public OutOfLineCode {
  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
      : OutOfLineCode(gen), result_(result), input_(input) {}

  void Generate() FINAL {
    __ subp(rsp, Immediate(kDoubleSize));
    __ movsd(MemOperand(rsp, 0), input_);
    __ SlowTruncateToI(result_, rsp, 0);
    __ addp(rsp, Immediate(kDoubleSize));

  Register const result_;
  XMMRegister const input_;
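
  // This is the slow path for kArchTruncateDoubleToI below: the input double
  // is spilled to the stack and converted via SlowTruncateToI, and it is only
  // reached when the inline cvttsd2siq overflows.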


#define ASSEMBLE_UNOP(asm_instr) \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister()); \
      __ asm_instr(i.OutputOperand()); \


#define ASSEMBLE_BINOP(asm_instr) \
    if (HasImmediateInput(instr, 1)) { \
      if (instr->InputAt(0)->IsRegister()) { \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \
      if (instr->InputAt(1)->IsRegister()) { \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \


#define ASSEMBLE_MULT(asm_instr) \
    if (HasImmediateInput(instr, 1)) { \
      if (instr->InputAt(0)->IsRegister()) { \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
                     i.InputImmediate(1)); \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0), \
                     i.InputImmediate(1)); \
      if (instr->InputAt(1)->IsRegister()) { \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1)); \


#define ASSEMBLE_SHIFT(asm_instr, width) \
    if (HasImmediateInput(instr, 1)) { \
      if (instr->Output()->IsRegister()) { \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1))); \
      if (instr->Output()->IsRegister()) { \
        __ asm_instr##_cl(i.OutputRegister()); \
        __ asm_instr##_cl(i.OutputOperand()); \
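
// For ASSEMBLE_SHIFT, |width| selects InputInt5 or InputInt6, i.e. the
// immediate shift count is masked to 5 bits for the 32-bit forms and 6 bits
// for the 64-bit forms, matching x64 shift semantics; without an immediate
// the count comes implicitly from cl via the *_cl assembler variants.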


#define ASSEMBLE_MOVX(asm_instr) \
    if (instr->addressing_mode() != kMode_None) { \
      __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
    } else if (instr->InputAt(0)->IsRegister()) { \
      __ asm_instr(i.OutputRegister(), i.InputRegister(0)); \
      __ asm_instr(i.OutputRegister(), i.InputOperand(0)); \


#define ASSEMBLE_SSE_BINOP(asm_instr) \
    if (instr->InputAt(1)->IsDoubleRegister()) { \
      __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
      __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1)); \


#define ASSEMBLE_SSE_UNOP(asm_instr) \
    if (instr->InputAt(0)->IsDoubleRegister()) { \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
      __ asm_instr(i.OutputDoubleRegister(), i.InputOperand(0)); \


#define ASSEMBLE_AVX_BINOP(asm_instr) \
    CpuFeatureScope avx_scope(masm(), AVX); \
    if (instr->InputAt(1)->IsDoubleRegister()) { \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputDoubleRegister(1)); \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputOperand(1)); \
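
// Unlike the SSE helpers above, which overwrite their first input, the AVX
// helper uses the three-operand VEX encodings, so the destination may differ
// from both inputs; the CpuFeatureScope marks the code as requiring AVX.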


#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr) \
    auto result = i.OutputDoubleRegister(); \
    auto buffer = i.InputRegister(0); \
    auto index1 = i.InputRegister(1); \
    auto index2 = i.InputInt32(2); \
    OutOfLineCode* ool; \
    if (instr->InputAt(3)->IsRegister()) { \
      auto length = i.InputRegister(3); \
      DCHECK_EQ(0, index2); \
      __ cmpl(index1, length); \
      ool = new (zone()) OutOfLineLoadNaN(this, result); \
      auto length = i.InputInt32(3); \
      DCHECK_LE(index2, length); \
      __ cmpq(index1, Immediate(length - index2)); \
      class OutOfLineLoadFloat FINAL : public OutOfLineCode { \
        OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result, \
                           Register buffer, Register index1, int32_t index2, \
            : OutOfLineCode(gen), \
        void Generate() FINAL { \
          __ leal(kScratchRegister, Operand(index1_, index2_)); \
          __ pcmpeqd(result_, result_); \
          __ cmpl(kScratchRegister, Immediate(length_)); \
          __ j(above_equal, exit()); \
          __ asm_instr(result_, \
                       Operand(buffer_, kScratchRegister, times_1, 0)); \
        XMMRegister const result_; \
        Register const buffer_; \
        Register const index1_; \
        int32_t const index2_; \
        int32_t const length_; \
          OutOfLineLoadFloat(this, result, buffer, index1, index2, length); \
      __ j(above_equal, ool->entry()); \
      __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
      __ bind(ool->exit()); \
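
// The checked-load helpers implement bounds-checked typed-array style loads:
// if index1 (plus the constant index2) is not below the length, the load is
// skipped and the out-of-line code materializes a default value instead (an
// all-ones NaN pattern for floats, zero for integers).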


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
    auto result = i.OutputRegister(); \
    auto buffer = i.InputRegister(0); \
    auto index1 = i.InputRegister(1); \
    auto index2 = i.InputInt32(2); \
    OutOfLineCode* ool; \
    if (instr->InputAt(3)->IsRegister()) { \
      auto length = i.InputRegister(3); \
      DCHECK_EQ(0, index2); \
      __ cmpl(index1, length); \
      ool = new (zone()) OutOfLineLoadZero(this, result); \
      auto length = i.InputInt32(3); \
      DCHECK_LE(index2, length); \
      __ cmpq(index1, Immediate(length - index2)); \
      class OutOfLineLoadInteger FINAL : public OutOfLineCode { \
        OutOfLineLoadInteger(CodeGenerator* gen, Register result, \
                             Register buffer, Register index1, int32_t index2, \
            : OutOfLineCode(gen), \
        void Generate() FINAL { \
          __ leal(kScratchRegister, Operand(index1_, index2_)); \
          __ cmpl(kScratchRegister, Immediate(length_)); \
          __ j(above_equal, &oob, Label::kNear); \
          __ asm_instr(result_, \
                       Operand(buffer_, kScratchRegister, times_1, 0)); \
          __ xorl(result_, result_); \
        Register const result_; \
        Register const buffer_; \
        Register const index1_; \
        int32_t const index2_; \
        int32_t const length_; \
          OutOfLineLoadInteger(this, result, buffer, index1, index2, length); \
      __ j(above_equal, ool->entry()); \
      __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
      __ bind(ool->exit()); \


#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
    auto buffer = i.InputRegister(0); \
    auto index1 = i.InputRegister(1); \
    auto index2 = i.InputInt32(2); \
    auto value = i.InputDoubleRegister(4); \
    if (instr->InputAt(3)->IsRegister()) { \
      auto length = i.InputRegister(3); \
      DCHECK_EQ(0, index2); \
      __ cmpl(index1, length); \
      __ j(above_equal, &done, Label::kNear); \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
      auto length = i.InputInt32(3); \
      DCHECK_LE(index2, length); \
      __ cmpq(index1, Immediate(length - index2)); \
      class OutOfLineStoreFloat FINAL : public OutOfLineCode { \
        OutOfLineStoreFloat(CodeGenerator* gen, Register buffer, \
                            Register index1, int32_t index2, int32_t length, \
            : OutOfLineCode(gen), \
        void Generate() FINAL { \
          __ leal(kScratchRegister, Operand(index1_, index2_)); \
          __ cmpl(kScratchRegister, Immediate(length_)); \
          __ j(above_equal, exit()); \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0), \
        Register const buffer_; \
        Register const index1_; \
        int32_t const index2_; \
        int32_t const length_; \
        XMMRegister const value_; \
      auto ool = new (zone()) \
          OutOfLineStoreFloat(this, buffer, index1, index2, length, value); \
      __ j(above_equal, ool->entry()); \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
      __ bind(ool->exit()); \


#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value) \
    auto buffer = i.InputRegister(0); \
    auto index1 = i.InputRegister(1); \
    auto index2 = i.InputInt32(2); \
    if (instr->InputAt(3)->IsRegister()) { \
      auto length = i.InputRegister(3); \
      DCHECK_EQ(0, index2); \
      __ cmpl(index1, length); \
      __ j(above_equal, &done, Label::kNear); \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
      auto length = i.InputInt32(3); \
      DCHECK_LE(index2, length); \
      __ cmpq(index1, Immediate(length - index2)); \
      class OutOfLineStoreInteger FINAL : public OutOfLineCode { \
        OutOfLineStoreInteger(CodeGenerator* gen, Register buffer, \
                              Register index1, int32_t index2, int32_t length, \
            : OutOfLineCode(gen), \
        void Generate() FINAL { \
          __ leal(kScratchRegister, Operand(index1_, index2_)); \
          __ cmpl(kScratchRegister, Immediate(length_)); \
          __ j(above_equal, exit()); \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0), \
        Register const buffer_; \
        Register const index1_; \
        int32_t const index2_; \
        int32_t const length_; \
        Value const value_; \
      auto ool = new (zone()) \
          OutOfLineStoreInteger(this, buffer, index1, index2, length, value); \
      __ j(above_equal, ool->entry()); \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
      __ bind(ool->exit()); \


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
    if (instr->InputAt(4)->IsRegister()) { \
      Register value = i.InputRegister(4); \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register); \
      Immediate value = i.InputImmediate(4); \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
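
// ASSEMBLE_CHECKED_STORE_INTEGER merely dispatches on the value operand: the
// shared _IMPL body is instantiated once for a Register value and once for an
// Immediate value, and out-of-bounds stores are simply skipped.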


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      RecordCallPosition(instr);
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      RecordCallPosition(instr);
      AssembleArchJump(i.InputRpo(0));
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      // don't emit code for nops.
    case kArchDeoptimize: {
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
    case kArchStackPointer:
      __ movq(i.OutputRegister(), rsp);
    case kArchTruncateDoubleToI: {
      auto result = i.OutputRegister();
      auto input = i.InputDoubleRegister(0);
      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
      __ cvttsd2siq(result, input);
      __ cmpq(result, Immediate(1));
      __ j(overflow, ool->entry());
      __ bind(ool->exit());
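      // cvttsd2siq yields 0x8000000000000000 (INT64_MIN) when the conversion
      // fails; subtracting 1 via the cmpq overflows only for that value, so
      // j(overflow) enters the slow path exactly in the failure case.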
      ASSEMBLE_BINOP(addl);
      ASSEMBLE_BINOP(addq);
      ASSEMBLE_BINOP(subl);
      ASSEMBLE_BINOP(subq);
      ASSEMBLE_BINOP(andl);
      ASSEMBLE_BINOP(andq);
      ASSEMBLE_BINOP(cmpl);
      ASSEMBLE_BINOP(cmpq);
      ASSEMBLE_BINOP(testl);
      ASSEMBLE_BINOP(testq);
      ASSEMBLE_MULT(imull);
      ASSEMBLE_MULT(imulq);
      if (instr->InputAt(1)->IsRegister()) {
        __ imull(i.InputRegister(1));
        __ imull(i.InputOperand(1));
      if (instr->InputAt(1)->IsRegister()) {
        __ mull(i.InputRegister(1));
        __ mull(i.InputOperand(1));
      __ idivl(i.InputRegister(1));
      __ idivq(i.InputRegister(1));
      __ divl(i.InputRegister(1));
      __ divq(i.InputRegister(1));
      ASSEMBLE_BINOP(xorl);
      ASSEMBLE_BINOP(xorq);
      ASSEMBLE_SHIFT(shll, 5);
      ASSEMBLE_SHIFT(shlq, 6);
      ASSEMBLE_SHIFT(shrl, 5);
      ASSEMBLE_SHIFT(shrq, 6);
      ASSEMBLE_SHIFT(sarl, 5);
      ASSEMBLE_SHIFT(sarq, 6);
      ASSEMBLE_SHIFT(rorl, 5);
      ASSEMBLE_SHIFT(rorq, 6);
      if (instr->InputAt(0)->IsRegister()) {
        __ Lzcntl(i.OutputRegister(), i.InputRegister(0));
        __ Lzcntl(i.OutputRegister(), i.InputOperand(0));
      ASSEMBLE_SSE_BINOP(ucomiss);
      ASSEMBLE_SSE_BINOP(addss);
      ASSEMBLE_SSE_BINOP(subss);
      ASSEMBLE_SSE_BINOP(mulss);
      ASSEMBLE_SSE_BINOP(divss);
    case kSSEFloat32Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      // TODO(turbofan): Add AVX version with relaxed register constraints.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 33);
      __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
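      // pcmpeqd sets kScratchDoubleReg to all ones and the logical right
      // shift leaves 0x7FFFFFFF in the low lane, so the andps clears the
      // float's sign bit; the Neg variants below build a lone sign bit the
      // same way (left shift) and xor it in instead.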
    case kSSEFloat32Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      // TODO(turbofan): Add AVX version with relaxed register constraints.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 31);
      __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
    case kSSEFloat32Sqrt:
      ASSEMBLE_SSE_UNOP(sqrtss);
      ASSEMBLE_SSE_BINOP(maxss);
      ASSEMBLE_SSE_BINOP(minss);
    case kSSEFloat32ToFloat64:
      ASSEMBLE_SSE_UNOP(cvtss2sd);
      ASSEMBLE_SSE_BINOP(ucomisd);
      ASSEMBLE_SSE_BINOP(addsd);
      ASSEMBLE_SSE_BINOP(subsd);
      ASSEMBLE_SSE_BINOP(mulsd);
      ASSEMBLE_SSE_BINOP(divsd);
    case kSSEFloat64Mod: {
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      // The following 2 instructions implicitly use rax.
      if (CpuFeatures::IsSupported(SAHF)) {
        CpuFeatureScope sahf_scope(masm(), SAHF);
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
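      // SSE2 has no remainder instruction, so the operands take a detour
      // through the x87 stack and fprem is retried (the parity_even loop
      // above) until the FPU's C2 flag reports that the reduction is done.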
      ASSEMBLE_SSE_BINOP(maxsd);
      ASSEMBLE_SSE_BINOP(minsd);
    case kSSEFloat64Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      // TODO(turbofan): Add AVX version with relaxed register constraints.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 1);
      __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
    case kSSEFloat64Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      // TODO(turbofan): Add AVX version with relaxed register constraints.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 63);
      __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
    case kSSEFloat64Sqrt:
      ASSEMBLE_SSE_UNOP(sqrtsd);
    case kSSEFloat64Round: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      RoundingMode const mode =
          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
    case kSSEFloat64ToFloat32:
      ASSEMBLE_SSE_UNOP(cvtsd2ss);
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
        __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
    case kSSEFloat64ToUint32: {
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
        __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      __ AssertZeroExtended(i.OutputRegister());
    case kSSEInt32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
    case kSSEUint32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ movl(kScratchRegister, i.InputRegister(0));
        __ movl(kScratchRegister, i.InputOperand(0));
      __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
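      // movl zero-extends the 32-bit input, so the 64-bit signed cvtqsi2sd
      // conversion produces the correct unsigned interpretation without any
      // further fixup.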
    case kSSEFloat64ExtractLowWord32:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movl(i.OutputRegister(), i.InputOperand(0));
        __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
    case kSSEFloat64ExtractHighWord32:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movl(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
        __ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
    case kSSEFloat64InsertLowWord32:
      if (instr->InputAt(1)->IsRegister()) {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 0);
        __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
    case kSSEFloat64InsertHighWord32:
      if (instr->InputAt(1)->IsRegister()) {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 1);
        __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
    case kSSEFloat64LoadLowWord32:
      if (instr->InputAt(0)->IsRegister()) {
        __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
        __ movd(i.OutputDoubleRegister(), i.InputOperand(0));
    case kAVXFloat32Cmp: {
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
        __ vucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
      ASSEMBLE_AVX_BINOP(vaddss);
      ASSEMBLE_AVX_BINOP(vsubss);
      ASSEMBLE_AVX_BINOP(vmulss);
      ASSEMBLE_AVX_BINOP(vdivss);
      ASSEMBLE_AVX_BINOP(vmaxss);
      ASSEMBLE_AVX_BINOP(vminss);
    case kAVXFloat64Cmp: {
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
        __ vucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
      ASSEMBLE_AVX_BINOP(vaddsd);
      ASSEMBLE_AVX_BINOP(vsubsd);
      ASSEMBLE_AVX_BINOP(vmulsd);
      ASSEMBLE_AVX_BINOP(vdivsd);
      ASSEMBLE_AVX_BINOP(vmaxsd);
      ASSEMBLE_AVX_BINOP(vminsd);
      ASSEMBLE_MOVX(movsxbl);
      __ AssertZeroExtended(i.OutputRegister());
      ASSEMBLE_MOVX(movzxbl);
      __ AssertZeroExtended(i.OutputRegister());
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
        __ movb(operand, i.InputRegister(index));
      ASSEMBLE_MOVX(movsxwl);
      __ AssertZeroExtended(i.OutputRegister());
      ASSEMBLE_MOVX(movzxwl);
      __ AssertZeroExtended(i.OutputRegister());
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
        __ movw(operand, i.InputRegister(index));
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          if (instr->InputAt(0)->IsRegister()) {
            __ movl(i.OutputRegister(), i.InputRegister(0));
            __ movl(i.OutputRegister(), i.InputOperand(0));
          __ movl(i.OutputRegister(), i.MemoryOperand());
        __ AssertZeroExtended(i.OutputRegister());
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
          __ movl(operand, i.InputRegister(index));
      ASSEMBLE_MOVX(movsxlq);
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
          __ movq(operand, i.InputRegister(index));
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      AddressingMode mode = AddressingModeField::decode(instr->opcode());
      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
      // and addressing mode just happens to work out. The "addl"/"subl" forms
      // in these cases are faster based on measurements.
      if (i.InputRegister(0).is(i.OutputRegister())) {
        if (mode == kMode_MRI) {
          int32_t constant_summand = i.InputInt32(1);
          if (constant_summand > 0) {
            __ addl(i.OutputRegister(), Immediate(constant_summand));
          } else if (constant_summand < 0) {
            __ subl(i.OutputRegister(), Immediate(-constant_summand));
        } else if (mode == kMode_MR1) {
          if (i.InputRegister(1).is(i.OutputRegister())) {
            __ shll(i.OutputRegister(), Immediate(1));
            __ leal(i.OutputRegister(), i.MemoryOperand());
        } else if (mode == kMode_M2) {
          __ shll(i.OutputRegister(), Immediate(1));
        } else if (mode == kMode_M4) {
          __ shll(i.OutputRegister(), Immediate(2));
        } else if (mode == kMode_M8) {
          __ shll(i.OutputRegister(), Immediate(3));
          __ leal(i.OutputRegister(), i.MemoryOperand());
        __ leal(i.OutputRegister(), i.MemoryOperand());
      __ AssertZeroExtended(i.OutputRegister());
      __ leaq(i.OutputRegister(), i.MemoryOperand());
      __ decl(i.OutputRegister());
      __ incl(i.OutputRegister());
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
        if (instr->InputAt(0)->IsRegister()) {
          __ pushq(i.InputRegister(0));
          __ pushq(i.InputOperand(0));
    case kX64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register value = i.InputRegister(2);
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      if (HasImmediateInput(instr, 1)) {
        int index = i.InputInt32(1);
        Register scratch = i.TempRegister(1);
        __ movq(Operand(object, index), value);
        __ RecordWriteContextSlot(object, index, value, scratch, mode);
        Register index = i.InputRegister(1);
        __ movq(Operand(object, index, times_1, 0), value);
        __ leaq(index, Operand(object, index, times_1, 0));
        __ RecordWrite(object, index, value, mode);
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(movb);
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(movw);
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(movl);
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(movss);
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
    case kX64StackCheck:
      __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
}  // NOLINT(readability/fn_size)


// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  X64OperandConverter i(this, instr);
  Label::Distance flabel_distance =
      branch->fallthru ? Label::kNear : Label::kFar;
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  switch (branch->condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
      __ j(equal, tlabel);
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
      __ j(not_equal, tlabel);
    case kSignedLessThan:
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
    case kSignedGreaterThan:
      __ j(greater, tlabel);
    case kUnsignedLessThan:
      __ j(below, tlabel);
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      __ j(overflow, tlabel);
      __ j(no_overflow, tlabel);
  if (!branch->fallthru) __ jmp(flabel, flabel_distance);


void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));


// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = no_condition;
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    case kSignedLessThan:
    case kSignedGreaterThanOrEqual:
    case kSignedLessThanOrEqual:
    case kSignedGreaterThan:
    case kUnsignedLessThan:
    case kUnsignedGreaterThanOrEqual:
    case kUnsignedLessThanOrEqual:
    case kUnsignedGreaterThan:
  __ movzxbl(reg, reg);
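  // The condition is materialized byte-wide (e.g. by setcc), so zero-extend
  // the low byte of |reg| to get a clean 0/1 value in the full register.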


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  X64OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ cmpl(input, Immediate(i.InputInt32(index + 0)));
    __ j(equal, GetLabel(i.InputRpo(index + 1)));
  AssembleArchJump(i.InputRpo(1));


void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  X64OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
  Label** cases = zone()->NewArray<Label*>(case_count);
  for (int32_t index = 0; index < case_count; ++index) {
    cases[index] = GetLabel(i.InputRpo(index + 2));
  Label* const table = AddJumpTable(cases, case_count);
  __ cmpl(input, Immediate(case_count));
  __ j(above_equal, GetLabel(i.InputRpo(1)));
  __ leaq(kScratchRegister, Operand(table));
  __ jmp(Operand(kScratchRegister, input, times_8, 0));
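  // AssembleJumpTable() below emits one 8-byte label address per case via
  // dq(), hence the times_8 scaled index into the table.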


void CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else if (stack_slots > 0) {
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);
    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    // TODO(titzer): cannot address target function == local #-1
    __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
    DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
    stack_slots -= frame()->GetOsrStackSlotCount();

  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
      __ popq(rbp);  // Pop caller's frame pointer.
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
  } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
    __ ret(pop_count * kPointerSize);


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
      __ movq(g.ToOperand(destination), src);
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ Set(dst, src.ToInt32());
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
        case Constant::kFloat32:
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
        case Constant::kFloat64:
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          if (info()->IsOptimizing() &&
              src_object.is_identical_to(info()->context())) {
            // Loading the context from the frame is way cheaper than
            // materializing the actual context heap object address.
            __ movp(dst, Operand(rbp, StandardFrameConstants::kContextOffset));
          } else if (info()->IsOptimizing() &&
                     src_object.is_identical_to(info()->closure())) {
            // Loading the JSFunction from the frame is way cheaper than
            // materializing the actual JSFunction heap object address.
                    Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
            __ Move(dst, src_object);
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): load of labels on x64.
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, Immediate(src_const));
      DCHECK_EQ(Constant::kFloat64, src.type());
      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(kScratchRegister, src_const);
        __ movq(g.ToOperand(destination), kScratchRegister);
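        // x64 cannot store a 64-bit immediate directly to memory, so the
        // constant takes a detour through kScratchRegister.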
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movaps(dst, src);
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movaps(xmm0, src);
    __ movaps(src, dst);
    __ movaps(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(dst, xmm0);
    // No other combinations are possible.


void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  for (size_t index = 0; index < target_count; ++index) {
    __ dq(targets[index]);


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
  MarkLazyDeoptSite();

}  // namespace internal
}  // namespace v8