1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/compiler/code-generator.h"
7 #include "src/arm64/macro-assembler-arm64.h"
8 #include "src/compiler/code-generator-impl.h"
9 #include "src/compiler/gap-resolver.h"
10 #include "src/compiler/node-matchers.h"
11 #include "src/compiler/node-properties-inl.h"
12 #include "src/scopes.h"
21 // Adds Arm64-specific methods to convert InstructionOperands.
// Converts TurboFan InstructionOperands into ARM64 assembler operands
// (Register / Operand / MemOperand), with explicit 32-bit (W) and 64-bit (X)
// views of the same input slot.
// NOTE(review): this chunk is a sampled extraction — original line numbers are
// embedded at the start of each line and interior lines (some case labels,
// default arms, closing braces) are missing from this view. Code below is
// left byte-identical; only comments were added.
22 class Arm64OperandConverter FINAL : public InstructionOperandConverter {
24 Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
25 : InstructionOperandConverter(gen, instr) {}
// 32-bit (W) view of input register |index|.
27 Register InputRegister32(int index) {
28 return ToRegister(instr_->InputAt(index)).W();
// 64-bit inputs use the default X-register conversion.
31 Register InputRegister64(int index) { return InputRegister(index); }
33 Operand InputImmediate(int index) {
34 return ToImmediate(instr_->InputAt(index));
37 Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
39 Operand InputOperand64(int index) { return InputOperand(index); }
41 Operand InputOperand32(int index) {
42 return ToOperand32(instr_->InputAt(index));
45 Register OutputRegister64() { return OutputRegister(); }
47 Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }
// Decodes a 32-bit "operand2" (shifted-register form): the shift kind comes
// from the instruction's addressing mode, the shift amount from the next
// input slot. 32-bit shifts take a 5-bit immediate (InputInt5).
49 Operand InputOperand2_32(int index) {
50 switch (AddressingModeField::decode(instr_->opcode())) {
52 return InputOperand32(index);
53 case kMode_Operand2_R_LSL_I:
54 return Operand(InputRegister32(index), LSL, InputInt5(index + 1));
55 case kMode_Operand2_R_LSR_I:
56 return Operand(InputRegister32(index), LSR, InputInt5(index + 1));
57 case kMode_Operand2_R_ASR_I:
58 return Operand(InputRegister32(index), ASR, InputInt5(index + 1));
59 case kMode_Operand2_R_ROR_I:
60 return Operand(InputRegister32(index), ROR, InputInt5(index + 1));
// 64-bit "operand2": same dispatch, but 6-bit shift immediates (InputInt6).
69 Operand InputOperand2_64(int index) {
70 switch (AddressingModeField::decode(instr_->opcode())) {
72 return InputOperand64(index);
73 case kMode_Operand2_R_LSL_I:
74 return Operand(InputRegister64(index), LSL, InputInt6(index + 1));
75 case kMode_Operand2_R_LSR_I:
76 return Operand(InputRegister64(index), LSR, InputInt6(index + 1));
77 case kMode_Operand2_R_ASR_I:
78 return Operand(InputRegister64(index), ASR, InputInt6(index + 1));
79 case kMode_Operand2_R_ROR_I:
80 return Operand(InputRegister64(index), ROR, InputInt6(index + 1));
// Builds a memory operand starting at input slot |*first_index|; consumes
// either [base, imm] or [base, index-register] depending on addressing mode.
// NOTE(review): case labels for the MRI/MRR modes are outside this view —
// the two return statements below presumably belong to them; confirm against
// the full source.
89 MemOperand MemoryOperand(int* first_index) {
90 const int index = *first_index;
91 switch (AddressingModeField::decode(instr_->opcode())) {
93 case kMode_Operand2_R_LSL_I:
94 case kMode_Operand2_R_LSR_I:
95 case kMode_Operand2_R_ASR_I:
96 case kMode_Operand2_R_ROR_I:
100 return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
103 return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
107 return MemOperand(no_reg);
110 MemOperand MemoryOperand() {
112 return MemoryOperand(&index);
// Register operands become register Operands; everything else is treated as
// an immediate constant.
115 Operand ToOperand(InstructionOperand* op) {
116 if (op->IsRegister()) {
117 return Operand(ToRegister(op));
119 return ToImmediate(op);
122 Operand ToOperand32(InstructionOperand* op) {
123 if (op->IsRegister()) {
124 return Operand(ToRegister(op).W());
126 return ToImmediate(op);
// Materializes a compiler Constant as an assembler Operand. Float constants
// are boxed into TENURED heap numbers via the factory.
129 Operand ToImmediate(InstructionOperand* operand) {
130 Constant constant = ToConstant(operand);
131 switch (constant.type()) {
132 case Constant::kInt32:
133 return Operand(constant.ToInt32());
134 case Constant::kInt64:
135 return Operand(constant.ToInt64());
136 case Constant::kFloat32:
138 isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
139 case Constant::kFloat64:
141 isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
142 case Constant::kExternalReference:
143 return Operand(constant.ToExternalReference());
144 case Constant::kHeapObject:
145 return Operand(constant.ToHeapObject());
// Maps a stack-slot operand to a frame-relative MemOperand; the base is the
// current stack pointer or fp depending on what the linkage computed.
151 MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
153 DCHECK(!op->IsRegister());
154 DCHECK(!op->IsDoubleRegister());
155 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
156 // The linkage computes where all spill slots are located.
157 FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
158 return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
// Emits a shift instruction (Lsl/Lsr/Asr/Ror) in either register-register or
// register-immediate form, depending on whether input 1 is a register.
// |width| selects the 32- or 64-bit register accessors by token pasting.
// (Comments must stay outside the macro body: its lines are joined by
// trailing backslashes.)
164 #define ASSEMBLE_SHIFT(asm_instr, width) \
166 if (instr->InputAt(1)->IsRegister()) { \
167 __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), \
168 i.InputRegister##width(1)); \
170 int64_t imm = i.InputOperand##width(1).immediate().value(); \
171 __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \
// Emits a test-bit-and-branch (Tbz/Tbnz) to the true block (input 2), then an
// unconditional branch to the false block (input 3) unless the false block is
// next in assembly order and we can simply fall through.
176 #define ASSEMBLE_TEST_AND_BRANCH(asm_instr, width) \
178 bool fallthrough = IsNextInAssemblyOrder(i.InputRpo(3)); \
179 __ asm_instr(i.InputRegister##width(0), i.InputInt6(1), \
180 code_->GetLabel(i.InputRpo(2))); \
181 if (!fallthrough) __ B(code_->GetLabel(i.InputRpo(3))); \
185 // Assembles an instruction after register allocation, producing machine code.
// Dispatches on the architecture opcode and emits the corresponding ARM64
// instruction sequence via the macro assembler.
// NOTE(review): this chunk is missing many interior lines (case labels,
// break statements, closing braces) — original numbering is non-contiguous.
// Several emit lines below have lost their `case kArm64...:` label to the
// extraction; the groupings in the added comments are inferences from the
// mnemonics and should be confirmed against the full source.
186 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
187 Arm64OperandConverter i(this, instr);
188 InstructionCode opcode = instr->opcode();
189 switch (ArchOpcodeField::decode(opcode)) {
190 case kArchCallCodeObject: {
191 EnsureSpaceForLazyDeopt();
// Immediate target: call the code object directly; otherwise compute the
// entry address past the Code header and call through the register.
192 if (instr->InputAt(0)->IsImmediate()) {
193 __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
194 RelocInfo::CODE_TARGET);
196 Register target = i.InputRegister(0);
197 __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
200 AddSafepointAndDeopt(instr);
203 case kArchCallJSFunction: {
204 EnsureSpaceForLazyDeopt();
205 Register func = i.InputRegister(0);
206 if (FLAG_debug_code) {
207 // Check the function's context matches the context argument.
208 UseScratchRegisterScope scope(masm());
209 Register temp = scope.AcquireX();
210 __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
212 __ Assert(eq, kWrongFunctionContext);
214 __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
216 AddSafepointAndDeopt(instr);
// Unconditional jump to the block given by input 0 (kArchJmp, presumably).
220 __ B(code_->GetLabel(i.InputRpo(0)));
223 // don't emit code for nops.
228 case kArchStackPointer:
229 __ mov(i.OutputRegister(), masm()->StackPointer());
231 case kArchTruncateDoubleToI:
232 __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
// Float64 rounding: Frintp = toward +inf (ceil), Frintm = toward -inf
// (floor), Frintz = toward zero (truncate), Frinta = ties away from zero.
234 case kArm64Float64Ceil:
235 __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
237 case kArm64Float64Floor:
238 __ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
240 case kArm64Float64RoundTruncate:
241 __ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
243 case kArm64Float64RoundTiesAway:
244 __ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
// Integer ALU ops. 32-bit add/sub pick the flag-setting form (Adds/Subs)
// when the instruction's flags mode requires it.
247 __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
250 if (FlagsModeField::decode(opcode) != kFlags_none) {
251 __ Adds(i.OutputRegister32(), i.InputRegister32(0),
252 i.InputOperand2_32(1));
254 __ Add(i.OutputRegister32(), i.InputRegister32(0),
255 i.InputOperand2_32(1));
259 __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
262 __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
265 __ Bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
268 __ Bic(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
// Multiplies and multiply-accumulate variants (64-bit then 32-bit forms).
271 __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
274 __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
277 __ Smull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
280 __ Umull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
283 __ Madd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
287 __ Madd(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
288 i.InputRegister32(2));
291 __ Msub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
295 __ Msub(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
296 i.InputRegister32(2));
299 __ Mneg(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
302 __ Mneg(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
305 __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
308 __ Sdiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
311 __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
314 __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
// Modulus: ARM64 has no remainder instruction, so compute
// rem = lhs - (lhs / rhs) * rhs via Sdiv/Udiv into a scratch then Msub.
317 UseScratchRegisterScope scope(masm());
318 Register temp = scope.AcquireX();
319 __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
320 __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
324 UseScratchRegisterScope scope(masm());
325 Register temp = scope.AcquireW();
326 __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
327 __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
328 i.InputRegister32(0));
332 UseScratchRegisterScope scope(masm());
333 Register temp = scope.AcquireX();
334 __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
335 __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
339 UseScratchRegisterScope scope(masm());
340 Register temp = scope.AcquireW();
341 __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
342 __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
343 i.InputRegister32(0));
// Bitwise NOT expressed as Orn with the zero register; negation via Neg.
346 // TODO(dcarney): use mvn instr??
348 __ Orn(i.OutputRegister(), xzr, i.InputOperand(0));
351 __ Orn(i.OutputRegister32(), wzr, i.InputOperand32(0));
354 __ Neg(i.OutputRegister(), i.InputOperand(0));
357 __ Neg(i.OutputRegister32(), i.InputOperand32(0));
360 __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
363 __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
366 __ Orn(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
369 __ Orn(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
372 __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
375 __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
378 __ Eon(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
381 __ Eon(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
384 __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
387 if (FlagsModeField::decode(opcode) != kFlags_none) {
388 __ Subs(i.OutputRegister32(), i.InputRegister32(0),
389 i.InputOperand2_32(1));
391 __ Sub(i.OutputRegister32(), i.InputRegister32(0),
392 i.InputOperand2_32(1));
// Shifts and rotates via the ASSEMBLE_SHIFT macro (reg or imm form).
396 ASSEMBLE_SHIFT(Lsl, 64);
399 ASSEMBLE_SHIFT(Lsl, 32);
402 ASSEMBLE_SHIFT(Lsr, 64);
405 ASSEMBLE_SHIFT(Lsr, 32);
408 ASSEMBLE_SHIFT(Asr, 64);
411 ASSEMBLE_SHIFT(Asr, 32);
414 ASSEMBLE_SHIFT(Ror, 64);
417 ASSEMBLE_SHIFT(Ror, 32);
// Mov32 zero-extends via the W view; Sxtw sign-extends 32 -> 64 bits.
420 __ Mov(i.OutputRegister32(), i.InputRegister32(0));
423 __ Sxtw(i.OutputRegister(), i.InputRegister32(0));
426 __ Ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
430 __ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt8(1),
434 ASSEMBLE_TEST_AND_BRANCH(Tbz, 64);
437 ASSEMBLE_TEST_AND_BRANCH(Tbz, 32);
440 ASSEMBLE_TEST_AND_BRANCH(Tbnz, 64);
443 ASSEMBLE_TEST_AND_BRANCH(Tbnz, 32);
// Stack argument plumbing: Poke writes one slot; PokePair writes two
// adjacent slots (PokePairZero pairs the value with xzr).
446 int words = MiscField::decode(instr->opcode());
451 int slot = MiscField::decode(instr->opcode());
452 Operand operand(slot * kPointerSize);
453 __ Poke(i.InputRegister(0), operand);
456 case kArm64PokePairZero: {
457 // TODO(dcarney): test slot offset and register order.
458 int slot = MiscField::decode(instr->opcode()) - 1;
459 __ PokePair(i.InputRegister(0), xzr, slot * kPointerSize);
462 case kArm64PokePair: {
463 int slot = MiscField::decode(instr->opcode()) - 1;
464 __ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
// Flag-setting comparisons (consumed by branch/boolean assembly below).
468 __ Cmp(i.InputRegister(0), i.InputOperand(1));
471 __ Cmp(i.InputRegister32(0), i.InputOperand32(1));
474 __ Cmn(i.InputRegister(0), i.InputOperand(1));
477 __ Cmn(i.InputRegister32(0), i.InputOperand32(1));
480 __ Tst(i.InputRegister(0), i.InputOperand(1));
483 __ Tst(i.InputRegister32(0), i.InputOperand32(1));
485 case kArm64Float64Cmp:
486 __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
488 case kArm64Float64Add:
489 __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
490 i.InputDoubleRegister(1));
492 case kArm64Float64Sub:
493 __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
494 i.InputDoubleRegister(1));
496 case kArm64Float64Mul:
497 __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
498 i.InputDoubleRegister(1));
500 case kArm64Float64Div:
501 __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
502 i.InputDoubleRegister(1));
504 case kArm64Float64Mod: {
505 // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
// fmod has no ARM64 instruction; call out to the C runtime with the
// standard d0/d1 -> d0 C calling convention (asserted below).
506 FrameScope scope(masm(), StackFrame::MANUAL);
507 DCHECK(d0.is(i.InputDoubleRegister(0)));
508 DCHECK(d1.is(i.InputDoubleRegister(1)));
509 DCHECK(d0.is(i.OutputDoubleRegister()));
510 // TODO(dcarney): make sure this saves all relevant registers.
511 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
515 case kArm64Float64Sqrt:
516 __ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
// Float <-> float and float <-> int conversions.
518 case kArm64Float32ToFloat64:
519 __ Fcvt(i.OutputDoubleRegister(), i.InputDoubleRegister(0).S());
521 case kArm64Float64ToFloat32:
522 __ Fcvt(i.OutputDoubleRegister().S(), i.InputDoubleRegister(0));
524 case kArm64Float64ToInt32:
525 __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
527 case kArm64Float64ToUint32:
528 __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
530 case kArm64Int32ToFloat64:
531 __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
533 case kArm64Uint32ToFloat64:
534 __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
// Loads/stores by width: byte (Ldrb/Ldrsb/Strb), halfword (Ldrh/Ldrsh/
// Strh), word (W regs), doubleword (X regs), then S- and D-register FP
// forms. Stores take the value from input slot 2 (slots 0-1 form the
// address via MemoryOperand()).
537 __ Ldrb(i.OutputRegister(), i.MemoryOperand());
540 __ Ldrsb(i.OutputRegister(), i.MemoryOperand());
543 __ Strb(i.InputRegister(2), i.MemoryOperand());
546 __ Ldrh(i.OutputRegister(), i.MemoryOperand());
549 __ Ldrsh(i.OutputRegister(), i.MemoryOperand());
552 __ Strh(i.InputRegister(2), i.MemoryOperand());
555 __ Ldr(i.OutputRegister32(), i.MemoryOperand());
558 __ Str(i.InputRegister32(2), i.MemoryOperand());
561 __ Ldr(i.OutputRegister(), i.MemoryOperand());
564 __ Str(i.InputRegister(2), i.MemoryOperand());
567 __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
570 __ Str(i.InputDoubleRegister(2).S(), i.MemoryOperand());
573 __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
576 __ Str(i.InputDoubleRegister(2), i.MemoryOperand());
578 case kArm64StoreWriteBarrier: {
579 Register object = i.InputRegister(0);
580 Register index = i.InputRegister(1);
581 Register value = i.InputRegister(2);
// Store value at object + sign-extended index, then record the write so
// the GC sees the new reference. Note |index| is clobbered to hold the
// absolute address before RecordWrite.
582 __ Add(index, object, Operand(index, SXTW));
583 __ Str(value, MemOperand(index));
584 SaveFPRegsMode mode =
585 frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
586 // TODO(dcarney): we shouldn't test write barriers from c calls.
587 LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
588 UseScratchRegisterScope scope(masm());
589 Register temp = no_reg;
// When running on csp, lr must be preserved around RecordWrite; csp
// requires 16-byte alignment, hence pushing lr paired with a scratch.
590 if (csp.is(masm()->StackPointer())) {
591 temp = scope.AcquireX();
592 lr_status = kLRHasBeenSaved;
593 __ Push(lr, temp); // Need to push a pair
595 __ RecordWrite(object, index, value, lr_status, mode);
596 if (csp.is(masm()->StackPointer())) {
605 // Assemble branches after this instruction.
// Emits a conditional branch based on the flags set by the preceding
// instruction. NOTE(review): the condition-code mapping bodies (B(cc, ...)
// per case) are missing from this chunk — only the case labels survived.
606 void CodeGenerator::AssembleArchBranch(Instruction* instr,
607 FlagsCondition condition) {
608 Arm64OperandConverter i(this, instr);
611 // Emit a branch. The true and false targets are always the last two inputs
612 // to the instruction.
613 BasicBlock::RpoNumber tblock =
614 i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
615 BasicBlock::RpoNumber fblock =
616 i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
// If the false block immediately follows, branch only on the true condition
// and fall through; |done| is presumably a local label bound at the end
// (declaration not visible in this chunk).
617 bool fallthru = IsNextInAssemblyOrder(fblock);
618 Label* tlabel = code()->GetLabel(tblock);
619 Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
621 case kUnorderedEqual:
627 case kUnorderedNotEqual:
633 case kSignedLessThan:
636 case kSignedGreaterThanOrEqual:
639 case kSignedLessThanOrEqual:
642 case kSignedGreaterThan:
645 case kUnorderedLessThan:
648 case kUnsignedLessThan:
651 case kUnorderedGreaterThanOrEqual:
654 case kUnsignedGreaterThanOrEqual:
657 case kUnorderedLessThanOrEqual:
660 case kUnsignedLessThanOrEqual:
663 case kUnorderedGreaterThan:
666 case kUnsignedGreaterThan:
676 if (!fallthru) __ B(flabel); // no fallthru to flabel.
681 // Assemble boolean materializations after this instruction.
// Produces a 0/1 value in the last output register from the current flags
// (presumably via Cset with the condition mapped per case — those bodies
// are missing from this chunk; only the case labels survived).
682 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
683 FlagsCondition condition) {
684 Arm64OperandConverter i(this, instr);
687 // Materialize a full 64-bit 1 or 0 value. The result register is always the
688 // last output of the instruction.
690 DCHECK_NE(0, instr->OutputCount());
691 Register reg = i.OutputRegister(instr->OutputCount() - 1);
694 case kUnorderedEqual:
702 case kUnorderedNotEqual:
710 case kSignedLessThan:
713 case kSignedGreaterThanOrEqual:
716 case kSignedLessThanOrEqual:
719 case kSignedGreaterThan:
722 case kUnorderedLessThan:
727 case kUnsignedLessThan:
730 case kUnorderedGreaterThanOrEqual:
735 case kUnsignedGreaterThanOrEqual:
738 case kUnorderedLessThanOrEqual:
743 case kUnsignedLessThanOrEqual:
746 case kUnorderedGreaterThan:
751 case kUnsignedGreaterThan:
// Emits a call to the lazy deoptimization entry for |deoptimization_id|.
767 void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
768 Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
769 isolate(), deoptimization_id, Deoptimizer::LAZY);
770 __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
774 // TODO(dcarney): increase stack slots in frame once before first use.
// Rounds a slot count up to an even number so csp stays 16-byte aligned
// (kPointerSize is 8 on ARM64). The `return stack_slots;` is outside this
// chunk's view.
775 static int AlignedStackSlots(int stack_slots) {
776 if (stack_slots & 1) stack_slots++;
// Emits the frame-building prologue. Three frame kinds are handled:
// C entry frames (kCallAddress, on csp with callee-saved pushes), JS
// function frames (on jssp, with receiver patching for sloppy mode), and a
// third kind whose branch header is outside this chunk (also on jssp).
// Finally, spill-slot space is reserved.
781 void CodeGenerator::AssemblePrologue() {
782 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
783 if (descriptor->kind() == CallDescriptor::kCallAddress) {
784 __ SetStackPointer(csp);
787 // TODO(dcarney): correct callee saved registers.
788 __ PushCalleeSavedRegisters();
789 frame()->SetRegisterSaveAreaSize(20 * kPointerSize);
790 } else if (descriptor->IsJSFunctionCall()) {
791 CompilationInfo* info = this->info();
792 __ SetStackPointer(jssp);
793 __ Prologue(info->IsCodePreAgingActive());
794 frame()->SetRegisterSaveAreaSize(
795 StandardFrameConstants::kFixedFrameSizeFromFp);
797 // Sloppy mode functions and builtins need to replace the receiver with the
798 // global proxy when called as functions (without an explicit receiver
800 // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
801 if (info->strict_mode() == SLOPPY && !info->is_native()) {
// If the receiver slot holds undefined, overwrite it with the global
// proxy; the `Label ok;` declaration/bind is outside this chunk's view.
803 // +2 for return address and saved frame pointer.
804 int receiver_slot = info->scope()->num_parameters() + 2;
805 __ Ldr(x10, MemOperand(fp, receiver_slot * kXRegSize));
806 __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
807 __ Ldr(x10, GlobalObjectMemOperand());
808 __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
809 __ Str(x10, MemOperand(fp, receiver_slot * kXRegSize));
814 __ SetStackPointer(jssp);
816 frame()->SetRegisterSaveAreaSize(
817 StandardFrameConstants::kFixedFrameSizeFromFp);
// Reserve spill-slot space; the csp path aligns the slot count to keep the
// system stack pointer 16-byte aligned.
819 int stack_slots = frame()->GetSpillSlotCount();
820 if (stack_slots > 0) {
821 Register sp = __ StackPointer();
823 __ Sub(sp, sp, stack_slots * kPointerSize);
825 __ Sub(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
// Emits the epilogue: unwinds spill slots and callee-saved registers for C
// entry frames, then (per the pop_count computation) drops JS arguments on
// return. The Ret emission and the non-kCallAddress branch are outside this
// chunk's view.
830 void CodeGenerator::AssembleReturn() {
831 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
832 if (descriptor->kind() == CallDescriptor::kCallAddress) {
833 if (frame()->GetRegisterSaveAreaSize() > 0) {
834 // Remove this frame's spill slots first.
835 int stack_slots = frame()->GetSpillSlotCount();
836 if (stack_slots > 0) {
// Mirror of the prologue's aligned csp reservation.
837 __ Add(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
839 // Restore registers.
840 // TODO(dcarney): correct callee saved registers.
841 __ PopCalleeSavedRegisters();
// JS calls pop their parameter count; the alternative arm of this ternary
// is outside this chunk's view.
849 int pop_count = descriptor->IsJSFunctionCall()
850 ? static_cast<int>(descriptor->JSParameterCount())
// Implements a single gap-resolver move between any supported pair of
// locations: register, stack slot, constant, double register, double stack
// slot. Memory-to-memory moves go through a scratch register.
858 void CodeGenerator::AssembleMove(InstructionOperand* source,
859 InstructionOperand* destination) {
860 Arm64OperandConverter g(this, NULL);
861 // Dispatch on the source and destination operand kinds. Not all
862 // combinations are possible.
863 if (source->IsRegister()) {
864 DCHECK(destination->IsRegister() || destination->IsStackSlot());
865 Register src = g.ToRegister(source);
866 if (destination->IsRegister()) {
867 __ Mov(g.ToRegister(destination), src);
869 __ Str(src, g.ToMemOperand(destination, masm()));
871 } else if (source->IsStackSlot()) {
872 MemOperand src = g.ToMemOperand(source, masm());
873 DCHECK(destination->IsRegister() || destination->IsStackSlot());
874 if (destination->IsRegister()) {
875 __ Ldr(g.ToRegister(destination), src);
// Slot-to-slot: bounce through an acquired X scratch register (the Ldr
// into |temp| is outside this chunk's view).
877 UseScratchRegisterScope scope(masm());
878 Register temp = scope.AcquireX();
880 __ Str(temp, g.ToMemOperand(destination, masm()));
882 } else if (source->IsConstant()) {
883 Constant src = g.ToConstant(ConstantOperand::cast(source));
884 if (destination->IsRegister() || destination->IsStackSlot()) {
885 UseScratchRegisterScope scope(masm());
// For a stack-slot destination the constant is first materialized in a
// register (presumably a scratch — the ternary's else-arm is outside this
// chunk's view) and then stored.
886 Register dst = destination->IsRegister() ? g.ToRegister(destination)
888 if (src.type() == Constant::kHeapObject) {
889 __ LoadObject(dst, src.ToHeapObject());
891 __ Mov(dst, g.ToImmediate(source));
893 if (destination->IsStackSlot()) {
894 __ Str(dst, g.ToMemOperand(destination, masm()));
896 } else if (src.type() == Constant::kFloat32) {
897 if (destination->IsDoubleRegister()) {
// Float32 constants use the S (single) view of the FP register.
898 FPRegister dst = g.ToDoubleRegister(destination).S();
899 __ Fmov(dst, src.ToFloat32());
901 DCHECK(destination->IsDoubleStackSlot());
902 UseScratchRegisterScope scope(masm());
903 FPRegister temp = scope.AcquireS();
904 __ Fmov(temp, src.ToFloat32());
905 __ Str(temp, g.ToMemOperand(destination, masm()));
908 DCHECK_EQ(Constant::kFloat64, src.type());
909 if (destination->IsDoubleRegister()) {
910 FPRegister dst = g.ToDoubleRegister(destination);
911 __ Fmov(dst, src.ToFloat64());
913 DCHECK(destination->IsDoubleStackSlot());
914 UseScratchRegisterScope scope(masm());
915 FPRegister temp = scope.AcquireD();
916 __ Fmov(temp, src.ToFloat64());
917 __ Str(temp, g.ToMemOperand(destination, masm()));
920 } else if (source->IsDoubleRegister()) {
921 FPRegister src = g.ToDoubleRegister(source);
922 if (destination->IsDoubleRegister()) {
923 FPRegister dst = g.ToDoubleRegister(destination);
926 DCHECK(destination->IsDoubleStackSlot());
927 __ Str(src, g.ToMemOperand(destination, masm()));
929 } else if (source->IsDoubleStackSlot()) {
930 DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
931 MemOperand src = g.ToMemOperand(source, masm());
932 if (destination->IsDoubleRegister()) {
933 __ Ldr(g.ToDoubleRegister(destination), src);
// Double slot-to-slot: bounce through a D scratch register (the Ldr into
// |temp| is outside this chunk's view).
935 UseScratchRegisterScope scope(masm());
936 FPRegister temp = scope.AcquireD();
938 __ Str(temp, g.ToMemOperand(destination, masm()));
// Implements a gap-resolver swap of two locations using scratch registers.
// NOTE(review): the actual exchange sequences (Mov/Ldr/Str triples) between
// the acquired scratches are missing from this chunk; only the setup lines
// survived.
946 void CodeGenerator::AssembleSwap(InstructionOperand* source,
947 InstructionOperand* destination) {
948 Arm64OperandConverter g(this, NULL);
949 // Dispatch on the source and destination operand kinds. Not all
950 // combinations are possible.
951 if (source->IsRegister()) {
952 // Register-register.
953 UseScratchRegisterScope scope(masm());
954 Register temp = scope.AcquireX();
955 Register src = g.ToRegister(source);
956 if (destination->IsRegister()) {
957 Register dst = g.ToRegister(destination);
962 DCHECK(destination->IsStackSlot());
963 MemOperand dst = g.ToMemOperand(destination, masm());
// Slot-to-slot swaps need two scratches so both values can be in flight.
968 } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
969 UseScratchRegisterScope scope(masm());
970 CPURegister temp_0 = scope.AcquireX();
971 CPURegister temp_1 = scope.AcquireX();
972 MemOperand src = g.ToMemOperand(source, masm());
973 MemOperand dst = g.ToMemOperand(destination, masm());
978 } else if (source->IsDoubleRegister()) {
979 UseScratchRegisterScope scope(masm());
980 FPRegister temp = scope.AcquireD();
981 FPRegister src = g.ToDoubleRegister(source);
982 if (destination->IsDoubleRegister()) {
983 FPRegister dst = g.ToDoubleRegister(destination);
988 DCHECK(destination->IsDoubleStackSlot());
989 MemOperand dst = g.ToMemOperand(destination, masm());
995 // No other combinations are possible.
// Emits a recognizable nop (movz to the zero register) used as a marker for
// smi code inlining.
1001 void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
// Pads the instruction stream so the deoptimizer can always patch a call of
// Deoptimizer::patch_size() bytes over the previous lazy-deopt site without
// overwriting the instruction being assembled next.
1004 void CodeGenerator::EnsureSpaceForLazyDeopt() {
1005 int space_needed = Deoptimizer::patch_size();
// Stubs are never lazily deoptimized, so no padding is needed for them.
1006 if (!info()->IsStub()) {
1007 // Ensure that we have enough space after the previous lazy-bailout
1008 // instruction for patching the code here.
1009 intptr_t current_pc = masm()->pc_offset();
1011 if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
1012 intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
1013 DCHECK((padding_size % kInstructionSize) == 0);
1014 InstructionAccurateScope instruction_accurate(
1015 masm(), padding_size / kInstructionSize);
// Emit nops (presumably — the loop body line is outside this chunk's
// view) one instruction at a time until the gap is filled.
1017 while (padding_size > 0) {
1019 padding_size -= kInstructionSize;
1023 MarkLazyDeoptSite();
1028 } // namespace compiler
1029 } // namespace internal