// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/arm64/macro-assembler-arm64.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->

// Adds Arm64-specific methods to convert InstructionOperands.
class Arm64OperandConverter FINAL : public InstructionOperandConverter {
 public:
  Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  DoubleRegister InputFloat32Register(size_t index) {
    return InputDoubleRegister(index).S();
  }

  DoubleRegister InputFloat64Register(size_t index) {
    return InputDoubleRegister(index);
  }

  DoubleRegister OutputFloat32Register() { return OutputDoubleRegister().S(); }

  DoubleRegister OutputFloat64Register() { return OutputDoubleRegister(); }

  Register InputRegister32(size_t index) {
    return ToRegister(instr_->InputAt(index)).W();
  }

  Register InputRegister64(size_t index) { return InputRegister(index); }

  Operand InputImmediate(size_t index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(size_t index) {
    return ToOperand(instr_->InputAt(index));
  }

  Operand InputOperand64(size_t index) { return InputOperand(index); }

  Operand InputOperand32(size_t index) {
    return ToOperand32(instr_->InputAt(index));
  }

  Register OutputRegister64() { return OutputRegister(); }

  Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }
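
  // The InputOperand2_* helpers below decode ARM64's flexible second
  // operand: depending on the instruction's addressing mode, input 1 is a
  // plain register or immediate, a register shifted by an immediate
  // (LSL/LSR/ASR/ROR), or a register zero-extended from a byte or half-word
  // (UXTB/UXTH), so a single data-processing instruction can fold the
  // modifier in.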
  Operand InputOperand2_32(size_t index) {
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        return InputOperand32(index);
      case kMode_Operand2_R_LSL_I:
        return Operand(InputRegister32(index), LSL, InputInt5(index + 1));
      case kMode_Operand2_R_LSR_I:
        return Operand(InputRegister32(index), LSR, InputInt5(index + 1));
      case kMode_Operand2_R_ASR_I:
        return Operand(InputRegister32(index), ASR, InputInt5(index + 1));
      case kMode_Operand2_R_ROR_I:
        return Operand(InputRegister32(index), ROR, InputInt5(index + 1));
      case kMode_Operand2_R_UXTB:
        return Operand(InputRegister32(index), UXTB);
      case kMode_Operand2_R_UXTH:
        return Operand(InputRegister32(index), UXTH);
      case kMode_MRI:
      case kMode_MRR:
        break;
    }
    UNREACHABLE();
    return Operand(-1);
  }

  Operand InputOperand2_64(size_t index) {
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        return InputOperand64(index);
      case kMode_Operand2_R_LSL_I:
        return Operand(InputRegister64(index), LSL, InputInt6(index + 1));
      case kMode_Operand2_R_LSR_I:
        return Operand(InputRegister64(index), LSR, InputInt6(index + 1));
      case kMode_Operand2_R_ASR_I:
        return Operand(InputRegister64(index), ASR, InputInt6(index + 1));
      case kMode_Operand2_R_ROR_I:
        return Operand(InputRegister64(index), ROR, InputInt6(index + 1));
      case kMode_Operand2_R_UXTB:
        return Operand(InputRegister64(index), UXTB);
      case kMode_Operand2_R_UXTH:
        return Operand(InputRegister64(index), UXTH);
      case kMode_MRI:
      case kMode_MRR:
        break;
    }
    UNREACHABLE();
    return Operand(-1);
  }
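
  // Memory operands use the kMode_MRI (base register + immediate offset) and
  // kMode_MRR (base register + register offset) addressing modes; the
  // Operand2 shift/extend modes above are only valid for data-processing
  // instructions.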
  MemOperand MemoryOperand(size_t* first_index) {
    const size_t index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Operand2_R_LSL_I:
      case kMode_Operand2_R_LSR_I:
      case kMode_Operand2_R_ASR_I:
      case kMode_Operand2_R_ROR_I:
      case kMode_Operand2_R_UXTB:
      case kMode_Operand2_R_UXTH:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
    return MemOperand(no_reg);
  }

  MemOperand MemoryOperand(size_t first_index = 0) {
    return MemoryOperand(&first_index);
  }

  Operand ToOperand(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return ToImmediate(op);
  }

  Operand ToOperand32(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op).W());
    }
    return ToImmediate(op);
  }

  Operand ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kInt64:
        return Operand(constant.ToInt64());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
        return Operand(constant.ToExternalReference());
      case Constant::kHeapObject:
        return Operand(constant.ToHeapObject());
      case Constant::kRpoNumber:
        UNREACHABLE();  // TODO(dcarney): RPO immediates on arm64.
        break;
    }
    UNREACHABLE();
    return Operand(-1);
  }

  MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
    DCHECK(op != NULL);
    DCHECK(!op->IsRegister());
    DCHECK(!op->IsDoubleRegister());
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
                      offset.offset());
  }
};


namespace {
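
// Out-of-line fragments are emitted after the main instruction stream. The
// checked load macros below branch to these fragments only when the bounds
// check fails, so the in-bounds fast path falls through while the slow path
// (materializing a quiet NaN or zero result) stays out of the way.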
class OutOfLineLoadNaN32 FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadNaN32(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    __ Fmov(result_, std::numeric_limits<float>::quiet_NaN());
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineLoadNaN64 FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadNaN64(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    __ Fmov(result_, std::numeric_limits<double>::quiet_NaN());
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineLoadZero FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ Mov(result_, 0); }

 private:
  Register const result_;
};

Condition FlagsConditionToCondition(FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
      return gt;
    case kUnsignedLessThan:
      return lo;
    case kUnsignedGreaterThanOrEqual:
      return hs;
    case kUnsignedLessThanOrEqual:
      return ls;
    case kUnsignedGreaterThan:
      return hi;
    case kOverflow:
      return vs;
    case kNotOverflow:
      return vc;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      break;
  }
  UNREACHABLE();
  return nv;
}

}  // namespace

#define ASSEMBLE_CHECKED_LOAD_FLOAT(width)                         \
  do {                                                             \
    auto result = i.OutputFloat##width##Register();                \
    auto buffer = i.InputRegister(0);                              \
    auto offset = i.InputRegister32(1);                            \
    auto length = i.InputOperand32(2);                             \
    __ Cmp(offset, length);                                        \
    auto ool = new (zone()) OutOfLineLoadNaN##width(this, result); \
    __ B(hs, ool->entry());                                        \
    __ Ldr(result, MemOperand(buffer, offset, UXTW));              \
    __ Bind(ool->exit());                                          \
  } while (0)
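
// Note that the bounds check above relies on unsigned comparison: after
// Cmp(offset, length), B(hs, ...) (unsigned >=) takes the out-of-line path
// both when offset >= length and when offset is negative as a signed value
// (it wraps to a large unsigned number), so one compare guards both ends of
// the range.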

#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)             \
  do {                                                       \
    auto result = i.OutputRegister32();                      \
    auto buffer = i.InputRegister(0);                        \
    auto offset = i.InputRegister32(1);                      \
    auto length = i.InputOperand32(2);                       \
    __ Cmp(offset, length);                                  \
    auto ool = new (zone()) OutOfLineLoadZero(this, result); \
    __ B(hs, ool->entry());                                  \
    __ asm_instr(result, MemOperand(buffer, offset, UXTW));  \
    __ Bind(ool->exit());                                    \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_FLOAT(width)          \
  do {                                               \
    auto buffer = i.InputRegister(0);                \
    auto offset = i.InputRegister32(1);              \
    auto length = i.InputOperand32(2);               \
    auto value = i.InputFloat##width##Register(3);   \
    __ Cmp(offset, length);                          \
    Label done;                                      \
    __ B(hs, &done);                                 \
    __ Str(value, MemOperand(buffer, offset, UXTW)); \
    __ Bind(&done);                                  \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)        \
  do {                                                   \
    auto buffer = i.InputRegister(0);                    \
    auto offset = i.InputRegister32(1);                  \
    auto length = i.InputOperand32(2);                   \
    auto value = i.InputRegister32(3);                   \
    __ Cmp(offset, length);                              \
    Label done;                                          \
    __ B(hs, &done);                                     \
    __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
    __ Bind(&done);                                      \
  } while (0)

#define ASSEMBLE_SHIFT(asm_instr, width)                                       \
  do {                                                                         \
    if (instr->InputAt(1)->IsRegister()) {                                     \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0),       \
                   i.InputRegister##width(1));                                 \
    } else {                                                                   \
      int64_t imm = i.InputOperand##width(1).immediate().value();              \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \
    }                                                                          \
  } while (0)
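
// As an illustration (the register names and shift amount here are made up),
// ASSEMBLE_SHIFT(Lsl, 32) expands to either
//   __ Lsl(w0, w1, w2);  // shift amount in a register
// or
//   __ Lsl(w0, w1, 3);   // shift amount as an immediate
// depending on whether the register allocator gave input 1 a register or an
// immediate operand.
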
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();
  switch (ArchOpcodeField::decode(opcode)) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        Register target = i.InputRegister(0);
        __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
        __ Call(target);
      }
      RecordCallPosition(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        UseScratchRegisterScope scope(masm());
        Register temp = scope.AcquireX();
        __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, temp);
        __ Assert(eq, kWrongFunctionContext);
      }
      __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(x10);
      RecordCallPosition(instr);
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
      break;
    }
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), masm()->StackPointer());
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64RoundDown:
      __ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64RoundTiesAway:
      __ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64RoundTruncate:
      __ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64RoundUp:
      __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Add:
      __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
      break;
    case kArm64Add32:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Adds(i.OutputRegister32(), i.InputRegister32(0),
                i.InputOperand2_32(1));
      } else {
        __ Add(i.OutputRegister32(), i.InputRegister32(0),
               i.InputOperand2_32(1));
      }
      break;
    case kArm64And:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
      break;
    case kArm64And32:
      __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
      break;
    case kArm64Bic:
      __ Bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
      break;
    case kArm64Bic32:
      __ Bic(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
      break;
    case kArm64Mul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Mul32:
      __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Smull:
      __ Smull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Umull:
      __ Umull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Madd:
      __ Madd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputRegister(2));
      break;
    case kArm64Madd32:
      __ Madd(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
              i.InputRegister32(2));
      break;
    case kArm64Msub:
      __ Msub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputRegister(2));
      break;
    case kArm64Msub32:
      __ Msub(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
              i.InputRegister32(2));
      break;
    case kArm64Mneg:
      __ Mneg(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Mneg32:
      __ Mneg(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Idiv:
      __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Idiv32:
      __ Sdiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Udiv:
      __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Udiv32:
      __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Imod: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Imod32: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
    case kArm64Umod: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Umod32: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
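    // ARM64 has no integer remainder instruction, so the four *mod cases
    // above compute dividend - (dividend / divisor) * divisor: Sdiv or Udiv
    // produces the quotient and Msub folds the multiply and subtract into a
    // single instruction.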
    // TODO(dcarney): use mvn instr??
    case kArm64Not:
      __ Orn(i.OutputRegister(), xzr, i.InputOperand(0));
      break;
    case kArm64Not32:
      __ Orn(i.OutputRegister32(), wzr, i.InputOperand32(0));
      break;
    case kArm64Neg:
      __ Neg(i.OutputRegister(), i.InputOperand(0));
      break;
    case kArm64Neg32:
      __ Neg(i.OutputRegister32(), i.InputOperand32(0));
      break;
    case kArm64Or:
      __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
      break;
    case kArm64Or32:
      __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
      break;
    case kArm64Orn:
      __ Orn(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
      break;
    case kArm64Orn32:
      __ Orn(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
      break;
    case kArm64Eor:
      __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
      break;
    case kArm64Eor32:
      __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
      break;
    case kArm64Eon:
      __ Eon(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
      break;
    case kArm64Eon32:
      __ Eon(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
      break;
    case kArm64Sub:
      __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
      break;
    case kArm64Sub32:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Subs(i.OutputRegister32(), i.InputRegister32(0),
                i.InputOperand2_32(1));
      } else {
        __ Sub(i.OutputRegister32(), i.InputRegister32(0),
               i.InputOperand2_32(1));
      }
      break;
    case kArm64Lsl:
      ASSEMBLE_SHIFT(Lsl, 64);
      break;
    case kArm64Lsl32:
      ASSEMBLE_SHIFT(Lsl, 32);
      break;
    case kArm64Lsr:
      ASSEMBLE_SHIFT(Lsr, 64);
      break;
    case kArm64Lsr32:
      ASSEMBLE_SHIFT(Lsr, 32);
      break;
    case kArm64Asr:
      ASSEMBLE_SHIFT(Asr, 64);
      break;
    case kArm64Asr32:
      ASSEMBLE_SHIFT(Asr, 32);
      break;
    case kArm64Ror:
      ASSEMBLE_SHIFT(Ror, 64);
      break;
    case kArm64Ror32:
      ASSEMBLE_SHIFT(Ror, 32);
      break;
    case kArm64Mov32:
      __ Mov(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Sxtb32:
      __ Sxtb(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Sxth32:
      __ Sxth(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Sxtw:
      __ Sxtw(i.OutputRegister(), i.InputRegister32(0));
      break;
    case kArm64Ubfx:
      __ Ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
              i.InputInt8(2));
      break;
    case kArm64Ubfx32:
      __ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt8(1),
              i.InputInt8(2));
      break;
    case kArm64Bfi:
      __ Bfi(i.OutputRegister(), i.InputRegister(1), i.InputInt6(2),
             i.InputInt6(3));
      break;
    case kArm64TestAndBranch32:
    case kArm64TestAndBranch:
      // Pseudo instructions turned into tbz/tbnz in AssembleArchBranch.
      break;
    case kArm64CompareAndBranch32:
      // Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
      break;
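    // The test/compare-and-branch pseudo instructions above emit no code
    // here; AssembleArchBranch fuses the test with the branch using tbz/tbnz
    // (test bit and branch) or cbz/cbnz (compare against zero and branch),
    // avoiding a separate flag-setting instruction.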
    case kArm64Claim: {
      __ Claim(i.InputInt32(0));
      break;
    }
    case kArm64Poke: {
      Operand operand(i.InputInt32(1) * kPointerSize);
      __ Poke(i.InputRegister(0), operand);
      break;
    }
    case kArm64PokePair: {
      int slot = i.InputInt32(2) - 1;
      __ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
      break;
    }
    case kArm64Clz32:
      __ Clz(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Cmp:
      __ Cmp(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Cmp32:
      __ Cmp(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Cmn:
      __ Cmn(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Cmn32:
      __ Cmn(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Tst:
      __ Tst(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Tst32:
      __ Tst(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Float64Cmp:
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        DCHECK(instr->InputAt(1)->IsImmediate());
        // 0.0 is the only immediate supported by fcmp instructions.
        DCHECK(i.InputDouble(1) == 0.0);
        __ Fcmp(i.InputDoubleRegister(0), i.InputDouble(1));
      }
      break;
    case kArm64Float64Add:
      __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Sub:
      __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mul:
      __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Div:
      __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mod: {
      // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
      FrameScope scope(masm(), StackFrame::MANUAL);
      DCHECK(d0.is(i.InputDoubleRegister(0)));
      DCHECK(d1.is(i.InputDoubleRegister(1)));
      DCHECK(d0.is(i.OutputDoubleRegister()));
      // TODO(dcarney): make sure this saves all relevant registers.
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      break;
    }
    case kArm64Float64Sqrt:
      __ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float32ToFloat64:
      __ Fcvt(i.OutputDoubleRegister(), i.InputDoubleRegister(0).S());
      break;
    case kArm64Float64ToFloat32:
      __ Fcvt(i.OutputDoubleRegister().S(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64ToInt32:
      __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64ToUint32:
      __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
    case kArm64Int32ToFloat64:
      __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
    case kArm64Uint32ToFloat64:
      __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
    case kArm64Float64ExtractLowWord32:
      __ Fmov(i.OutputRegister32(), i.InputFloat32Register(0));
      break;
    case kArm64Float64ExtractHighWord32:
      // TODO(arm64): This should use MOV (to general) when NEON is supported.
      __ Fmov(i.OutputRegister(), i.InputFloat64Register(0));
      __ Lsr(i.OutputRegister(), i.OutputRegister(), 32);
      break;
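    // In the kArm64Float64ExtractHighWord32 sequence above, Fmov copies the
    // raw bits of the double into an X register and the logical shift right
    // by 32 leaves the high word (sign, exponent and upper mantissa bits) in
    // the low half of the result.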
    case kArm64Float64InsertLowWord32: {
      // TODO(arm64): This should use MOV (from general) when NEON is supported.
      UseScratchRegisterScope scope(masm());
      Register tmp = scope.AcquireX();
      __ Fmov(tmp, i.InputFloat64Register(0));
      __ Bfi(tmp, i.InputRegister(1), 0, 32);
      __ Fmov(i.OutputFloat64Register(), tmp);
      break;
    }
    case kArm64Float64InsertHighWord32: {
      // TODO(arm64): This should use MOV (from general) when NEON is supported.
      UseScratchRegisterScope scope(masm());
      Register tmp = scope.AcquireX();
      __ Fmov(tmp.W(), i.InputFloat32Register(0));
      __ Bfi(tmp, i.InputRegister(1), 32, 32);
      __ Fmov(i.OutputFloat64Register(), tmp);
      break;
    }
    case kArm64Float64MoveU64: {
      __ Fmov(i.OutputFloat64Register(), i.InputRegister(0));
      break;
    }
    case kArm64Float64Max:
      __ Fmax(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Min:
      __ Fmin(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Ldrb:
      __ Ldrb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Ldrsb:
      __ Ldrsb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Strb:
      __ Strb(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64Ldrh:
      __ Ldrh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Ldrsh:
      __ Ldrsh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Strh:
      __ Strh(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64LdrW:
      __ Ldr(i.OutputRegister32(), i.MemoryOperand());
      break;
    case kArm64StrW:
      __ Str(i.InputRegister32(2), i.MemoryOperand());
      break;
    case kArm64Ldr:
      __ Ldr(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Str:
      __ Str(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64LdrS:
      __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
      break;
    case kArm64StrS:
      __ Str(i.InputDoubleRegister(2).S(), i.MemoryOperand());
      break;
    case kArm64LdrD:
      __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kArm64StrD:
      __ Str(i.InputDoubleRegister(2), i.MemoryOperand());
      break;
    case kArm64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ Add(index, object, index);
      __ Str(value, MemOperand(index));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      // TODO(dcarney): we shouldn't test write barriers from c calls.
      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
      UseScratchRegisterScope scope(masm());
      Register temp = no_reg;
      if (csp.is(masm()->StackPointer())) {
        temp = scope.AcquireX();
        lr_status = kLRHasBeenSaved;
        __ Push(lr, temp);  // Need to push a pair
      }
      __ RecordWrite(object, index, value, lr_status, mode);
      if (csp.is(masm()->StackPointer())) {
        __ Pop(temp, lr);
      }
      break;
    }
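    // In the write-barrier case above, lr has to survive the RecordWrite
    // call when compiling against csp, and csp must stay 16-byte aligned;
    // pushing lr paired with a scratch register satisfies both at once.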
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrb);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsh);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrh);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldr);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(32);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(64);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(Strb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(Strh);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(Str);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(32);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(64);
      break;
  }
}  // NOLINT(readability/fn_size)


// Assemble branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  Arm64OperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  FlagsCondition condition = branch->condition;
  ArchOpcode opcode = instr->arch_opcode();

  if (opcode == kArm64CompareAndBranch32) {
    switch (condition) {
      case kEqual:
        __ Cbz(i.InputRegister32(0), tlabel);
        break;
      case kNotEqual:
        __ Cbnz(i.InputRegister32(0), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  } else if (opcode == kArm64TestAndBranch32) {
    switch (condition) {
      case kEqual:
        __ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
        break;
      case kNotEqual:
        __ Tbnz(i.InputRegister32(0), i.InputInt5(1), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  } else if (opcode == kArm64TestAndBranch) {
    switch (condition) {
      case kEqual:
        __ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
        break;
      case kNotEqual:
        __ Tbnz(i.InputRegister64(0), i.InputInt6(1), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  } else {
    Condition cc = FlagsConditionToCondition(condition);
    __ B(cc, tlabel);
  }
  if (!branch->fallthru) __ B(flabel);  // no fallthru to flabel.
}


void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ B(GetLabel(target));
}


// Assemble boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = FlagsConditionToCondition(condition);
  __ Cset(reg, cc);
}


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  Register input = i.InputRegister32(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ Cmp(input, i.InputInt32(index + 0));
    __ B(eq, GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}
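

// AssembleArchLookupSwitch above emits a linear chain of compares, which
// suits small or sparse case sets. For dense sets, AssembleArchTableSwitch
// below builds an inline jump table: each entry is a single 4-byte B
// instruction, so the dispatch sequence scales the case index by four
// (Operand(input, UXTW, 2)) and branches into the table, blocking constant
// and veneer pools so nothing is interleaved with the entries.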
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  UseScratchRegisterScope scope(masm());
  Register input = i.InputRegister32(0);
  Register temp = scope.AcquireX();
  size_t const case_count = instr->InputCount() - 2;
  Label table;
  __ Cmp(input, case_count);
  __ B(hs, GetLabel(i.InputRpo(1)));
  __ Adr(temp, &table);
  __ Add(temp, temp, Operand(input, UXTW, 2));
  __ Br(temp);
  __ StartBlockPools();
  __ Bind(&table);
  for (size_t index = 0; index < case_count; ++index) {
    __ B(GetLabel(i.InputRpo(index + 2)));
  }
  __ EndBlockPools();
}


void CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


// TODO(dcarney): increase stack slots in frame once before first use.
static int AlignedStackSlots(int stack_slots) {
  if (stack_slots & 1) stack_slots++;
  return stack_slots;
}
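
// AlignedStackSlots rounds the slot count up to an even number because csp
// must stay 16-byte aligned at all times: with 8-byte slots, e.g. 3 spill
// slots reserve 4 * kPointerSize = 32 bytes. The prologue and return
// sequences below use it whenever they adjust csp.
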
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ SetStackPointer(csp);
    __ Push(lr, fp);
    __ Mov(fp, csp);
    // TODO(dcarney): correct callee saved registers.
    __ PushCalleeSavedRegisters();
    frame()->SetRegisterSaveAreaSize(20 * kPointerSize);
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ SetStackPointer(jssp);
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else if (stack_slots > 0) {
    __ SetStackPointer(jssp);
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    // TODO(titzer): cannot address target function == local #-1
    __ ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
    DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
    stack_slots -= frame()->GetOsrStackSlotCount();
  }

  if (stack_slots > 0) {
    Register sp = __ StackPointer();
    if (!sp.Is(csp)) {
      __ Sub(sp, sp, stack_slots * kPointerSize);
    }
    __ Sub(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      if (stack_slots > 0) {
        __ Add(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
      }
      // Restore registers.
      // TODO(dcarney): correct callee saved registers.
      __ PopCalleeSavedRegisters();
    }
    __ Mov(csp, fp);
    __ Pop(fp, lr);
    __ Ret();
  } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
    __ Mov(jssp, fp);
    __ Pop(fp, lr);
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ Drop(pop_count);
    __ Ret();
  } else {
    __ Ret();
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Mov(g.ToRegister(destination), src);
    } else {
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsStackSlot()) {
    MemOperand src = g.ToMemOperand(source, masm());
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    if (destination->IsRegister()) {
      __ Ldr(g.ToRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(ConstantOperand::cast(source));
    if (destination->IsRegister() || destination->IsStackSlot()) {
      UseScratchRegisterScope scope(masm());
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : scope.AcquireX();
      if (src.type() == Constant::kHeapObject) {
        __ LoadObject(dst, src.ToHeapObject());
      } else {
        __ Mov(dst, g.ToImmediate(source));
      }
      if (destination->IsStackSlot()) {
        __ Str(dst, g.ToMemOperand(destination, masm()));
      }
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsDoubleRegister()) {
        FPRegister dst = g.ToDoubleRegister(destination).S();
        __ Fmov(dst, src.ToFloat32());
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        UseScratchRegisterScope scope(masm());
        FPRegister temp = scope.AcquireS();
        __ Fmov(temp, src.ToFloat32());
        __ Str(temp, g.ToMemOperand(destination, masm()));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      if (destination->IsDoubleRegister()) {
        FPRegister dst = g.ToDoubleRegister(destination);
        __ Fmov(dst, src.ToFloat64());
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        UseScratchRegisterScope scope(masm());
        FPRegister temp = scope.AcquireD();
        __ Fmov(temp, src.ToFloat64());
        __ Str(temp, g.ToMemOperand(destination, masm()));
      }
    }
  } else if (source->IsDoubleRegister()) {
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source, masm());
    if (destination->IsDoubleRegister()) {
      __ Ldr(g.ToDoubleRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      FPRegister temp = scope.AcquireD();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    UseScratchRegisterScope scope(masm());
    Register temp = scope.AcquireX();
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Mov(temp, src);
      __ Mov(src, dst);
      __ Mov(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Mov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
    UseScratchRegisterScope scope(masm());
    DoubleRegister temp_0 = scope.AcquireD();
    DoubleRegister temp_1 = scope.AcquireD();
    MemOperand src = g.ToMemOperand(source, masm());
    MemOperand dst = g.ToMemOperand(destination, masm());
    __ Ldr(temp_0, src);
    __ Ldr(temp_1, dst);
    __ Str(temp_0, dst);
    __ Str(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    UseScratchRegisterScope scope(masm());
    FPRegister temp = scope.AcquireD();
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(temp, src);
      __ Fmov(src, dst);
      __ Fmov(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Fmov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
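

// Note on AssembleSwap above: stack-slot to stack-slot swaps go through two
// scratch D registers, since a 64-bit FP load/store moves a full slot
// regardless of whether it holds an integer or a double, so one path covers
// both slot kinds.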


void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  // On 64-bit ARM we emit the jump tables inline.
  UNREACHABLE();
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    intptr_t current_pc = masm()->pc_offset();

    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
      intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK((padding_size % kInstructionSize) == 0);
      InstructionAccurateScope instruction_accurate(
          masm(), padding_size / kInstructionSize);

      while (padding_size > 0) {
        __ nop();
        padding_size -= kInstructionSize;
      }
    }
  }
  MarkLazyDeoptSite();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8