// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/arm64/macro-assembler-arm64.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->

// Adds Arm64-specific methods to convert InstructionOperands.
class Arm64OperandConverter FINAL : public InstructionOperandConverter {
 public:
  Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  DoubleRegister InputFloat32Register(int index) {
    return InputDoubleRegister(index).S();
  }

  DoubleRegister InputFloat64Register(int index) {
    return InputDoubleRegister(index);
  }

  DoubleRegister OutputFloat32Register() { return OutputDoubleRegister().S(); }

  DoubleRegister OutputFloat64Register() { return OutputDoubleRegister(); }

  Register InputRegister32(int index) {
    return ToRegister(instr_->InputAt(index)).W();
  }

  Register InputRegister64(int index) { return InputRegister(index); }

  Operand InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }

  Operand InputOperand64(int index) { return InputOperand(index); }

  Operand InputOperand32(int index) {
    return ToOperand32(instr_->InputAt(index));
  }

  Register OutputRegister64() { return OutputRegister(); }

  Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }
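
  // Note: the 32-bit accessors above return W (and S) views of the same
  // underlying 64-bit registers; only the operand width encoded into the
  // emitted instruction changes.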

  Operand InputOperand2_32(int index) {
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        return InputOperand32(index);
      case kMode_Operand2_R_LSL_I:
        return Operand(InputRegister32(index), LSL, InputInt5(index + 1));
      case kMode_Operand2_R_LSR_I:
        return Operand(InputRegister32(index), LSR, InputInt5(index + 1));
      case kMode_Operand2_R_ASR_I:
        return Operand(InputRegister32(index), ASR, InputInt5(index + 1));
      case kMode_Operand2_R_ROR_I:
        return Operand(InputRegister32(index), ROR, InputInt5(index + 1));
      case kMode_MRI:
      case kMode_MRR:
        break;
    }
    UNREACHABLE();
    return Operand(-1);
  }

  Operand InputOperand2_64(int index) {
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        return InputOperand64(index);
      case kMode_Operand2_R_LSL_I:
        return Operand(InputRegister64(index), LSL, InputInt6(index + 1));
      case kMode_Operand2_R_LSR_I:
        return Operand(InputRegister64(index), LSR, InputInt6(index + 1));
      case kMode_Operand2_R_ASR_I:
        return Operand(InputRegister64(index), ASR, InputInt6(index + 1));
      case kMode_Operand2_R_ROR_I:
        return Operand(InputRegister64(index), ROR, InputInt6(index + 1));
      case kMode_MRI:
      case kMode_MRR:
        break;
    }
    UNREACHABLE();
    return Operand(-1);
  }

  MemOperand MemoryOperand(int* first_index) {
    const int index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Operand2_R_LSL_I:
      case kMode_Operand2_R_LSR_I:
      case kMode_Operand2_R_ASR_I:
      case kMode_Operand2_R_ROR_I:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
    return MemOperand(no_reg);
  }

  MemOperand MemoryOperand(int first_index = 0) {
    return MemoryOperand(&first_index);
  }
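
  // kMode_MRI is [base + immediate] and kMode_MRR is [base + register]; both
  // consume two inputs, which is why MemoryOperand advances *first_index by 2.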

  Operand ToOperand(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return ToImmediate(op);
  }

  Operand ToOperand32(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op).W());
    }
    return ToImmediate(op);
  }

  Operand ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kInt64:
        return Operand(constant.ToInt64());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
        return Operand(constant.ToExternalReference());
      case Constant::kHeapObject:
        return Operand(constant.ToHeapObject());
      case Constant::kRpoNumber:
        UNREACHABLE();  // TODO(dcarney): RPO immediates on arm64.
        break;
    }
    UNREACHABLE();
    return Operand(-1);
  }

  MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
    DCHECK(op != NULL);
    DCHECK(!op->IsRegister());
    DCHECK(!op->IsDoubleRegister());
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
                      offset.offset());
  }
};

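// Out-of-line fragments are emitted after the main instruction stream. The
// checked memory accesses below branch to them when an index is out of
// bounds; loads then materialize a default value (NaN or zero) and jump back.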
namespace {

class OutOfLineLoadNaN32 FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadNaN32(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    __ Fmov(result_, std::numeric_limits<float>::quiet_NaN());
  }

 private:
  DoubleRegister const result_;
};

class OutOfLineLoadNaN64 FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadNaN64(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    __ Fmov(result_, std::numeric_limits<double>::quiet_NaN());
  }

 private:
  DoubleRegister const result_;
};

class OutOfLineLoadZero FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ Mov(result_, 0); }

 private:
  Register const result_;
};

Condition FlagsConditionToCondition(FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
      return gt;
    case kUnsignedLessThan:
      return lo;
    case kUnsignedGreaterThanOrEqual:
      return hs;
    case kUnsignedLessThanOrEqual:
      return ls;
    case kUnsignedGreaterThan:
      return hi;
    case kOverflow:
      return vs;
    case kNotOverflow:
      return vc;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      break;
  }
  UNREACHABLE();
  return nv;
}

}  // namespace

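// The ASSEMBLE_CHECKED_* macros below implement bounds-checked array
// accesses: the unsigned offset is compared against the length and the
// access is skipped when offset >= length (condition hs); checked loads
// produce a default value via the out-of-line classes above.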
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width)                         \
  do {                                                             \
    auto result = i.OutputFloat##width##Register();                \
    auto buffer = i.InputRegister(0);                              \
    auto offset = i.InputRegister32(1);                            \
    auto length = i.InputOperand32(2);                             \
    __ Cmp(offset, length);                                        \
    auto ool = new (zone()) OutOfLineLoadNaN##width(this, result); \
    __ B(hs, ool->entry());                                        \
    __ Ldr(result, MemOperand(buffer, offset, UXTW));              \
    __ Bind(ool->exit());                                          \
  } while (0)

#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)             \
  do {                                                       \
    auto result = i.OutputRegister32();                      \
    auto buffer = i.InputRegister(0);                        \
    auto offset = i.InputRegister32(1);                      \
    auto length = i.InputOperand32(2);                       \
    __ Cmp(offset, length);                                  \
    auto ool = new (zone()) OutOfLineLoadZero(this, result); \
    __ B(hs, ool->entry());                                  \
    __ asm_instr(result, MemOperand(buffer, offset, UXTW));  \
    __ Bind(ool->exit());                                    \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_FLOAT(width)          \
  do {                                               \
    auto buffer = i.InputRegister(0);                \
    auto offset = i.InputRegister32(1);              \
    auto length = i.InputOperand32(2);               \
    auto value = i.InputFloat##width##Register(3);   \
    __ Cmp(offset, length);                          \
    Label done;                                      \
    __ B(hs, &done);                                 \
    __ Str(value, MemOperand(buffer, offset, UXTW)); \
    __ Bind(&done);                                  \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)          \
  do {                                                     \
    auto buffer = i.InputRegister(0);                      \
    auto offset = i.InputRegister32(1);                    \
    auto length = i.InputOperand32(2);                     \
    auto value = i.InputRegister32(3);                     \
    __ Cmp(offset, length);                                \
    Label done;                                            \
    __ B(hs, &done);                                       \
    __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
    __ Bind(&done);                                        \
  } while (0)

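// Shift instructions take their shift amount either in a register or as an
// immediate carried in the second input operand.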
#define ASSEMBLE_SHIFT(asm_instr, width)                                       \
  do {                                                                         \
    if (instr->InputAt(1)->IsRegister()) {                                     \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0),       \
                   i.InputRegister##width(1));                                 \
    } else {                                                                   \
      int64_t imm = i.InputOperand##width(1).immediate().value();              \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \
    }                                                                          \
  } while (0)

// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();
  switch (ArchOpcodeField::decode(opcode)) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        Register target = i.InputRegister(0);
        __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
        __ Call(target);
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        UseScratchRegisterScope scope(masm());
        Register temp = scope.AcquireX();
        __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
        __ Cmp(cp, temp);
        __ Assert(eq, kWrongFunctionContext);
      }
      __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(x10);
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), masm()->StackPointer());
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64Ceil:
      __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64Floor:
      __ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64RoundTruncate:
      __ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64RoundTiesAway:
      __ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Add:
      __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
      break;
    case kArm64Add32:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Adds(i.OutputRegister32(), i.InputRegister32(0),
                i.InputOperand2_32(1));
      } else {
        __ Add(i.OutputRegister32(), i.InputRegister32(0),
               i.InputOperand2_32(1));
      }
      break;
    case kArm64And:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
      break;
    case kArm64And32:
      __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
      break;
    case kArm64Bic:
      __ Bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
      break;
    case kArm64Bic32:
      __ Bic(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
      break;
    case kArm64Mul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Mul32:
      __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Smull:
      __ Smull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Umull:
      __ Umull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Madd:
      __ Madd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputRegister(2));
      break;
    case kArm64Madd32:
      __ Madd(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
              i.InputRegister32(2));
      break;
    case kArm64Msub:
      __ Msub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputRegister(2));
      break;
    case kArm64Msub32:
      __ Msub(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
              i.InputRegister32(2));
      break;
    case kArm64Mneg:
      __ Mneg(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Mneg32:
      __ Mneg(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Idiv:
      __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Idiv32:
      __ Sdiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Udiv:
      __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Udiv32:
      __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
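    // ARM64 has no integer modulus instruction; the cases below compute
    // lhs - (lhs / rhs) * rhs with a division followed by Msub.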
    case kArm64Imod: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Imod32: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
    case kArm64Umod: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Umod32: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
    // TODO(dcarney): use mvn instr??
    case kArm64Not:
      __ Orn(i.OutputRegister(), xzr, i.InputOperand(0));
      break;
    case kArm64Not32:
      __ Orn(i.OutputRegister32(), wzr, i.InputOperand32(0));
      break;
    case kArm64Neg:
      __ Neg(i.OutputRegister(), i.InputOperand(0));
      break;
    case kArm64Neg32:
      __ Neg(i.OutputRegister32(), i.InputOperand32(0));
      break;
    case kArm64Or:
      __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
      break;
    case kArm64Or32:
      __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
      break;
    case kArm64Orn:
      __ Orn(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
      break;
    case kArm64Orn32:
      __ Orn(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
      break;
    case kArm64Eor:
      __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
      break;
    case kArm64Eor32:
      __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
      break;
    case kArm64Eon:
      __ Eon(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
      break;
    case kArm64Eon32:
      __ Eon(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
      break;
    case kArm64Sub:
      __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
      break;
    case kArm64Sub32:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Subs(i.OutputRegister32(), i.InputRegister32(0),
                i.InputOperand2_32(1));
      } else {
        __ Sub(i.OutputRegister32(), i.InputRegister32(0),
               i.InputOperand2_32(1));
      }
      break;
    case kArm64Lsl:
      ASSEMBLE_SHIFT(Lsl, 64);
      break;
    case kArm64Lsl32:
      ASSEMBLE_SHIFT(Lsl, 32);
      break;
    case kArm64Lsr:
      ASSEMBLE_SHIFT(Lsr, 64);
      break;
    case kArm64Lsr32:
      ASSEMBLE_SHIFT(Lsr, 32);
      break;
    case kArm64Asr:
      ASSEMBLE_SHIFT(Asr, 64);
      break;
    case kArm64Asr32:
      ASSEMBLE_SHIFT(Asr, 32);
      break;
    case kArm64Ror:
      ASSEMBLE_SHIFT(Ror, 64);
      break;
    case kArm64Ror32:
      ASSEMBLE_SHIFT(Ror, 32);
      break;
    case kArm64Mov32:
      __ Mov(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Sxtb32:
      __ Sxtb(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Sxth32:
      __ Sxth(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Sxtw:
      __ Sxtw(i.OutputRegister(), i.InputRegister32(0));
      break;
    case kArm64Ubfx:
      __ Ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
              i.InputInt8(2));
      break;
    case kArm64Ubfx32:
      __ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt8(1),
              i.InputInt8(2));
      break;
    case kArm64TestAndBranch32:
    case kArm64TestAndBranch:
      // Pseudo instructions turned into tbz/tbnz in AssembleArchBranch.
      break;
    case kArm64CompareAndBranch32:
      // Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
      break;
    case kArm64Claim: {
      int words = MiscField::decode(instr->opcode());
      __ Claim(words);
      break;
    }
    case kArm64Poke: {
      int slot = MiscField::decode(instr->opcode());
      Operand operand(slot * kPointerSize);
      __ Poke(i.InputRegister(0), operand);
      break;
    }
    case kArm64PokePairZero: {
      // TODO(dcarney): test slot offset and register order.
      int slot = MiscField::decode(instr->opcode()) - 1;
      __ PokePair(i.InputRegister(0), xzr, slot * kPointerSize);
      break;
    }
    case kArm64PokePair: {
      int slot = MiscField::decode(instr->opcode()) - 1;
      __ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
      break;
    }
    case kArm64Cmp:
      __ Cmp(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Cmp32:
      __ Cmp(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Cmn:
      __ Cmn(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Cmn32:
      __ Cmn(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Tst:
      __ Tst(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Tst32:
      __ Tst(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Float64Cmp:
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        DCHECK(instr->InputAt(1)->IsImmediate());
        // 0.0 is the only immediate supported by fcmp instructions.
        DCHECK(i.InputDouble(1) == 0.0);
        __ Fcmp(i.InputDoubleRegister(0), i.InputDouble(1));
      }
      break;
    case kArm64Float64Add:
      __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Sub:
      __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mul:
      __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Div:
      __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mod: {
      // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
      FrameScope scope(masm(), StackFrame::MANUAL);
      DCHECK(d0.is(i.InputDoubleRegister(0)));
      DCHECK(d1.is(i.InputDoubleRegister(1)));
      DCHECK(d0.is(i.OutputDoubleRegister()));
      // TODO(dcarney): make sure this saves all relevant registers.
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      break;
    }
    case kArm64Float64Sqrt:
      __ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float32ToFloat64:
      __ Fcvt(i.OutputDoubleRegister(), i.InputDoubleRegister(0).S());
      break;
    case kArm64Float64ToFloat32:
      __ Fcvt(i.OutputDoubleRegister().S(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64ToInt32:
      __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64ToUint32:
      __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
    case kArm64Int32ToFloat64:
      __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
    case kArm64Uint32ToFloat64:
      __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
    case kArm64Ldrb:
      __ Ldrb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Ldrsb:
      __ Ldrsb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Strb:
      __ Strb(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64Ldrh:
      __ Ldrh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Ldrsh:
      __ Ldrsh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Strh:
      __ Strh(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64LdrW:
      __ Ldr(i.OutputRegister32(), i.MemoryOperand());
      break;
    case kArm64StrW:
      __ Str(i.InputRegister32(2), i.MemoryOperand());
      break;
    case kArm64Ldr:
      __ Ldr(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Str:
      __ Str(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64LdrS:
      __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
      break;
    case kArm64StrS:
      __ Str(i.InputDoubleRegister(2).S(), i.MemoryOperand());
      break;
    case kArm64LdrD:
      __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kArm64StrD:
      __ Str(i.InputDoubleRegister(2), i.MemoryOperand());
      break;
    case kArm64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
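      // The write barrier needs the absolute slot address, so fold the base
      // object into the index register before the store.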
      __ Add(index, object, index);
      __ Str(value, MemOperand(index));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      // TODO(dcarney): we shouldn't test write barriers from c calls.
      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
      UseScratchRegisterScope scope(masm());
      Register temp = no_reg;
      if (csp.is(masm()->StackPointer())) {
        temp = scope.AcquireX();
        lr_status = kLRHasBeenSaved;
        __ Push(lr, temp);  // Need to push a pair
      }
      __ RecordWrite(object, index, value, lr_status, mode);
      if (csp.is(masm()->StackPointer())) {
        __ Pop(temp, lr);
      }
      break;
    }
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrb);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsh);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrh);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldr);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(32);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(64);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(Strb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(Strh);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(Str);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(32);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(64);
      break;
  }
}

// Assemble branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  Arm64OperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  FlagsCondition condition = branch->condition;
  ArchOpcode opcode = instr->arch_opcode();

  if (opcode == kArm64CompareAndBranch32) {
    switch (condition) {
      case kEqual:
        __ Cbz(i.InputRegister32(0), tlabel);
        break;
      case kNotEqual:
        __ Cbnz(i.InputRegister32(0), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  } else if (opcode == kArm64TestAndBranch32) {
    switch (condition) {
      case kEqual:
        __ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
        break;
      case kNotEqual:
        __ Tbnz(i.InputRegister32(0), i.InputInt5(1), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  } else if (opcode == kArm64TestAndBranch) {
    switch (condition) {
      case kEqual:
        __ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
        break;
      case kNotEqual:
        __ Tbnz(i.InputRegister64(0), i.InputInt6(1), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  } else {
    Condition cc = FlagsConditionToCondition(condition);
    __ B(cc, tlabel);
  }
  if (!branch->fallthru) __ B(flabel);  // no fallthru to flabel.
}

void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ B(GetLabel(target));
}

// Assemble boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = FlagsConditionToCondition(condition);
  __ Cset(reg, cc);
}

void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  Register input = i.InputRegister32(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ Cmp(input, i.InputInt32(index + 0));
    __ B(eq, GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}

void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  UseScratchRegisterScope scope(masm());
  Register input = i.InputRegister32(0);
  Register temp = scope.AcquireX();
  size_t const case_count = instr->InputCount() - 2;
  Label table;
  __ Cmp(input, case_count);
  __ B(hs, GetLabel(i.InputRpo(1)));
  __ Adr(temp, &table);
  __ Add(temp, temp, Operand(input, UXTW, 2));
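  // Each jump-table entry emitted below is a single B instruction (4 bytes),
  // which is why the input was scaled by 4 (UXTW #2) above.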
  __ Br(temp);
  __ StartBlockPools();
  __ Bind(&table);
  for (size_t index = 0; index < case_count; ++index) {
    __ B(GetLabel(i.InputRpo(index + 2)));
  }
  __ EndBlockPools();
}

void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}

// TODO(dcarney): increase stack slots in frame once before first use.
static int AlignedStackSlots(int stack_slots) {
  if (stack_slots & 1) stack_slots++;
  return stack_slots;
}

void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ SetStackPointer(csp);
    __ Push(lr, fp);
    __ Mov(fp, csp);
    // TODO(dcarney): correct callee saved registers.
    __ PushCalleeSavedRegisters();
    frame()->SetRegisterSaveAreaSize(20 * kPointerSize);
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ SetStackPointer(jssp);
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else if (stack_slots > 0) {
    __ SetStackPointer(jssp);
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
    stack_slots -= frame()->GetOsrStackSlotCount();
  }

  if (stack_slots > 0) {
    Register sp = __ StackPointer();
    if (!sp.Is(csp)) {
      __ Sub(sp, sp, stack_slots * kPointerSize);
    }
    __ Sub(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
  }
}

void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      if (stack_slots > 0) {
        __ Add(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
      }
      // Restore registers.
      // TODO(dcarney): correct callee saved registers.
      __ PopCalleeSavedRegisters();
    }
    __ Mov(csp, fp);
    __ Pop(fp, lr);
    __ Ret();
  } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
    __ Mov(jssp, fp);
    __ Pop(fp, lr);
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ Drop(pop_count);
    __ Ret();
  } else {
    __ Ret();
  }
}

void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Mov(g.ToRegister(destination), src);
    } else {
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsStackSlot()) {
    MemOperand src = g.ToMemOperand(source, masm());
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    if (destination->IsRegister()) {
      __ Ldr(g.ToRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(ConstantOperand::cast(source));
    if (destination->IsRegister() || destination->IsStackSlot()) {
      UseScratchRegisterScope scope(masm());
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : scope.AcquireX();
      if (src.type() == Constant::kHeapObject) {
        __ LoadObject(dst, src.ToHeapObject());
      } else {
        __ Mov(dst, g.ToImmediate(source));
      }
      if (destination->IsStackSlot()) {
        __ Str(dst, g.ToMemOperand(destination, masm()));
      }
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsDoubleRegister()) {
        FPRegister dst = g.ToDoubleRegister(destination).S();
        __ Fmov(dst, src.ToFloat32());
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        UseScratchRegisterScope scope(masm());
        FPRegister temp = scope.AcquireS();
        __ Fmov(temp, src.ToFloat32());
        __ Str(temp, g.ToMemOperand(destination, masm()));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      if (destination->IsDoubleRegister()) {
        FPRegister dst = g.ToDoubleRegister(destination);
        __ Fmov(dst, src.ToFloat64());
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        UseScratchRegisterScope scope(masm());
        FPRegister temp = scope.AcquireD();
        __ Fmov(temp, src.ToFloat64());
        __ Str(temp, g.ToMemOperand(destination, masm()));
      }
    }
  } else if (source->IsDoubleRegister()) {
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source, masm());
    if (destination->IsDoubleRegister()) {
      __ Ldr(g.ToDoubleRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      FPRegister temp = scope.AcquireD();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else {
    UNREACHABLE();
  }
}

void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    UseScratchRegisterScope scope(masm());
    Register temp = scope.AcquireX();
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Mov(temp, src);
      __ Mov(src, dst);
      __ Mov(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Mov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
    UseScratchRegisterScope scope(masm());
    DoubleRegister temp_0 = scope.AcquireD();
    DoubleRegister temp_1 = scope.AcquireD();
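    // Spill slots are always 64 bits wide, so a pair of D-register scratches
    // can swap integer and double stack slots alike.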
    MemOperand src = g.ToMemOperand(source, masm());
    MemOperand dst = g.ToMemOperand(destination, masm());
    __ Ldr(temp_0, src);
    __ Ldr(temp_1, dst);
    __ Str(temp_0, dst);
    __ Str(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    UseScratchRegisterScope scope(masm());
    FPRegister temp = scope.AcquireD();
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(temp, src);
      __ Fmov(src, dst);
      __ Fmov(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Fmov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}

void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  // On 64-bit ARM we emit the jump tables inline.
  UNREACHABLE();
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }

void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    intptr_t current_pc = masm()->pc_offset();

    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
      intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK((padding_size % kInstructionSize) == 0);
      InstructionAccurateScope instruction_accurate(
          masm(), padding_size / kInstructionSize);
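      // The scope above checks (in debug builds) that exactly this many
      // instructions are emitted and blocks pool emission in between.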
      while (padding_size > 0) {
        __ nop();
        padding_size -= kInstructionSize;
      }
    }
  }
  MarkLazyDeoptSite();
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8