// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/arm/macro-assembler-arm.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


#define kScratchReg r9
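// Note: r9 serves below as a general-purpose scratch register (e.g. for
// materializing double constants and in AssembleMove/AssembleSwap); it is
// assumed here that the register allocator never hands out r9.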
// Adds Arm-specific methods to convert InstructionOperands.
class ArmOperandConverter FINAL : public InstructionOperandConverter {
 public:
  ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  SwVfpRegister OutputFloat32Register(size_t index = 0) {
    return ToFloat32Register(instr_->OutputAt(index));
  }

  SwVfpRegister InputFloat32Register(size_t index) {
    return ToFloat32Register(instr_->InputAt(index));
  }

  SwVfpRegister ToFloat32Register(InstructionOperand* op) {
    return ToFloat64Register(op).low();
  }

  LowDwVfpRegister OutputFloat64Register(size_t index = 0) {
    return ToFloat64Register(instr_->OutputAt(index));
  }

  LowDwVfpRegister InputFloat64Register(size_t index) {
    return ToFloat64Register(instr_->InputAt(index));
  }

  LowDwVfpRegister ToFloat64Register(InstructionOperand* op) {
    return LowDwVfpRegister::from_code(ToDoubleRegister(op).code());
  }
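  // Note on the converters above: float32 values live in an S-register that
  // aliases the low half of a D-register, and only d0-d15 have S-register
  // aliases on VFP, hence the restriction to LowDwVfpRegister.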
  SBit OutputSBit() const {
    switch (instr_->flags_mode()) {
      case kFlags_branch:
      case kFlags_set:
        return SetCC;
      case kFlags_none:
        return LeaveCC;
    }
    UNREACHABLE();
    return LeaveCC;
  }
  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kInt64:
      case Constant::kExternalReference:
      case Constant::kHeapObject:
      case Constant::kRpoNumber:
        break;
    }
    UNREACHABLE();
    return Operand::Zero();
  }
  Operand InputOperand2(size_t first_index) {
    const size_t index = first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Offset_RI:
      case kMode_Offset_RR:
        break;
      case kMode_Operand2_I:
        return InputImmediate(index + 0);
      case kMode_Operand2_R:
        return Operand(InputRegister(index + 0));
      case kMode_Operand2_R_ASR_I:
        return Operand(InputRegister(index + 0), ASR, InputInt5(index + 1));
      case kMode_Operand2_R_ASR_R:
        return Operand(InputRegister(index + 0), ASR, InputRegister(index + 1));
      case kMode_Operand2_R_LSL_I:
        return Operand(InputRegister(index + 0), LSL, InputInt5(index + 1));
      case kMode_Operand2_R_LSL_R:
        return Operand(InputRegister(index + 0), LSL, InputRegister(index + 1));
      case kMode_Operand2_R_LSR_I:
        return Operand(InputRegister(index + 0), LSR, InputInt5(index + 1));
      case kMode_Operand2_R_LSR_R:
        return Operand(InputRegister(index + 0), LSR, InputRegister(index + 1));
      case kMode_Operand2_R_ROR_I:
        return Operand(InputRegister(index + 0), ROR, InputInt5(index + 1));
      case kMode_Operand2_R_ROR_R:
        return Operand(InputRegister(index + 0), ROR, InputRegister(index + 1));
    }
    UNREACHABLE();
    return Operand::Zero();
  }
  MemOperand InputOffset(size_t* first_index) {
    const size_t index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Operand2_I:
      case kMode_Operand2_R:
      case kMode_Operand2_R_ASR_I:
      case kMode_Operand2_R_ASR_R:
      case kMode_Operand2_R_LSL_I:
      case kMode_Operand2_R_LSL_R:
      case kMode_Operand2_R_LSR_I:
      case kMode_Operand2_R_LSR_R:
      case kMode_Operand2_R_ROR_I:
      case kMode_Operand2_R_ROR_R:
        break;
      case kMode_Offset_RI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_Offset_RR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
    return MemOperand(r0);
  }

  MemOperand InputOffset(size_t first_index = 0) {
    return InputOffset(&first_index);
  }
  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK(!op->IsRegister());
    DCHECK(!op->IsDoubleRegister());
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};


namespace {
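// The OutOfLine* classes below emit slow-path code after the main instruction
// stream; the fast path branches to entry() and the slow path jumps back to
// exit().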
class OutOfLineLoadFloat32 FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadFloat32(CodeGenerator* gen, SwVfpRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    __ vmov(result_, std::numeric_limits<float>::quiet_NaN());
  }

 private:
  SwVfpRegister const result_;
};


class OutOfLineLoadFloat64 FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadFloat64(CodeGenerator* gen, DwVfpRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    __ vmov(result_, std::numeric_limits<double>::quiet_NaN(), kScratchReg);
  }

 private:
  DwVfpRegister const result_;
};


class OutOfLineLoadInteger FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ mov(result_, Operand::Zero()); }

 private:
  Register const result_;
};
Condition FlagsConditionToCondition(FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
      return gt;
    case kUnsignedLessThan:
      return lo;
    case kUnsignedGreaterThanOrEqual:
      return hs;
    case kUnsignedLessThanOrEqual:
      return ls;
    case kUnsignedGreaterThan:
      return hi;
    case kOverflow:
      return vs;
    case kNotOverflow:
      return vc;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}

}  // namespace
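// The checked load/store macros below emit an unsigned bounds check (a cmp
// of the offset against the length operand). Out-of-bounds loads branch (hs)
// to an out-of-line stub that produces NaN or zero; out-of-bounds stores are
// skipped by predicating the store itself on lo.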
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width)                           \
  do {                                                               \
    auto result = i.OutputFloat##width##Register();                  \
    auto offset = i.InputRegister(0);                                \
    if (instr->InputAt(1)->IsRegister()) {                           \
      __ cmp(offset, i.InputRegister(1));                            \
    } else {                                                         \
      __ cmp(offset, i.InputImmediate(1));                           \
    }                                                                \
    auto ool = new (zone()) OutOfLineLoadFloat##width(this, result); \
    __ b(hs, ool->entry());                                          \
    __ vldr(result, i.InputOffset(2));                               \
    __ bind(ool->exit());                                            \
    DCHECK_EQ(LeaveCC, i.OutputSBit());                              \
  } while (0)


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                \
  do {                                                          \
    auto result = i.OutputRegister();                           \
    auto offset = i.InputRegister(0);                           \
    if (instr->InputAt(1)->IsRegister()) {                      \
      __ cmp(offset, i.InputRegister(1));                       \
    } else {                                                    \
      __ cmp(offset, i.InputImmediate(1));                      \
    }                                                           \
    auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
    __ b(hs, ool->entry());                                     \
    __ asm_instr(result, i.InputOffset(2));                     \
    __ bind(ool->exit());                                       \
    DCHECK_EQ(LeaveCC, i.OutputSBit());                         \
  } while (0)


#define ASSEMBLE_CHECKED_STORE_FLOAT(width)        \
  do {                                             \
    auto offset = i.InputRegister(0);              \
    if (instr->InputAt(1)->IsRegister()) {         \
      __ cmp(offset, i.InputRegister(1));          \
    } else {                                       \
      __ cmp(offset, i.InputImmediate(1));         \
    }                                              \
    auto value = i.InputFloat##width##Register(2); \
    __ vstr(value, i.InputOffset(3), lo);          \
    DCHECK_EQ(LeaveCC, i.OutputSBit());            \
  } while (0)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
  do {                                            \
    auto offset = i.InputRegister(0);             \
    if (instr->InputAt(1)->IsRegister()) {        \
      __ cmp(offset, i.InputRegister(1));         \
    } else {                                      \
      __ cmp(offset, i.InputImmediate(1));        \
    }                                             \
    auto value = i.InputRegister(2);              \
    __ asm_instr(value, i.InputOffset(3), lo);    \
    DCHECK_EQ(LeaveCC, i.OutputSBit());           \
  } while (0)
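// For example, ASSEMBLE_CHECKED_LOAD_INTEGER(ldr) with a register length
// operand expands to roughly the following sequence:
//
//   cmp offset, length
//   b hs, <out-of-line: mov result, #0 ; b exit>
//   ldr result, [base + offset]
//   exit: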
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  ArmOperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        __ add(ip, i.InputRegister(0),
               Operand(Code::kHeaderSize - kHeapObjectTag));
        __ Call(ip);
      }
      RecordCallPosition(instr);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, kScratchReg);
        __ Assert(eq, kWrongFunctionContext);
      }
      __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(ip);
      RecordCallPosition(instr);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchNop:
      // don't emit code for nops.
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
      break;
    }
    case kArchRet:
      AssembleReturn();
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), sp);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputFloat64Register(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmAdd:
      __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmAnd:
      __ and_(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
              i.OutputSBit());
      break;
    case kArmBic:
      __ bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmMul:
      __ mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.OutputSBit());
      break;
    case kArmMla:
      __ mla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputRegister(2), i.OutputSBit());
      break;
    case kArmMls: {
      CpuFeatureScope scope(masm(), MLS);
      __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputRegister(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmSmmul:
      __ smmul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmSmmla:
      __ smmla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputRegister(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmUmull:
      __ umull(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
               i.InputRegister(1), i.OutputSBit());
      break;
    case kArmSdiv: {
      CpuFeatureScope scope(masm(), SUDIV);
      __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmUdiv: {
      CpuFeatureScope scope(masm(), SUDIV);
      __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmMov:
      __ Move(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
      break;
    case kArmMvn:
      __ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
      break;
    case kArmOrr:
      __ orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmEor:
      __ eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmSub:
      __ sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmRsb:
      __ rsb(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmBfc: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmUbfx: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
              i.InputInt8(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmSxtb:
      __ sxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmSxth:
      __ sxth(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmSxtab:
      __ sxtab(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputInt32(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmSxtah:
      __ sxtah(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputInt32(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmUxtb:
      __ uxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmUxth:
      __ uxth(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmUxtab:
      __ uxtab(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputInt32(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmUxtah:
      __ uxtah(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputInt32(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmClz:
      __ clz(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmCmp:
      __ cmp(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmCmn:
      __ cmn(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmTst:
      __ tst(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmTeq:
      __ teq(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmVcmpF64:
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ VFPCompareAndSetFlags(i.InputFloat64Register(0),
                                 i.InputFloat64Register(1));
      } else {
        DCHECK(instr->InputAt(1)->IsImmediate());
        // 0.0 is the only immediate supported by vcmp instructions.
        DCHECK(i.InputDouble(1) == 0.0);
        __ VFPCompareAndSetFlags(i.InputFloat64Register(0), i.InputDouble(1));
      }
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmVaddF64:
      __ vadd(i.OutputFloat64Register(), i.InputFloat64Register(0),
              i.InputFloat64Register(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVsubF64:
      __ vsub(i.OutputFloat64Register(), i.InputFloat64Register(0),
              i.InputFloat64Register(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmulF64:
      __ vmul(i.OutputFloat64Register(), i.InputFloat64Register(0),
              i.InputFloat64Register(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmlaF64:
      __ vmla(i.OutputFloat64Register(), i.InputFloat64Register(1),
              i.InputFloat64Register(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmlsF64:
      __ vmls(i.OutputFloat64Register(), i.InputFloat64Register(1),
              i.InputFloat64Register(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVdivF64:
      __ vdiv(i.OutputFloat64Register(), i.InputFloat64Register(0),
              i.InputFloat64Register(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmodF64: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputFloat64Register(0),
                              i.InputFloat64Register(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result into the double result register.
      __ MovFromFloatResult(i.OutputFloat64Register());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVsqrtF64:
      __ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVrintmF64:
      __ vrintm(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVrintpF64:
      __ vrintp(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVrintzF64:
      __ vrintz(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVrintaF64:
      __ vrinta(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVnegF64:
      __ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVcvtF32F64: {
      __ vcvt_f32_f64(i.OutputFloat32Register(), i.InputFloat64Register(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtF64F32: {
      __ vcvt_f64_f32(i.OutputFloat64Register(), i.InputFloat32Register(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtF64S32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vmov(scratch, i.InputRegister(0));
      __ vcvt_f64_s32(i.OutputFloat64Register(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtF64U32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vmov(scratch, i.InputRegister(0));
      __ vcvt_f64_u32(i.OutputFloat64Register(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtS32F64: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vcvt_s32_f64(scratch, i.InputFloat64Register(0));
      __ vmov(i.OutputRegister(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtU32F64: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vcvt_u32_f64(scratch, i.InputFloat64Register(0));
      __ vmov(i.OutputRegister(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVmovLowU32F64:
      __ VmovLow(i.OutputRegister(), i.InputFloat64Register(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmovLowF64U32:
      __ VmovLow(i.OutputFloat64Register(), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmovHighU32F64:
      __ VmovHigh(i.OutputRegister(), i.InputFloat64Register(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmovHighF64U32:
      __ VmovHigh(i.OutputFloat64Register(), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmovF64U32U32:
      __ vmov(i.OutputFloat64Register(), i.InputRegister(0),
              i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmLdrb:
      __ ldrb(i.OutputRegister(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmLdrsb:
      __ ldrsb(i.OutputRegister(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmStrb: {
      size_t index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ strb(i.InputRegister(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmLdrh:
      __ ldrh(i.OutputRegister(), i.InputOffset());
      break;
    case kArmLdrsh:
      __ ldrsh(i.OutputRegister(), i.InputOffset());
      break;
    case kArmStrh: {
      size_t index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ strh(i.InputRegister(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmLdr:
      __ ldr(i.OutputRegister(), i.InputOffset());
      break;
    case kArmStr: {
      size_t index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ str(i.InputRegister(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVldrF32: {
      __ vldr(i.OutputFloat32Register(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVstrF32: {
      size_t index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ vstr(i.InputFloat32Register(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVldrF64:
      __ vldr(i.OutputFloat64Register(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVstrF64: {
      size_t index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ vstr(i.InputFloat64Register(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmPush:
      __ Push(i.InputRegister(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmStoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ add(index, object, index);
      __ str(value, MemOperand(index));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
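      // The add above turned index into the absolute address of the store,
      // which is what RecordWrite expects as its second argument.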
      __ RecordWrite(object, index, value, lr_status, mode);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrb);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsh);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrh);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(32);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(64);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(strb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(strh);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(str);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(32);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(64);
      break;
  }
}
// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  ArmOperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  Condition cc = FlagsConditionToCondition(branch->condition);
  __ b(cc, tlabel);
  if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
}
void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  ArmOperandConverter i(this, instr);

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = FlagsConditionToCondition(condition);
  __ mov(reg, Operand(0));
  __ mov(reg, Operand(1), LeaveCC, cc);
}
void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  ArmOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ cmp(input, Operand(i.InputInt32(index + 0)));
    __ b(eq, GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  ArmOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  size_t const case_count = instr->InputCount() - 2;
  __ CheckConstPool(true, true);
  __ cmp(input, Operand(case_count));
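  // The predicated ldr below uses pc as both base and destination. Reading pc
  // on ARM yields the address of the current instruction plus 8, i.e. the
  // first entry of the inline table emitted by the __ dd() loop, so in-range
  // inputs index directly into the table. BlockConstPoolFor keeps the
  // assembler from splitting the table with a constant pool.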
  __ BlockConstPoolFor(case_count + 2);
  __ ldr(pc, MemOperand(pc, input, LSL, 2), lo);
  __ b(GetLabel(i.InputRpo(1)));
  for (size_t index = 0; index < case_count; ++index) {
    __ dd(GetLabel(i.InputRpo(index + 2)));
  }
}
void CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    bool saved_pp;
    if (FLAG_enable_ool_constant_pool) {
      __ Push(lr, fp, pp);
      // Adjust FP to point to saved FP.
      __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
      saved_pp = true;
    } else {
      __ Push(lr, fp);
      __ mov(fp, sp);
      saved_pp = false;
    }
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0 || saved_pp) {
      // Save callee-saved registers.
      int register_save_area_size = saved_pp ? kPointerSize : 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
      __ stm(db_w, sp, saves);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else if (stack_slots > 0) {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    // TODO(titzer): cannot address target function == local #-1
    __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
    DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
    stack_slots -= frame()->GetOsrStackSlotCount();
  }

  if (stack_slots > 0) {
    __ sub(sp, sp, Operand(stack_slots * kPointerSize));
  }
}
void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      if (stack_slots > 0) {
        __ add(sp, sp, Operand(stack_slots * kPointerSize));
      }
      // Restore registers.
      const RegList saves = descriptor->CalleeSavedRegisters();
      if (saves != 0) {
        __ ldm(ia_w, sp, saves);
      }
    }
    __ LeaveFrame(StackFrame::MANUAL);
    __ Ret();
  } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
    __ LeaveFrame(StackFrame::MANUAL);
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ Drop(pop_count);
    __ Ret();
  } else {
    __ Ret();
  }
}
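// AssembleMove and AssembleSwap below implement the parallel moves scheduled
// by the gap resolver (see src/compiler/gap-resolver.h) between instructions.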
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  ArmOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(g.ToRegister(destination), src);
    } else {
      __ str(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ldr(g.ToRegister(destination), src);
    } else {
      Register temp = kScratchReg;
      __ ldr(temp, src);
      __ str(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          __ mov(dst, Operand(src.ToInt32()));
          break;
        case Constant::kInt64:
          UNREACHABLE();
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ mov(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): loading RPO constants on arm.
          break;
      }
      if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsDoubleStackSlot()) {
        MemOperand dst = g.ToMemOperand(destination);
        __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
        __ str(ip, dst);
      } else {
        SwVfpRegister dst = g.ToFloat32Register(destination);
        __ vmov(dst, src.ToFloat32());
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      DwVfpRegister dst = destination->IsDoubleRegister()
                              ? g.ToFloat64Register(destination)
                              : kScratchDoubleReg;
      __ vmov(dst, src.ToFloat64(), kScratchReg);
      if (destination->IsDoubleStackSlot()) {
        __ vstr(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsDoubleRegister()) {
    DwVfpRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DwVfpRegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ vstr(src, g.ToMemOperand(destination));
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ vldr(g.ToDoubleRegister(destination), src);
    } else {
      DwVfpRegister temp = kScratchDoubleReg;
      __ vldr(temp, src);
      __ vstr(temp, g.ToMemOperand(destination));
    }
  } else {
    UNREACHABLE();
  }
}
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  ArmOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ ldr(src, dst);
      __ str(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
    Register temp_0 = kScratchReg;
    SwVfpRegister temp_1 = kScratchDoubleReg.low();
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ ldr(temp_0, src);
    __ vldr(temp_1, dst);
    __ str(temp_0, dst);
    __ vstr(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    DwVfpRegister temp = kScratchDoubleReg;
    DwVfpRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DwVfpRegister dst = g.ToDoubleRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ Move(temp, src);
      __ vldr(src, dst);
      __ vstr(temp, dst);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleStackSlot());
    Register temp_0 = kScratchReg;
    DwVfpRegister temp_1 = kScratchDoubleReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
    MemOperand dst0 = g.ToMemOperand(destination);
    MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
    __ vldr(temp_1, dst0);  // Save destination in temp_1.
    __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
    __ str(temp_0, dst0);
    __ ldr(temp_0, src1);
    __ str(temp_0, dst1);
    __ vstr(temp_1, src0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  // On 32-bit ARM we emit the jump tables inline.
  UNREACHABLE();
}


void CodeGenerator::AddNopForSmiCodeInlining() {
  // On 32-bit ARM we do not insert nops for inlined Smi code.
}
void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      // Block literal pool emission for the duration of the padding.
      v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= v8::internal::Assembler::kInstrSize;
      }
    }
  }
  MarkLazyDeoptSite();
}
#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8