// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/arm/macro-assembler-arm.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


#define kScratchReg r9
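// (r9 serves as an extra scratch register for this code generator; ip is
// already reserved as the macro assembler's own scratch register.)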


// Adds Arm-specific methods to convert InstructionOperands.
class ArmOperandConverter FINAL : public InstructionOperandConverter {
 public:
  ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  SwVfpRegister OutputFloat32Register(int index = 0) {
    return ToFloat32Register(instr_->OutputAt(index));
  }

  SwVfpRegister InputFloat32Register(int index) {
    return ToFloat32Register(instr_->InputAt(index));
  }

  SwVfpRegister ToFloat32Register(InstructionOperand* op) {
    return ToFloat64Register(op).low();
  }

  LowDwVfpRegister OutputFloat64Register(int index = 0) {
    return ToFloat64Register(instr_->OutputAt(index));
  }

  LowDwVfpRegister InputFloat64Register(int index) {
    return ToFloat64Register(instr_->InputAt(index));
  }

  LowDwVfpRegister ToFloat64Register(InstructionOperand* op) {
    return LowDwVfpRegister::from_code(ToDoubleRegister(op).code());
  }
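
  // Note that on ARM VFP each of the sixteen low double registers aliases a
  // pair of single registers (dN overlaps s2N and s2N+1), which is why a
  // float32 value allocated to a LowDwVfpRegister can be accessed through
  // low(), as ToFloat32Register() does above.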

  SBit OutputSBit() const {
    switch (instr_->flags_mode()) {
      case kFlags_branch:
      case kFlags_set:
        return SetCC;
      case kFlags_none:
        return LeaveCC;
    }
    UNREACHABLE();
    return LeaveCC;
  }

  Operand InputImmediate(int index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kInt64:
      case Constant::kExternalReference:
      case Constant::kHeapObject:
      case Constant::kRpoNumber:
        break;
    }
    UNREACHABLE();
    return Operand::Zero();
  }

  Operand InputOperand2(int first_index) {
    const int index = first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Offset_RI:
      case kMode_Offset_RR:
        break;
      case kMode_Operand2_I:
        return InputImmediate(index + 0);
      case kMode_Operand2_R:
        return Operand(InputRegister(index + 0));
      case kMode_Operand2_R_ASR_I:
        return Operand(InputRegister(index + 0), ASR, InputInt5(index + 1));
      case kMode_Operand2_R_ASR_R:
        return Operand(InputRegister(index + 0), ASR, InputRegister(index + 1));
      case kMode_Operand2_R_LSL_I:
        return Operand(InputRegister(index + 0), LSL, InputInt5(index + 1));
      case kMode_Operand2_R_LSL_R:
        return Operand(InputRegister(index + 0), LSL, InputRegister(index + 1));
      case kMode_Operand2_R_LSR_I:
        return Operand(InputRegister(index + 0), LSR, InputInt5(index + 1));
      case kMode_Operand2_R_LSR_R:
        return Operand(InputRegister(index + 0), LSR, InputRegister(index + 1));
      case kMode_Operand2_R_ROR_I:
        return Operand(InputRegister(index + 0), ROR, InputInt5(index + 1));
      case kMode_Operand2_R_ROR_R:
        return Operand(InputRegister(index + 0), ROR, InputRegister(index + 1));
    }
    UNREACHABLE();
    return Operand::Zero();
  }
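
  // ARM data-processing instructions take a "flexible second operand"
  // (Operand2): an immediate, a plain register, or a register shifted or
  // rotated by an immediate or by another register. The addressing-mode bits
  // of the opcode record which form the instruction selector chose, and
  // InputOperand2() above rebuilds the matching assembler Operand.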

  MemOperand InputOffset(int* first_index) {
    const int index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Operand2_I:
      case kMode_Operand2_R:
      case kMode_Operand2_R_ASR_I:
      case kMode_Operand2_R_ASR_R:
      case kMode_Operand2_R_LSL_I:
      case kMode_Operand2_R_LSL_R:
      case kMode_Operand2_R_LSR_I:
      case kMode_Operand2_R_LSR_R:
      case kMode_Operand2_R_ROR_I:
      case kMode_Operand2_R_ROR_R:
        break;
      case kMode_Offset_RI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_Offset_RR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
    return MemOperand(r0);
  }

  MemOperand InputOffset(int first_index = 0) {
    return InputOffset(&first_index);
  }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK(!op->IsRegister());
    DCHECK(!op->IsDoubleRegister());
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};


namespace {

class OutOfLineLoadFloat32 FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadFloat32(CodeGenerator* gen, SwVfpRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    __ vmov(result_, std::numeric_limits<float>::quiet_NaN());
  }

 private:
  SwVfpRegister const result_;
};


class OutOfLineLoadFloat64 FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadFloat64(CodeGenerator* gen, DwVfpRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    __ vmov(result_, std::numeric_limits<double>::quiet_NaN(), kScratchReg);
  }

 private:
  DwVfpRegister const result_;
};


class OutOfLineLoadInteger FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ mov(result_, Operand::Zero()); }

 private:
  Register const result_;
};


Condition FlagsConditionToCondition(FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
      return gt;
    case kUnsignedLessThan:
      return lo;
    case kUnsignedGreaterThanOrEqual:
      return hs;
    case kUnsignedLessThanOrEqual:
      return ls;
    case kUnsignedGreaterThan:
      return hi;
    case kOverflow:
      return vs;
    case kNotOverflow:
      return vc;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}

}  // namespace


#define ASSEMBLE_CHECKED_LOAD_FLOAT(width)                           \
  do {                                                               \
    auto result = i.OutputFloat##width##Register();                  \
    auto offset = i.InputRegister(0);                                \
    if (instr->InputAt(1)->IsRegister()) {                           \
      __ cmp(offset, i.InputRegister(1));                            \
    } else {                                                         \
      __ cmp(offset, i.InputImmediate(1));                           \
    }                                                                \
    auto ool = new (zone()) OutOfLineLoadFloat##width(this, result); \
    __ b(hs, ool->entry());                                          \
    __ vldr(result, i.InputOffset(2));                               \
    __ bind(ool->exit());                                            \
    DCHECK_EQ(LeaveCC, i.OutputSBit());                              \
  } while (0)
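
// In effect, a checked load expands to:
//   cmp offset, length         ; length is a register or an immediate
//   b   hs, <out-of-line>      ; taken when offset >= length (unsigned)
//   ldr/vldr result, [...]     ; in bounds: perform the load
// where the out-of-line code materializes the default result (a quiet NaN
// for floats, zero for integers) and branches back past the load.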

#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                \
  do {                                                          \
    auto result = i.OutputRegister();                           \
    auto offset = i.InputRegister(0);                           \
    if (instr->InputAt(1)->IsRegister()) {                      \
      __ cmp(offset, i.InputRegister(1));                       \
    } else {                                                    \
      __ cmp(offset, i.InputImmediate(1));                      \
    }                                                           \
    auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
    __ b(hs, ool->entry());                                     \
    __ asm_instr(result, i.InputOffset(2));                     \
    __ bind(ool->exit());                                       \
    DCHECK_EQ(LeaveCC, i.OutputSBit());                         \
  } while (0)


#define ASSEMBLE_CHECKED_STORE_FLOAT(width)        \
  do {                                             \
    auto offset = i.InputRegister(0);              \
    if (instr->InputAt(1)->IsRegister()) {         \
      __ cmp(offset, i.InputRegister(1));          \
    } else {                                       \
      __ cmp(offset, i.InputImmediate(1));         \
    }                                              \
    auto value = i.InputFloat##width##Register(2); \
    __ vstr(value, i.InputOffset(3), lo);          \
    DCHECK_EQ(LeaveCC, i.OutputSBit());            \
  } while (0)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
  do {                                            \
    auto offset = i.InputRegister(0);             \
    if (instr->InputAt(1)->IsRegister()) {        \
      __ cmp(offset, i.InputRegister(1));         \
    } else {                                      \
      __ cmp(offset, i.InputImmediate(1));        \
    }                                             \
    auto value = i.InputRegister(2);              \
    __ asm_instr(value, i.InputOffset(3), lo);    \
    DCHECK_EQ(LeaveCC, i.OutputSBit());           \
  } while (0)
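
// Checked stores need no out-of-line code: the store itself is predicated on
// lo (offset < length, unsigned), so an out-of-bounds store is simply
// skipped.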


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  ArmOperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        __ add(ip, i.InputRegister(0),
               Operand(Code::kHeaderSize - kHeapObjectTag));
        __ Call(ip);
      }
      AddSafepointAndDeopt(instr);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, kScratchReg);
        __ Assert(eq, kWrongFunctionContext);
      }
      __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(ip);
      AddSafepointAndDeopt(instr);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchNop:
      // don't emit code for nops.
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchRet:
      AssembleReturn();
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), sp);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputFloat64Register(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmAdd:
      __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmAnd:
      __ and_(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
              i.OutputSBit());
      break;
    case kArmBic:
      __ bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmMul:
      __ mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.OutputSBit());
      break;
    case kArmMla:
      __ mla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputRegister(2), i.OutputSBit());
      break;
    case kArmMls: {
      CpuFeatureScope scope(masm(), MLS);
      __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputRegister(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmSmmul:
      __ smmul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmSmmla:
      __ smmla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputRegister(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmUmull:
      __ umull(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
               i.InputRegister(1), i.OutputSBit());
      break;
    case kArmSdiv: {
      CpuFeatureScope scope(masm(), SUDIV);
      __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmUdiv: {
      CpuFeatureScope scope(masm(), SUDIV);
      __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmMov:
      __ Move(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
      break;
    case kArmMvn:
      __ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
      break;
    case kArmOrr:
      __ orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmEor:
      __ eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmSub:
      __ sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmRsb:
      __ rsb(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmBfc: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmUbfx: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
              i.InputInt8(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmSxtb:
      __ sxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmSxth:
      __ sxth(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmSxtab:
      __ sxtab(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputInt32(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmSxtah:
      __ sxtah(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputInt32(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmUxtb:
      __ uxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmUxth:
      __ uxth(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmUxtab:
      __ uxtab(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputInt32(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmUxtah:
      __ uxtah(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputInt32(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmCmp:
      __ cmp(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmCmn:
      __ cmn(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmTst:
      __ tst(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmTeq:
      __ teq(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmVcmpF64:
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ VFPCompareAndSetFlags(i.InputFloat64Register(0),
                                 i.InputFloat64Register(1));
      } else {
        DCHECK(instr->InputAt(1)->IsImmediate());
        // 0.0 is the only immediate supported by vcmp instructions.
        DCHECK(i.InputDouble(1) == 0.0);
        __ VFPCompareAndSetFlags(i.InputFloat64Register(0), i.InputDouble(1));
      }
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmVaddF64:
      __ vadd(i.OutputFloat64Register(), i.InputFloat64Register(0),
              i.InputFloat64Register(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVsubF64:
      __ vsub(i.OutputFloat64Register(), i.InputFloat64Register(0),
              i.InputFloat64Register(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmulF64:
      __ vmul(i.OutputFloat64Register(), i.InputFloat64Register(0),
              i.InputFloat64Register(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmlaF64:
      __ vmla(i.OutputFloat64Register(), i.InputFloat64Register(1),
              i.InputFloat64Register(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmlsF64:
      __ vmls(i.OutputFloat64Register(), i.InputFloat64Register(1),
              i.InputFloat64Register(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVdivF64:
      __ vdiv(i.OutputFloat64Register(), i.InputFloat64Register(0),
              i.InputFloat64Register(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
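    // VFP has no double-precision remainder instruction, so the kArmVmodF64
    // case below calls out to a C helper, passing the two doubles through
    // the standard ARM calling convention for floating-point parameters.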
    case kArmVmodF64: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputFloat64Register(0),
                              i.InputFloat64Register(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result into the double result register.
      __ MovFromFloatResult(i.OutputFloat64Register());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVsqrtF64:
      __ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVfloorF64:
      __ vrintm(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVceilF64:
      __ vrintp(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVroundTruncateF64:
      __ vrintz(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVroundTiesAwayF64:
      __ vrinta(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVnegF64:
      __ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVcvtF32F64: {
      __ vcvt_f32_f64(i.OutputFloat32Register(), i.InputFloat64Register(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtF64F32: {
      __ vcvt_f64_f32(i.OutputFloat64Register(), i.InputFloat32Register(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtF64S32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vmov(scratch, i.InputRegister(0));
      __ vcvt_f64_s32(i.OutputFloat64Register(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtF64U32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vmov(scratch, i.InputRegister(0));
      __ vcvt_f64_u32(i.OutputFloat64Register(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtS32F64: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vcvt_s32_f64(scratch, i.InputFloat64Register(0));
      __ vmov(i.OutputRegister(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtU32F64: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vcvt_u32_f64(scratch, i.InputFloat64Register(0));
      __ vmov(i.OutputRegister(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmLdrb:
      __ ldrb(i.OutputRegister(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmLdrsb:
      __ ldrsb(i.OutputRegister(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmStrb: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ strb(i.InputRegister(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
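    // Stores use the InputOffset(&index) overload: it advances index past the
    // inputs consumed by the addressing mode, so that i.InputRegister(index)
    // afterwards picks out the value operand that follows them.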
    case kArmLdrh:
      __ ldrh(i.OutputRegister(), i.InputOffset());
      break;
    case kArmLdrsh:
      __ ldrsh(i.OutputRegister(), i.InputOffset());
      break;
    case kArmStrh: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ strh(i.InputRegister(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmLdr:
      __ ldr(i.OutputRegister(), i.InputOffset());
      break;
    case kArmStr: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ str(i.InputRegister(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVldrF32: {
      __ vldr(i.OutputFloat32Register(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVstrF32: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ vstr(i.InputFloat32Register(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVldrF64:
      __ vldr(i.OutputFloat64Register(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVstrF64: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ vstr(i.InputFloat64Register(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmPush:
      __ Push(i.InputRegister(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
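    // The write barrier case below stores value at object[index] and then
    // notifies the GC of the write: the add folds the index into an absolute
    // field address (clobbering the index register), which RecordWrite takes
    // as its address operand.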
    case kArmStoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ add(index, object, index);
      __ str(value, MemOperand(index));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
      __ RecordWrite(object, index, value, lr_status, mode);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrb);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsh);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrh);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(32);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(64);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(strb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(strh);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(str);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(32);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(64);
      break;
  }
}


// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  ArmOperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  Condition cc = FlagsConditionToCondition(branch->condition);
  __ b(cc, tlabel);
  if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
}


void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}


// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  ArmOperandConverter i(this, instr);

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = FlagsConditionToCondition(condition);
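  // No branch is needed: the first mov below unconditionally clears the
  // register, and the second is a conditionally executed mov that writes 1
  // only when cc holds (LeaveCC keeps the flags intact).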
  __ mov(reg, Operand(0));
  __ mov(reg, Operand(1), LeaveCC, cc);
}


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  ArmOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ cmp(input, Operand(i.InputInt32(index + 0)));
    __ b(eq, GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}


void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  ArmOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  size_t const case_count = instr->InputCount() - 2;
  __ cmp(input, Operand(case_count));
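  // An inline jump table follows. Reading pc on ARM yields the address of the
  // current instruction plus 8, so the predicated ldr below indexes a table
  // that starts two instructions after itself; the unconditional branch to
  // the default target fills the slot in between and is reached only when
  // input >= case_count. Constant pool emission is blocked so the pool cannot
  // be interleaved with the table entries.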
  __ BlockConstPoolFor(case_count + 2);
  __ ldr(pc, MemOperand(pc, input, LSL, 2), lo);
  __ b(GetLabel(i.InputRpo(1)));
  for (size_t index = 0; index < case_count; ++index) {
    __ dd(GetLabel(i.InputRpo(index + 2)));
  }
}


void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    bool saved_pp;
    if (FLAG_enable_ool_constant_pool) {
      __ Push(lr, fp, pp);
      // Adjust FP to point to saved FP.
      __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
      saved_pp = true;
    } else {
      __ Push(lr, fp);
      __ mov(fp, sp);
      saved_pp = false;
    }
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0 || saved_pp) {
      // Save callee-saved registers.
      int register_save_area_size = saved_pp ? kPointerSize : 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
      __ stm(db_w, sp, saves);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else if (stack_slots > 0) {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly
    // from the unoptimized frame. Thus, all that needs to be done is to
    // allocate the remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
    stack_slots -= frame()->GetOsrStackSlotCount();
  }

  if (stack_slots > 0) {
    __ sub(sp, sp, Operand(stack_slots * kPointerSize));
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      if (stack_slots > 0) {
        __ add(sp, sp, Operand(stack_slots * kPointerSize));
      }
      // Restore registers.
      const RegList saves = descriptor->CalleeSavedRegisters();
      if (saves != 0) {
        __ ldm(ia_w, sp, saves);
      }
    }
    __ LeaveFrame(StackFrame::MANUAL);
    __ Ret();
  } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
    __ LeaveFrame(StackFrame::MANUAL);
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ Drop(pop_count);
    __ Ret();
  } else {
    __ Ret();
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  ArmOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(g.ToRegister(destination), src);
    } else {
      __ str(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ldr(g.ToRegister(destination), src);
    } else {
      Register temp = kScratchReg;
      __ ldr(temp, src);
      __ str(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          __ mov(dst, Operand(src.ToInt32()));
          break;
        case Constant::kInt64:
          UNREACHABLE();
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ mov(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): loading RPO constants on arm.
          break;
      }
      if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsDoubleStackSlot()) {
        MemOperand dst = g.ToMemOperand(destination);
        __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
        __ str(ip, dst);
      } else {
        SwVfpRegister dst = g.ToFloat32Register(destination);
        __ vmov(dst, src.ToFloat32());
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      DwVfpRegister dst = destination->IsDoubleRegister()
                              ? g.ToFloat64Register(destination)
                              : kScratchDoubleReg;
      __ vmov(dst, src.ToFloat64(), kScratchReg);
      if (destination->IsDoubleStackSlot()) {
        __ vstr(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsDoubleRegister()) {
    DwVfpRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DwVfpRegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ vstr(src, g.ToMemOperand(destination));
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ vldr(g.ToDoubleRegister(destination), src);
    } else {
      DwVfpRegister temp = kScratchDoubleReg;
      __ vldr(temp, src);
      __ vstr(temp, g.ToMemOperand(destination));
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  ArmOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ ldr(src, dst);
      __ str(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
    Register temp_0 = kScratchReg;
    SwVfpRegister temp_1 = kScratchDoubleReg.low();
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ ldr(temp_0, src);
    __ vldr(temp_1, dst);
    __ str(temp_0, dst);
    __ vstr(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    DwVfpRegister temp = kScratchDoubleReg;
    DwVfpRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DwVfpRegister dst = g.ToDoubleRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ Move(temp, src);
      __ vldr(src, dst);
      __ vstr(temp, dst);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleStackSlot());
    Register temp_0 = kScratchReg;
    DwVfpRegister temp_1 = kScratchDoubleReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
    MemOperand dst0 = g.ToMemOperand(destination);
    MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
    __ vldr(temp_1, dst0);  // Save destination in temp_1.
    __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
    __ str(temp_0, dst0);
    __ ldr(temp_0, src1);
    __ str(temp_0, dst1);
    __ vstr(temp_1, src0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  // On 32-bit ARM we emit the jump tables inline.
  UNREACHABLE();
}


void CodeGenerator::AddNopForSmiCodeInlining() {
  // On 32-bit ARM we do not insert nops for inlined Smi code.
}


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      // Block literal pool emission for duration of padding.
      v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= v8::internal::Assembler::kInstrSize;
      }
    }
  }
  MarkLazyDeoptSite();
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8