1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/compiler/code-generator.h"
6 #include "src/compiler/code-generator-impl.h"
7 #include "src/compiler/gap-resolver.h"
8 #include "src/compiler/node-matchers.h"
9 #include "src/mips/macro-assembler-mips.h"
10 #include "src/scopes.h"
// Register aliases: map the scratch-register names used throughout this
// code generator onto the Lithium scratch registers declared by the MIPS
// macro assembler. Note that kCompareReg and kScratchReg2 alias the SAME
// physical register (kLithiumScratchReg2), so they must never be live at
// the same time.
19 // TODO(plind): Possibly avoid using these lithium names.
20 #define kScratchReg kLithiumScratchReg
21 #define kCompareReg kLithiumScratchReg2
22 #define kScratchReg2 kLithiumScratchReg2
23 #define kScratchDoubleReg kLithiumScratchDouble
// Debug-tracing helpers. TRACE_MSG prints a tagged message together with
// the current function name; TRACE_UNIMPL flags an unimplemented code
// path in this code generator. Both macro bodies are continued with '\',
// so no comments may appear inside them.
26 // TODO(plind): consider renaming these macros.
27 #define TRACE_MSG(msg) \
28 PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
31 #define TRACE_UNIMPL() \
32 PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
36 // Adds Mips-specific methods to convert InstructionOperands.
37 class MipsOperandConverter FINAL : public InstructionOperandConverter {
39 MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
40 : InstructionOperandConverter(gen, instr) {}
// Returns output operand |index| as a single-precision FP register.
42 FloatRegister OutputSingleRegister(int index = 0) {
43 return ToSingleRegister(instr_->OutputAt(index));
// Returns input operand |index| as a single-precision FP register.
46 FloatRegister InputSingleRegister(int index) {
47 return ToSingleRegister(instr_->InputAt(index));
50 FloatRegister ToSingleRegister(InstructionOperand* op) {
51 // Single (Float) and Double register namespace is same on MIPS,
52 // both are typedefs of FPURegister.
53 return ToDoubleRegister(op);
// Converts constant input |index| into an assembler Operand. kInt32 is
// passed through directly; kFloat32/kFloat64 are materialized as heap
// numbers. kInt64/kExternalReference/kHeapObject/kRpoNumber are not
// supported here (see TODOs below).
56 Operand InputImmediate(int index) {
57 Constant constant = ToConstant(instr_->InputAt(index));
58 switch (constant.type()) {
59 case Constant::kInt32:
60 return Operand(constant.ToInt32());
61 case Constant::kFloat32:
63 isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
64 case Constant::kFloat64:
66 isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
67 case Constant::kInt64:
68 case Constant::kExternalReference:
69 case Constant::kHeapObject:
70 // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
71 // maybe not done on arm due to const pool ??
73 case Constant::kRpoNumber:
74 UNREACHABLE(); // TODO(titzer): RPO immediates on mips?
// Fallback return to satisfy the compiler; not reached for supported
// constant kinds.
78 return Operand(zero_reg);
// Returns input |index| as a register Operand when register-allocated,
// otherwise as an immediate.
81 Operand InputOperand(int index) {
82 InstructionOperand* op = instr_->InputAt(index);
83 if (op->IsRegister()) {
84 return Operand(ToRegister(op));
86 return InputImmediate(index);
// Decodes the instruction's addressing mode into a MemOperand, consuming
// inputs starting at |*first_index|.
89 MemOperand MemoryOperand(int* first_index) {
90 const int index = *first_index;
91 switch (AddressingModeField::decode(instr_->opcode())) {
96 return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
98 // TODO(plind): r6 address mode, to be implemented ...
// Unhandled addressing modes produce an invalid MemOperand(no_reg).
102 return MemOperand(no_reg);
105 MemOperand MemoryOperand(int index = 0) { return MemoryOperand(&index); }
// Converts a (double) stack-slot operand into an sp- or fp-relative
// MemOperand using the frame offsets computed by the linkage.
107 MemOperand ToMemOperand(InstructionOperand* op) const {
109 DCHECK(!op->IsRegister());
110 DCHECK(!op->IsDoubleRegister());
111 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
112 // The linkage computes where all spill slots are located.
113 FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
114 return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
// Returns true if input |index| of |instr| was allocated to a register
// (as opposed to an immediate or stack slot).
119 static inline bool HasRegisterInput(Instruction* instr, int index) {
120 return instr->InputAt(index)->IsRegister();
// Out-of-line stub for checked float32 loads: when the bounds check
// fails, materializes a quiet NaN into the result register.
126 class OutOfLineLoadSingle FINAL : public OutOfLineCode {
128 OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
129 : OutOfLineCode(gen), result_(result) {}
131 void Generate() FINAL {
132 __ Move(result_, std::numeric_limits<float>::quiet_NaN());
// Register receiving the out-of-bounds result.
136 FloatRegister const result_;
// Out-of-line stub for checked float64 loads: when the bounds check
// fails, materializes a quiet NaN into the result register.
140 class OutOfLineLoadDouble FINAL : public OutOfLineCode {
142 OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
143 : OutOfLineCode(gen), result_(result) {}
145 void Generate() FINAL {
146 __ Move(result_, std::numeric_limits<double>::quiet_NaN());
// Register receiving the out-of-bounds result.
150 DoubleRegister const result_;
// Out-of-line stub for checked integer loads: when the bounds check
// fails, zeroes the result register.
154 class OutOfLineLoadInteger FINAL : public OutOfLineCode {
156 OutOfLineLoadInteger(CodeGenerator* gen, Register result)
157 : OutOfLineCode(gen), result_(result) {}
159 void Generate() FINAL { __ mov(result_, zero_reg); }
// Register receiving the out-of-bounds result.
162 Register const result_;
// Out-of-line tail shared by the double rounding operations: when the
// rounded value is zero, restores the sign bit of the original input
// (whose high word was left in kScratchReg by the inline code) so that
// -0.0 survives rounding. Clobbers 'at'.
166 class OutOfLineRound : public OutOfLineCode {
168 OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
169 : OutOfLineCode(gen), result_(result) {}
171 void Generate() FINAL {
172 // Handle rounding to zero case where sign has to be preserved.
173 // High bits of double input already in kScratchReg.
// srl by 31 isolates the sign bit of the input's high word.
174 __ srl(at, kScratchReg, 31);
176 __ Mthc1(at, result_);
// Rounded double whose sign is being patched.
180 DoubleRegister const result_;
// Round-toward-zero (truncate) variant of OutOfLineRound.
184 class OutOfLineTruncate FINAL : public OutOfLineRound {
186 OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
187 : OutOfLineRound(gen, result) {}
// Round-toward-negative-infinity (floor) variant of OutOfLineRound.
191 class OutOfLineFloor FINAL : public OutOfLineRound {
193 OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
194 : OutOfLineRound(gen, result) {}
// Round-toward-positive-infinity (ceil) variant of OutOfLineRound.
198 class OutOfLineCeil FINAL : public OutOfLineRound {
200 OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
201 : OutOfLineRound(gen, result) {}
// Maps a FlagsCondition to the MIPS branch Condition used for integer
// compare pseudo-instructions (kMipsCmp). The unordered cases are grouped
// together at the bottom — presumably unsupported for integer compares;
// TODO(review): confirm against the unsupported-condition path.
205 Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
211 case kSignedLessThan:
213 case kSignedGreaterThanOrEqual:
215 case kSignedLessThanOrEqual:
217 case kSignedGreaterThan:
219 case kUnsignedLessThan:
221 case kUnsignedGreaterThanOrEqual:
223 case kUnsignedLessThanOrEqual:
225 case kUnsignedGreaterThan:
227 case kUnorderedEqual:
228 case kUnorderedNotEqual:
// Maps a FlagsCondition to the branch Condition used after the kMipsTst
// (bitwise-AND) pseudo-instruction.
238 Condition FlagsConditionToConditionTst(FlagsCondition condition) {
// Maps a FlagsCondition to the branch Condition used to test the overflow
// indicator that kMipsAddOvf/kMipsSubOvf leave in kCompareReg (negative
// value on overflow — see AssembleArchBranch).
252 Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
// Maps a FlagsCondition to an FPUCondition for double compares (kMipsCmpD)
// in AssembleArchBoolean. |predicate| is an out-parameter telling the
// caller whether to take the FPU comparison result as-is (true) or negated
// (false) — see the xori toggle in AssembleArchBoolean. Unsupported
// conditions (including the unordered ones grouped below) reach the
// fallback return of kNoFPUCondition.
265 FPUCondition FlagsConditionToConditionCmpD(bool& predicate,
266 FlagsCondition condition) {
274 case kUnsignedLessThan:
277 case kUnsignedGreaterThanOrEqual:
280 case kUnsignedLessThanOrEqual:
283 case kUnsignedGreaterThan:
286 case kUnorderedEqual:
287 case kUnorderedNotEqual:
295 return kNoFPUCondition;
// Emits a bounds-checked FP load of the given |width| (Single/Double).
// Register-offset form: branch (delay slot used) to the OutOfLineLoad
// stub when offset >= length (unsigned 'hs'), computing the address in
// 'at'. Immediate-offset form: perform the load only when length is
// 'ls' ... the out-of-line stub produces a NaN result on failure.
// Macro body is '\'-continued; comments cannot go inside it.
301 #define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \
303 auto result = i.Output##width##Register(); \
304 auto ool = new (zone()) OutOfLineLoad##width(this, result); \
305 if (instr->InputAt(0)->IsRegister()) { \
306 auto offset = i.InputRegister(0); \
307 __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
308 __ addu(at, i.InputRegister(2), offset); \
309 __ asm_instr(result, MemOperand(at, 0)); \
311 auto offset = i.InputOperand(0).immediate(); \
312 __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
313 __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
315 __ bind(ool->exit()); \
// Integer analogue of ASSEMBLE_CHECKED_LOAD_FLOAT: bounds-checked load via
// |asm_instr| (lb/lbu/lh/lhu/lw); out-of-bounds goes to the
// OutOfLineLoadInteger stub, which zeroes the result register.
// Macro body is '\'-continued; comments cannot go inside it.
319 #define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
321 auto result = i.OutputRegister(); \
322 auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
323 if (instr->InputAt(0)->IsRegister()) { \
324 auto offset = i.InputRegister(0); \
325 __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
326 __ addu(at, i.InputRegister(2), offset); \
327 __ asm_instr(result, MemOperand(at, 0)); \
329 auto offset = i.InputOperand(0).immediate(); \
330 __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
331 __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
333 __ bind(ool->exit()); \
// Emits a bounds-checked FP store of the given |width|: when the bounds
// check fails the store is skipped entirely (branch to 'done'), i.e.
// out-of-bounds stores are silently dropped.
// Macro body is '\'-continued; comments cannot go inside it.
337 #define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr) \
340 if (instr->InputAt(0)->IsRegister()) { \
341 auto offset = i.InputRegister(0); \
342 auto value = i.Input##width##Register(2); \
343 __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
344 __ addu(at, i.InputRegister(3), offset); \
345 __ asm_instr(value, MemOperand(at, 0)); \
347 auto offset = i.InputOperand(0).immediate(); \
348 auto value = i.Input##width##Register(2); \
349 __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
350 __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
// Integer analogue of ASSEMBLE_CHECKED_STORE_FLOAT: bounds-checked store
// via |asm_instr| (sb/sh/sw); out-of-bounds stores are silently dropped.
// Macro body is '\'-continued; comments cannot go inside it.
356 #define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
359 if (instr->InputAt(0)->IsRegister()) { \
360 auto offset = i.InputRegister(0); \
361 auto value = i.InputRegister(2); \
362 __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
363 __ addu(at, i.InputRegister(3), offset); \
364 __ asm_instr(value, MemOperand(at, 0)); \
366 auto offset = i.InputOperand(0).immediate(); \
367 auto value = i.InputRegister(2); \
368 __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
369 __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
// Rounds a double via the 64-bit rounding instruction |asm_instr| (e.g.
// floor_l_d) and converts back with cvt_d_l. Inputs whose biased exponent
// is already >= kExponentBias + kMantissaBits are integral (or huge) and
// are returned unchanged via the delay-slot mov_d. A zero 64-bit result
// is handed to the OutOfLine##operation stub to restore the sign of -0.0
// (input's high word is left in kScratchReg for it). Clobbers at,
// kScratchReg and kScratchReg2.
// Macro body is '\'-continued; comments cannot go inside it.
375 #define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(asm_instr, operation) \
378 new (zone()) OutOfLine##operation(this, i.OutputDoubleRegister()); \
380 __ Mfhc1(kScratchReg, i.InputDoubleRegister(0)); \
381 __ Ext(at, kScratchReg, HeapNumber::kExponentShift, \
382 HeapNumber::kExponentBits); \
383 __ Branch(USE_DELAY_SLOT, &done, hs, at, \
384 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
385 __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
386 __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
387 __ Move(at, kScratchReg2, i.OutputDoubleRegister()); \
388 __ or_(at, at, kScratchReg2); \
389 __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
390 __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
391 __ bind(ool->exit()); \
396 // Assembles an instruction after register allocation, producing machine code.
397 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
398 MipsOperandConverter i(this, instr);
399 InstructionCode opcode = instr->opcode();
// Dispatch on the architecture-level opcode; each case emits the MIPS
// code for one TurboFan instruction.
401 switch (ArchOpcodeField::decode(opcode)) {
// Call to a code object, either as an embedded constant (immediate) or
// through a register holding the code object.
402 case kArchCallCodeObject: {
403 EnsureSpaceForLazyDeopt();
404 if (instr->InputAt(0)->IsImmediate()) {
405 __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
406 RelocInfo::CODE_TARGET);
// Register form: skip past the code object header to the first
// instruction.
408 __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
411 AddSafepointAndDeopt(instr);
// Call a JSFunction through its code entry field.
414 case kArchCallJSFunction: {
415 EnsureSpaceForLazyDeopt();
416 Register func = i.InputRegister(0);
417 if (FLAG_debug_code) {
418 // Check the function's context matches the context argument.
419 __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
420 __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
423 __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
425 AddSafepointAndDeopt(instr);
// Control-flow pseudo-instructions delegate to the dedicated assemblers.
429 AssembleArchJump(i.InputRpo(0));
431 case kArchLookupSwitch:
432 AssembleArchLookupSwitch(instr);
434 case kArchTableSwitch:
435 AssembleArchTableSwitch(instr);
438 // don't emit code for nops.
443 case kArchStackPointer:
444 __ mov(i.OutputRegister(), sp);
446 case kArchTruncateDoubleToI:
447 __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
// Integer arithmetic. The *AndCheckForOverflow variants additionally
// leave an overflow indicator in kCompareReg (tested by the branch /
// boolean assemblers), clobbering kScratchReg.
450 __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
453 __ AdduAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
454 i.InputOperand(1), kCompareReg, kScratchReg);
457 __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
460 __ SubuAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
461 i.InputOperand(1), kCompareReg, kScratchReg);
464 __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
467 __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
470 __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
473 __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
476 __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
479 __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
482 __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
485 __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
488 __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
491 __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
// Shifts: variable form (register shift amount) vs. immediate form.
494 if (instr->InputAt(1)->IsRegister()) {
495 __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
497 int32_t imm = i.InputOperand(1).immediate();
498 __ sll(i.OutputRegister(), i.InputRegister(0), imm);
502 if (instr->InputAt(1)->IsRegister()) {
503 __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
505 int32_t imm = i.InputOperand(1).immediate();
506 __ srl(i.OutputRegister(), i.InputRegister(0), imm);
510 if (instr->InputAt(1)->IsRegister()) {
511 __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
513 int32_t imm = i.InputOperand(1).immediate();
514 __ sra(i.OutputRegister(), i.InputRegister(0), imm);
518 __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
521 // Pseudo-instruction used for tst/branch. No opcode emitted here.
524 // Pseudo-instruction used for cmp/branch. No opcode emitted here.
527 // TODO(plind): Should we combine mov/li like this, or use separate instr?
528 // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
529 if (HasRegisterInput(instr, 0)) {
530 __ mov(i.OutputRegister(), i.InputRegister(0));
532 __ li(i.OutputRegister(), i.InputOperand(0));
537 // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
// Double-precision FP arithmetic.
540 // TODO(plind): add special case: combine mult & add.
541 __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
542 i.InputDoubleRegister(1));
545 __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
546 i.InputDoubleRegister(1));
549 // TODO(plind): add special case: right op is -1.0, see arm port.
550 __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
551 i.InputDoubleRegister(1));
554 __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
555 i.InputDoubleRegister(1));
// Float64Mod: implemented as a C call to the runtime's fmod helper.
558 // TODO(bmeurer): We should really get rid of this special instruction,
559 // and generate a CallAddress instruction instead.
560 FrameScope scope(masm(), StackFrame::MANUAL);
561 __ PrepareCallCFunction(0, 2, kScratchReg);
562 __ MovToFloatParameters(i.InputDoubleRegister(0),
563 i.InputDoubleRegister(1));
564 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
566 // Move the result in the double result register.
567 __ MovFromFloatResult(i.OutputDoubleRegister());
// Double rounding, via the shared macro and per-mode out-of-line stubs.
570 case kMipsFloat64Floor: {
571 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
574 case kMipsFloat64Ceil: {
575 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
578 case kMipsFloat64RoundTruncate: {
579 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
583 __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
// FP conversions between single/double and int/uint32.
587 __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
591 __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
595 FPURegister scratch = kScratchDoubleReg;
596 __ mtc1(i.InputRegister(0), scratch);
597 __ cvt_d_w(i.OutputDoubleRegister(), scratch);
601 FPURegister scratch = kScratchDoubleReg;
602 __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
606 FPURegister scratch = kScratchDoubleReg;
607 // Other arches use round to zero here, so we follow.
608 __ trunc_w_d(scratch, i.InputDoubleRegister(0));
609 __ mfc1(i.OutputRegister(), scratch);
612 case kMipsTruncUwD: {
613 FPURegister scratch = kScratchDoubleReg;
614 // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
615 __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
618 // ... more basic instructions ...
// Plain (unchecked) loads and stores.
621 __ lbu(i.OutputRegister(), i.MemoryOperand());
624 __ lb(i.OutputRegister(), i.MemoryOperand());
627 __ sb(i.InputRegister(2), i.MemoryOperand());
630 __ lhu(i.OutputRegister(), i.MemoryOperand());
633 __ lh(i.OutputRegister(), i.MemoryOperand());
636 __ sh(i.InputRegister(2), i.MemoryOperand());
639 __ lw(i.OutputRegister(), i.MemoryOperand());
642 __ sw(i.InputRegister(2), i.MemoryOperand());
645 __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
650 MemOperand operand = i.MemoryOperand(&index);
651 __ swc1(i.InputSingleRegister(index), operand);
655 __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
658 __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
661 __ Push(i.InputRegister(0));
// Reserve |words| stack slots for outgoing parameters.
663 case kMipsStackClaim: {
664 int words = MiscField::decode(instr->opcode());
665 __ Subu(sp, sp, Operand(words << kPointerSizeLog2));
668 case kMipsStoreToStackSlot: {
669 int slot = MiscField::decode(instr->opcode());
670 __ sw(i.InputRegister(0), MemOperand(sp, slot << kPointerSizeLog2));
// Store with write barrier: computes the field address into |index|
// (clobbering it), stores |value|, then records the write for the GC.
673 case kMipsStoreWriteBarrier: {
674 Register object = i.InputRegister(0);
675 Register index = i.InputRegister(1);
676 Register value = i.InputRegister(2);
677 __ addu(index, object, index);
678 __ sw(value, MemOperand(index));
679 SaveFPRegsMode mode =
680 frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
681 RAStatus ra_status = kRAHasNotBeenSaved;
682 __ RecordWrite(object, index, value, ra_status, mode);
// Bounds-checked accesses, expanded via the ASSEMBLE_CHECKED_* macros.
685 case kCheckedLoadInt8:
686 ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
688 case kCheckedLoadUint8:
689 ASSEMBLE_CHECKED_LOAD_INTEGER(lbu);
691 case kCheckedLoadInt16:
692 ASSEMBLE_CHECKED_LOAD_INTEGER(lh);
694 case kCheckedLoadUint16:
695 ASSEMBLE_CHECKED_LOAD_INTEGER(lhu);
697 case kCheckedLoadWord32:
698 ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
700 case kCheckedLoadFloat32:
701 ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
703 case kCheckedLoadFloat64:
704 ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1);
706 case kCheckedStoreWord8:
707 ASSEMBLE_CHECKED_STORE_INTEGER(sb);
709 case kCheckedStoreWord16:
710 ASSEMBLE_CHECKED_STORE_INTEGER(sh);
712 case kCheckedStoreWord32:
713 ASSEMBLE_CHECKED_STORE_INTEGER(sw);
715 case kCheckedStoreFloat32:
716 ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
718 case kCheckedStoreFloat64:
719 ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
// Reports an unsupported |condition| for |opcode| on stdout. Used by the
// branch/boolean assemblers below for conditions they cannot lower.
// Macro body is '\'-continued; comments cannot go inside it.
725 #define UNSUPPORTED_COND(opcode, condition) \
726 OFStream out(stdout); \
727 out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
730 // Assembles branches after an instruction.
731 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
732 MipsOperandConverter i(this, instr);
733 Label* tlabel = branch->true_label;
734 Label* flabel = branch->false_label;
735 Condition cc = kNoCondition;
737 // MIPS does not have condition code flags, so compare and branch are
738 // implemented differently than on the other arch's. The compare operations
739 // emit mips pseudo-instructions, which are handled here by branch
740 // instructions that do the actual comparison. Essential that the input
741 // registers to compare pseudo-op are not modified before this branch op, as
742 // they are tested here.
743 // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
744 // not separated by other instructions.
// Tst: branch on the result of a bitwise AND (computed into 'at' here).
746 if (instr->arch_opcode() == kMipsTst) {
747 cc = FlagsConditionToConditionTst(branch->condition);
748 __ And(at, i.InputRegister(0), i.InputOperand(1));
749 __ Branch(tlabel, cc, at, Operand(zero_reg));
// Overflow: the arithmetic op left its indicator in kCompareReg.
751 } else if (instr->arch_opcode() == kMipsAddOvf ||
752 instr->arch_opcode() == kMipsSubOvf) {
753 // kMipsAddOvf, SubOvf emit negative result to 'kCompareReg' on overflow.
754 cc = FlagsConditionToConditionOvf(branch->condition);
755 __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg));
// Integer compare: compare-and-branch directly on the original inputs.
757 } else if (instr->arch_opcode() == kMipsCmp) {
758 cc = FlagsConditionToConditionCmp(branch->condition);
759 __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
761 if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
// Double compare: lower the FlagsCondition to a BranchF condition.
763 } else if (instr->arch_opcode() == kMipsCmpD) {
764 // TODO(dusmil) optimize unordered checks to use fewer instructions
765 // even if we have to unfold BranchF macro.
767 switch (branch->condition) {
775 case kUnsignedLessThan:
778 case kUnsignedGreaterThanOrEqual:
782 case kUnsignedLessThanOrEqual:
785 case kUnsignedGreaterThan:
790 UNSUPPORTED_COND(kMipsCmpD, branch->condition);
793 __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0),
794 i.InputDoubleRegister(1));
796 if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
// Unknown opcode: report and fall through.
799 PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
800 instr->arch_opcode());
// Emits an unconditional jump to |target|, unless the target block comes
// next in assembly order so control can simply fall through.
806 void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
807 if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
811 // Assembles boolean materializations after an instruction.
812 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
813 FlagsCondition condition) {
814 MipsOperandConverter i(this, instr);
817 // Materialize a full 32-bit 1 or 0 value. The result register is always the
818 // last output of the instruction.
820 DCHECK_NE(0u, instr->OutputCount());
821 Register result = i.OutputRegister(instr->OutputCount() - 1);
822 Condition cc = kNoCondition;
824 // MIPS does not have condition code flags, so compare and branch are
825 // implemented differently than on the other arch's. The compare operations
826 // emit mips pseudo-instructions, which are checked and handled here.
828 // For materializations, we use delay slot to set the result true, and
829 // in the false case, where we fall thru the branch, we reset the result
// Tst: AND into 'at', branch over the false path with 1 in delay slot.
832 if (instr->arch_opcode() == kMipsTst) {
833 cc = FlagsConditionToConditionTst(condition);
834 __ And(at, i.InputRegister(0), i.InputOperand(1));
835 __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
836 __ li(result, Operand(1)); // In delay slot.
// Overflow: test the indicator left in kCompareReg.
838 } else if (instr->arch_opcode() == kMipsAddOvf ||
839 instr->arch_opcode() == kMipsSubOvf) {
840 // kMipsAddOvf, SubOvf emits negative result to 'kCompareReg' on overflow.
841 cc = FlagsConditionToConditionOvf(condition);
842 __ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg));
843 __ li(result, Operand(1)); // In delay slot.
// Integer compare.
845 } else if (instr->arch_opcode() == kMipsCmp) {
846 Register left = i.InputRegister(0);
847 Operand right = i.InputOperand(1);
848 cc = FlagsConditionToConditionCmp(condition);
849 __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
850 __ li(result, Operand(1)); // In delay slot.
// Double compare: pre-r6 uses c.cond.d + conditional moves on the FPU
// condition flag; r6 uses cmp.cond.d, which writes an all-ones/zero mask
// into an FP register.
852 } else if (instr->arch_opcode() == kMipsCmpD) {
853 FPURegister left = i.InputDoubleRegister(0);
854 FPURegister right = i.InputDoubleRegister(1);
857 FPUCondition cc = FlagsConditionToConditionCmpD(predicate, condition);
858 if (!IsMipsArchVariant(kMips32r6)) {
859 __ li(result, Operand(1));
860 __ c(cc, D, left, right);
862 __ Movf(result, zero_reg);
864 __ Movt(result, zero_reg);
867 __ cmp(cc, L, kDoubleCompareReg, left, right);
868 __ mfc1(at, kDoubleCompareReg);
869 __ srl(result, at, 31); // Cmp returns all 1s for true.
870 if (!predicate) // Toggle result for not equal.
871 __ xori(result, result, 1);
// NOTE(review): the message below says AssembleArchBranch, but this is
// AssembleArchBoolean; the string is left as-is here.
875 PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
876 instr->arch_opcode());
881 // Fallthrough case is the false materialization.
882 __ bind(&false_value);
883 __ li(result, Operand(0));
// Lowers a lookup switch as a linear sequence of compare-and-branch pairs
// over inputs (2, 3), (4, 5), ..., followed by a jump to the default
// target held in input 1.
888 void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
889 MipsOperandConverter i(this, instr);
890 Register input = i.InputRegister(0);
891 for (size_t index = 2; index < instr->InputCount(); index += 2) {
// Compare value loaded into 'at'; beq is a raw branch with a delay slot.
892 __ li(at, Operand(i.InputInt32(index + 0)));
893 __ beq(input, at, GetLabel(i.InputRpo(index + 1)));
895 __ nop(); // Branch delay slot of the last beq.
896 AssembleArchJump(i.InputRpo(1));
// Lowers a table switch: bounds-check against the case count (out-of-range
// goes to the default target in input 1), then index into an inline jump
// table of case labels emitted right after the dispatch code.
900 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
901 MipsOperandConverter i(this, instr);
902 Register input = i.InputRegister(0);
903 size_t const case_count = instr->InputCount() - 2;
905 __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
// Keep the dispatch sequence and table contiguous: no trampoline pool
// may be emitted inside this window.
906 __ BlockTrampolinePoolFor(case_count + 6);
// Scale the index by 4 (table entries are one word each).
908 __ sll(at, input, 2); // Branch delay slot.
911 __ lw(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
913 __ nop(); // Branch delay slot nop.
// Emit the jump table itself: one label address per case.
914 for (size_t index = 0; index < case_count; ++index) {
915 __ dd(GetLabel(i.InputRpo(index + 2)));
// Emits a call to the lazy deoptimization entry for |deoptimization_id|.
920 void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
921 Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
922 isolate(), deoptimization_id, Deoptimizer::LAZY);
923 __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
// Assembles the function prologue: frame setup differs by call kind
// (C call with callee-saved registers, JS call with standard frame, or
// stub frame), then handles the OSR entry point and finally allocates
// the spill slots.
927 void CodeGenerator::AssemblePrologue() {
928 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
929 int stack_slots = frame()->GetSpillSlotCount();
// C entry: save the callee-saved registers named by the descriptor and
// record the size of the save area in the frame.
930 if (descriptor->kind() == CallDescriptor::kCallAddress) {
933 const RegList saves = descriptor->CalleeSavedRegisters();
934 if (saves != 0) { // Save callee-saved registers.
935 // TODO(plind): make callee save size const, possibly DCHECK it.
936 int register_save_area_size = 0;
937 for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
938 if (!((1 << i) & saves)) continue;
939 register_save_area_size += kPointerSize;
941 frame()->SetRegisterSaveAreaSize(register_save_area_size);
// JS entry: standard JS function prologue (possibly pre-aged).
944 } else if (descriptor->IsJSFunctionCall()) {
945 CompilationInfo* info = this->info();
946 __ Prologue(info->IsCodePreAgingActive());
947 frame()->SetRegisterSaveAreaSize(
948 StandardFrameConstants::kFixedFrameSizeFromFp);
// Stub entry with spill slots: standard frame without JS specifics.
949 } else if (stack_slots > 0) {
951 frame()->SetRegisterSaveAreaSize(
952 StandardFrameConstants::kFixedFrameSizeFromFp);
// OSR entry: direct entry is forbidden; the OSR entrypoint reuses the
// unoptimized frame, so only the remaining slots need allocating.
955 if (info()->is_osr()) {
956 // TurboFan OSR-compiled functions cannot be entered directly.
957 __ Abort(kShouldNotDirectlyEnterOsrFunction);
959 // Unoptimized code jumps directly to this entrypoint while the unoptimized
960 // frame is still on the stack. Optimized code uses OSR values directly from
961 // the unoptimized frame. Thus, all that needs to be done is to allocate the
962 // remaining stack slots.
963 if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
964 osr_pc_offset_ = __ pc_offset();
965 DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
966 stack_slots -= frame()->GetOsrStackSlotCount();
// Allocate the (remaining) spill slots.
969 if (stack_slots > 0) {
970 __ Subu(sp, sp, Operand(stack_slots * kPointerSize));
// Assembles the function epilogue/return: pops spill slots and restores
// callee-saved registers for C frames, or drops the JS parameters (via
// DropAndRet) for JS/stub frames.
975 void CodeGenerator::AssembleReturn() {
976 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
977 int stack_slots = frame()->GetSpillSlotCount();
978 if (descriptor->kind() == CallDescriptor::kCallAddress) {
979 if (frame()->GetRegisterSaveAreaSize() > 0) {
980 // Remove this frame's spill slots first.
981 if (stack_slots > 0) {
982 __ Addu(sp, sp, Operand(stack_slots * kPointerSize));
984 // Restore registers.
985 const RegList saves = descriptor->CalleeSavedRegisters();
// JS/stub frame: pop the frame and the JS parameters in one go.
993 } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
996 int pop_count = descriptor->IsJSFunctionCall()
997 ? static_cast<int>(descriptor->JSParameterCount())
999 __ DropAndRet(pop_count);
// Assembles a parallel-move element: copies |source| to |destination|,
// dispatching on operand kinds (register, stack slot, constant, double
// register, double stack slot). Uses kScratchReg/kScratchDoubleReg as
// intermediates for memory-to-memory transfers.
1006 void CodeGenerator::AssembleMove(InstructionOperand* source,
1007 InstructionOperand* destination) {
1008 MipsOperandConverter g(this, NULL);
1009 // Dispatch on the source and destination operand kinds. Not all
1010 // combinations are possible.
1011 if (source->IsRegister()) {
1012 DCHECK(destination->IsRegister() || destination->IsStackSlot());
1013 Register src = g.ToRegister(source);
1014 if (destination->IsRegister()) {
1015 __ mov(g.ToRegister(destination), src);
1017 __ sw(src, g.ToMemOperand(destination));
1019 } else if (source->IsStackSlot()) {
1020 DCHECK(destination->IsRegister() || destination->IsStackSlot());
1021 MemOperand src = g.ToMemOperand(source);
1022 if (destination->IsRegister()) {
1023 __ lw(g.ToRegister(destination), src);
// Slot-to-slot: bounce through the scratch register.
1025 Register temp = kScratchReg;
1027 __ sw(temp, g.ToMemOperand(destination));
1029 } else if (source->IsConstant()) {
1030 Constant src = g.ToConstant(source);
// Integer-like constants go into a GP register (kScratchReg when the
// destination is a stack slot), then are spilled if needed.
1031 if (destination->IsRegister() || destination->IsStackSlot()) {
1033 destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
1034 switch (src.type()) {
1035 case Constant::kInt32:
1036 __ li(dst, Operand(src.ToInt32()));
// Float constants in a GP destination are boxed as heap numbers.
1038 case Constant::kFloat32:
1039 __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
1041 case Constant::kInt64:
1044 case Constant::kFloat64:
1045 __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
1047 case Constant::kExternalReference:
1048 __ li(dst, Operand(src.ToExternalReference()));
1050 case Constant::kHeapObject:
1051 __ li(dst, src.ToHeapObject());
1053 case Constant::kRpoNumber:
1054 UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips.
1057 if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
// Float32 constant into an FP destination: store raw bits to a slot,
// or materialize into a single register.
1058 } else if (src.type() == Constant::kFloat32) {
1059 if (destination->IsDoubleStackSlot()) {
1060 MemOperand dst = g.ToMemOperand(destination);
1061 __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
1064 FloatRegister dst = g.ToSingleRegister(destination);
1065 __ Move(dst, src.ToFloat32());
// Float64 constant: materialize in a double register (scratch if the
// destination is a slot) and spill if needed.
1068 DCHECK_EQ(Constant::kFloat64, src.type());
1069 DoubleRegister dst = destination->IsDoubleRegister()
1070 ? g.ToDoubleRegister(destination)
1071 : kScratchDoubleReg;
1072 __ Move(dst, src.ToFloat64());
1073 if (destination->IsDoubleStackSlot()) {
1074 __ sdc1(dst, g.ToMemOperand(destination));
1077 } else if (source->IsDoubleRegister()) {
1078 FPURegister src = g.ToDoubleRegister(source);
1079 if (destination->IsDoubleRegister()) {
1080 FPURegister dst = g.ToDoubleRegister(destination);
1083 DCHECK(destination->IsDoubleStackSlot());
1084 __ sdc1(src, g.ToMemOperand(destination));
1086 } else if (source->IsDoubleStackSlot()) {
1087 DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
1088 MemOperand src = g.ToMemOperand(source);
1089 if (destination->IsDoubleRegister()) {
1090 __ ldc1(g.ToDoubleRegister(destination), src);
// Double-slot-to-slot: bounce through the scratch double register.
1092 FPURegister temp = kScratchDoubleReg;
1094 __ sdc1(temp, g.ToMemOperand(destination));
// Assembles a parallel-move swap: exchanges the contents of |source| and
// |destination|, dispatching on operand kinds. Uses kScratchReg,
// kCompareReg and kScratchDoubleReg as temporaries.
1102 void CodeGenerator::AssembleSwap(InstructionOperand* source,
1103 InstructionOperand* destination) {
1104 MipsOperandConverter g(this, NULL);
1105 // Dispatch on the source and destination operand kinds. Not all
1106 // combinations are possible.
1107 if (source->IsRegister()) {
1108 // Register-register.
1109 Register temp = kScratchReg;
1110 Register src = g.ToRegister(source);
1111 if (destination->IsRegister()) {
1112 Register dst = g.ToRegister(destination);
1117 DCHECK(destination->IsStackSlot());
1118 MemOperand dst = g.ToMemOperand(destination);
// Slot-to-slot integer swap: needs two GP temporaries (kCompareReg is
// free here since no compare pseudo-op is pending during moves).
1123 } else if (source->IsStackSlot()) {
1124 DCHECK(destination->IsStackSlot());
1125 Register temp_0 = kScratchReg;
1126 Register temp_1 = kCompareReg;
1127 MemOperand src = g.ToMemOperand(source);
1128 MemOperand dst = g.ToMemOperand(destination);
1133 } else if (source->IsDoubleRegister()) {
1134 FPURegister temp = kScratchDoubleReg;
1135 FPURegister src = g.ToDoubleRegister(source);
1136 if (destination->IsDoubleRegister()) {
1137 FPURegister dst = g.ToDoubleRegister(destination);
1142 DCHECK(destination->IsDoubleStackSlot());
1143 MemOperand dst = g.ToMemOperand(destination);
// Double-slot-to-slot swap: hold the destination double in an FP
// temporary while the source is copied word-by-word through a GP
// temporary, then store the saved double into the source slot.
1148 } else if (source->IsDoubleStackSlot()) {
1149 DCHECK(destination->IsDoubleStackSlot());
1150 Register temp_0 = kScratchReg;
1151 FPURegister temp_1 = kScratchDoubleReg;
1152 MemOperand src0 = g.ToMemOperand(source);
1153 MemOperand src1(src0.rm(), src0.offset() + kPointerSize);
1154 MemOperand dst0 = g.ToMemOperand(destination);
1155 MemOperand dst1(dst0.rm(), dst0.offset() + kPointerSize);
1156 __ ldc1(temp_1, dst0); // Save destination in temp_1.
1157 __ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
1158 __ sw(temp_0, dst0);
1159 __ lw(temp_0, src1);
1160 __ sw(temp_0, dst1);
1161 __ sdc1(temp_1, src0);
1163 // No other combinations are possible.
// No-op: jump tables are emitted inline by AssembleArchTableSwitch, so
// there is nothing to emit separately here.
1169 void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
1170 // On 32-bit MIPS we emit the jump tables inline.
// Emits a single nop as a patchable marker for smi-code inlining.
// NOTE(review): the comment below mentions ARM although this is the MIPS
// code generator — presumably copied from the ARM port; verify.
1175 void CodeGenerator::AddNopForSmiCodeInlining() {
1176 // Unused on 32-bit ARM. Still exists on 64-bit arm.
1177 // TODO(plind): Unclear when this is called now. Understand, fix if needed.
1178 __ nop(); // Maybe PROPERTY_ACCESS_INLINED?
// Pads the instruction stream with nops so that at least
// Deoptimizer::patch_size() bytes separate this point from the previous
// lazy-deopt site, guaranteeing the deoptimizer can patch a call here.
// Stubs are exempt.
1182 void CodeGenerator::EnsureSpaceForLazyDeopt() {
1183 int space_needed = Deoptimizer::patch_size();
1184 if (!info()->IsStub()) {
1185 // Ensure that we have enough space after the previous lazy-bailout
1186 // instruction for patching the code here.
1187 int current_pc = masm()->pc_offset();
1188 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
1189 // Block trampoline pool emission for duration of padding.
1190 v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
1192 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
// Padding must be a whole number of MIPS instructions.
1193 DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
1194 while (padding_size > 0) {
1196 padding_size -= v8::internal::Assembler::kInstrSize;
// Record this point as the most recent lazy-deopt site.
1200 MarkLazyDeoptSite();
1205 } // namespace compiler
1206 } // namespace internal