1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/compiler/code-generator.h"
6 #include "src/compiler/code-generator-impl.h"
7 #include "src/compiler/gap-resolver.h"
8 #include "src/compiler/node-matchers.h"
9 #include "src/mips/macro-assembler-mips.h"
10 #include "src/scopes.h"
19 // TODO(plind): Possibly avoid using these lithium names.
// Scratch-register aliases shared with the older Lithium backend so the two
// code generators agree on which registers may be clobbered by macros.
20 #define kScratchReg kLithiumScratchReg
21 #define kCompareReg kLithiumScratchReg2
22 #define kScratchReg2 kLithiumScratchReg2
23 #define kScratchDoubleReg kLithiumScratchDouble
26 // TODO(plind): consider renaming these macros.
// Debug-trace helpers that print the current function name/line.
// NOTE(review): both macro bodies end on a backslash-continued line in this
// excerpt (the terminating line is not visible) — confirm against upstream
// before editing; no comments may be inserted between continuation lines.
27 #define TRACE_MSG(msg) \
28 PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
31 #define TRACE_UNIMPL() \
32 PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
36 // Adds Mips-specific methods to convert InstructionOperands.
// NOTE(review): several interior lines (returns, breaks, closing braces) are
// not visible in this excerpt; comments below describe only what is shown.
37 class MipsOperandConverter FINAL : public InstructionOperandConverter {
39 MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
40 : InstructionOperandConverter(gen, instr) {}
// Output operand |index| viewed as a single-precision FPU register.
42 FloatRegister OutputSingleRegister(size_t index = 0) {
43 return ToSingleRegister(instr_->OutputAt(index));
// Input operand |index| viewed as a single-precision FPU register.
46 FloatRegister InputSingleRegister(size_t index) {
47 return ToSingleRegister(instr_->InputAt(index));
50 FloatRegister ToSingleRegister(InstructionOperand* op) {
51 // Single (Float) and Double register namespace is same on MIPS,
52 // both are typedefs of FPURegister.
53 return ToDoubleRegister(op);
// Converts a constant input to an assembler Operand. Float constants are
// materialized as tenured HeapNumbers via the factory.
56 Operand InputImmediate(size_t index) {
57 Constant constant = ToConstant(instr_->InputAt(index));
58 switch (constant.type()) {
59 case Constant::kInt32:
60 return Operand(constant.ToInt32());
61 case Constant::kFloat32:
63 isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
64 case Constant::kFloat64:
66 isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
67 case Constant::kInt64:
68 case Constant::kExternalReference:
69 case Constant::kHeapObject:
70 // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
71 // maybe not done on arm due to const pool ??
73 case Constant::kRpoNumber:
74 UNREACHABLE(); // TODO(titzer): RPO immediates on mips?
// Fallback keeps the compiler satisfied about the return path; the switch
// above is expected to cover all reachable constant types.
78 return Operand(zero_reg);
// Register inputs become register Operands; all other inputs go through
// InputImmediate().
81 Operand InputOperand(size_t index) {
82 InstructionOperand* op = instr_->InputAt(index);
83 if (op->IsRegister()) {
84 return Operand(ToRegister(op));
86 return InputImmediate(index);
// Decodes the addressing mode from the opcode and consumes instruction
// inputs starting at |*first_index| to build a MemOperand.
89 MemOperand MemoryOperand(size_t* first_index) {
90 const size_t index = *first_index;
91 switch (AddressingModeField::decode(instr_->opcode())) {
96 return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
98 // TODO(plind): r6 address mode, to be implemented ...
102 return MemOperand(no_reg);
105 MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
// Maps a stack-slot operand to its sp- or fp-relative MemOperand, as
// determined by the linkage's frame-offset computation.
107 MemOperand ToMemOperand(InstructionOperand* op) const {
109 DCHECK(!op->IsRegister());
110 DCHECK(!op->IsDoubleRegister());
111 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
112 // The linkage computes where all spill slots are located.
113 FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
114 return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
// True when input |index| of |instr| was allocated to a general register.
119 static inline bool HasRegisterInput(Instruction* instr, size_t index) {
120 return instr->InputAt(index)->IsRegister();
// Out-of-line slow path for checked float32 loads: produces a quiet NaN in
// the result register when the bounds check fails.
126 class OutOfLineLoadSingle FINAL : public OutOfLineCode {
128 OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
129 : OutOfLineCode(gen), result_(result) {}
131 void Generate() FINAL {
132 __ Move(result_, std::numeric_limits<float>::quiet_NaN());
136 FloatRegister const result_;
// Out-of-line slow path for checked float64 loads: produces a quiet NaN in
// the result register when the bounds check fails.
140 class OutOfLineLoadDouble FINAL : public OutOfLineCode {
142 OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
143 : OutOfLineCode(gen), result_(result) {}
145 void Generate() FINAL {
146 __ Move(result_, std::numeric_limits<double>::quiet_NaN());
150 DoubleRegister const result_;
// Out-of-line slow path for checked integer loads: zeroes the result
// register when the bounds check fails.
154 class OutOfLineLoadInteger FINAL : public OutOfLineCode {
156 OutOfLineLoadInteger(CodeGenerator* gen, Register result)
157 : OutOfLineCode(gen), result_(result) {}
159 void Generate() FINAL { __ mov(result_, zero_reg); }
162 Register const result_;
// Shared slow path for the double rounding instructions (floor/ceil/trunc):
// re-applies the input's sign bit so that rounding toward zero preserves
// the sign of -0.0. Expects the input's high word staged in kScratchReg by
// the ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE sequence.
166 class OutOfLineRound : public OutOfLineCode {
168 OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
169 : OutOfLineCode(gen), result_(result) {}
171 void Generate() FINAL {
172 // Handle rounding to zero case where sign has to be preserved.
173 // High bits of double input already in kScratchReg.
174 __ srl(at, kScratchReg, 31);
176 __ Mthc1(at, result_);
180 DoubleRegister const result_;
// Slow path tag for kMipsFloat64RoundTruncate; behavior lives in the base.
184 class OutOfLineTruncate FINAL : public OutOfLineRound {
186 OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
187 : OutOfLineRound(gen, result) {}
// Slow path tag for kMipsFloat64RoundDown; behavior lives in the base.
191 class OutOfLineFloor FINAL : public OutOfLineRound {
193 OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
194 : OutOfLineRound(gen, result) {}
// Slow path tag for kMipsFloat64RoundUp; behavior lives in the base.
198 class OutOfLineCeil FINAL : public OutOfLineRound {
200 OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
201 : OutOfLineRound(gen, result) {}
// Maps a FlagsCondition to the MIPS branch condition used for integer
// compares. NOTE(review): the per-case return statements are not visible in
// this excerpt — confirm the exact mapping against the upstream file.
205 Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
211 case kSignedLessThan:
213 case kSignedGreaterThanOrEqual:
215 case kSignedLessThanOrEqual:
217 case kSignedGreaterThan:
219 case kUnsignedLessThan:
221 case kUnsignedGreaterThanOrEqual:
223 case kUnsignedLessThanOrEqual:
225 case kUnsignedGreaterThan:
227 case kUnorderedEqual:
228 case kUnorderedNotEqual:
// Maps a FlagsCondition to the branch condition for test-under-mask (tst)
// pseudo-ops. Body not visible in this excerpt.
238 Condition FlagsConditionToConditionTst(FlagsCondition condition) {
// Maps a FlagsCondition to the branch condition for overflow checks
// (kMipsAddOvf/kMipsSubOvf). Body not visible in this excerpt.
252 Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
// Maps a FlagsCondition to an FPU compare condition for double compares.
// |predicate| is an out-parameter (note: non-const reference, against usual
// style) — per the callers below it selects whether the raw FPU compare
// result or its negation materializes "true". Per-case bodies are not
// visible in this excerpt; returns kNoFPUCondition as the fallback.
265 FPUCondition FlagsConditionToConditionCmpD(bool& predicate,
266 FlagsCondition condition) {
274 case kUnsignedLessThan:
277 case kUnsignedGreaterThanOrEqual:
280 case kUnsignedLessThanOrEqual:
283 case kUnsignedGreaterThan:
286 case kUnorderedEqual:
287 case kUnorderedNotEqual:
295 return kNoFPUCondition;
// Emits a bounds-checked FP load. Register-offset form: branch to the
// out-of-line NaN path when offset >= length (hs, unsigned), computing the
// address in the branch delay slot. Immediate-offset form: branch when
// length <= offset (ls). Comments cannot be placed inside the macro body
// (every line is backslash-continued).
301 #define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \
303 auto result = i.Output##width##Register(); \
304 auto ool = new (zone()) OutOfLineLoad##width(this, result); \
305 if (instr->InputAt(0)->IsRegister()) { \
306 auto offset = i.InputRegister(0); \
307 __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
308 __ addu(at, i.InputRegister(2), offset); \
309 __ asm_instr(result, MemOperand(at, 0)); \
311 auto offset = i.InputOperand(0).immediate(); \
312 __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
313 __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
315 __ bind(ool->exit()); \
// Integer variant of the bounds-checked load: identical control flow to
// ASSEMBLE_CHECKED_LOAD_FLOAT but the out-of-line path zeroes the result.
319 #define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
321 auto result = i.OutputRegister(); \
322 auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
323 if (instr->InputAt(0)->IsRegister()) { \
324 auto offset = i.InputRegister(0); \
325 __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
326 __ addu(at, i.InputRegister(2), offset); \
327 __ asm_instr(result, MemOperand(at, 0)); \
329 auto offset = i.InputOperand(0).immediate(); \
330 __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
331 __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
333 __ bind(ool->exit()); \
// Bounds-checked FP store: skips the store entirely (branch to &done) when
// the index is out of range; no out-of-line code is needed for stores.
337 #define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr) \
340 if (instr->InputAt(0)->IsRegister()) { \
341 auto offset = i.InputRegister(0); \
342 auto value = i.Input##width##Register(2); \
343 __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
344 __ addu(at, i.InputRegister(3), offset); \
345 __ asm_instr(value, MemOperand(at, 0)); \
347 auto offset = i.InputOperand(0).immediate(); \
348 auto value = i.Input##width##Register(2); \
349 __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
350 __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
// Integer variant of the bounds-checked store; same structure as the FP
// store macro above.
356 #define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
359 if (instr->InputAt(0)->IsRegister()) { \
360 auto offset = i.InputRegister(0); \
361 auto value = i.InputRegister(2); \
362 __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
363 __ addu(at, i.InputRegister(3), offset); \
364 __ asm_instr(value, MemOperand(at, 0)); \
366 auto offset = i.InputOperand(0).immediate(); \
367 auto value = i.InputRegister(2); \
368 __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
369 __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
// Rounds a double via the 64-bit-integer rounding instruction (e.g.
// floor_l_d), then converts back. Fast-exits (to &done) when the exponent
// says the value is already integral (>= 2^52); falls into the OutOfLine
// slow path when the rounded integer is zero, so the sign of -0.0 can be
// restored from the high word saved in kScratchReg.
375 #define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(asm_instr, operation) \
378 new (zone()) OutOfLine##operation(this, i.OutputDoubleRegister()); \
380 __ Mfhc1(kScratchReg, i.InputDoubleRegister(0)); \
381 __ Ext(at, kScratchReg, HeapNumber::kExponentShift, \
382 HeapNumber::kExponentBits); \
383 __ Branch(USE_DELAY_SLOT, &done, hs, at, \
384 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
385 __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
386 __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
387 __ Move(at, kScratchReg2, i.OutputDoubleRegister()); \
388 __ or_(at, at, kScratchReg2); \
389 __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
390 __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
391 __ bind(ool->exit()); \
396 // Assembles an instruction after register allocation, producing machine code.
// Dispatches on the arch opcode; many case labels, breaks and closing braces
// are not visible in this excerpt, so comments below annotate only the
// visible emission sequences.
397 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
398 MipsOperandConverter i(this, instr);
399 InstructionCode opcode = instr->opcode();
401 switch (ArchOpcodeField::decode(opcode)) {
402 case kArchCallCodeObject: {
403 EnsureSpaceForLazyDeopt();
404 if (instr->InputAt(0)->IsImmediate()) {
405 __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
406 RelocInfo::CODE_TARGET);
// Register target: skip the Code object header to reach the entry point.
408 __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
411 RecordCallPosition(instr);
414 case kArchCallJSFunction: {
415 EnsureSpaceForLazyDeopt();
416 Register func = i.InputRegister(0);
417 if (FLAG_debug_code) {
418 // Check the function's context matches the context argument.
419 __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
420 __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
423 __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
425 RecordCallPosition(instr);
429 AssembleArchJump(i.InputRpo(0));
431 case kArchLookupSwitch:
432 AssembleArchLookupSwitch(instr);
434 case kArchTableSwitch:
435 AssembleArchTableSwitch(instr);
438 // don't emit code for nops.
440 case kArchDeoptimize: {
442 BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
443 AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
449 case kArchStackPointer:
450 __ mov(i.OutputRegister(), sp);
452 case kArchTruncateDoubleToI:
453 __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
// Integer ALU ops; the *Ovf variants leave a negative value in kCompareReg
// on overflow, which the branch/boolean assemblers test later.
456 __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
459 __ AdduAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
460 i.InputOperand(1), kCompareReg, kScratchReg);
463 __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
466 __ SubuAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
467 i.InputOperand(1), kCompareReg, kScratchReg);
470 __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
473 __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
476 __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
479 __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
482 __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
485 __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
488 __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
491 __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
494 __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
497 __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
500 __ Clz(i.OutputRegister(), i.InputRegister(0));
// Shifts: variable-shift instruction for register amounts, immediate form
// otherwise.
503 if (instr->InputAt(1)->IsRegister()) {
504 __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
506 int32_t imm = i.InputOperand(1).immediate();
507 __ sll(i.OutputRegister(), i.InputRegister(0), imm);
511 if (instr->InputAt(1)->IsRegister()) {
512 __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
514 int32_t imm = i.InputOperand(1).immediate();
515 __ srl(i.OutputRegister(), i.InputRegister(0), imm);
519 if (instr->InputAt(1)->IsRegister()) {
520 __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
522 int32_t imm = i.InputOperand(1).immediate();
523 __ sra(i.OutputRegister(), i.InputRegister(0), imm);
527 __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
530 // Pseudo-instruction used for tst/branch. No opcode emitted here.
533 // Pseudo-instruction used for cmp/branch. No opcode emitted here.
536 // TODO(plind): Should we combine mov/li like this, or use separate instr?
537 // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
538 if (HasRegisterInput(instr, 0)) {
539 __ mov(i.OutputRegister(), i.InputRegister(0));
541 __ li(i.OutputRegister(), i.InputOperand(0));
546 // Psuedo-instruction used for FP cmp/branch. No opcode emitted here.
549 // TODO(plind): add special case: combine mult & add.
550 __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
551 i.InputDoubleRegister(1));
554 __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
555 i.InputDoubleRegister(1));
558 // TODO(plind): add special case: right op is -1.0, see arm port.
559 __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
560 i.InputDoubleRegister(1));
563 __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
564 i.InputDoubleRegister(1));
567 // TODO(bmeurer): We should really get rid of this special instruction,
568 // and generate a CallAddress instruction instead.
569 FrameScope scope(masm(), StackFrame::MANUAL);
570 __ PrepareCallCFunction(0, 2, kScratchReg);
571 __ MovToFloatParameters(i.InputDoubleRegister(0),
572 i.InputDoubleRegister(1));
573 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
575 // Move the result in the double result register.
576 __ MovFromFloatResult(i.OutputDoubleRegister());
579 case kMipsFloat64RoundDown: {
580 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
583 case kMipsFloat64RoundTruncate: {
584 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
587 case kMipsFloat64RoundUp: {
588 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
592 __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
596 __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
600 __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
604 FPURegister scratch = kScratchDoubleReg;
605 __ mtc1(i.InputRegister(0), scratch);
606 __ cvt_d_w(i.OutputDoubleRegister(), scratch);
610 FPURegister scratch = kScratchDoubleReg;
611 __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
615 FPURegister scratch = kScratchDoubleReg;
616 // Other arches use round to zero here, so we follow.
617 __ trunc_w_d(scratch, i.InputDoubleRegister(0));
618 __ mfc1(i.OutputRegister(), scratch);
621 case kMipsTruncUwD: {
622 FPURegister scratch = kScratchDoubleReg;
623 // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
624 __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
627 case kMipsFloat64ExtractLowWord32:
628 __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
630 case kMipsFloat64ExtractHighWord32:
631 __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0));
633 case kMipsFloat64InsertLowWord32:
634 __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
636 case kMipsFloat64InsertHighWord32:
637 __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
639 // ... more basic instructions ...
642 __ lbu(i.OutputRegister(), i.MemoryOperand());
645 __ lb(i.OutputRegister(), i.MemoryOperand());
648 __ sb(i.InputRegister(2), i.MemoryOperand());
651 __ lhu(i.OutputRegister(), i.MemoryOperand());
654 __ lh(i.OutputRegister(), i.MemoryOperand());
657 __ sh(i.InputRegister(2), i.MemoryOperand());
660 __ lw(i.OutputRegister(), i.MemoryOperand());
663 __ sw(i.InputRegister(2), i.MemoryOperand());
666 __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
// Store-single computes the operand first so |index| points at the value.
671 MemOperand operand = i.MemoryOperand(&index);
672 __ swc1(i.InputSingleRegister(index), operand);
676 __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
679 __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
682 __ Push(i.InputRegister(0));
684 case kMipsStackClaim: {
685 int words = MiscField::decode(instr->opcode());
686 __ Subu(sp, sp, Operand(words << kPointerSizeLog2));
689 case kMipsStoreToStackSlot: {
690 int slot = MiscField::decode(instr->opcode());
691 __ sw(i.InputRegister(0), MemOperand(sp, slot << kPointerSizeLog2));
694 case kMipsStoreWriteBarrier: {
// Store |value| at object+index, then record the write for the GC.
// Note: |index| is clobbered to hold the absolute address, as RecordWrite
// expects.
695 Register object = i.InputRegister(0);
696 Register index = i.InputRegister(1);
697 Register value = i.InputRegister(2);
698 __ addu(index, object, index);
699 __ sw(value, MemOperand(index));
700 SaveFPRegsMode mode =
701 frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
702 RAStatus ra_status = kRAHasNotBeenSaved;
703 __ RecordWrite(object, index, value, ra_status, mode);
706 case kCheckedLoadInt8:
707 ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
709 case kCheckedLoadUint8:
710 ASSEMBLE_CHECKED_LOAD_INTEGER(lbu);
712 case kCheckedLoadInt16:
713 ASSEMBLE_CHECKED_LOAD_INTEGER(lh);
715 case kCheckedLoadUint16:
716 ASSEMBLE_CHECKED_LOAD_INTEGER(lhu);
718 case kCheckedLoadWord32:
719 ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
721 case kCheckedLoadFloat32:
722 ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
724 case kCheckedLoadFloat64:
725 ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1);
727 case kCheckedStoreWord8:
728 ASSEMBLE_CHECKED_STORE_INTEGER(sb);
730 case kCheckedStoreWord16:
731 ASSEMBLE_CHECKED_STORE_INTEGER(sh);
733 case kCheckedStoreWord32:
734 ASSEMBLE_CHECKED_STORE_INTEGER(sw);
736 case kCheckedStoreFloat32:
737 ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
739 case kCheckedStoreFloat64:
740 ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
// Reports an unsupported opcode/condition combination to stdout; the
// macro's terminating line is not visible in this excerpt.
746 #define UNSUPPORTED_COND(opcode, condition) \
747 OFStream out(stdout); \
748 out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
751 // Assembles branches after an instruction.
752 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
753 MipsOperandConverter i(this, instr);
754 Label* tlabel = branch->true_label;
755 Label* flabel = branch->false_label;
756 Condition cc = kNoCondition;
758 // MIPS does not have condition code flags, so compare and branch are
759 // implemented differently than on the other arch's. The compare operations
760 // emit mips pseudo-instructions, which are handled here by branch
761 // instructions that do the actual comparison. Essential that the input
762 // registers to compare pseudo-op are not modified before this branch op, as
763 // they are tested here.
764 // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
765 // not separated by other instructions.
767 if (instr->arch_opcode() == kMipsTst) {
// tst: AND the operands into 'at' and branch on the result vs zero.
768 cc = FlagsConditionToConditionTst(branch->condition);
769 __ And(at, i.InputRegister(0), i.InputOperand(1));
770 __ Branch(tlabel, cc, at, Operand(zero_reg));
772 } else if (instr->arch_opcode() == kMipsAddOvf ||
773 instr->arch_opcode() == kMipsSubOvf) {
774 // kMipsAddOvf, SubOvf emit negative result to 'kCompareReg' on overflow.
775 cc = FlagsConditionToConditionOvf(branch->condition);
776 __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg));
778 } else if (instr->arch_opcode() == kMipsCmp) {
779 cc = FlagsConditionToConditionCmp(branch->condition);
780 __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
782 if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
784 } else if (instr->arch_opcode() == kMipsCmpD) {
785 // TODO(dusmil) optimize unordered checks to use fewer instructions
786 // even if we have to unfold BranchF macro.
// NOTE(review): the per-case bodies of this switch (setting cc/nan) are
// not visible in this excerpt.
788 switch (branch->condition) {
796 case kUnsignedLessThan:
799 case kUnsignedGreaterThanOrEqual:
803 case kUnsignedLessThanOrEqual:
806 case kUnsignedGreaterThan:
811 UNSUPPORTED_COND(kMipsCmpD, branch->condition);
814 __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0),
815 i.InputDoubleRegister(1));
817 if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
// Unknown opcode: report and fall through (debug aid, not a crash).
820 PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
821 instr->arch_opcode());
// Emits an unconditional jump to |target| unless it is the fall-through
// block, in which case no branch is needed.
827 void CodeGenerator::AssembleArchJump(RpoNumber target) {
828 if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
832 // Assembles boolean materializations after an instruction.
833 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
834 FlagsCondition condition) {
835 MipsOperandConverter i(this, instr);
838 // Materialize a full 32-bit 1 or 0 value. The result register is always the
839 // last output of the instruction.
841 DCHECK_NE(0u, instr->OutputCount());
842 Register result = i.OutputRegister(instr->OutputCount() - 1);
843 Condition cc = kNoCondition;
845 // MIPS does not have condition code flags, so compare and branch are
846 // implemented differently than on the other arch's. The compare operations
847 // emit mips pseudo-instructions, which are checked and handled here.
849 // For materializations, we use delay slot to set the result true, and
850 // in the false case, where we fall thru the branch, we reset the result
853 if (instr->arch_opcode() == kMipsTst) {
854 cc = FlagsConditionToConditionTst(condition);
855 __ And(at, i.InputRegister(0), i.InputOperand(1));
856 __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
857 __ li(result, Operand(1)); // In delay slot.
859 } else if (instr->arch_opcode() == kMipsAddOvf ||
860 instr->arch_opcode() == kMipsSubOvf) {
861 // kMipsAddOvf, SubOvf emits negative result to 'kCompareReg' on overflow.
862 cc = FlagsConditionToConditionOvf(condition);
863 __ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg));
864 __ li(result, Operand(1)); // In delay slot.
866 } else if (instr->arch_opcode() == kMipsCmp) {
867 Register left = i.InputRegister(0);
868 Operand right = i.InputOperand(1);
869 cc = FlagsConditionToConditionCmp(condition);
870 __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
871 __ li(result, Operand(1)); // In delay slot.
873 } else if (instr->arch_opcode() == kMipsCmpD) {
874 FPURegister left = i.InputDoubleRegister(0);
875 FPURegister right = i.InputDoubleRegister(1);
// |predicate| selects whether the raw compare result or its negation is
// the "true" value (set by FlagsConditionToConditionCmpD).
878 FPUCondition cc = FlagsConditionToConditionCmpD(predicate, condition);
879 if (!IsMipsArchVariant(kMips32r6)) {
// Pre-r6: c.cond.fmt sets the FPU condition flag; Movf/Movt conditionally
// zero the preloaded 1.
880 __ li(result, Operand(1));
881 __ c(cc, D, left, right);
883 __ Movf(result, zero_reg);
885 __ Movt(result, zero_reg);
// r6: cmp.cond.fmt writes all-ones/all-zeros into an FPU register.
888 __ cmp(cc, L, kDoubleCompareReg, left, right);
889 __ mfc1(at, kDoubleCompareReg);
890 __ srl(result, at, 31); // Cmp returns all 1s for true.
891 if (!predicate) // Toggle result for not equal.
892 __ xori(result, result, 1);
896 PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
897 instr->arch_opcode());
902 // Fallthrough case is the false materialization.
903 __ bind(&false_value);
904 __ li(result, Operand(0));
// Lookup switch: compare the input against each (value, block) pair in turn
// and branch on match; input 1 is the default block.
909 void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
910 MipsOperandConverter i(this, instr);
911 Register input = i.InputRegister(0);
912 for (size_t index = 2; index < instr->InputCount(); index += 2) {
913 __ li(at, Operand(i.InputInt32(index + 0)));
914 __ beq(input, at, GetLabel(i.InputRpo(index + 1)));
916 __ nop(); // Branch delay slot of the last beq.
917 AssembleArchJump(i.InputRpo(1));
// Table switch: range-check the input (default block is input 1), then load
// a target address from an inline jump table indexed by input * 4. The
// trampoline pool is blocked so the table stays contiguous with the code
// that addresses it.
921 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
922 MipsOperandConverter i(this, instr);
923 Register input = i.InputRegister(0);
924 size_t const case_count = instr->InputCount() - 2;
926 __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
927 __ BlockTrampolinePoolFor(case_count + 6);
929 __ sll(at, input, 2); // Branch delay slot.
932 __ lw(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
934 __ nop(); // Branch delay slot nop.
// Emit the jump table itself: one address word per case.
935 for (size_t index = 0; index < case_count; ++index) {
936 __ dd(GetLabel(i.InputRpo(index + 2)));
// Emits a call to the pre-built deoptimizer entry stub for the given
// deoptimization id and bailout type.
941 void CodeGenerator::AssembleDeoptimizerCall(
942 int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
943 Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
944 isolate(), deoptimization_id, bailout_type);
945 __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
// Emits the function prologue: C-call frames save callee-saved registers,
// JS frames use the standard Prologue, stub frames just mark the save area;
// finally spill slots are allocated (with an OSR adjustment if applicable).
949 void CodeGenerator::AssemblePrologue() {
950 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
951 int stack_slots = frame()->GetSpillSlotCount();
952 if (descriptor->kind() == CallDescriptor::kCallAddress) {
955 const RegList saves = descriptor->CalleeSavedRegisters();
956 if (saves != 0) { // Save callee-saved registers.
957 // TODO(plind): make callee save size const, possibly DCHECK it.
958 int register_save_area_size = 0;
959 for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
960 if (!((1 << i) & saves)) continue;
961 register_save_area_size += kPointerSize;
963 frame()->SetRegisterSaveAreaSize(register_save_area_size);
966 } else if (descriptor->IsJSFunctionCall()) {
967 CompilationInfo* info = this->info();
968 __ Prologue(info->IsCodePreAgingActive());
969 frame()->SetRegisterSaveAreaSize(
970 StandardFrameConstants::kFixedFrameSizeFromFp);
971 } else if (stack_slots > 0) {
973 frame()->SetRegisterSaveAreaSize(
974 StandardFrameConstants::kFixedFrameSizeFromFp);
977 if (info()->is_osr()) {
978 // TurboFan OSR-compiled functions cannot be entered directly.
979 __ Abort(kShouldNotDirectlyEnterOsrFunction);
981 // Unoptimized code jumps directly to this entrypoint while the unoptimized
982 // frame is still on the stack. Optimized code uses OSR values directly from
983 // the unoptimized frame. Thus, all that needs to be done is to allocate the
984 // remaining stack slots.
985 if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
986 osr_pc_offset_ = __ pc_offset();
987 // TODO(titzer): cannot address target function == local #-1
988 __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
989 DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
990 stack_slots -= frame()->GetOsrStackSlotCount();
993 if (stack_slots > 0) {
994 __ Subu(sp, sp, Operand(stack_slots * kPointerSize));
// Emits the function epilogue: pops spill slots and restores callee-saved
// registers for C-call frames; JS/stub frames drop the frame and the
// JS parameter count in one DropAndRet.
999 void CodeGenerator::AssembleReturn() {
1000 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
1001 int stack_slots = frame()->GetSpillSlotCount();
1002 if (descriptor->kind() == CallDescriptor::kCallAddress) {
1003 if (frame()->GetRegisterSaveAreaSize() > 0) {
1004 // Remove this frame's spill slots first.
1005 if (stack_slots > 0) {
1006 __ Addu(sp, sp, Operand(stack_slots * kPointerSize));
1008 // Restore registers.
1009 const RegList saves = descriptor->CalleeSavedRegisters();
1017 } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
1020 int pop_count = descriptor->IsJSFunctionCall()
1021 ? static_cast<int>(descriptor->JSParameterCount())
1023 __ DropAndRet(pop_count);
// Emits a single parallel-move component: register/stack-slot/constant
// sources into register or stack-slot destinations, for both general and
// double registers. Memory-to-memory moves go through a scratch register.
1030 void CodeGenerator::AssembleMove(InstructionOperand* source,
1031 InstructionOperand* destination) {
1032 MipsOperandConverter g(this, NULL);
1033 // Dispatch on the source and destination operand kinds. Not all
1034 // combinations are possible.
1035 if (source->IsRegister()) {
1036 DCHECK(destination->IsRegister() || destination->IsStackSlot());
1037 Register src = g.ToRegister(source);
1038 if (destination->IsRegister()) {
1039 __ mov(g.ToRegister(destination), src);
1041 __ sw(src, g.ToMemOperand(destination));
1043 } else if (source->IsStackSlot()) {
1044 DCHECK(destination->IsRegister() || destination->IsStackSlot());
1045 MemOperand src = g.ToMemOperand(source);
1046 if (destination->IsRegister()) {
1047 __ lw(g.ToRegister(destination), src);
1049 Register temp = kScratchReg;
1051 __ sw(temp, g.ToMemOperand(destination));
1053 } else if (source->IsConstant()) {
1054 Constant src = g.ToConstant(source);
1055 if (destination->IsRegister() || destination->IsStackSlot()) {
// Load into the destination register, or into the scratch register when
// the destination is a stack slot (stored below).
1057 destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
1058 switch (src.type()) {
1059 case Constant::kInt32:
1060 __ li(dst, Operand(src.ToInt32()));
1062 case Constant::kFloat32:
1063 __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
1065 case Constant::kInt64:
1068 case Constant::kFloat64:
1069 __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
1071 case Constant::kExternalReference:
1072 __ li(dst, Operand(src.ToExternalReference()));
1074 case Constant::kHeapObject:
1075 __ li(dst, src.ToHeapObject());
1077 case Constant::kRpoNumber:
1078 UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips.
1081 if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
1082 } else if (src.type() == Constant::kFloat32) {
// Float32 into a double stack slot stores the raw bit pattern.
1083 if (destination->IsDoubleStackSlot()) {
1084 MemOperand dst = g.ToMemOperand(destination);
1085 __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
1088 FloatRegister dst = g.ToSingleRegister(destination);
1089 __ Move(dst, src.ToFloat32());
1092 DCHECK_EQ(Constant::kFloat64, src.type());
1093 DoubleRegister dst = destination->IsDoubleRegister()
1094 ? g.ToDoubleRegister(destination)
1095 : kScratchDoubleReg;
1096 __ Move(dst, src.ToFloat64());
1097 if (destination->IsDoubleStackSlot()) {
1098 __ sdc1(dst, g.ToMemOperand(destination));
1101 } else if (source->IsDoubleRegister()) {
1102 FPURegister src = g.ToDoubleRegister(source);
1103 if (destination->IsDoubleRegister()) {
1104 FPURegister dst = g.ToDoubleRegister(destination);
1107 DCHECK(destination->IsDoubleStackSlot());
1108 __ sdc1(src, g.ToMemOperand(destination));
1110 } else if (source->IsDoubleStackSlot()) {
1111 DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
1112 MemOperand src = g.ToMemOperand(source);
1113 if (destination->IsDoubleRegister()) {
1114 __ ldc1(g.ToDoubleRegister(destination), src);
1116 FPURegister temp = kScratchDoubleReg;
1118 __ sdc1(temp, g.ToMemOperand(destination));
// Emits a swap of two operands, using the scratch registers as temporaries.
// Double-stack-slot swaps move word-by-word with temp_1 holding one full
// double so only a single FPU temp is needed.
1126 void CodeGenerator::AssembleSwap(InstructionOperand* source,
1127 InstructionOperand* destination) {
1128 MipsOperandConverter g(this, NULL);
1129 // Dispatch on the source and destination operand kinds. Not all
1130 // combinations are possible.
1131 if (source->IsRegister()) {
1132 // Register-register.
1133 Register temp = kScratchReg;
1134 Register src = g.ToRegister(source);
1135 if (destination->IsRegister()) {
1136 Register dst = g.ToRegister(destination);
1141 DCHECK(destination->IsStackSlot());
1142 MemOperand dst = g.ToMemOperand(destination);
1147 } else if (source->IsStackSlot()) {
1148 DCHECK(destination->IsStackSlot());
1149 Register temp_0 = kScratchReg;
1150 Register temp_1 = kCompareReg;
1151 MemOperand src = g.ToMemOperand(source);
1152 MemOperand dst = g.ToMemOperand(destination);
1157 } else if (source->IsDoubleRegister()) {
1158 FPURegister temp = kScratchDoubleReg;
1159 FPURegister src = g.ToDoubleRegister(source);
1160 if (destination->IsDoubleRegister()) {
1161 FPURegister dst = g.ToDoubleRegister(destination);
1166 DCHECK(destination->IsDoubleStackSlot());
1167 MemOperand dst = g.ToMemOperand(destination);
1172 } else if (source->IsDoubleStackSlot()) {
1173 DCHECK(destination->IsDoubleStackSlot());
1174 Register temp_0 = kScratchReg;
1175 FPURegister temp_1 = kScratchDoubleReg;
// src0/dst0 are the low words; src1/dst1 the high words one kIntSize up.
1176 MemOperand src0 = g.ToMemOperand(source);
1177 MemOperand src1(src0.rm(), src0.offset() + kIntSize);
1178 MemOperand dst0 = g.ToMemOperand(destination);
1179 MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
1180 __ ldc1(temp_1, dst0); // Save destination in temp_1.
1181 __ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
1182 __ sw(temp_0, dst0);
1183 __ lw(temp_0, src1);
1184 __ sw(temp_0, dst1);
1185 __ sdc1(temp_1, src0);
1187 // No other combinations are possible.
// Intentionally a no-op: jump tables are emitted inline by
// AssembleArchTableSwitch on 32-bit MIPS.
1193 void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
1194 // On 32-bit MIPS we emit the jump tables inline.
// Emits a single nop as a patchable marker for inlined smi code paths.
1199 void CodeGenerator::AddNopForSmiCodeInlining() {
1200 // Unused on 32-bit ARM. Still exists on 64-bit arm.
1201 // TODO(plind): Unclear when this is called now. Understand, fix if needed.
1202 __ nop(); // Maybe PROPERTY_ACCESS_INLINED?
// Pads the instruction stream with nops so the deoptimizer can later patch
// a call over the site of the last lazy bailout without overwriting
// unrelated code. Not needed for stubs, which are never lazily deopted.
1206 void CodeGenerator::EnsureSpaceForLazyDeopt() {
1207 int space_needed = Deoptimizer::patch_size();
1208 if (!info()->IsStub()) {
1209 // Ensure that we have enough space after the previous lazy-bailout
1210 // instruction for patching the code here.
1211 int current_pc = masm()->pc_offset();
1212 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
1213 // Block trampoline pool emission for duration of padding.
1214 v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
1216 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
1217 DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
1218 while (padding_size > 0) {
1220 padding_size -= v8::internal::Assembler::kInstrSize;
1224 MarkLazyDeoptSite();
1229 } // namespace compiler
1230 } // namespace internal