1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/compiler/code-generator.h"
6 #include "src/compiler/code-generator-impl.h"
7 #include "src/compiler/gap-resolver.h"
8 #include "src/compiler/node-matchers.h"
9 #include "src/mips/macro-assembler-mips.h"
10 #include "src/scopes.h"
// Scratch-register aliases (borrowed from the Lithium/Crankshaft backend) and
// debug-trace macros. NOTE: this excerpt elides some macro continuation lines
// (the PrintF argument lists are incomplete here).
19 // TODO(plind): Possibly avoid using these lithium names.
20 #define kScratchReg kLithiumScratchReg
21 #define kScratchReg2 kLithiumScratchReg2
22 #define kScratchDoubleReg kLithiumScratchDouble
25 // TODO(plind): consider renaming these macros.
26 #define TRACE_MSG(msg) \
27 PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
30 #define TRACE_UNIMPL() \
31 PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
35 // Adds Mips-specific methods to convert InstructionOperands.
// Thin adapter over InstructionOperandConverter; converts the instruction's
// abstract operands into MIPS registers, immediates and memory operands.
// (Excerpt is elided: some closing braces / switch arms are not visible.)
36 class MipsOperandConverter FINAL : public InstructionOperandConverter {
38 MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
39 : InstructionOperandConverter(gen, instr) {}
// Single-precision accessors simply reuse the double-register conversion,
// see ToSingleRegister below.
41 FloatRegister OutputSingleRegister(int index = 0) {
42 return ToSingleRegister(instr_->OutputAt(index));
45 FloatRegister InputSingleRegister(int index) {
46 return ToSingleRegister(instr_->InputAt(index));
49 FloatRegister ToSingleRegister(InstructionOperand* op) {
50 // Single (Float) and Double register namespace is same on MIPS,
51 // both are typedefs of FPURegister.
52 return ToDoubleRegister(op);
// Materializes the instruction's constant input `index` as an assembler
// Operand. Float constants are boxed as tenured HeapNumbers.
55 Operand InputImmediate(int index) {
56 Constant constant = ToConstant(instr_->InputAt(index));
57 switch (constant.type()) {
58 case Constant::kInt32:
59 return Operand(constant.ToInt32());
60 case Constant::kInt64:
61 return Operand(constant.ToInt64());
62 case Constant::kFloat32:
64 isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
65 case Constant::kFloat64:
67 isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
68 case Constant::kExternalReference:
69 case Constant::kHeapObject:
70 // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
71 // maybe not done on arm due to const pool ??
73 case Constant::kRpoNumber:
74 UNREACHABLE(); // TODO(titzer): RPO immediates on mips?
// Unreachable fall-through; keeps the compiler happy.
78 return Operand(zero_reg);
// Register input -> register Operand; anything else is treated as immediate.
81 Operand InputOperand(int index) {
82 InstructionOperand* op = instr_->InputAt(index);
83 if (op->IsRegister()) {
84 return Operand(ToRegister(op));
86 return InputImmediate(index);
// Decodes the addressing mode encoded in the opcode and consumes the
// corresponding inputs starting at *first_index (base register + offset).
89 MemOperand MemoryOperand(int* first_index) {
90 const int index = *first_index;
91 switch (AddressingModeField::decode(instr_->opcode())) {
96 return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
98 // TODO(plind): r6 address mode, to be implemented ...
102 return MemOperand(no_reg);
105 MemOperand MemoryOperand(int index = 0) { return MemoryOperand(&index); }
// Maps a stack-slot operand to its frame location (sp- or fp-relative),
// as computed by the linkage.
107 MemOperand ToMemOperand(InstructionOperand* op) const {
109 DCHECK(!op->IsRegister());
110 DCHECK(!op->IsDoubleRegister());
111 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
112 // The linkage computes where all spill slots are located.
113 FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
114 return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
// Returns true when input `index` of `instr` is allocated to a register
// (as opposed to an immediate or stack slot).
119 static inline bool HasRegisterInput(Instruction* instr, int index) {
120 return instr->InputAt(index)->IsRegister();
// Out-of-line path for a checked float32 load that went out of bounds:
// yields a quiet NaN instead of reading memory.
126 class OutOfLineLoadSingle FINAL : public OutOfLineCode {
128 OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
129 : OutOfLineCode(gen), result_(result) {}
131 void Generate() FINAL {
132 __ Move(result_, std::numeric_limits<float>::quiet_NaN());
136 FloatRegister const result_;
// Out-of-line path for a checked float64 load that went out of bounds:
// yields a quiet NaN instead of reading memory.
140 class OutOfLineLoadDouble FINAL : public OutOfLineCode {
142 OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
143 : OutOfLineCode(gen), result_(result) {}
145 void Generate() FINAL {
146 __ Move(result_, std::numeric_limits<double>::quiet_NaN());
150 DoubleRegister const result_;
// Out-of-line path for a checked integer load that went out of bounds:
// yields zero instead of reading memory.
154 class OutOfLineLoadInteger FINAL : public OutOfLineCode {
156 OutOfLineLoadInteger(CodeGenerator* gen, Register result)
157 : OutOfLineCode(gen), result_(result) {}
159 void Generate() FINAL { __ mov(result_, zero_reg); }
162 Register const result_;
// Shared out-of-line tail for the double rounding ops (floor/ceil/trunc):
// restores the sign bit of the result for the +/-0 case. The inline fast
// path (see ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE) leaves the input's high word
// in kScratchReg before branching here.
166 class OutOfLineRound : public OutOfLineCode {
168 OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
169 : OutOfLineCode(gen), result_(result) {}
171 void Generate() FINAL {
172 // Handle rounding to zero case where sign has to be preserved.
173 // High bits of double input already in kScratchReg.
// Extract the original sign bit ...
174 __ dsrl(at, kScratchReg, 31);
// ... and write it into the high word of the result.
176 __ mthc1(at, result_);
180 DoubleRegister const result_;
// Truncate flavor of OutOfLineRound; behavior is entirely inherited.
184 class OutOfLineTruncate FINAL : public OutOfLineRound {
186 OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
187 : OutOfLineRound(gen, result) {}
// Floor flavor of OutOfLineRound; behavior is entirely inherited.
191 class OutOfLineFloor FINAL : public OutOfLineRound {
193 OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
194 : OutOfLineRound(gen, result) {}
// Ceil flavor of OutOfLineRound; behavior is entirely inherited.
198 class OutOfLineCeil FINAL : public OutOfLineRound {
200 OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
201 : OutOfLineRound(gen, result) {}
// Maps a TurboFan FlagsCondition to the MIPS branch condition used for
// integer compares. (Excerpt is elided: the returned Condition for each
// case and the default arm are not visible here.)
205 Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
211 case kSignedLessThan:
213 case kSignedGreaterThanOrEqual:
215 case kSignedLessThanOrEqual:
217 case kSignedGreaterThan:
219 case kUnsignedLessThan:
221 case kUnsignedGreaterThanOrEqual:
223 case kUnsignedLessThanOrEqual:
225 case kUnsignedGreaterThan:
227 case kUnorderedEqual:
228 case kUnorderedNotEqual:
// Maps a FlagsCondition to the branch condition used after a Tst
// (bitwise-AND) pseudo-op. (Body elided in this excerpt.)
238 Condition FlagsConditionToConditionTst(FlagsCondition condition) {
// Maps a FlagsCondition to the branch condition used for overflow checks
// after Dadd/Dsub. (Body elided in this excerpt.)
252 Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
// Maps a FlagsCondition to an FPU compare condition for double compares.
// `predicate` is an out-parameter (set in the elided case bodies); it is
// presumably false when the caller must invert the FPU test result —
// TODO confirm against the callers in AssembleArchBoolean.
266 FPUCondition FlagsConditionToConditionCmpD(bool& predicate,
267 FlagsCondition condition) {
275 case kUnsignedLessThan:
278 case kUnsignedGreaterThanOrEqual:
281 case kUnsignedLessThanOrEqual:
284 case kUnsignedGreaterThan:
287 case kUnorderedEqual:
288 case kUnorderedNotEqual:
// Fall-through return for the unreachable/default path.
296 return kNoFPUCondition;
// Code-emission macros for checked (bounds-tested) loads/stores and for the
// double->double rounding fast path. Common shape: if the offset input is a
// register, branch to the out-of-line/skip label when offset >= length
// (unsigned `hs`), computing the address in the delay slot; if the offset is
// an immediate, branch when length is `ls` (<=) the constant offset.
// NOTE: this excerpt elides several continuation lines of each macro
// (else-branches, `done` labels, closing braces). No comments are inserted
// inside the macro bodies, since every interior line must end with '\'.
302 #define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \
304 auto result = i.Output##width##Register(); \
305 auto ool = new (zone()) OutOfLineLoad##width(this, result); \
306 if (instr->InputAt(0)->IsRegister()) { \
307 auto offset = i.InputRegister(0); \
308 __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
309 __ Daddu(at, i.InputRegister(2), offset); \
310 __ asm_instr(result, MemOperand(at, 0)); \
312 auto offset = i.InputOperand(0).immediate(); \
313 __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
314 __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
316 __ bind(ool->exit()); \
320 #define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
322 auto result = i.OutputRegister(); \
323 auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
324 if (instr->InputAt(0)->IsRegister()) { \
325 auto offset = i.InputRegister(0); \
326 __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
327 __ Daddu(at, i.InputRegister(2), offset); \
328 __ asm_instr(result, MemOperand(at, 0)); \
330 auto offset = i.InputOperand(0).immediate(); \
331 __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
332 __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
334 __ bind(ool->exit()); \
338 #define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr) \
341 if (instr->InputAt(0)->IsRegister()) { \
342 auto offset = i.InputRegister(0); \
343 auto value = i.Input##width##Register(2); \
344 __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
345 __ Daddu(at, i.InputRegister(3), offset); \
346 __ asm_instr(value, MemOperand(at, 0)); \
348 auto offset = i.InputOperand(0).immediate(); \
349 auto value = i.Input##width##Register(2); \
350 __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
351 __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
357 #define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
360 if (instr->InputAt(0)->IsRegister()) { \
361 auto offset = i.InputRegister(0); \
362 auto value = i.InputRegister(2); \
363 __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
364 __ Daddu(at, i.InputRegister(3), offset); \
365 __ asm_instr(value, MemOperand(at, 0)); \
367 auto offset = i.InputOperand(0).immediate(); \
368 auto value = i.InputRegister(2); \
369 __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
370 __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
376 #define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(asm_instr, operation) \
379 new (zone()) OutOfLine##operation(this, i.OutputDoubleRegister()); \
381 __ mfhc1(kScratchReg, i.InputDoubleRegister(0)); \
382 __ Ext(at, kScratchReg, HeapNumber::kExponentShift, \
383 HeapNumber::kExponentBits); \
384 __ Branch(USE_DELAY_SLOT, &done, hs, at, \
385 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
386 __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
387 __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
388 __ dmfc1(at, i.OutputDoubleRegister()); \
389 __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
390 __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
391 __ bind(ool->exit()); \
396 // Assembles an instruction after register allocation, producing machine code.
// Giant dispatch over ArchOpcodeField: architecture-neutral opcodes (calls,
// jumps, switches) first, then MIPS64 ALU/shift/FP ops, then loads/stores and
// checked memory accesses. (Excerpt is elided: many case labels, `break`s and
// closing braces are not visible here.)
397 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
398 MipsOperandConverter i(this, instr);
399 InstructionCode opcode = instr->opcode();
401 switch (ArchOpcodeField::decode(opcode)) {
402 case kArchCallCodeObject: {
403 EnsureSpaceForLazyDeopt();
// Immediate input: call the code object directly; otherwise compute the
// entry address from the code object's header.
404 if (instr->InputAt(0)->IsImmediate()) {
405 __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
406 RelocInfo::CODE_TARGET);
408 __ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
411 AddSafepointAndDeopt(instr);
414 case kArchCallJSFunction: {
415 EnsureSpaceForLazyDeopt();
416 Register func = i.InputRegister(0);
417 if (FLAG_debug_code) {
418 // Check the function's context matches the context argument.
419 __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
420 __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
423 __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
425 AddSafepointAndDeopt(instr);
429 AssembleArchJump(i.InputRpo(0));
431 case kArchLookupSwitch:
432 AssembleArchLookupSwitch(instr);
434 case kArchTableSwitch:
435 AssembleArchTableSwitch(instr);
438 // don't emit code for nops.
443 case kArchStackPointer:
444 __ mov(i.OutputRegister(), sp);
446 case kArchTruncateDoubleToI:
447 __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
// 32- and 64-bit integer arithmetic; the macro-assembler handles the
// register-vs-immediate second operand.
450 __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
453 __ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
456 __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
459 __ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
462 __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
465 __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
467 case kMips64MulHighU:
468 __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
471 __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
474 __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
477 __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
480 __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
483 __ Dmul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
486 __ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
489 __ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
492 __ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
495 __ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
497 __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
500 __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
503 __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
506 __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
// Shifts: variable-shift instruction for register amounts, immediate form
// otherwise. The 64-bit immediate forms split at 32 because dsll/dsrl/dsra
// only encode shift amounts 0-31; dsll32 etc. cover 32-63.
509 if (instr->InputAt(1)->IsRegister()) {
510 __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
512 int32_t imm = i.InputOperand(1).immediate();
513 __ sll(i.OutputRegister(), i.InputRegister(0), imm);
517 if (instr->InputAt(1)->IsRegister()) {
518 __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
520 int32_t imm = i.InputOperand(1).immediate();
521 __ srl(i.OutputRegister(), i.InputRegister(0), imm);
525 if (instr->InputAt(1)->IsRegister()) {
526 __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
528 int32_t imm = i.InputOperand(1).immediate();
529 __ sra(i.OutputRegister(), i.InputRegister(0), imm);
533 __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
537 __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
541 if (instr->InputAt(1)->IsRegister()) {
542 __ dsllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
544 int32_t imm = i.InputOperand(1).immediate();
546 __ dsll(i.OutputRegister(), i.InputRegister(0), imm);
548 __ dsll32(i.OutputRegister(), i.InputRegister(0), imm - 32);
553 if (instr->InputAt(1)->IsRegister()) {
554 __ dsrlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
556 int32_t imm = i.InputOperand(1).immediate();
558 __ dsrl(i.OutputRegister(), i.InputRegister(0), imm);
560 __ dsrl32(i.OutputRegister(), i.InputRegister(0), imm - 32);
565 if (instr->InputAt(1)->IsRegister()) {
566 __ dsrav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
568 int32_t imm = i.InputOperand(1).immediate();
570 __ dsra(i.OutputRegister(), i.InputRegister(0), imm);
572 __ dsra32(i.OutputRegister(), i.InputRegister(0), imm - 32);
577 __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
580 __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
584 // Pseudo-instruction used for cmp/branch. No opcode emitted here.
588 // Pseudo-instruction used for cmp/branch. No opcode emitted here.
591 // TODO(plind): Should we combine mov/li like this, or use separate instr?
592 // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
593 if (HasRegisterInput(instr, 0)) {
594 __ mov(i.OutputRegister(), i.InputRegister(0));
596 __ li(i.OutputRegister(), i.InputOperand(0));
601 // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
604 // TODO(plind): add special case: combine mult & add.
605 __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
606 i.InputDoubleRegister(1));
609 __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
610 i.InputDoubleRegister(1));
613 // TODO(plind): add special case: right op is -1.0, see arm port.
614 __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
615 i.InputDoubleRegister(1));
618 __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
619 i.InputDoubleRegister(1));
// Float64 modulus: done via a C call to the runtime's fmod helper.
622 // TODO(bmeurer): We should really get rid of this special instruction,
623 // and generate a CallAddress instruction instead.
624 FrameScope scope(masm(), StackFrame::MANUAL);
625 __ PrepareCallCFunction(0, 2, kScratchReg);
626 __ MovToFloatParameters(i.InputDoubleRegister(0),
627 i.InputDoubleRegister(1));
628 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
630 // Move the result in the double result register.
631 __ MovFromFloatResult(i.OutputDoubleRegister());
634 case kMips64Float64Floor: {
635 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
638 case kMips64Float64Ceil: {
639 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
642 case kMips64Float64RoundTruncate: {
643 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
647 __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
651 __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
655 __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
// int32 -> double conversion goes through an FPU scratch register.
659 FPURegister scratch = kScratchDoubleReg;
660 __ mtc1(i.InputRegister(0), scratch);
661 __ cvt_d_w(i.OutputDoubleRegister(), scratch);
664 case kMips64CvtDUw: {
665 FPURegister scratch = kScratchDoubleReg;
666 __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
669 case kMips64TruncWD: {
670 FPURegister scratch = kScratchDoubleReg;
671 // Other arches use round to zero here, so we follow.
672 __ trunc_w_d(scratch, i.InputDoubleRegister(0));
673 __ mfc1(i.OutputRegister(), scratch);
676 case kMips64TruncUwD: {
677 FPURegister scratch = kScratchDoubleReg;
678 // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
679 __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
682 // ... more basic instructions ...
// Plain loads/stores; the addressing mode is decoded by MemoryOperand().
685 __ lbu(i.OutputRegister(), i.MemoryOperand());
688 __ lb(i.OutputRegister(), i.MemoryOperand());
691 __ sb(i.InputRegister(2), i.MemoryOperand());
694 __ lhu(i.OutputRegister(), i.MemoryOperand());
697 __ lh(i.OutputRegister(), i.MemoryOperand());
700 __ sh(i.InputRegister(2), i.MemoryOperand());
703 __ lw(i.OutputRegister(), i.MemoryOperand());
706 __ ld(i.OutputRegister(), i.MemoryOperand());
709 __ sw(i.InputRegister(2), i.MemoryOperand());
712 __ sd(i.InputRegister(2), i.MemoryOperand());
715 __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
720 MemOperand operand = i.MemoryOperand(&index);
721 __ swc1(i.InputSingleRegister(index), operand);
725 __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
728 __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
731 __ Push(i.InputRegister(0));
733 case kMips64StackClaim: {
734 int words = MiscField::decode(instr->opcode());
735 __ Dsubu(sp, sp, Operand(words << kPointerSizeLog2));
738 case kMips64StoreToStackSlot: {
739 int slot = MiscField::decode(instr->opcode());
740 __ sd(i.InputRegister(0), MemOperand(sp, slot << kPointerSizeLog2));
743 case kMips64StoreWriteBarrier: {
744 Register object = i.InputRegister(0);
745 Register index = i.InputRegister(1);
746 Register value = i.InputRegister(2);
// NOTE: `index` is clobbered here (becomes the absolute address) before
// being passed to RecordWrite below.
747 __ daddu(index, object, index);
748 __ sd(value, MemOperand(index));
749 SaveFPRegsMode mode =
750 frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
751 RAStatus ra_status = kRAHasNotBeenSaved;
752 __ RecordWrite(object, index, value, ra_status, mode);
// Checked (bounds-tested) accesses expand via the ASSEMBLE_* macros above.
755 case kCheckedLoadInt8:
756 ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
758 case kCheckedLoadUint8:
759 ASSEMBLE_CHECKED_LOAD_INTEGER(lbu);
761 case kCheckedLoadInt16:
762 ASSEMBLE_CHECKED_LOAD_INTEGER(lh);
764 case kCheckedLoadUint16:
765 ASSEMBLE_CHECKED_LOAD_INTEGER(lhu);
767 case kCheckedLoadWord32:
768 ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
770 case kCheckedLoadFloat32:
771 ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
773 case kCheckedLoadFloat64:
774 ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1);
776 case kCheckedStoreWord8:
777 ASSEMBLE_CHECKED_STORE_INTEGER(sb);
779 case kCheckedStoreWord16:
780 ASSEMBLE_CHECKED_STORE_INTEGER(sh);
782 case kCheckedStoreWord32:
783 ASSEMBLE_CHECKED_STORE_INTEGER(sw);
785 case kCheckedStoreFloat32:
786 ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
788 case kCheckedStoreFloat64:
789 ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
// Reports a FlagsCondition this backend cannot handle for the given opcode.
// (Excerpt is elided: the macro's terminating statement is not visible.)
795 #define UNSUPPORTED_COND(opcode, condition) \
796 OFStream out(stdout); \
797 out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
800 // Assembles branches after an instruction.
// Consumes the condition produced by the preceding compare/test pseudo-op
// and emits the real compare-and-branch. (Excerpt is elided: some case
// labels, default arms and closing braces are not visible.)
801 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
802 MipsOperandConverter i(this, instr);
803 Label* tlabel = branch->true_label;
804 Label* flabel = branch->false_label;
805 Condition cc = kNoCondition;
807 // MIPS does not have condition code flags, so compare and branch are
808 // implemented differently than on the other arch's. The compare operations
809 // emit mips pseudo-instructions, which are handled here by branch
810 // instructions that do the actual comparison. Essential that the input
811 // registers to compare pseudo-op are not modified before this branch op, as
812 // they are tested here.
813 // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
814 // not separated by other instructions.
816 if (instr->arch_opcode() == kMips64Tst) {
817 cc = FlagsConditionToConditionTst(branch->condition);
818 __ And(at, i.InputRegister(0), i.InputOperand(1));
819 __ Branch(tlabel, cc, at, Operand(zero_reg));
820 } else if (instr->arch_opcode() == kMips64Tst32) {
821 cc = FlagsConditionToConditionTst(branch->condition);
822 // Zero-extend registers on MIPS64 only 64-bit operand
823 // branch and compare op. is available.
824 // This is a disadvantage to perform 32-bit operation on MIPS64.
825 // Try to force globally in front-end Word64 representation to be preferred
826 // for MIPS64 even for Word32.
827 __ And(at, i.InputRegister(0), i.InputOperand(1));
828 __ Dext(at, at, 0, 32);
829 __ Branch(tlabel, cc, at, Operand(zero_reg));
830 } else if (instr->arch_opcode() == kMips64Dadd ||
831 instr->arch_opcode() == kMips64Dsub) {
// Overflow check: compare the sign-extended upper word against the sign
// of the lower word; they differ iff the 32-bit result overflowed.
832 cc = FlagsConditionToConditionOvf(branch->condition);
834 __ dsra32(kScratchReg, i.OutputRegister(), 0);
835 __ sra(at, i.OutputRegister(), 31);
836 __ Branch(tlabel, cc, at, Operand(kScratchReg));
837 } else if (instr->arch_opcode() == kMips64Cmp) {
838 cc = FlagsConditionToConditionCmp(branch->condition);
839 __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
841 if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
843 } else if (instr->arch_opcode() == kMips64Cmp32) {
844 cc = FlagsConditionToConditionCmp(branch->condition);
// 32-bit compare: normalize both inputs to a canonical 64-bit form first
// (sign-extend for signed conditions, zero-extend for unsigned ones).
846 switch (branch->condition) {
849 case kSignedLessThan:
850 case kSignedGreaterThanOrEqual:
851 case kSignedLessThanOrEqual:
852 case kSignedGreaterThan:
853 // Sign-extend registers on MIPS64 only 64-bit operand
854 // branch and compare op. is available.
855 __ sll(i.InputRegister(0), i.InputRegister(0), 0);
856 if (instr->InputAt(1)->IsRegister()) {
857 __ sll(i.InputRegister(1), i.InputRegister(1), 0);
860 case kUnsignedLessThan:
861 case kUnsignedGreaterThanOrEqual:
862 case kUnsignedLessThanOrEqual:
863 case kUnsignedGreaterThan:
864 // Zero-extend registers on MIPS64 only 64-bit operand
865 // branch and compare op. is available.
866 __ Dext(i.InputRegister(0), i.InputRegister(0), 0, 32);
867 if (instr->InputAt(1)->IsRegister()) {
868 __ Dext(i.InputRegister(1), i.InputRegister(1), 0, 32);
// NOTE(review): this is the kMips64Cmp32 path — the opcode named in the
// diagnostic below probably should be kMips64Cmp32, not kMips64Cmp.
872 UNSUPPORTED_COND(kMips64Cmp, branch->condition);
875 __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
877 if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
878 } else if (instr->arch_opcode() == kMips64CmpD) {
879 // TODO(dusmil) optimize unordered checks to use less instructions
880 // even if we have to unfold BranchF macro.
882 switch (branch->condition) {
890 case kUnsignedLessThan:
893 case kUnsignedGreaterThanOrEqual:
897 case kUnsignedLessThanOrEqual:
900 case kUnsignedGreaterThan:
905 UNSUPPORTED_COND(kMips64CmpD, branch->condition);
908 __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0),
909 i.InputDoubleRegister(1));
911 if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
// Unknown arch opcode reaching a branch: report it (debug aid).
913 PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
914 instr->arch_opcode());
// Emits an unconditional jump to `target`, unless the target block
// immediately follows in assembly order (then fall through).
920 void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
921 if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
925 // Assembles boolean materializations after an instruction.
// Produces 1/0 in the result register based on the condition left by the
// preceding compare/test pseudo-op. (Excerpt is elided: the `done` label
// declaration and some closing braces are not visible.)
926 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
927 FlagsCondition condition) {
928 MipsOperandConverter i(this, instr);
931 // Materialize a full 32-bit 1 or 0 value. The result register is always the
932 // last output of the instruction.
934 DCHECK_NE(0u, instr->OutputCount());
935 Register result = i.OutputRegister(instr->OutputCount() - 1);
936 Condition cc = kNoCondition;
938 // MIPS does not have condition code flags, so compare and branch are
939 // implemented differently than on the other arch's. The compare operations
940 // emit mips pseudo-instructions, which are checked and handled here.
942 // For materializations, we use delay slot to set the result true, and
943 // in the false case, where we fall through the branch, we reset the result
946 if (instr->arch_opcode() == kMips64Tst) {
947 cc = FlagsConditionToConditionTst(condition);
948 __ And(at, i.InputRegister(0), i.InputOperand(1));
949 __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
950 __ li(result, Operand(1)); // In delay slot.
951 } else if (instr->arch_opcode() == kMips64Tst32) {
952 cc = FlagsConditionToConditionTst(condition);
953 // Zero-extend register on MIPS64 only 64-bit operand
954 // branch and compare op. is available.
955 __ And(at, i.InputRegister(0), i.InputOperand(1));
956 __ Dext(at, at, 0, 32);
957 __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
958 __ li(result, Operand(1)); // In delay slot.
959 } else if (instr->arch_opcode() == kMips64Dadd ||
960 instr->arch_opcode() == kMips64Dsub) {
// Overflow materialization: same upper-word vs sign-bit comparison as in
// AssembleArchBranch.
961 cc = FlagsConditionToConditionOvf(condition);
962 __ dsra32(kScratchReg, i.OutputRegister(), 0);
963 __ sra(at, i.OutputRegister(), 31);
964 __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(kScratchReg));
965 __ li(result, Operand(1)); // In delay slot.
966 } else if (instr->arch_opcode() == kMips64Cmp) {
967 Register left = i.InputRegister(0);
968 Operand right = i.InputOperand(1);
969 cc = FlagsConditionToConditionCmp(condition);
970 __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
971 __ li(result, Operand(1)); // In delay slot.
972 } else if (instr->arch_opcode() == kMips64Cmp32) {
973 Register left = i.InputRegister(0);
974 Operand right = i.InputOperand(1);
975 cc = FlagsConditionToConditionCmp(condition);
// As in AssembleArchBranch: sign- or zero-extend 32-bit inputs before the
// 64-bit compare, depending on condition signedness.
980 case kSignedLessThan:
981 case kSignedGreaterThanOrEqual:
982 case kSignedLessThanOrEqual:
983 case kSignedGreaterThan:
984 // Sign-extend registers on MIPS64 only 64-bit operand
985 // branch and compare op. is available.
986 __ sll(left, left, 0);
987 if (instr->InputAt(1)->IsRegister()) {
988 __ sll(i.InputRegister(1), i.InputRegister(1), 0);
991 case kUnsignedLessThan:
992 case kUnsignedGreaterThanOrEqual:
993 case kUnsignedLessThanOrEqual:
994 case kUnsignedGreaterThan:
995 // Zero-extend registers on MIPS64 only 64-bit operand
996 // branch and compare op. is available.
997 __ Dext(left, left, 0, 32);
998 if (instr->InputAt(1)->IsRegister()) {
999 __ Dext(i.InputRegister(1), i.InputRegister(1), 0, 32);
1003 UNSUPPORTED_COND(kMips64Cmp32, condition);
1006 __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
1007 __ li(result, Operand(1)); // In delay slot.
1008 } else if (instr->arch_opcode() == kMips64CmpD) {
1009 FPURegister left = i.InputDoubleRegister(0);
1010 FPURegister right = i.InputDoubleRegister(1);
1013 FPUCondition cc = FlagsConditionToConditionCmpD(predicate, condition);
// Pre-r6: classic c.cond.d + conditional-move on the FPU flag.
1014 if (kArchVariant != kMips64r6) {
1015 __ li(result, Operand(1));
1016 __ c(cc, D, left, right);
1018 __ Movf(result, zero_reg);
1020 __ Movt(result, zero_reg);
// r6: cmp.cond.d writes all-ones/all-zeros into an FPU register; shift the
// sign bit down to get 1/0.
1023 __ cmp(cc, L, kDoubleCompareReg, left, right);
1024 __ dmfc1(at, kDoubleCompareReg);
1025 __ dsrl32(result, at, 31); // Cmp returns all 1s for true.
1026 if (!predicate) // Toggle result for not equal.
1027 __ xori(result, result, 1);
1031 PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
1032 instr->arch_opcode());
1036 // Fallthru case is the false materialization.
1037 __ bind(&false_value);
1038 __ li(result, Operand(static_cast<int64_t>(0)));
// Lookup switch: linear sequence of compare-and-branch against each case
// value (inputs are (value, target) pairs starting at index 2); falls back
// to the default target at input 1.
1043 void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
1044 MipsOperandConverter i(this, instr);
1045 Register input = i.InputRegister(0);
1046 for (size_t index = 2; index < instr->InputCount(); index += 2) {
1047 __ li(at, Operand(i.InputInt32(index + 0)));
1048 __ beq(input, at, GetLabel(i.InputRpo(index + 1)));
1050 __ nop(); // Branch delay slot of the last beq.
1051 AssembleArchJump(i.InputRpo(1));
// Table switch: bounds-check the input, then load the target address from
// an inline 8-byte-aligned jump table (emitted with dd below) indexed by
// input * 8, and jump through it. (Excerpt is elided: the bal/jr sequence
// that establishes ra is not fully visible.)
1055 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
1056 MipsOperandConverter i(this, instr);
1057 Register input = i.InputRegister(0);
1058 size_t const case_count = instr->InputCount() - 2;
// Out-of-range input -> default target (input 1); `hs` is unsigned >=.
1061 __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
1062 __ BlockTrampolinePoolFor(case_count * 2 + 7);
1063 // Ensure that dd-ed labels use 8 byte aligned addresses.
1064 if ((masm()->pc_offset() & 7) != 0) {
1068 __ dsll(at, input, 3); // Branch delay slot.
1070 __ daddu(at, at, ra);
1071 __ ld(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
1073 __ nop(); // Branch delay slot nop.
1074 for (size_t index = 0; index < case_count; ++index) {
1075 __ dd(GetLabel(i.InputRpo(index + 2)));
// Emits a call into the lazy deoptimization entry for `deoptimization_id`.
1080 void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
1081 Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
1082 isolate(), deoptimization_id, Deoptimizer::LAZY);
1083 __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
// Emits the function prologue: frame setup depends on the call descriptor
// kind (C call with callee-saved registers, JS function, or stub), then OSR
// handling and spill-slot allocation. (Excerpt is elided: some else-branch
// bodies and closing braces are not visible.)
1087 void CodeGenerator::AssemblePrologue() {
1088 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
1089 int stack_slots = frame()->GetSpillSlotCount();
1090 if (descriptor->kind() == CallDescriptor::kCallAddress) {
1093 const RegList saves = descriptor->CalleeSavedRegisters();
1094 if (saves != 0) { // Save callee-saved registers.
1095 // TODO(plind): make callee save size const, possibly DCHECK it.
// Count one pointer-sized slot per callee-saved register in the mask.
1096 int register_save_area_size = 0;
1097 for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
1098 if (!((1 << i) & saves)) continue;
1099 register_save_area_size += kPointerSize;
1101 frame()->SetRegisterSaveAreaSize(register_save_area_size);
1102 __ MultiPush(saves);
1104 } else if (descriptor->IsJSFunctionCall()) {
1105 CompilationInfo* info = this->info();
1106 __ Prologue(info->IsCodePreAgingActive());
1107 frame()->SetRegisterSaveAreaSize(
1108 StandardFrameConstants::kFixedFrameSizeFromFp);
1109 } else if (stack_slots > 0) {
1111 frame()->SetRegisterSaveAreaSize(
1112 StandardFrameConstants::kFixedFrameSizeFromFp);
1115 if (info()->is_osr()) {
1116 // TurboFan OSR-compiled functions cannot be entered directly.
1117 __ Abort(kShouldNotDirectlyEnterOsrFunction);
1119 // Unoptimized code jumps directly to this entrypoint while the unoptimized
1120 // frame is still on the stack. Optimized code uses OSR values directly from
1121 // the unoptimized frame. Thus, all that needs to be done is to allocate the
1122 // remaining stack slots.
1123 if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
1124 osr_pc_offset_ = __ pc_offset();
1125 DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
1126 stack_slots -= frame()->GetOsrStackSlotCount();
// Reserve the (remaining) spill-slot area.
1129 if (stack_slots > 0) {
1130 __ Dsubu(sp, sp, Operand(stack_slots * kPointerSize));
// Emits the function epilogue/return, mirroring AssemblePrologue: pop spill
// slots, restore callee-saved registers for C calls, then return, dropping
// the JS parameters for JS function calls. (Excerpt is elided: the restore
// and plain-return branches are not fully visible.)
1135 void CodeGenerator::AssembleReturn() {
1136 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
1137 int stack_slots = frame()->GetSpillSlotCount();
1138 if (descriptor->kind() == CallDescriptor::kCallAddress) {
1139 if (frame()->GetRegisterSaveAreaSize() > 0) {
1140 // Remove this frame's spill slots first.
1141 if (stack_slots > 0) {
1142 __ Daddu(sp, sp, Operand(stack_slots * kPointerSize));
1144 // Restore registers.
1145 const RegList saves = descriptor->CalleeSavedRegisters();
1153 } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
1156 int pop_count = descriptor->IsJSFunctionCall()
1157 ? static_cast<int>(descriptor->JSParameterCount())
1159 __ DropAndRet(pop_count);
// Implements a single parallel-move step produced by the gap resolver:
// copies `source` into `destination`. Dispatches on the operand kinds
// (register / stack slot / constant / double register / double stack slot);
// not every combination is possible and unsupported ones are excluded by
// the DCHECKs below. Uses kScratchReg / kScratchDoubleReg as temporaries
// for memory-to-memory transfers.
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(g.ToRegister(destination), src);
      // Stack slots hold full 64-bit pointers, hence sd (store doubleword).
      __ sd(src, g.ToMemOperand(destination));
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ld(g.ToRegister(destination), src);
      // Slot-to-slot copy goes through the scratch register.
      Register temp = kScratchReg;
      __ sd(temp, g.ToMemOperand(destination));
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      // Materialize into the target register directly, or into the scratch
      // register when the final destination is a stack slot.
      destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          __ li(dst, Operand(src.ToInt32()));
        case Constant::kFloat32:
          // Float constants headed for a tagged slot are boxed as heap
          // numbers (TENURED so the code's embedded pointer stays valid).
          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
        case Constant::kInt64:
          __ li(dst, Operand(src.ToInt64()));
        case Constant::kFloat64:
          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
        case Constant::kExternalReference:
          __ li(dst, Operand(src.ToExternalReference()));
        case Constant::kHeapObject:
          __ li(dst, src.ToHeapObject());
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(titzer): loading RPO numbers on mips64.
      // Spill the materialized constant if the destination is a slot.
      if (destination->IsStackSlot()) __ sd(dst, g.ToMemOperand(destination));
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsDoubleStackSlot()) {
        // Store the raw 32-bit float bit pattern via the assembler temp.
        MemOperand dst = g.ToMemOperand(destination);
        __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
        FloatRegister dst = g.ToSingleRegister(destination);
        __ Move(dst, src.ToFloat32());
      DCHECK_EQ(Constant::kFloat64, src.type());
      DoubleRegister dst = destination->IsDoubleRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      __ Move(dst, src.ToFloat64());
      if (destination->IsDoubleStackSlot()) {
        __ sdc1(dst, g.ToMemOperand(destination));
  } else if (source->IsDoubleRegister()) {
    FPURegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPURegister dst = g.ToDoubleRegister(destination);
      DCHECK(destination->IsDoubleStackSlot());
      __ sdc1(src, g.ToMemOperand(destination));
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ ldc1(g.ToDoubleRegister(destination), src);
      // Slot-to-slot double copy via the scratch FPU register.
      FPURegister temp = kScratchDoubleReg;
      __ sdc1(temp, g.ToMemOperand(destination));
1262 void CodeGenerator::AssembleSwap(InstructionOperand* source,
1263 InstructionOperand* destination) {
1264 MipsOperandConverter g(this, NULL);
1265 // Dispatch on the source and destination operand kinds. Not all
1266 // combinations are possible.
1267 if (source->IsRegister()) {
1268 // Register-register.
1269 Register temp = kScratchReg;
1270 Register src = g.ToRegister(source);
1271 if (destination->IsRegister()) {
1272 Register dst = g.ToRegister(destination);
1277 DCHECK(destination->IsStackSlot());
1278 MemOperand dst = g.ToMemOperand(destination);
1283 } else if (source->IsStackSlot()) {
1284 DCHECK(destination->IsStackSlot());
1285 Register temp_0 = kScratchReg;
1286 Register temp_1 = kScratchReg2;
1287 MemOperand src = g.ToMemOperand(source);
1288 MemOperand dst = g.ToMemOperand(destination);
1293 } else if (source->IsDoubleRegister()) {
1294 FPURegister temp = kScratchDoubleReg;
1295 FPURegister src = g.ToDoubleRegister(source);
1296 if (destination->IsDoubleRegister()) {
1297 FPURegister dst = g.ToDoubleRegister(destination);
1302 DCHECK(destination->IsDoubleStackSlot());
1303 MemOperand dst = g.ToMemOperand(destination);
1308 } else if (source->IsDoubleStackSlot()) {
1309 DCHECK(destination->IsDoubleStackSlot());
1310 Register temp_0 = kScratchReg;
1311 FPURegister temp_1 = kScratchDoubleReg;
1312 MemOperand src0 = g.ToMemOperand(source);
1313 MemOperand src1(src0.rm(), src0.offset() + kPointerSize);
1314 MemOperand dst0 = g.ToMemOperand(destination);
1315 MemOperand dst1(dst0.rm(), dst0.offset() + kPointerSize);
1316 __ ldc1(temp_1, dst0); // Save destination in temp_1.
1317 __ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
1318 __ sw(temp_0, dst0);
1319 __ lw(temp_0, src1);
1320 __ sw(temp_0, dst1);
1321 __ sdc1(temp_1, src0);
1323 // No other combinations are possible.
// Jump-table emission hook. No out-of-line table is materialized on this
// port; tables are emitted inline at their use sites, so the parameters are
// intentionally unused here.
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  // On 64-bit MIPS we emit the jump tables inline.
// Emits a single padding nop for smi-code inlining.
// NOTE(review): the original comment here ("Unused on 32-bit ARM. Still
// exists on 64-bit arm.") was copy-pasted from the ARM port and does not
// describe this MIPS port — its relevance here is unverified.
// TODO(plind): Unclear when this is called now. Understand, fix if needed.
void CodeGenerator::AddNopForSmiCodeInlining() {
  __ nop();  // Maybe PROPERTY_ACCESS_INLINED?
// Pads the instruction stream with nops so that there is at least
// Deoptimizer::patch_size() bytes between the previous lazy-deopt site and
// the code emitted here, guaranteeing the deoptimizer can later patch a
// call over it without overwriting unrelated instructions. Skipped for
// stubs, which are never lazily deoptimized this way.
void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      // Block trampoline pool emission for duration of padding.
      v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      // Padding is emitted in whole instructions, so the gap must be a
      // multiple of the fixed MIPS instruction size.
      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
      while (padding_size > 0) {
        padding_size -= v8::internal::Assembler::kInstrSize;
  MarkLazyDeoptSite();
1365 } // namespace compiler
1366 } // namespace internal