// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/mips64/macro-assembler-mips64.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->

// TODO(plind): Possibly avoid using these lithium names.
#define kScratchReg kLithiumScratchReg
#define kScratchReg2 kLithiumScratchReg2
#define kScratchDoubleReg kLithiumScratchDouble

// TODO(plind): consider renaming these macros.
#define TRACE_MSG(msg)                                                      \
  PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
         __LINE__)

#define TRACE_UNIMPL()                                                         \
  PrintF("UNIMPLEMENTED code_generator_mips64: %s at line %d\n", __FUNCTION__, \
         __LINE__)

// Adds MIPS64-specific methods to convert InstructionOperands.
class MipsOperandConverter FINAL : public InstructionOperandConverter {
 public:
  MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  FloatRegister OutputSingleRegister(size_t index = 0) {
    return ToSingleRegister(instr_->OutputAt(index));
  }

  FloatRegister InputSingleRegister(size_t index) {
    return ToSingleRegister(instr_->InputAt(index));
  }

  FloatRegister ToSingleRegister(InstructionOperand* op) {
    // The single (float) and double register namespaces are the same on
    // MIPS64; both are typedefs of FPURegister.
    return ToDoubleRegister(op);
  }
  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kInt64:
        return Operand(constant.ToInt64());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
      case Constant::kHeapObject:
        // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
        //    maybe not done on arm due to const pool ??
        break;
      case Constant::kRpoNumber:
        UNREACHABLE();  // TODO(titzer): RPO immediates on mips?
        break;
    }
    UNREACHABLE();
    return Operand(zero_reg);
  }
  Operand InputOperand(size_t index) {
    InstructionOperand* op = instr_->InputAt(index);
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return InputImmediate(index);
  }
  MemOperand MemoryOperand(size_t* first_index) {
    const size_t index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        // TODO(plind): r6 address mode, to be implemented ...
        UNREACHABLE();
    }
    UNREACHABLE();
    return MemOperand(no_reg);
  }

  MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK(!op->IsRegister());
    DCHECK(!op->IsDoubleRegister());
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};


static inline bool HasRegisterInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsRegister();
}

class OutOfLineLoadSingle FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    __ Move(result_, std::numeric_limits<float>::quiet_NaN());
  }

 private:
  FloatRegister const result_;
};


class OutOfLineLoadDouble FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    __ Move(result_, std::numeric_limits<double>::quiet_NaN());
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineLoadInteger FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ mov(result_, zero_reg); }

 private:
  Register const result_;
};

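// The three out-of-line stubs above materialize the result of a checked load
// whose index failed the bounds check: quiet NaN for float/double loads and
// zero for integer loads. The ASSEMBLE_CHECKED_LOAD_* macros below branch to
// them, and the shared OutOfLineCode machinery rejoins inline code at exit().
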
class OutOfLineRound : public OutOfLineCode {
 public:
  OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    // Handle rounding to zero case where sign has to be preserved.
    // High bits of double input already in kScratchReg.
    __ dsrl(at, kScratchReg, 31);
    __ dsll(at, at, 31);
    __ mthc1(at, result_);
  }

 private:
  DoubleRegister const result_;
};

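// A rounded result of zero would lose the sign of a negative input, so the
// out-of-line path above rebuilds the signed zero: dsrl/dsll isolate the sign
// bit of the input's high word (left in kScratchReg by
// ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE) and mthc1 writes it into the high word of
// the result, yielding +0.0 or -0.0 as appropriate.
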
class OutOfLineTruncate FINAL : public OutOfLineRound {
 public:
  OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineRound(gen, result) {}
};


class OutOfLineFloor FINAL : public OutOfLineRound {
 public:
  OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineRound(gen, result) {}
};


class OutOfLineCeil FINAL : public OutOfLineRound {
 public:
  OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineRound(gen, result) {}
};

Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
      return gt;
    case kUnsignedLessThan:
      return lo;
    case kUnsignedGreaterThanOrEqual:
      return hs;
    case kUnsignedLessThanOrEqual:
      return ls;
    case kUnsignedGreaterThan:
      return hi;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      break;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}

Condition FlagsConditionToConditionTst(FlagsCondition condition) {
  switch (condition) {
    case kNotEqual:
      return ne;
    case kEqual:
      return eq;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}

Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
  switch (condition) {
    case kOverflow:
      return ne;
    case kNotOverflow:
      return eq;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}

FPUCondition FlagsConditionToConditionCmpD(bool& predicate,
                                           FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      predicate = true;
      return EQ;
    case kNotEqual:
      predicate = false;
      return EQ;
    case kUnsignedLessThan:
      predicate = true;
      return OLT;
    case kUnsignedGreaterThanOrEqual:
      predicate = false;
      return ULT;
    case kUnsignedLessThanOrEqual:
      predicate = true;
      return OLE;
    case kUnsignedGreaterThan:
      predicate = false;
      return ULE;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      predicate = true;
      break;
    default:
      predicate = true;
      break;
  }
  UNREACHABLE();
  return kNoFPUCondition;
}

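// In FlagsConditionToConditionCmpD above, the out-parameter `predicate` tells
// the caller whether the raw FPU comparison result should be used as-is
// (true) or negated (false); e.g. kNotEqual is expressed as "not EQ" rather
// than as a separate FPU condition.
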
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr)                         \
  do {                                                                        \
    auto result = i.Output##width##Register();                                \
    auto ool = new (zone()) OutOfLineLoad##width(this, result);               \
    if (instr->InputAt(0)->IsRegister()) {                                    \
      auto offset = i.InputRegister(0);                                       \
      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
      __ Daddu(at, i.InputRegister(2), offset);                               \
      __ asm_instr(result, MemOperand(at, 0));                                \
    } else {                                                                  \
      auto offset = i.InputOperand(0).immediate();                            \
      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
    }                                                                         \
    __ bind(ool->exit());                                                     \
  } while (0)

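// The checked-access macros above and below all follow the same pattern: a
// single unsigned compare implements the bounds check (offset `hs` length
// also fails for negative offsets, which wrap to large unsigned values), the
// base-plus-offset add rides in the branch delay slot, and the
// immediate-offset variant instead compares the length register against the
// constant with `ls`. Loads branch to out-of-line code that produces a
// default value; the store macros simply skip the store by branching to a
// local `done` label.
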
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                              \
  do {                                                                        \
    auto result = i.OutputRegister();                                         \
    auto ool = new (zone()) OutOfLineLoadInteger(this, result);               \
    if (instr->InputAt(0)->IsRegister()) {                                    \
      auto offset = i.InputRegister(0);                                       \
      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
      __ Daddu(at, i.InputRegister(2), offset);                               \
      __ asm_instr(result, MemOperand(at, 0));                                \
    } else {                                                                  \
      auto offset = i.InputOperand(0).immediate();                            \
      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
    }                                                                         \
    __ bind(ool->exit());                                                     \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr)                 \
  do {                                                                 \
    Label done;                                                        \
    if (instr->InputAt(0)->IsRegister()) {                             \
      auto offset = i.InputRegister(0);                                \
      auto value = i.Input##width##Register(2);                        \
      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
      __ Daddu(at, i.InputRegister(3), offset);                        \
      __ asm_instr(value, MemOperand(at, 0));                          \
    } else {                                                           \
      auto offset = i.InputOperand(0).immediate();                     \
      auto value = i.Input##width##Register(2);                        \
      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                      \
  do {                                                                 \
    Label done;                                                        \
    if (instr->InputAt(0)->IsRegister()) {                             \
      auto offset = i.InputRegister(0);                                \
      auto value = i.InputRegister(2);                                 \
      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
      __ Daddu(at, i.InputRegister(3), offset);                        \
      __ asm_instr(value, MemOperand(at, 0));                          \
    } else {                                                           \
      auto offset = i.InputOperand(0).immediate();                     \
      auto value = i.InputRegister(2);                                 \
      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)

#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(asm_instr, operation)                  \
  do {                                                                         \
    auto ool =                                                                 \
        new (zone()) OutOfLine##operation(this, i.OutputDoubleRegister());     \
    Label done;                                                                \
    __ mfhc1(kScratchReg, i.InputDoubleRegister(0));                           \
    __ Ext(at, kScratchReg, HeapNumber::kExponentShift,                        \
           HeapNumber::kExponentBits);                                         \
    __ Branch(USE_DELAY_SLOT, &done, hs, at,                                   \
              Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
    __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));              \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));          \
    __ dmfc1(at, i.OutputDoubleRegister());                                    \
    __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg));        \
    __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());            \
    __ bind(ool->exit());                                                      \
    __ bind(&done);                                                            \
  } while (0)

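// Rounding strategy: if the biased exponent extracted from the high word is
// at least kExponentBias + kMantissaBits, the input is already integral (or
// NaN/Infinity), so it is returned unchanged via the mov_d in the delay slot.
// Otherwise the value is rounded to a 64-bit integer held in an FPU register;
// a zero integer result takes the out-of-line path to restore the sign of
// zero, and every other result is converted back to double with cvt_d_l.
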
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  MipsOperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();

  switch (ArchOpcodeField::decode(opcode)) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        __ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
        __ Call(at);
      }
      RecordCallPosition(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
      }
      __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(at);
      RecordCallPosition(instr);
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchNop:
      // Don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
      break;
    }
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), sp);
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64Add:
      __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dadd:
      __ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Sub:
      __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dsub:
      __ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Mul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64MulHigh:
      __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64MulHighU:
      __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Div:
      __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DivU:
      __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Mod:
      __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64ModU:
      __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dmul:
      __ Dmul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Ddiv:
      __ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DdivU:
      __ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dmod:
      __ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DmodU:
      __ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64And:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Or:
      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Xor:
      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Clz:
      __ Clz(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMips64Shl:
      if (instr->InputAt(1)->IsRegister()) {
        __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        __ sll(i.OutputRegister(), i.InputRegister(0), imm);
      }
      break;
    case kMips64Shr:
      if (instr->InputAt(1)->IsRegister()) {
        __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        __ srl(i.OutputRegister(), i.InputRegister(0), imm);
      }
      break;
    case kMips64Sar:
      if (instr->InputAt(1)->IsRegister()) {
        __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        __ sra(i.OutputRegister(), i.InputRegister(0), imm);
      }
      break;
    case kMips64Ext:
      __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
             i.InputInt8(2));
      break;
    case kMips64Dext:
      __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
              i.InputInt8(2));
      break;
    case kMips64Dshl:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsll(i.OutputRegister(), i.InputRegister(0), imm);
        } else {
          __ dsll32(i.OutputRegister(), i.InputRegister(0), imm - 32);
        }
      }
      break;
    case kMips64Dshr:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsrlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsrl(i.OutputRegister(), i.InputRegister(0), imm);
        } else {
          __ dsrl32(i.OutputRegister(), i.InputRegister(0), imm - 32);
        }
      }
      break;
    case kMips64Dsar:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsrav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsra(i.OutputRegister(), i.InputRegister(0), imm);
        } else {
          __ dsra32(i.OutputRegister(), i.InputRegister(0), imm - 32);
        }
      }
      break;
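    // The 64-bit immediate shifts above split on imm < 32 because MIPS64
    // encodes shift amounts of 32..63 with the separate dsll32/dsrl32/dsra32
    // opcodes, whose 5-bit shift field holds the amount minus 32.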
    case kMips64Ror:
      __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dror:
      __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Tst:
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kMips64Cmp:
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kMips64Mov:
      // TODO(plind): Should we combine mov/li like this, or use separate
      // instructions? Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType.
      if (HasRegisterInput(instr, 0)) {
        __ mov(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ li(i.OutputRegister(), i.InputOperand(0));
      }
      break;

    case kMips64CmpD:
      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
      break;
    case kMips64AddD:
      // TODO(plind): add special case: combine mult & add.
      __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64SubD:
      __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MulD:
      // TODO(plind): add special case: right op is -1.0, see arm port.
      __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64DivD:
      __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64ModD: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result in the double result register.
      __ MovFromFloatResult(i.OutputDoubleRegister());
      break;
    }
    case kMips64Float64RoundDown: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
      break;
    }
    case kMips64Float64RoundTruncate: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
      break;
    }
    case kMips64Float64RoundUp: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
      break;
    }
    case kMips64SqrtD: {
      __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64CvtSD: {
      __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64CvtDS: {
      __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMips64CvtDW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_d_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtDUw: {
      FPURegister scratch = kScratchDoubleReg;
      __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
      break;
    }
    case kMips64TruncWD: {
      FPURegister scratch = kScratchDoubleReg;
      // Other arches use round to zero here, so we follow.
      __ trunc_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64TruncUwD: {
      FPURegister scratch = kScratchDoubleReg;
      // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
      __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
      break;
    }
    case kMips64Float64ExtractLowWord32:
      __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64Float64ExtractHighWord32:
      __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64Float64InsertLowWord32:
      __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
      break;
    case kMips64Float64InsertHighWord32:
      __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
      break;
    // ... more basic instructions ...

    case kMips64Lbu:
      __ lbu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Lb:
      __ lb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Sb:
      __ sb(i.InputRegister(2), i.MemoryOperand());
      break;
    case kMips64Lhu:
      __ lhu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Lh:
      __ lh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Sh:
      __ sh(i.InputRegister(2), i.MemoryOperand());
      break;
    case kMips64Lw:
      __ lw(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ld:
      __ ld(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Sw:
      __ sw(i.InputRegister(2), i.MemoryOperand());
      break;
    case kMips64Sd:
      __ sd(i.InputRegister(2), i.MemoryOperand());
      break;
    case kMips64Lwc1:
      __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
      break;
    case kMips64Swc1: {
      size_t index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      __ swc1(i.InputSingleRegister(index), operand);
      break;
    }
    case kMips64Ldc1:
      __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kMips64Sdc1:
      __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
      break;
    case kMips64Push:
      __ Push(i.InputRegister(0));
      break;
    case kMips64StackClaim: {
      int words = MiscField::decode(instr->opcode());
      __ Dsubu(sp, sp, Operand(words << kPointerSizeLog2));
      break;
    }
    case kMips64StoreToStackSlot: {
      int slot = MiscField::decode(instr->opcode());
      __ sd(i.InputRegister(0), MemOperand(sp, slot << kPointerSizeLog2));
      break;
    }
    case kMips64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ daddu(index, object, index);
      __ sd(value, MemOperand(index));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      RAStatus ra_status = kRAHasNotBeenSaved;
      __ RecordWrite(object, index, value, ra_status, mode);
      break;
    }
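    // Note: kMips64StoreWriteBarrier clobbers `index` with the effective
    // address (object + index) before the store; RecordWrite takes that
    // address register and informs the GC's write-barrier machinery when the
    // stored value requires it.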
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lbu);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lh);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lhu);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(sb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(sh);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(sw);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
      break;
  }
}

#define UNSUPPORTED_COND(opcode, condition)                                  \
  OFStream out(stdout);                                                      \
  out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
  UNIMPLEMENTED();

// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  MipsOperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  Condition cc = kNoCondition;

  // MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on other architectures. The compare
  // operations emit MIPS pseudo-instructions, which are handled here by
  // branch instructions that do the actual comparison. It is essential that
  // the input registers of a compare pseudo-op are not modified before this
  // branch op, as they are tested here.

  if (instr->arch_opcode() == kMips64Tst) {
    cc = FlagsConditionToConditionTst(branch->condition);
    __ And(at, i.InputRegister(0), i.InputOperand(1));
    __ Branch(tlabel, cc, at, Operand(zero_reg));
  } else if (instr->arch_opcode() == kMips64Dadd ||
             instr->arch_opcode() == kMips64Dsub) {
    cc = FlagsConditionToConditionOvf(branch->condition);
    __ dsra32(kScratchReg, i.OutputRegister(), 0);
    __ sra(at, i.OutputRegister(), 31);
    __ Branch(tlabel, cc, at, Operand(kScratchReg));
  } else if (instr->arch_opcode() == kMips64Cmp) {
    cc = FlagsConditionToConditionCmp(branch->condition);
    __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));

    if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
  } else if (instr->arch_opcode() == kMips64CmpD) {
    // TODO(dusmil): Optimize unordered checks to use fewer instructions, even
    // if we have to unfold the BranchF macro.
    Label* nan = flabel;
    switch (branch->condition) {
      case kEqual:
        cc = eq;
        break;
      case kNotEqual:
        cc = ne;
        nan = tlabel;
        break;
      case kUnsignedLessThan:
        cc = lt;
        break;
      case kUnsignedGreaterThanOrEqual:
        cc = ge;
        nan = tlabel;
        break;
      case kUnsignedLessThanOrEqual:
        cc = le;
        break;
      case kUnsignedGreaterThan:
        cc = gt;
        nan = tlabel;
        break;
      default:
        UNSUPPORTED_COND(kMips64CmpD, branch->condition);
        break;
    }
    __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));

    if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
  } else {
    PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
           instr->arch_opcode());
    UNIMPLEMENTED();
  }
}

void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
}

// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  MipsOperandConverter i(this, instr);
  Label done;

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label false_value;
  DCHECK_NE(0u, instr->OutputCount());
  Register result = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = kNoCondition;

  // MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on other architectures. The compare
  // operations emit MIPS pseudo-instructions, which are checked and handled
  // here.

  // For materializations, we use the delay slot to set the result true, and
  // in the false case, where we fall through the branch, we reset the result
  // false.

  if (instr->arch_opcode() == kMips64Tst) {
    cc = FlagsConditionToConditionTst(condition);
    __ And(at, i.InputRegister(0), i.InputOperand(1));
    __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
    __ li(result, Operand(1));  // In delay slot.
  } else if (instr->arch_opcode() == kMips64Dadd ||
             instr->arch_opcode() == kMips64Dsub) {
    cc = FlagsConditionToConditionOvf(condition);
    __ dsra32(kScratchReg, i.OutputRegister(), 0);
    __ sra(at, i.OutputRegister(), 31);
    __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(kScratchReg));
    __ li(result, Operand(1));  // In delay slot.
  } else if (instr->arch_opcode() == kMips64Cmp) {
    Register left = i.InputRegister(0);
    Operand right = i.InputOperand(1);
    cc = FlagsConditionToConditionCmp(condition);
    __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
    __ li(result, Operand(1));  // In delay slot.
  } else if (instr->arch_opcode() == kMips64CmpD) {
    FPURegister left = i.InputDoubleRegister(0);
    FPURegister right = i.InputDoubleRegister(1);

    bool predicate;
    FPUCondition cc = FlagsConditionToConditionCmpD(predicate, condition);
    if (kArchVariant != kMips64r6) {
      __ li(result, Operand(1));
      __ c(cc, D, left, right);
      if (predicate) {
        __ Movf(result, zero_reg);
      } else {
        __ Movt(result, zero_reg);
      }
    } else {
      __ cmp(cc, L, kDoubleCompareReg, left, right);
      __ dmfc1(at, kDoubleCompareReg);
      __ dsrl32(result, at, 31);  // Cmp returns all 1s for true.
      if (!predicate)             // Toggle result for not equal.
        __ xori(result, result, 1);
    }
    return;
  } else {
    PrintF("AssembleArchBoolean Unimplemented arch_opcode is: %d\n",
           instr->arch_opcode());
    TRACE_UNIMPL();
    UNIMPLEMENTED();
  }

  // Fallthru case is the false materialization.
  __ bind(&false_value);
  __ li(result, Operand(static_cast<int64_t>(0)));
  __ bind(&done);
}

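// Lookup switch operand layout: input 0 is the value to dispatch on, input 1
// is the default block, and the remaining inputs are (case value, case block)
// pairs.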
void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  MipsOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ li(at, Operand(i.InputInt32(index + 0)));
    __ beq(input, at, GetLabel(i.InputRpo(index + 1)));
  }
  __ nop();  // Branch delay slot of the last beq.
  AssembleArchJump(i.InputRpo(1));
}

void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  MipsOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  size_t const case_count = instr->InputCount() - 2;
  Label here;

  __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
  __ BlockTrampolinePoolFor(case_count * 2 + 7);
  // Ensure that dd-ed labels use 8 byte aligned addresses.
  if ((masm()->pc_offset() & 7) != 0) {
    __ nop();
  }
  __ bal(&here);
  __ dsll(at, input, 3);  // Branch delay slot.
  __ bind(&here);
  __ daddu(at, at, ra);
  __ ld(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
  __ jr(at);
  __ nop();  // Branch delay slot nop.
  for (size_t index = 0; index < case_count; ++index) {
    __ dd(GetLabel(i.InputRpo(index + 2)));
  }
}

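// Jump table address math: bal sets ra to the address of the instruction
// after its delay slot, i.e. the `here` label. The dsll in the delay slot
// scales the case index by 8 (each dd-ed entry is one pointer), and the table
// itself starts four instructions past `here` (daddu, ld, jr, nop), hence the
// 4 * kInstrSize displacement on the ld.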
void CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}

void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ Push(ra, fp);
    __ mov(fp, sp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      // TODO(plind): make callee save size const, possibly DCHECK it.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
      __ MultiPush(saves);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else if (stack_slots > 0) {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the
    // unoptimized frame is still on the stack. Optimized code uses OSR values
    // directly from the unoptimized frame. Thus, all that needs to be done is
    // to allocate the remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    // TODO(titzer): cannot address target function == local #-1
    __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
    DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
    stack_slots -= frame()->GetOsrStackSlotCount();
  }

  if (stack_slots > 0) {
    __ Dsubu(sp, sp, Operand(stack_slots * kPointerSize));
  }
}

void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      if (stack_slots > 0) {
        __ Daddu(sp, sp, Operand(stack_slots * kPointerSize));
      }
      // Restore registers.
      const RegList saves = descriptor->CalleeSavedRegisters();
      if (saves != 0) {
        __ MultiPop(saves);
      }
    }
    __ mov(sp, fp);
    __ Pop(ra, fp);
    __ Ret();
  } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
    __ mov(sp, fp);
    __ Pop(ra, fp);
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ DropAndRet(pop_count);
  } else {
    __ Ret();
  }
}

void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(g.ToRegister(destination), src);
    } else {
      __ sd(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ld(g.ToRegister(destination), src);
    } else {
      Register temp = kScratchReg;
      __ ld(temp, src);
      __ sd(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          __ li(dst, Operand(src.ToInt32()));
          break;
        case Constant::kFloat32:
          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kInt64:
          __ li(dst, Operand(src.ToInt64()));
          break;
        case Constant::kFloat64:
          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ li(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject:
          __ li(dst, src.ToHeapObject());
          break;
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(titzer): loading RPO numbers on mips64.
          break;
      }
      if (destination->IsStackSlot()) __ sd(dst, g.ToMemOperand(destination));
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsDoubleStackSlot()) {
        MemOperand dst = g.ToMemOperand(destination);
        __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
        __ sw(at, dst);
      } else {
        FloatRegister dst = g.ToSingleRegister(destination);
        __ Move(dst, src.ToFloat32());
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      DoubleRegister dst = destination->IsDoubleRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      __ Move(dst, src.ToFloat64());
      if (destination->IsDoubleStackSlot()) {
        __ sdc1(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsDoubleRegister()) {
    FPURegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPURegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ sdc1(src, g.ToMemOperand(destination));
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ ldc1(g.ToDoubleRegister(destination), src);
    } else {
      FPURegister temp = kScratchDoubleReg;
      __ ldc1(temp, src);
      __ sdc1(temp, g.ToMemOperand(destination));
    }
  } else {
    UNREACHABLE();
  }
}

void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ ld(src, dst);
      __ sd(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
    Register temp_0 = kScratchReg;
    Register temp_1 = kScratchReg2;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ ld(temp_0, src);
    __ ld(temp_1, dst);
    __ sd(temp_0, dst);
    __ sd(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    FPURegister temp = kScratchDoubleReg;
    FPURegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPURegister dst = g.ToDoubleRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ Move(temp, src);
      __ ldc1(src, dst);
      __ sdc1(temp, dst);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleStackSlot());
    Register temp_0 = kScratchReg;
    FPURegister temp_1 = kScratchDoubleReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand src1(src0.rm(), src0.offset() + kIntSize);
    MemOperand dst0 = g.ToMemOperand(destination);
    MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
    __ ldc1(temp_1, dst0);  // Save destination in temp_1.
    __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
    __ sw(temp_0, dst0);
    __ lw(temp_0, src1);
    __ sw(temp_0, dst1);
    __ sdc1(temp_1, src0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}

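// The double-stack-slot swap above needs only one FPU temp: the destination
// is parked in temp_1 while the source is copied over it word by word
// (src1/dst1 address the upper 32 bits at offset + kIntSize), and temp_1 is
// then stored back to the source slot.
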
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  // On 64-bit MIPS we emit the jump tables inline.
  UNREACHABLE();
}


void CodeGenerator::AddNopForSmiCodeInlining() {
  // Unused on 32-bit ARM; still exists on 64-bit ARM.
  // TODO(plind): Unclear when this is called now. Understand, fix if needed.
  __ nop();  // Maybe PROPERTY_ACCESS_INLINED?
}

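// Lazy deoptimization later patches the call site in place with a call to the
// deopt entry, which occupies Deoptimizer::patch_size() bytes. Padding with
// nops keeps consecutive patch sites from overlapping; the trampoline pool is
// blocked so the pool cannot inject extra, unaccounted-for instructions into
// the padded region.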
void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      // Block trampoline pool emission for the duration of the padding.
      v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
          masm());
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= v8::internal::Assembler::kInstrSize;
      }
    }
  }
  MarkLazyDeoptSite();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8