#include "src/compiler/arm64/instruction-codes-arm64.h"
#elif V8_TARGET_ARCH_IA32
#include "src/compiler/ia32/instruction-codes-ia32.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/compiler/mips/instruction-codes-mips.h"
#elif V8_TARGET_ARCH_X64
#include "src/compiler/x64/instruction-codes-x64.h"
#else
--- /dev/null
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/mips/macro-assembler-mips.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
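+// Shorthand used by the assembler snippets below: '__ foo()' expands to
+// 'masm()->foo()'.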
+#define __ masm()->
+
+
+// TODO(plind): Possibly avoid using these lithium names.
+#define kScratchReg kLithiumScratchReg
+#define kCompareReg kLithiumScratchReg2
+#define kScratchDoubleReg kLithiumScratchDouble
+
+
+// TODO(plind): consider renaming these macros.
+#define TRACE_MSG(msg) \
+ PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
+ __LINE__)
+
+#define TRACE_UNIMPL() \
+ PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
+ __LINE__)
+
+
+// Adds Mips-specific methods to convert InstructionOperands.
+class MipsOperandConverter FINAL : public InstructionOperandConverter {
+ public:
+ MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ FloatRegister OutputSingleRegister(int index = 0) {
+ return ToSingleRegister(instr_->OutputAt(index));
+ }
+
+ FloatRegister InputSingleRegister(int index) {
+ return ToSingleRegister(instr_->InputAt(index));
+ }
+
+ FloatRegister ToSingleRegister(InstructionOperand* op) {
+    // The single- and double-precision register namespaces are the same on
+    // MIPS; both are typedefs of FPURegister.
+ return ToDoubleRegister(op);
+ }
+
+ Operand InputImmediate(int index) {
+ Constant constant = ToConstant(instr_->InputAt(index));
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Operand(constant.ToInt32());
+ case Constant::kFloat32:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ case Constant::kFloat64:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ case Constant::kInt64:
+ case Constant::kExternalReference:
+ case Constant::kHeapObject:
+ // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
+        // Perhaps this is not done on ARM because of its constant pool?
+ break;
+ }
+ UNREACHABLE();
+ return Operand(zero_reg);
+ }
+
+ Operand InputOperand(int index) {
+ InstructionOperand* op = instr_->InputAt(index);
+ if (op->IsRegister()) {
+ return Operand(ToRegister(op));
+ }
+ return InputImmediate(index);
+ }
+
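+  // Decodes a memory operand from the instruction inputs starting at
+  // *first_index, advancing the index past the inputs consumed by the
+  // addressing mode (two for kMode_MRI).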
+ MemOperand MemoryOperand(int* first_index) {
+ const int index = *first_index;
+ switch (AddressingModeField::decode(instr_->opcode())) {
+ case kMode_None:
+ break;
+ case kMode_MRI:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+ case kMode_MRR:
+ // TODO(plind): r6 address mode, to be implemented ...
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return MemOperand(no_reg);
+ }
+
+ MemOperand MemoryOperand() {
+ int index = 0;
+ return MemoryOperand(&index);
+ }
+
+ MemOperand ToMemOperand(InstructionOperand* op) const {
+ DCHECK(op != NULL);
+ DCHECK(!op->IsRegister());
+ DCHECK(!op->IsDoubleRegister());
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ // The linkage computes where all spill slots are located.
+ FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+ return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+ }
+};
+
+
+static inline bool HasRegisterInput(Instruction* instr, int index) {
+ return instr->InputAt(index)->IsRegister();
+}
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+ MipsOperandConverter i(this, instr);
+ InstructionCode opcode = instr->opcode();
+
+ switch (ArchOpcodeField::decode(opcode)) {
+ case kArchCallCodeObject: {
+ EnsureSpaceForLazyDeopt();
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
+ RelocInfo::CODE_TARGET);
+ } else {
+ __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
+ __ Call(at);
+ }
+ AddSafepointAndDeopt(instr);
+ break;
+ }
+ case kArchCallJSFunction: {
+ EnsureSpaceForLazyDeopt();
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
+ }
+
+ __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Call(at);
+ AddSafepointAndDeopt(instr);
+ break;
+ }
+ case kArchJmp:
+ __ Branch(code_->GetLabel(i.InputBlock(0)));
+ break;
+ case kArchNop:
+ // don't emit code for nops.
+ break;
+ case kArchRet:
+ AssembleReturn();
+ break;
+ case kArchStackPointer:
+ __ mov(i.OutputRegister(), sp);
+ break;
+ case kArchTruncateDoubleToI:
+ __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kMipsAdd:
+ __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMipsAddOvf:
+ __ AdduAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), kCompareReg, kScratchReg);
+ break;
+ case kMipsSub:
+ __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMipsSubOvf:
+ __ SubuAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), kCompareReg, kScratchReg);
+ break;
+ case kMipsMul:
+ __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMipsDiv:
+ __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMipsDivU:
+ __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMipsMod:
+ __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMipsModU:
+ __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMipsAnd:
+ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMipsOr:
+ __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMipsXor:
+ __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
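+    // For the shift operations below, the instruction selector guarantees
+    // that an immediate shift amount passes is_uint5() (see CanBeImmediate
+    // in instruction-selector-mips.cc).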
+ case kMipsShl:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int32_t imm = i.InputOperand(1).immediate();
+ __ sll(i.OutputRegister(), i.InputRegister(0), imm);
+ }
+ break;
+ case kMipsShr:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int32_t imm = i.InputOperand(1).immediate();
+ __ srl(i.OutputRegister(), i.InputRegister(0), imm);
+ }
+ break;
+ case kMipsSar:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int32_t imm = i.InputOperand(1).immediate();
+ __ sra(i.OutputRegister(), i.InputRegister(0), imm);
+ }
+ break;
+ case kMipsRor:
+ __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMipsTst:
+      // Pseudo-instruction used for tst/branch.
+ __ And(kCompareReg, i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMipsCmp:
+      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+ break;
+ case kMipsMov:
+ // TODO(plind): Should we combine mov/li like this, or use separate instr?
+ // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
+ if (HasRegisterInput(instr, 0)) {
+ __ mov(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ li(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+
+ case kMipsCmpD:
+      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
+ break;
+ case kMipsAddD:
+ // TODO(plind): add special case: combine mult & add.
+ __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsSubD:
+ __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsMulD:
+ // TODO(plind): add special case: right op is -1.0, see arm port.
+ __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsDivD:
+ __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsModD: {
+ // TODO(bmeurer): We should really get rid of this special instruction,
+ // and generate a CallAddress instruction instead.
+ FrameScope scope(masm(), StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, 2, kScratchReg);
+ __ MovToFloatParameters(i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+ 0, 2);
+      // Move the result into the double result register.
+ __ MovFromFloatResult(i.OutputDoubleRegister());
+ break;
+ }
+ case kMipsSqrtD: {
+ __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kMipsCvtSD: {
+ __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kMipsCvtDS: {
+ __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
+ break;
+ }
+ case kMipsCvtDW: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ mtc1(i.InputRegister(0), scratch);
+ __ cvt_d_w(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kMipsCvtDUw: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
+ break;
+ }
+ case kMipsTruncWD: {
+ FPURegister scratch = kScratchDoubleReg;
+      // Other architectures use round-to-zero here, so we do the same.
+ __ trunc_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsTruncUwD: {
+ FPURegister scratch = kScratchDoubleReg;
+ // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
+ __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
+ break;
+ }
+ // ... more basic instructions ...
+
+ case kMipsLbu:
+ __ lbu(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kMipsLb:
+ __ lb(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kMipsSb:
+ __ sb(i.InputRegister(2), i.MemoryOperand());
+ break;
+ case kMipsLhu:
+ __ lhu(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kMipsLh:
+ __ lh(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kMipsSh:
+ __ sh(i.InputRegister(2), i.MemoryOperand());
+ break;
+ case kMipsLw:
+ __ lw(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kMipsSw:
+ __ sw(i.InputRegister(2), i.MemoryOperand());
+ break;
+ case kMipsLwc1: {
+ __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
+ break;
+ }
+ case kMipsSwc1: {
+ int index = 0;
+ MemOperand operand = i.MemoryOperand(&index);
+ __ swc1(i.InputSingleRegister(index), operand);
+ break;
+ }
+ case kMipsLdc1:
+ __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
+ break;
+ case kMipsSdc1:
+ __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
+ break;
+ case kMipsPush:
+ __ Push(i.InputRegister(0));
+ break;
+    case kMipsStoreWriteBarrier: {
+      Register object = i.InputRegister(0);
+      Register index = i.InputRegister(1);
+      Register value = i.InputRegister(2);
+      __ addu(index, object, index);
+      __ sw(value, MemOperand(index));
+      SaveFPRegsMode mode =
+          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+      RAStatus ra_status = kRAHasNotBeenSaved;
+      __ RecordWrite(object, index, value, ra_status, mode);
+      break;
+    }
+  }
+}
+
+
+#define UNSUPPORTED_COND(opcode, condition)                                    \
+  do {                                                                         \
+    OFStream out(stdout);                                                      \
+    out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
+    UNIMPLEMENTED();                                                           \
+  } while (0)
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+ FlagsCondition condition) {
+ MipsOperandConverter i(this, instr);
+ Label done;
+
+ // Emit a branch. The true and false targets are always the last two inputs
+ // to the instruction.
+ BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
+ BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+ bool fallthru = IsNextInAssemblyOrder(fblock);
+ Label* tlabel = code()->GetLabel(tblock);
+ Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+ Condition cc = kNoCondition;
+
+  // MIPS does not have condition code flags, so compare and branch are
+  // implemented differently from the other architectures. The compare
+  // operations emit MIPS pseudo-instructions, which are handled here by
+  // branch instructions that do the actual comparison. It is essential that
+  // the input registers of a compare pseudo-op are not modified before this
+  // branch op, as they are tested here.
+  // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
+  //    not separated by other instructions.
+
+ if (instr->arch_opcode() == kMipsTst) {
+    // The kMipsTst pseudo-instruction emits an And to the 'kCompareReg'
+    // register.
+ switch (condition) {
+ case kNotEqual:
+ cc = ne;
+ break;
+ case kEqual:
+ cc = eq;
+ break;
+ default:
+ UNSUPPORTED_COND(kMipsTst, condition);
+ break;
+ }
+ __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg));
+
+ } else if (instr->arch_opcode() == kMipsAddOvf ||
+ instr->arch_opcode() == kMipsSubOvf) {
+    // kMipsAddOvf and SubOvf emit a negative result to 'kCompareReg' on
+    // overflow.
+ switch (condition) {
+ case kOverflow:
+ cc = lt;
+ break;
+ case kNotOverflow:
+ cc = ge;
+ break;
+ default:
+ UNSUPPORTED_COND(kMipsAddOvf, condition);
+ break;
+ }
+ __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg));
+
+ } else if (instr->arch_opcode() == kMipsCmp) {
+ switch (condition) {
+ case kEqual:
+ cc = eq;
+ break;
+ case kNotEqual:
+ cc = ne;
+ break;
+ case kSignedLessThan:
+ cc = lt;
+ break;
+ case kSignedGreaterThanOrEqual:
+ cc = ge;
+ break;
+ case kSignedLessThanOrEqual:
+ cc = le;
+ break;
+ case kSignedGreaterThan:
+ cc = gt;
+ break;
+ case kUnsignedLessThan:
+ cc = lo;
+ break;
+ case kUnsignedGreaterThanOrEqual:
+ cc = hs;
+ break;
+ case kUnsignedLessThanOrEqual:
+ cc = ls;
+ break;
+ case kUnsignedGreaterThan:
+ cc = hi;
+ break;
+ default:
+ UNSUPPORTED_COND(kMipsCmp, condition);
+ break;
+ }
+ __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
+
+ } else if (instr->arch_opcode() == kMipsCmpD) {
+    // TODO(dusmil): Optimize unordered checks to use fewer instructions,
+    // even if we have to unfold the BranchF macro.
+ Label* nan = flabel;
+ switch (condition) {
+ case kUnorderedEqual:
+ cc = eq;
+ break;
+ case kUnorderedNotEqual:
+ cc = ne;
+ nan = tlabel;
+ break;
+ case kUnorderedLessThan:
+ cc = lt;
+ break;
+ case kUnorderedGreaterThanOrEqual:
+ cc = ge;
+ nan = tlabel;
+ break;
+ case kUnorderedLessThanOrEqual:
+ cc = le;
+ break;
+ case kUnorderedGreaterThan:
+ cc = gt;
+ nan = tlabel;
+ break;
+ default:
+ UNSUPPORTED_COND(kMipsCmpD, condition);
+ break;
+ }
+ __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+
+ } else {
+ PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
+ instr->arch_opcode());
+ UNIMPLEMENTED();
+  }
+  if (!fallthru) __ Branch(flabel);  // No fallthru to flabel.
+  __ bind(&done);
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ MipsOperandConverter i(this, instr);
+ Label done;
+
+ // Materialize a full 32-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
+ Label false_value;
+ DCHECK_NE(0, instr->OutputCount());
+ Register result = i.OutputRegister(instr->OutputCount() - 1);
+ Condition cc = kNoCondition;
+
+  // MIPS does not have condition code flags, so compare and branch are
+  // implemented differently from the other architectures. The compare
+  // operations emit MIPS pseudo-instructions, which are checked and handled
+  // here.
+
+  // For materializations, we use the delay slot to set the result to true;
+  // in the false case, where we fall through the branch, we reset the result
+  // to false.
+
+ // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
+ // not separated by other instructions.
+ if (instr->arch_opcode() == kMipsTst) {
+    // The kMipsTst pseudo-instruction emits an And to the 'kCompareReg'
+    // register.
+ switch (condition) {
+ case kNotEqual:
+ cc = ne;
+ break;
+ case kEqual:
+ cc = eq;
+ break;
+ default:
+ UNSUPPORTED_COND(kMipsTst, condition);
+ break;
+ }
+ __ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg));
+ __ li(result, Operand(1)); // In delay slot.
+
+ } else if (instr->arch_opcode() == kMipsAddOvf ||
+ instr->arch_opcode() == kMipsSubOvf) {
+    // kMipsAddOvf and SubOvf emit a negative result to 'kCompareReg' on
+    // overflow.
+ switch (condition) {
+ case kOverflow:
+ cc = lt;
+ break;
+ case kNotOverflow:
+ cc = ge;
+ break;
+ default:
+ UNSUPPORTED_COND(kMipsAddOvf, condition);
+ break;
+ }
+ __ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg));
+ __ li(result, Operand(1)); // In delay slot.
+
+ } else if (instr->arch_opcode() == kMipsCmp) {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ switch (condition) {
+ case kEqual:
+ cc = eq;
+ break;
+ case kNotEqual:
+ cc = ne;
+ break;
+ case kSignedLessThan:
+ cc = lt;
+ break;
+ case kSignedGreaterThanOrEqual:
+ cc = ge;
+ break;
+ case kSignedLessThanOrEqual:
+ cc = le;
+ break;
+ case kSignedGreaterThan:
+ cc = gt;
+ break;
+ case kUnsignedLessThan:
+ cc = lo;
+ break;
+ case kUnsignedGreaterThanOrEqual:
+ cc = hs;
+ break;
+ case kUnsignedLessThanOrEqual:
+ cc = ls;
+ break;
+ case kUnsignedGreaterThan:
+ cc = hi;
+ break;
+ default:
+ UNSUPPORTED_COND(kMipsCmp, condition);
+ break;
+ }
+ __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
+ __ li(result, Operand(1)); // In delay slot.
+
+ } else if (instr->arch_opcode() == kMipsCmpD) {
+ FPURegister left = i.InputDoubleRegister(0);
+ FPURegister right = i.InputDoubleRegister(1);
+ // TODO(plind): Provide NaN-testing macro-asm function without need for
+ // BranchF.
+ FPURegister dummy1 = f0;
+ FPURegister dummy2 = f2;
+ switch (condition) {
+ case kUnorderedEqual:
+ // TODO(plind): improve the NaN testing throughout this function.
+ __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
+ cc = eq;
+ break;
+ case kUnorderedNotEqual:
+ __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
+ __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
+ cc = ne;
+ break;
+ case kUnorderedLessThan:
+ __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
+ cc = lt;
+ break;
+ case kUnorderedGreaterThanOrEqual:
+ __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
+ __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
+ cc = ge;
+ break;
+ case kUnorderedLessThanOrEqual:
+ __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
+ cc = le;
+ break;
+ case kUnorderedGreaterThan:
+ __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
+ __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
+ cc = gt;
+ break;
+ default:
+        UNSUPPORTED_COND(kMipsCmpD, condition);
+ break;
+ }
+ __ BranchF(USE_DELAY_SLOT, &done, NULL, cc, left, right);
+ __ li(result, Operand(1)); // In delay slot - branch taken returns 1.
+ // Fall-thru (branch not taken) returns 0.
+
+ } else {
+ PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
+ instr->arch_opcode());
+ TRACE_UNIMPL();
+ UNIMPLEMENTED();
+ }
+ // Fallthru case is the false materialization.
+ __ bind(&false_value);
+ __ li(result, Operand(0));
+ __ bind(&done);
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+ isolate(), deoptimization_id, Deoptimizer::LAZY);
+ __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+void CodeGenerator::AssemblePrologue() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ __ Push(ra, fp);
+ __ mov(fp, sp);
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) { // Save callee-saved registers.
+ // TODO(plind): make callee save size const, possibly DCHECK it.
+ int register_save_area_size = 0;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ register_save_area_size += kPointerSize;
+ }
+ frame()->SetRegisterSaveAreaSize(register_save_area_size);
+ __ MultiPush(saves);
+ }
+ } else if (descriptor->IsJSFunctionCall()) {
+ CompilationInfo* info = linkage()->info();
+ __ Prologue(info->IsCodePreAgingActive());
+ frame()->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ Label ok;
+ // +2 for return address and saved frame pointer.
+ int receiver_slot = info->scope()->num_parameters() + 2;
+ __ lw(a2, MemOperand(fp, receiver_slot * kPointerSize));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&ok, ne, a2, Operand(at));
+
+ __ lw(a2, GlobalObjectOperand());
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
+ __ sw(a2, MemOperand(fp, receiver_slot * kPointerSize));
+ __ bind(&ok);
+ }
+ } else {
+ __ StubPrologue();
+ frame()->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+ }
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (stack_slots > 0) {
+ __ Subu(sp, sp, Operand(stack_slots * kPointerSize));
+ }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (frame()->GetRegisterSaveAreaSize() > 0) {
+ // Remove this frame's spill slots first.
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (stack_slots > 0) {
+ __ Addu(sp, sp, Operand(stack_slots * kPointerSize));
+ }
+ // Restore registers.
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ MultiPop(saves);
+ }
+ }
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
+ __ Ret();
+ } else {
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
+ int pop_count = descriptor->IsJSFunctionCall()
+ ? static_cast<int>(descriptor->JSParameterCount())
+ : 0;
+ __ DropAndRet(pop_count);
+ }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ MipsOperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ __ mov(g.ToRegister(destination), src);
+ } else {
+ __ sw(src, g.ToMemOperand(destination));
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ lw(g.ToRegister(destination), src);
+ } else {
+ Register temp = kScratchReg;
+ __ lw(temp, src);
+ __ sw(temp, g.ToMemOperand(destination));
+ }
+ } else if (source->IsConstant()) {
+ Constant src = g.ToConstant(source);
+ if (destination->IsRegister() || destination->IsStackSlot()) {
+ Register dst =
+ destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+ switch (src.type()) {
+ case Constant::kInt32:
+ __ li(dst, Operand(src.ToInt32()));
+ break;
+ case Constant::kFloat32:
+ __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ break;
+ case Constant::kInt64:
+ UNREACHABLE();
+ break;
+ case Constant::kFloat64:
+ __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ break;
+ case Constant::kExternalReference:
+ __ li(dst, Operand(src.ToExternalReference()));
+ break;
+ case Constant::kHeapObject:
+ __ li(dst, src.ToHeapObject());
+ break;
+ }
+ if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
+ } else if (src.type() == Constant::kFloat32) {
+ FPURegister dst = destination->IsDoubleRegister()
+ ? g.ToDoubleRegister(destination)
+ : kScratchDoubleReg.low();
+ // TODO(turbofan): Can we do better here?
+ __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
+ __ mtc1(at, dst);
+ if (destination->IsDoubleStackSlot()) {
+ __ swc1(dst, g.ToMemOperand(destination));
+ }
+ } else {
+ DCHECK_EQ(Constant::kFloat64, src.type());
+ DoubleRegister dst = destination->IsDoubleRegister()
+ ? g.ToDoubleRegister(destination)
+ : kScratchDoubleReg;
+ __ Move(dst, src.ToFloat64());
+ if (destination->IsDoubleStackSlot()) {
+ __ sdc1(dst, g.ToMemOperand(destination));
+ }
+ }
+ } else if (source->IsDoubleRegister()) {
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ __ sdc1(src, g.ToMemOperand(destination));
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ ldc1(g.ToDoubleRegister(destination), src);
+ } else {
+ FPURegister temp = kScratchDoubleReg;
+ __ ldc1(temp, src);
+ __ sdc1(temp, g.ToMemOperand(destination));
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ MipsOperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ // Register-register.
+ Register temp = kScratchReg;
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ Move(temp, src);
+ __ Move(src, dst);
+ __ Move(dst, temp);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ mov(temp, src);
+ __ lw(src, dst);
+ __ sw(temp, dst);
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsStackSlot());
+ Register temp_0 = kScratchReg;
+ Register temp_1 = kCompareReg;
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ __ lw(temp_0, src);
+ __ lw(temp_1, dst);
+ __ sw(temp_0, dst);
+ __ sw(temp_1, src);
+ } else if (source->IsDoubleRegister()) {
+ FPURegister temp = kScratchDoubleReg;
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(temp, src);
+ __ Move(src, dst);
+ __ Move(dst, temp);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Move(temp, src);
+ __ ldc1(src, dst);
+ __ sdc1(temp, dst);
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleStackSlot());
+ Register temp_0 = kScratchReg;
+ FPURegister temp_1 = kScratchDoubleReg;
+ MemOperand src0 = g.ToMemOperand(source);
+ MemOperand src1(src0.rm(), src0.offset() + kPointerSize);
+ MemOperand dst0 = g.ToMemOperand(destination);
+ MemOperand dst1(dst0.rm(), dst0.offset() + kPointerSize);
+ __ ldc1(temp_1, dst0); // Save destination in temp_1.
+ __ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ sw(temp_0, dst0);
+ __ lw(temp_0, src1);
+ __ sw(temp_0, dst1);
+ __ sdc1(temp_1, src0);
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() {
+  // Unused on 32-bit ARM; still exists on 64-bit ARM.
+ // TODO(plind): Unclear when this is called now. Understand, fix if needed.
+ __ nop(); // Maybe PROPERTY_ACCESS_INLINED?
+}
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ int space_needed = Deoptimizer::patch_size();
+ if (!linkage()->info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      // Block trampoline pool emission for the duration of the padding.
+ v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+ masm());
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= v8::internal::Assembler::kInstrSize;
+ }
+ }
+ }
+ MarkLazyDeoptSite();
+}
+
+#undef __
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MIPS_INSTRUCTION_CODES_MIPS_H_
+#define V8_COMPILER_MIPS_INSTRUCTION_CODES_MIPS_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// MIPS-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(MipsAdd) \
+ V(MipsAddOvf) \
+ V(MipsSub) \
+ V(MipsSubOvf) \
+ V(MipsMul) \
+ V(MipsDiv) \
+ V(MipsDivU) \
+ V(MipsMod) \
+ V(MipsModU) \
+ V(MipsAnd) \
+ V(MipsOr) \
+ V(MipsXor) \
+ V(MipsShl) \
+ V(MipsShr) \
+ V(MipsSar) \
+ V(MipsRor) \
+ V(MipsMov) \
+ V(MipsTst) \
+ V(MipsCmp) \
+ V(MipsCmpD) \
+ V(MipsAddD) \
+ V(MipsSubD) \
+ V(MipsMulD) \
+ V(MipsDivD) \
+ V(MipsModD) \
+ V(MipsSqrtD) \
+ V(MipsCvtSD) \
+ V(MipsCvtDS) \
+ V(MipsTruncWD) \
+ V(MipsTruncUwD) \
+ V(MipsCvtDW) \
+ V(MipsCvtDUw) \
+ V(MipsLb) \
+ V(MipsLbu) \
+ V(MipsSb) \
+ V(MipsLh) \
+ V(MipsLhu) \
+ V(MipsSh) \
+ V(MipsLw) \
+ V(MipsSw) \
+ V(MipsLwc1) \
+ V(MipsSwc1) \
+ V(MipsLdc1) \
+ V(MipsSdc1) \
+ V(MipsPush) \
+ V(MipsStoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+// TODO(plind): Add the new r6 address modes.
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1] */
+
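+// An instruction is encoded as an arch opcode combined with an addressing
+// mode, e.g. kMipsLw | AddressingModeField::encode(kMode_MRI); the code
+// generator decodes the mode to select the assembler form.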
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_MIPS_INSTRUCTION_CODES_MIPS_H_
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE_UNIMPL() \
+ PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+
+// Adds Mips-specific methods for generating InstructionOperands.
+class MipsOperandGenerator FINAL : public OperandGenerator {
+ public:
+ explicit MipsOperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
+ if (CanBeImmediate(node, opcode)) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ bool CanBeImmediate(Node* node, InstructionCode opcode) {
+ Int32Matcher m(node);
+ if (!m.HasValue()) return false;
+ int32_t value = m.Value();
+ switch (ArchOpcodeField::decode(opcode)) {
+ case kMipsShl:
+ case kMipsSar:
+ case kMipsShr:
+ return is_uint5(value);
+ case kMipsXor:
+ return is_uint16(value);
+ case kMipsLdc1:
+ case kMipsSdc1:
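+        // Doubles are accessed as two adjacent words, so both offset and
+        // offset + kIntSize must fit in the 16-bit immediate field.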
+ return is_int16(value + kIntSize);
+ default:
+ return is_int16(value);
+ }
+ }
+
+ private:
+ bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
+ TRACE_UNIMPL();
+ return false;
+ }
+};
+
+
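+// Emits an instruction whose result and both inputs are registers.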
+static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ MipsOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
+
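+// Like VisitRRR, but the second input may be an immediate when it fits the
+// opcode's immediate field.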
+static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ MipsOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseOperand(node->InputAt(1), opcode));
+}
+
+
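+// Shared routine for binary operations, optionally fused with a branch or a
+// boolean materialization via the FlagsContinuation.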
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ MipsOperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand* inputs[4];
+ size_t input_count = 0;
+ InstructionOperand* outputs[2];
+ size_t output_count = 0;
+
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
+ }
+
+ outputs[output_count++] = g.DefineAsRegister(node);
+ if (cont->IsSet()) {
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
+ }
+
+ DCHECK_NE(0, input_count);
+ DCHECK_NE(0, output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
+
+ Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+ outputs, input_count, inputs);
+ if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ FlagsContinuation cont;
+ VisitBinop(selector, node, opcode, &cont);
+}
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+ MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ MipsOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepFloat32:
+ opcode = kMipsLwc1;
+ break;
+ case kRepFloat64:
+ opcode = kMipsLdc1;
+ break;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = typ == kTypeUint32 ? kMipsLbu : kMipsLb;
+ break;
+ case kRepWord16:
+ opcode = typ == kTypeUint32 ? kMipsLhu : kMipsLh;
+ break;
+ case kRepTagged: // Fall through.
+ case kRepWord32:
+ opcode = kMipsLw;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+ } else {
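+    // No immediate form is available (the index is not a suitable constant),
+    // so compute base + index into a temporary register and load from
+    // [temp + 0].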
+ InstructionOperand* addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+ MipsOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ MachineType rep = RepresentationOf(store_rep.machine_type());
+ if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+ DCHECK(rep == kRepTagged);
+ // TODO(dcarney): refactor RecordWrite function to take temp registers
+ // and pass them here instead of using fixed regs
+ // TODO(dcarney): handle immediate indices.
+ InstructionOperand* temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
+ Emit(kMipsStoreWriteBarrier, NULL, g.UseFixed(base, t0),
+ g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
+ return;
+ }
+ DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepFloat32:
+ opcode = kMipsSwc1;
+ break;
+ case kRepFloat64:
+ opcode = kMipsSdc1;
+ break;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = kMipsSb;
+ break;
+ case kRepWord16:
+ opcode = kMipsSh;
+ break;
+ case kRepTagged: // Fall through.
+ case kRepWord32:
+ opcode = kMipsSw;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ InstructionOperand* addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, addr_reg,
+ g.TempImmediate(0), g.UseRegister(value));
+ }
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+ VisitBinop(this, node, kMipsAnd);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ VisitBinop(this, node, kMipsOr);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ VisitBinop(this, node, kMipsXor);
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ VisitRRO(this, kMipsShl, node);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ VisitRRO(this, kMipsShr, node);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ VisitRRO(this, kMipsSar, node);
+}
+
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+ VisitRRO(this, kMipsRor, node);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ MipsOperandGenerator g(this);
+
+ // TODO(plind): Consider multiply & add optimization from arm port.
+ VisitBinop(this, node, kMipsAdd);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ VisitBinop(this, node, kMipsSub);
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ MipsOperandGenerator g(this);
+ Int32BinopMatcher m(node);
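+  // Strength-reduce multiplication by a constant: x * 2^k becomes x << k,
+  // x * (2^k + 1) becomes (x << k) + x, and x * (2^k - 1) becomes
+  // (x << k) - x.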
+ if (m.right().HasValue() && m.right().Value() > 0) {
+ int32_t value = m.right().Value();
+ if (base::bits::IsPowerOfTwo32(value)) {
+ Emit(kMipsShl | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(WhichPowerOf2(value)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo32(value - 1)) {
+ InstructionOperand* temp = g.TempRegister();
+ Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(WhichPowerOf2(value - 1)));
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
+ return;
+ }
+ if (base::bits::IsPowerOfTwo32(value + 1)) {
+ InstructionOperand* temp = g.TempRegister();
+ Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(WhichPowerOf2(value + 1)));
+ Emit(kMipsSub | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+ return;
+ }
+ }
+ Emit(kMipsMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ MipsOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Emit(kMipsDiv, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+ MipsOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Emit(kMipsDivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ MipsOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Emit(kMipsMod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+ MipsOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Emit(kMipsModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsCvtDS, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsCvtDW, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsCvtDUw, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsTruncWD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsTruncUwD, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsCvtSD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ VisitRRR(this, kMipsAddD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ VisitRRR(this, kMipsSubD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ VisitRRR(this, kMipsMulD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ VisitRRR(this, kMipsDivD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsModD, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12),
+ g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsSqrtD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+ BasicBlock* deoptimization) {
+ MipsOperandGenerator g(this);
+ CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+
+ FrameStateDescriptor* frame_state_descriptor = NULL;
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor =
+ GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+ }
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ InitializeCallBuffer(call, &buffer, true, false);
+
+ // TODO(dcarney): might be possible to use claim/poke instead
+ // Push any stack arguments.
+ for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+ input != buffer.pushed_nodes.rend(); input++) {
+ // TODO(plind): inefficient for MIPS, use MultiPush here.
+ // - Also need to align the stack. See arm64.
+ // - Maybe combine with arg slot stuff in DirectCEntry stub.
+ Emit(kMipsPush, NULL, g.UseRegister(*input));
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject: {
+ opcode = kArchCallCodeObject;
+ break;
+ }
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the call instruction.
+ Instruction* call_instr =
+ Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+ buffer.instruction_args.size(), &buffer.instruction_args.front());
+
+ call_instr->MarkAsCall();
+ if (deoptimization != NULL) {
+ DCHECK(continuation != NULL);
+ call_instr->MarkAsControl();
+ }
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+ FlagsContinuation* cont) {
+ VisitBinop(this, node, kMipsAddOvf, cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+ FlagsContinuation* cont) {
+ VisitBinop(this, node, kMipsSubOvf, cont);
+}
+
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand* left, InstructionOperand* right,
+ FlagsContinuation* cont) {
+ MipsOperandGenerator g(selector);
+ opcode = cont->Encode(opcode);
+ if (cont->IsBranch()) {
+ selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+ g.Label(cont->false_block()))->MarkAsControl();
+ } else {
+ DCHECK(cont->IsSet());
+ // TODO(plind): Revisit and test this path.
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ }
+}
+
+
+// Shared routine for multiple word compare operations.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ bool commutative) {
+ MipsOperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // Match immediates on left or right side of comparison.
+ if (g.CanBeImmediate(right, opcode)) {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+ cont);
+ } else if (g.CanBeImmediate(left, opcode)) {
+ if (!commutative) cont->Commute();
+ VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+ cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+ cont);
+ }
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+ switch (node->opcode()) {
+ case IrOpcode::kWord32And:
+      // TODO(plind): Understand the significance of the 'IR and' special
+      // case.
+ return VisitWordCompare(this, node, kMipsTst, cont, true);
+ default:
+ break;
+ }
+
+ MipsOperandGenerator g(this);
+ // kMipsTst is a pseudo-instruction to do logical 'and' and leave the result
+ // in a dedicated tmp register.
+ VisitCompare(this, kMipsTst, g.UseRegister(node), g.UseRegister(node), cont);
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(this, node, kMipsCmp, cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+ FlagsContinuation* cont) {
+ MipsOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ VisitCompare(this, kMipsCmpD, g.UseRegister(left), g.UseRegister(right),
+ cont);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
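+// Binds TurboFan's linkage to the MIPS O32 calling convention: v0/v1 hold
+// return values, a0-a3 pass the first four C arguments, and s0-s7 are
+// callee-saved.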
+struct MipsLinkageHelperTraits {
+ static Register ReturnValueReg() { return v0; }
+ static Register ReturnValue2Reg() { return v1; }
+ static Register JSCallFunctionReg() { return a1; }
+ static Register ContextReg() { return cp; }
+ static Register RuntimeCallFunctionReg() { return a1; }
+ static Register RuntimeCallArgCountReg() { return a0; }
+ static RegList CCalleeSaveRegisters() {
+ return s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() |
+ s6.bit() | s7.bit();
+ }
+ static Register CRegisterParameter(int i) {
+ static Register register_parameters[] = {a0, a1, a2, a3};
+ return register_parameters[i];
+ }
+ static int CRegisterParametersLength() { return 4; }
+};
+
+
+typedef LinkageHelper<MipsLinkageHelperTraits> LH;
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+ return LH::GetJSCallDescriptor(zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+ Runtime::FunctionId function, int parameter_count,
+ Operator::Properties properties, Zone* zone) {
+ return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+ properties);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+ CallInterfaceDescriptor descriptor, int stack_parameter_count,
+ CallDescriptor::Flags flags, Zone* zone) {
+ return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
+ flags);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+ MachineSignature* sig) {
+ return LH::GetSimplifiedCDescriptor(zone, sig);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM || \
- V8_TARGET_ARCH_ARM64
+ V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS
#define V8_TURBOFAN_BACKEND 1
#else
#define V8_TURBOFAN_BACKEND 0
Simulator::CallArgument::End()};
return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f), args));
}
-#elif USE_SIMULATOR && V8_TARGET_ARCH_ARM
+#elif USE_SIMULATOR && (V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS)
uintptr_t CallSimulator(byte* f, int32_t p1 = 0, int32_t p2 = 0,
int32_t p3 = 0, int32_t p4 = 0) {
Simulator* simulator = Simulator::current(isolate_);
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+template <typename T>
+struct MachInst {
+ T constructor;
+ const char* constructor_name;
+ ArchOpcode arch_opcode;
+ MachineType machine_type;
+};
+
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const MachInst<T>& mi) {
+ return os << mi.constructor_name;
+}
+
+typedef MachInst<Node* (RawMachineAssembler::*)(Node*)> MachInst1;
+typedef MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)> MachInst2;
+
+// The IntCmp helper structure avoids duplicated code: it bundles a MachInst2
+// with the expected instruction-stream size, since different compare
+// instructions expand to different numbers of instructions.
+struct IntCmp {
+ MachInst2 mi;
+ uint32_t expected_size;
+};
+
+struct FPCmp {
+ MachInst2 mi;
+ FlagsCondition cond;
+};
+
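+// Note that Float64GreaterThan(OrEqual) is expected to be lowered by
+// commuting the inputs of the corresponding less-than comparison, which is
+// why it maps to kUnorderedLessThan(OrEqual) below.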
+const FPCmp kFPCmpInstructions[] = {
+ {{&RawMachineAssembler::Float64Equal, "Float64Equal", kMipsCmpD,
+ kMachFloat64},
+ kUnorderedEqual},
+ {{&RawMachineAssembler::Float64LessThan, "Float64LessThan", kMipsCmpD,
+ kMachFloat64},
+ kUnorderedLessThan},
+ {{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
+ kMipsCmpD, kMachFloat64},
+ kUnorderedLessThanOrEqual},
+ {{&RawMachineAssembler::Float64GreaterThan, "Float64GreaterThan", kMipsCmpD,
+ kMachFloat64},
+ kUnorderedLessThan},
+ {{&RawMachineAssembler::Float64GreaterThanOrEqual,
+ "Float64GreaterThanOrEqual", kMipsCmpD, kMachFloat64},
+ kUnorderedLessThanOrEqual}};
+
+struct Conversion {
+ // The machine_type field in MachInst1 represents the destination type.
+ MachInst1 mi;
+ MachineType src_machine_type;
+};
+
+
+// ----------------------------------------------------------------------------
+// Logical instructions.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kLogicalInstructions[] = {
+ {&RawMachineAssembler::WordAnd, "WordAnd", kMipsAnd, kMachInt16},
+ {&RawMachineAssembler::WordOr, "WordOr", kMipsOr, kMachInt16},
+ {&RawMachineAssembler::WordXor, "WordXor", kMipsXor, kMachInt16},
+ {&RawMachineAssembler::Word32And, "Word32And", kMipsAnd, kMachInt32},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kMipsOr, kMachInt32},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kMipsXor, kMachInt32}};
+
+
+// ----------------------------------------------------------------------------
+// Shift instructions.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kShiftInstructions[] = {
+ {&RawMachineAssembler::WordShl, "WordShl", kMipsShl, kMachInt16},
+ {&RawMachineAssembler::WordShr, "WordShr", kMipsShr, kMachInt16},
+ {&RawMachineAssembler::WordSar, "WordSar", kMipsSar, kMachInt16},
+ {&RawMachineAssembler::WordRor, "WordRor", kMipsRor, kMachInt16},
+ {&RawMachineAssembler::Word32Shl, "Word32Shl", kMipsShl, kMachInt32},
+ {&RawMachineAssembler::Word32Shr, "Word32Shr", kMipsShr, kMachInt32},
+ {&RawMachineAssembler::Word32Sar, "Word32Sar", kMipsSar, kMachInt32},
+ {&RawMachineAssembler::Word32Ror, "Word32Ror", kMipsRor, kMachInt32}};
+
+
+// ----------------------------------------------------------------------------
+// MUL/DIV instructions.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kMulDivInstructions[] = {
+ {&RawMachineAssembler::Int32Mul, "Int32Mul", kMipsMul, kMachInt32},
+ {&RawMachineAssembler::Int32Div, "Int32Div", kMipsDiv, kMachInt32},
+ {&RawMachineAssembler::Uint32Div, "Uint32Div", kMipsDivU, kMachUint32},
+ {&RawMachineAssembler::Float64Mul, "Float64Mul", kMipsMulD, kMachFloat64},
+ {&RawMachineAssembler::Float64Div, "Float64Div", kMipsDivD, kMachFloat64}};
+
+
+// ----------------------------------------------------------------------------
+// MOD instructions.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kModInstructions[] = {
+ {&RawMachineAssembler::Int32Mod, "Int32Mod", kMipsMod, kMachInt32},
+ {&RawMachineAssembler::Uint32Mod, "Int32UMod", kMipsModU, kMachInt32},
+ {&RawMachineAssembler::Float64Mod, "Float64Mod", kMipsModD, kMachFloat64}};
+
+
+// ----------------------------------------------------------------------------
+// Arithmetic FPU instructions.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kFPArithInstructions[] = {
+ {&RawMachineAssembler::Float64Add, "Float64Add", kMipsAddD, kMachFloat64},
+ {&RawMachineAssembler::Float64Sub, "Float64Sub", kMipsSubD, kMachFloat64}};
+
+
+// ----------------------------------------------------------------------------
+// IntArithTest instructions, two nodes.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kAddSubInstructions[] = {
+ {&RawMachineAssembler::Int32Add, "Int32Add", kMipsAdd, kMachInt32},
+ {&RawMachineAssembler::Int32Sub, "Int32Sub", kMipsSub, kMachInt32},
+ {&RawMachineAssembler::Int32AddWithOverflow, "Int32AddWithOverflow",
+ kMipsAddOvf, kMachInt32},
+ {&RawMachineAssembler::Int32SubWithOverflow, "Int32SubWithOverflow",
+ kMipsSubOvf, kMachInt32}};
+
+
+// ----------------------------------------------------------------------------
+// IntArithTest instructions, one node.
+// ----------------------------------------------------------------------------
+
+
+const MachInst1 kAddSubOneInstructions[] = {
+ {&RawMachineAssembler::Int32Neg, "Int32Neg", kMipsSub, kMachInt32},
+ // TODO(dusmil): check this ...
+ // {&RawMachineAssembler::WordEqual , "WordEqual" , kMipsTst, kMachInt32}
+};
+
+
+// ----------------------------------------------------------------------------
+// Arithmetic compare instructions.
+// ----------------------------------------------------------------------------
+
+
+const IntCmp kCmpInstructions[] = {
+ {{&RawMachineAssembler::WordEqual, "WordEqual", kMipsCmp, kMachInt16}, 1U},
+ {{&RawMachineAssembler::WordNotEqual, "WordNotEqual", kMipsCmp, kMachInt16},
+ 2U},
+ {{&RawMachineAssembler::Word32Equal, "Word32Equal", kMipsCmp, kMachInt32},
+ 1U},
+ {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kMipsCmp,
+ kMachInt32},
+ 2U},
+ {{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kMipsCmp,
+ kMachInt32},
+ 1U},
+ {{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
+ kMipsCmp, kMachInt32},
+ 1U},
+ {{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kMipsCmp,
+ kMachInt32},
+ 1U},
+ {{&RawMachineAssembler::Int32GreaterThanOrEqual, "Int32GreaterThanOrEqual",
+ kMipsCmp, kMachInt32},
+ 1U},
+ {{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kMipsCmp,
+ kMachUint32},
+ 1U},
+ {{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
+ kMipsCmp, kMachUint32},
+ 1U}};
+
+
+// ----------------------------------------------------------------------------
+// Conversion instructions.
+// ----------------------------------------------------------------------------
+
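+// In each entry the MachInst1 carries the result type and the trailing
+// MachineType is the source (parameter) type, matching how the conversion
+// test below builds its StreamBuilder.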
+const Conversion kConversionInstructions[] = {
+ // Conversion instructions are related to machine_operator.h:
+ // FPU conversions:
+ // Convert representation of integers between float64 and int32/uint32.
+ // The precise rounding mode and handling of out of range inputs are *not*
+ // defined for these operators, since they are intended only for use with
+ // integers.
+ // mips instruction: cvt_d_w
+ {{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64",
+ kMipsCvtDW, kMachFloat64},
+ kMachInt32},
+
+ // mips instruction: cvt_d_uw
+ {{&RawMachineAssembler::ChangeUint32ToFloat64, "ChangeUint32ToFloat64",
+ kMipsCvtDUw, kMachFloat64},
+   kMachUint32},
+
+  // mips instruction: trunc_w_d
+  {{&RawMachineAssembler::ChangeFloat64ToInt32, "ChangeFloat64ToInt32",
+    kMipsTruncWD, kMachInt32},
+   kMachFloat64},
+
+  // mips instruction: trunc_uw_d
+  {{&RawMachineAssembler::ChangeFloat64ToUint32, "ChangeFloat64ToUint32",
+    kMipsTruncUwD, kMachUint32},
+   kMachFloat64}};
+
+} // namespace
+
+
+typedef InstructionSelectorTestWithParam<FPCmp> InstructionSelectorFPCmpTest;
+
+
+TEST_P(InstructionSelectorFPCmpTest, Parameter) {
+ const FPCmp cmp = GetParam();
+ StreamBuilder m(this, kMachInt32, cmp.mi.machine_type, cmp.mi.machine_type);
+ m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
+ ::testing::ValuesIn(kFPCmpInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Integer arithmetic compare instructions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<IntCmp> InstructionSelectorCmpTest;
+
+
+TEST_P(InstructionSelectorCmpTest, Parameter) {
+ const IntCmp cmp = GetParam();
+ const MachineType type = cmp.mi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(cmp.expected_size, s.size());
+ EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorCmpTest,
+ ::testing::ValuesIn(kCmpInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Shift instructions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorShiftTest;
+
+
+TEST_P(InstructionSelectorShiftTest, Immediate) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ TRACED_FORRANGE(int32_t, imm, 0, (ElementSizeOf(type) * 8) - 1) {
+ StreamBuilder m(this, type, type);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
+ ::testing::ValuesIn(kShiftInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Logical instructions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorLogicalTest;
+
+
+TEST_P(InstructionSelectorLogicalTest, Parameter) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorLogicalTest,
+ ::testing::ValuesIn(kLogicalInstructions));
+
+
+// ----------------------------------------------------------------------------
+// MUL/DIV instructions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorMulDivTest;
+
+
+TEST_P(InstructionSelectorMulDivTest, Parameter) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
+ ::testing::ValuesIn(kMulDivInstructions));
+
+
+// ----------------------------------------------------------------------------
+// MOD instructions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst2> InstructionSelectorModTest;
+
+
+TEST_P(InstructionSelectorModTest, Parameter) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorModTest,
+ ::testing::ValuesIn(kModInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Floating point instructions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorFPArithTest;
+
+
+TEST_P(InstructionSelectorFPArithTest, Parameter) {
+ const MachInst2 fpa = GetParam();
+ StreamBuilder m(this, fpa.machine_type, fpa.machine_type, fpa.machine_type);
+ m.Return((m.*fpa.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(fpa.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPArithTest,
+ ::testing::ValuesIn(kFPArithInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Integer arithmetic instructions, two nodes.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorIntArithTwoTest;
+
+
+TEST_P(InstructionSelectorIntArithTwoTest, Parameter) {
+ const MachInst2 intpa = GetParam();
+ StreamBuilder m(this, intpa.machine_type, intpa.machine_type,
+ intpa.machine_type);
+ m.Return((m.*intpa.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(intpa.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorIntArithTwoTest,
+ ::testing::ValuesIn(kAddSubInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Integer arithmetic instructions, one node.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst1>
+ InstructionSelectorIntArithOneTest;
+
+
+TEST_P(InstructionSelectorIntArithOneTest, Parameter) {
+ const MachInst1 intpa = GetParam();
+ StreamBuilder m(this, intpa.machine_type, intpa.machine_type,
+ intpa.machine_type);
+ m.Return((m.*intpa.constructor)(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(intpa.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorIntArithOneTest,
+ ::testing::ValuesIn(kAddSubOneInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Conversions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<Conversion>
+ InstructionSelectorConversionTest;
+
+
+TEST_P(InstructionSelectorConversionTest, Parameter) {
+ const Conversion conv = GetParam();
+ StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
+ m.Return((m.*conv.mi.constructor)(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorConversionTest,
+ ::testing::ValuesIn(kConversionInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Loads and stores.
+// ----------------------------------------------------------------------------
+
+namespace {
+
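+// Pairs a machine type with the MIPS load and store opcodes the selector
+// is expected to choose for it.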
+struct MemoryAccess {
+ MachineType type;
+ ArchOpcode load_opcode;
+ ArchOpcode store_opcode;
+};
+
+
+static const MemoryAccess kMemoryAccesses[] = {
+ {kMachInt8, kMipsLb, kMipsSb},
+ {kMachUint8, kMipsLbu, kMipsSb},
+ {kMachInt16, kMipsLh, kMipsSh},
+ {kMachUint16, kMipsLhu, kMipsSh},
+ {kMachInt32, kMipsLw, kMipsSw},
+    {kMachFloat32, kMipsLwc1, kMipsSwc1},
+    {kMachFloat64, kMipsLdc1, kMipsSdc1}};
+
+
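+// Extends MemoryAccess with a Stream predicate for checking the register
+// class of the loaded value, plus a set of immediate offsets to exercise.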
+struct MemoryAccessImm {
+ MachineType type;
+ ArchOpcode load_opcode;
+ ArchOpcode store_opcode;
+ bool (InstructionSelectorTest::Stream::*val_predicate)(
+ const InstructionOperand*) const;
+ const int32_t immediates[40];
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccessImm& acc) {
+ return os << acc.type;
+}
+
+
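+// Same as MemoryAccessImm, but with a short offset list reserved for the
+// cases that do not fit in a signed 16-bit immediate.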
+struct MemoryAccessImm1 {
+ MachineType type;
+ ArchOpcode load_opcode;
+ ArchOpcode store_opcode;
+ bool (InstructionSelectorTest::Stream::*val_predicate)(
+ const InstructionOperand*) const;
+ const int32_t immediates[5];
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccessImm1& acc) {
+ return os << acc.type;
+}
+
+
+// ----------------------------------------------------------------------------
+// Loads and stores with immediate offsets.
+// ----------------------------------------------------------------------------
+
+
+const MemoryAccessImm kMemoryAccessesImm[] = {
+ {kMachInt8,
+ kMipsLb,
+ kMipsSb,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+ -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+ 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {kMachUint8,
+ kMipsLbu,
+ kMipsSb,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+ -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+ 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {kMachInt16,
+ kMipsLh,
+ kMipsSh,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+ -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+ 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {kMachUint16,
+ kMipsLhu,
+ kMipsSh,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+ -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+ 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {kMachInt32,
+ kMipsLw,
+ kMipsSw,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+ -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+ 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {kMachFloat32,
+ kMipsLwc1,
+ kMipsSwc1,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+ -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+ 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {kMachFloat64,
+ kMipsLdc1,
+ kMipsSdc1,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+ -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+ 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}};
+
+
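+// All of these offsets lie outside the signed 16-bit range of the MIPS
+// load/store immediate field, so the address has to be materialized with a
+// separate add; the tests below check for that kMipsAdd.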
+const MemoryAccessImm1 kMemoryAccessImmMoreThan16bit[] = {
+ {kMachInt8,
+ kMipsLb,
+ kMipsSb,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}},
+    {kMachUint8,
+ kMipsLbu,
+ kMipsSb,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {kMachInt16,
+ kMipsLh,
+ kMipsSh,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}},
+    {kMachUint16,
+ kMipsLhu,
+ kMipsSh,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {kMachInt32,
+ kMipsLw,
+ kMipsSw,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {kMachFloat32,
+ kMipsLwc1,
+ kMipsSwc1,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {kMachFloat64,
+ kMipsLdc1,
+ kMipsSdc1,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-65000, -55000, 32777, 55000, 65000}}};
+
+} // namespace
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccess>
+ InstructionSelectorMemoryAccessTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+ const MemoryAccess memacc = GetParam();
+ StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+ m.Return(m.Load(memacc.type, m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+ const MemoryAccess memacc = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
+ m.Store(memacc.type, m.Parameter(0), m.Parameter(1));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessTest,
+ ::testing::ValuesIn(kMemoryAccesses));
+
+
+// ----------------------------------------------------------------------------
+// Load immediate.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccessImm>
+ InstructionSelectorMemoryAccessImmTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessImmTest, LoadWithImmediateIndex) {
+ const MemoryAccessImm memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, memacc.type, kMachPtr);
+ m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// Store immediate.
+// ----------------------------------------------------------------------------
+
+
+TEST_P(InstructionSelectorMemoryAccessImmTest, StoreWithImmediateIndex) {
+ const MemoryAccessImm memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
+ m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
+ m.Parameter(1));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessImmTest,
+ ::testing::ValuesIn(kMemoryAccessesImm));
+
+
+// ----------------------------------------------------------------------------
+// Load/store offsets more than 16 bits.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccessImm1>
+ InstructionSelectorMemoryAccessImmMoreThan16bitTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
+ LoadWithImmediateIndex) {
+ const MemoryAccessImm1 memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, memacc.type, kMachPtr);
+ m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+    // kMipsAdd is the expected opcode: the offset does not fit in the
+    // 16-bit immediate field, so the address is computed by a separate add.
+ EXPECT_EQ(kMipsAdd, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
+ StoreWithImmediateIndex) {
+ const MemoryAccessImm1 memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
+ m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
+ m.Parameter(1));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+    // kMipsAdd is the expected opcode: the offset does not fit in the
+    // 16-bit immediate field, so the address is computed by a separate add.
+ EXPECT_EQ(kMipsAdd, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessImmMoreThan16bitTest,
+ ::testing::ValuesIn(kMemoryAccessImmMoreThan16bit));
+
+
+// ----------------------------------------------------------------------------
+// kMipsTst testing.
+// ----------------------------------------------------------------------------
+
+
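+// Comparing a value against zero should not materialize a zero operand:
+// the selector is expected to pick kMipsTst with the value routed into
+// both inputs, as the vreg equality checks below verify.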
+TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsTst, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsTst, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
'compiler/ia32/instruction-selector-ia32-unittest.cc',
],
}],
+ ['v8_target_arch=="mipsel"', {
+ 'sources': [ ### gcmole(arch:mipsel) ###
+ 'compiler/mips/instruction-selector-mips-unittest.cc',
+ ],
+ }],
['v8_target_arch=="x64"', {
'sources': [ ### gcmole(arch:x64) ###
'compiler/x64/instruction-selector-x64-unittest.cc',
'../../src/mips/regexp-macro-assembler-mips.cc',
'../../src/mips/regexp-macro-assembler-mips.h',
'../../src/mips/simulator-mips.cc',
+ '../../src/compiler/mips/code-generator-mips.cc',
+ '../../src/compiler/mips/instruction-codes-mips.h',
+ '../../src/compiler/mips/instruction-selector-mips.cc',
+ '../../src/compiler/mips/linkage-mips.cc',
'../../src/ic/mips/access-compiler-mips.cc',
'../../src/ic/mips/handler-compiler-mips.cc',
'../../src/ic/mips/ic-mips.cc',