'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
+ ['mips_arch_variant=="r6"', {
+ 'cflags': ['-mips64r6', '-mabi=64', '-Wa,-mips64r6'],
+ 'ldflags': [
+ '-mips64r6', '-mabi=64',
+ '-Wl,--dynamic-linker=$(LDSO_PATH)',
+ '-Wl,--rpath=$(LD_R_PATH)',
+ ],
+ }],
['mips_arch_variant=="r2"', {
'cflags': ['-mips64r2', '-mabi=64', '-Wa,-mips64r2'],
'ldflags': [
'-Wl,--rpath=$(LD_R_PATH)',
],
}],
- ['mips_arch_variant=="loongson"', {
- 'cflags': ['-mips3', '-Wa,-mips3'],
- }],
],
}],
],
'__mips_soft_float=1'
],
}],
+ ['mips_arch_variant=="r6"', {
+ 'defines': ['_MIPS_ARCH_MIPS64R6',],
+ }],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS64R2',],
}],
- ['mips_arch_variant=="loongson"', {
- 'defines': ['_MIPS_ARCH_LOONGSON',],
- }],
],
}], # v8_target_arch=="mips64el"
['v8_target_arch=="x64"', {
opcode == BGTZL ||
(opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
rt_field == BLTZAL || rt_field == BGEZAL)) ||
- (opcode == COP1 && rs_field == BC1); // Coprocessor branch.
+ (opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
+ (opcode == COP1 && rs_field == BC1EQZ) ||
+ (opcode == COP1 && rs_field == BC1NEZ);
}
// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry(int32_t pos) {
int32_t trampoline_entry = kInvalidSlotPos;
-
if (!internal_trampoline_exception_) {
if (trampoline_.start() > pos) {
trampoline_entry = trampoline_.take_slot();
uint64_t Assembler::jump_address(Label* L) {
int64_t target_pos;
-
if (L->is_bound()) {
target_pos = L->pos();
} else {
int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
int32_t target_pos;
-
if (L->is_bound()) {
target_pos = L->pos();
} else {
}
+int32_t Assembler::branch_offset_compact(Label* L,
+ bool jump_elimination_allowed) {
+ int32_t target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
+ }
+ }
+
+ int32_t offset = target_pos - pc_offset();
+ ASSERT((offset & 3) == 0);
+ ASSERT(is_int16(offset >> 2));
+
+ return offset;
+}
+
+
+int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
+ int32_t target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
+ }
+ }
+
+ int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
+ ASSERT((offset & 3) == 0);
+ ASSERT(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21 bits wide.
+
+ return offset;
+}
+
+
+int32_t Assembler::branch_offset21_compact(Label* L,
+ bool jump_elimination_allowed) {
+ int32_t target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
+ }
+ }
+
+ int32_t offset = target_pos - pc_offset();
+ ASSERT((offset & 3) == 0);
+ ASSERT(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21 bits wide.
+
+ return offset;
+}
+
+
void Assembler::label_at_put(Label* L, int at_offset) {
int target_pos;
if (L->is_bound()) {
}
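+// MIPS64r6 drops the branch-likely instructions; the compact branches below
+// reuse those opcode slots (e.g. BGEZC is BLEZL with rs == rt != 0), with
+// the register-field constraints asserted here telling them apart.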
+void Assembler::bgezc(Register rt, int16_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(!(rt.is(zero_reg)));
+ GenInstrImmediate(BLEZL, rt, rt, offset);
+}
+
+
+void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(!(rs.is(zero_reg)));
+ ASSERT(!(rt.is(zero_reg)));
+ ASSERT(rs.code() != rt.code());
+ GenInstrImmediate(BLEZ, rs, rt, offset);
+}
+
+
+void Assembler::bgec(Register rs, Register rt, int16_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(!(rs.is(zero_reg)));
+ ASSERT(!(rt.is(zero_reg)));
+ ASSERT(rs.code() != rt.code());
+ GenInstrImmediate(BLEZL, rs, rt, offset);
+}
+
+
void Assembler::bgezal(Register rs, int16_t offset) {
+ ASSERT(kArchVariant != kMips64r6 || rs.is(zero_reg));
BlockTrampolinePoolScope block_trampoline_pool(this);
positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
}
+void Assembler::bgtzc(Register rt, int16_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(!(rt.is(zero_reg)));
+ GenInstrImmediate(BGTZL, zero_reg, rt, offset);
+}
+
+
void Assembler::blez(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BLEZ, rs, zero_reg, offset);
}
+void Assembler::blezc(Register rt, int16_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(!(rt.is(zero_reg)));
+ GenInstrImmediate(BLEZL, zero_reg, rt, offset);
+}
+
+
+void Assembler::bltzc(Register rt, int16_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(!(rt.is(zero_reg)));
+ GenInstrImmediate(BGTZL, rt, rt, offset);
+}
+
+
+void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(!(rs.is(zero_reg)));
+ ASSERT(!(rt.is(zero_reg)));
+ ASSERT(rs.code() != rt.code());
+ GenInstrImmediate(BGTZ, rs, rt, offset);
+}
+
+
+void Assembler::bltc(Register rs, Register rt, int16_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(!(rs.is(zero_reg)));
+ ASSERT(!(rt.is(zero_reg)));
+ ASSERT(rs.code() != rt.code());
+ GenInstrImmediate(BGTZL, rs, rt, offset);
+}
+
+
void Assembler::bltz(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BLTZ, offset);
void Assembler::bltzal(Register rs, int16_t offset) {
+ ASSERT(kArchVariant != kMips64r6 || rs.is(zero_reg));
BlockTrampolinePoolScope block_trampoline_pool(this);
positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
}
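+// BOVC/BEQC share the ADDI opcode and BNVC/BNEC the DADDI opcode; the
+// register ordering (rs >= rt vs. rs < rt) selects which instruction is
+// encoded.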
+void Assembler::bovc(Register rs, Register rt, int16_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(!(rs.is(zero_reg)));
+ ASSERT(rs.code() >= rt.code());
+ GenInstrImmediate(ADDI, rs, rt, offset);
+}
+
+
+void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(!(rs.is(zero_reg)));
+ ASSERT(rs.code() >= rt.code());
+ GenInstrImmediate(DADDI, rs, rt, offset);
+}
+
+
+void Assembler::blezalc(Register rt, int16_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(!(rt.is(zero_reg)));
+ GenInstrImmediate(BLEZ, zero_reg, rt, offset);
+}
+
+
+void Assembler::bgezalc(Register rt, int16_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(!(rt.is(zero_reg)));
+ GenInstrImmediate(BLEZ, rt, rt, offset);
+}
+
+
+void Assembler::bgezall(Register rs, int16_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(!(rs.is(zero_reg)));
+ GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
+}
+
+
+void Assembler::bltzalc(Register rt, int16_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(!(rt.is(zero_reg)));
+ GenInstrImmediate(BGTZ, rt, rt, offset);
+}
+
+
+void Assembler::bgtzalc(Register rt, int16_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(!(rt.is(zero_reg)));
+ GenInstrImmediate(BGTZ, zero_reg, rt, offset);
+}
+
+
+void Assembler::beqzalc(Register rt, int16_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(!(rt.is(zero_reg)));
+ GenInstrImmediate(ADDI, zero_reg, rt, offset);
+}
+
+
+void Assembler::bnezalc(Register rt, int16_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(!(rt.is(zero_reg)));
+ GenInstrImmediate(DADDI, zero_reg, rt, offset);
+}
+
+
+void Assembler::beqc(Register rs, Register rt, int16_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(rs.code() < rt.code());
+ GenInstrImmediate(ADDI, rs, rt, offset);
+}
+
+
+void Assembler::beqzc(Register rs, int32_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(!(rs.is(zero_reg)));
+ // Mask the offset to the 21-bit field so backward branches do not clobber
+ // the rs/opcode bits.
+ Instr instr = BEQZC | (rs.code() << kRsShift) | (offset & ((1 << kImm21Bits) - 1));
+ emit(instr);
+}
+
+
+void Assembler::bnec(Register rs, Register rt, int16_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(rs.code() < rt.code());
+ GenInstrImmediate(DADDI, rs, rt, offset);
+}
+
+
+void Assembler::bnezc(Register rs, int32_t offset) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT(!(rs.is(zero_reg)));
+ Instr instr = BNEZC | (rs.code() << kRsShift) | (offset & ((1 << kImm21Bits) - 1));
+ emit(instr);
+}
+
+
void Assembler::j(int64_t target) {
#if DEBUG
// Get pc of delay slot.
void Assembler::jr(Register rs) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (rs.is(ra)) {
- positions_recorder()->WriteRecordedPositions();
+ if (kArchVariant != kMips64r6) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (rs.is(ra)) {
+ positions_recorder()->WriteRecordedPositions();
+ }
+ GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
+ } else {
+ jalr(rs, zero_reg);
}
- GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
- BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::mul(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
+ if (kArchVariant == kMips64r6) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
+ } else {
+ GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
+ }
+}
+
+
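+// r6 makes the multiply/divide families three-operand: each pair (e.g.
+// MUL/MUH) shares one SPECIAL function field and is distinguished by the
+// sa field (MUL_OP vs. MUH_OP); the divide/modulo pairs below use
+// DIV_OP/MOD_OP the same way.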
+void Assembler::muh(Register rd, Register rs, Register rt) {
+ ASSERT(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
+}
+
+
+void Assembler::mulu(Register rd, Register rs, Register rt) {
+ ASSERT(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
+}
+
+
+void Assembler::muhu(Register rd, Register rs, Register rt) {
+ ASSERT(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
+}
+
+
+void Assembler::dmul(Register rd, Register rs, Register rt) {
+ ASSERT(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH);
+}
+
+
+void Assembler::dmuh(Register rd, Register rs, Register rt) {
+ ASSERT(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH);
+}
+
+
+void Assembler::dmulu(Register rd, Register rs, Register rt) {
+ ASSERT(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U);
+}
+
+
+void Assembler::dmuhu(Register rd, Register rs, Register rt) {
+ ASSERT(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U);
}
void Assembler::mult(Register rs, Register rt) {
+ ASSERT(kArchVariant != kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}
void Assembler::multu(Register rs, Register rt) {
+ ASSERT(kArchVariant != kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}
}
+void Assembler::div(Register rd, Register rs, Register rt) {
+ ASSERT(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
+}
+
+
+void Assembler::mod(Register rd, Register rs, Register rt) {
+ ASSERT(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
+}
+
+
void Assembler::divu(Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}
+void Assembler::divu(Register rd, Register rs, Register rt) {
+ ASSERT(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
+}
+
+
+void Assembler::modu(Register rd, Register rs, Register rt) {
+ ASSERT(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
+}
+
+
void Assembler::daddu(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
}
}
+void Assembler::ddiv(Register rd, Register rs, Register rt) {
+ ASSERT(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD);
+}
+
+
+void Assembler::dmod(Register rd, Register rs, Register rt) {
+ ASSERT(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD);
+}
+
+
void Assembler::ddivu(Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
}
+void Assembler::ddivu(Register rd, Register rs, Register rt) {
+ ASSERT(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U);
+}
+
+
+void Assembler::dmodu(Register rd, Register rs, Register rt) {
+ ASSERT(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U);
+}
+
+
// Logical.
void Assembler::and_(Register rd, Register rs, Register rt) {
}
+void Assembler::aui(Register rs, Register rt, int32_t j) {
+ // This instruction uses the same opcode as 'lui'; the difference in
+ // encoding is that 'lui' has the zero register in the rs field.
+ ASSERT(is_uint16(j));
+ GenInstrImmediate(LUI, rs, rt, j);
+}
+
+
+void Assembler::daui(Register rs, Register rt, int32_t j) {
+ ASSERT(is_uint16(j));
+ GenInstrImmediate(DAUI, rs, rt, j);
+}
+
+
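+// DAHI/DATI add the sign-extended 16-bit immediate, shifted into bits
+// 47..32 and 63..48 of rs respectively; both are REGIMM-class, selected by
+// the rt field.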
+void Assembler::dahi(Register rs, int32_t j) {
+ ASSERT(is_uint16(j));
+ GenInstrImmediate(REGIMM, rs, DAHI, j);
+}
+
+
+void Assembler::dati(Register rs, int32_t j) {
+ ASSERT(is_uint16(j));
+ GenInstrImmediate(REGIMM, rs, DATI, j);
+}
+
+
void Assembler::ldl(Register rd, const MemOperand& rs) {
GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
}
}
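+// FPR select: sel.fmt writes fs to fd when bit 0 of fd is 0, and ft when it
+// is 1.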
+void Assembler::sel(SecondaryField fmt, FPURegister fd,
+ FPURegister ft, FPURegister fs, uint8_t sel) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT((fmt == D) || (fmt == S));
+
+ Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
+ fs.code() << kFsShift | fd.code() << kFdShift | SEL;
+ emit(instr);
+}
+
+
+// GPR.
+void Assembler::seleqz(Register rs, Register rt, Register rd) {
+ ASSERT(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
+}
+
+
+// FPR.
+void Assembler::seleqz(SecondaryField fmt, FPURegister fd,
+ FPURegister ft, FPURegister fs) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT((fmt == D) || (fmt == S));
+
+ Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
+ fs.code() << kFsShift | fd.code() << kFdShift | SELEQZ_C;
+ emit(instr);
+}
+
+
+// GPR.
+void Assembler::selnez(Register rs, Register rt, Register rd) {
+ ASSERT(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
+}
+
+
+// FPR.
+void Assembler::selnez(SecondaryField fmt, FPURegister fd,
+ FPURegister ft, FPURegister fs) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT((fmt == D) || (fmt == S));
+
+ Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
+ fs.code() << kFsShift | fd.code() << kFdShift | SELNEZ_C;
+ emit(instr);
+}
+
+
// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
- // Clz instr requires same GPR number in 'rd' and 'rt' fields.
- GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
+ if (kArchVariant != kMips64r6) {
+ // Clz instr requires the same GPR number in 'rd' and 'rt' fields.
+ GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
+ } else {
+ GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
+ }
}
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ins.
// Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
- ASSERT(kArchVariant == kMips64r2);
+ ASSERT((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ext.
// Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
- ASSERT(kArchVariant == kMips64r2);
+ ASSERT(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}
void Assembler::pref(int32_t hint, const MemOperand& rs) {
- ASSERT(kArchVariant != kLoongson);
ASSERT(is_uint5(hint) && is_uint16(rs.offset_));
Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
| (rs.offset_);
void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft) {
- ASSERT(kArchVariant != kLoongson);
GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
}
}
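+// r6 FP min/max: MIN/MAX compare by value, MINA/MAXA by magnitude
+// (absolute value).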
+void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
+}
+
+
+void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
+}
+
+
+void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
+}
+
+
+void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
+}
+
+
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}
}
-// Conditions.
+// Conditions for >= MIPSr6.
+void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
+ FPURegister fd, FPURegister fs, FPURegister ft) {
+ ASSERT(kArchVariant == kMips64r6);
+ ASSERT((fmt & ~(31 << kRsShift)) == 0);
+ Instr instr = COP1 | fmt | ft.code() << kFtShift |
+ fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
+ emit(instr);
+}
+
+
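+// BC1EQZ/BC1NEZ branch on bit 0 of FPR ft, which cmp() sets to all zeros
+// or all ones; they replace the pre-r6 BC1F/BC1T condition-code branches.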
+void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
+ ASSERT(kArchVariant == kMips64r6);
+ Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
+ emit(instr);
+}
+
+
+void Assembler::bc1nez(int16_t offset, FPURegister ft) {
+ ASSERT(kArchVariant == kMips64r6);
+ Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
+ emit(instr);
+}
+
+
+// Conditions for < MIPSr6.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
FPURegister fs, FPURegister ft, uint16_t cc) {
+ ASSERT(kArchVariant != kMips64r6);
ASSERT(is_uint3(cc));
ASSERT((fmt & ~(31 << kRsShift)) == 0);
- Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
+ Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift
| cc << 8 | 3 << 4 | cond;
emit(instr);
}
// position. Links the label to the current position if it is still unbound.
// Manages the jump elimination optimization if the second parameter is true.
int32_t branch_offset(Label* L, bool jump_elimination_allowed);
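+ // The '_compact' variants compute the offset without the kBranchPCOffset
+ // (delay-slot) adjustment used by regular branches; the '21' variants
+ // return the 21-bit offsets used by beqzc/bnezc.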
+ int32_t branch_offset_compact(Label* L, bool jump_elimination_allowed);
+ int32_t branch_offset21(Label* L, bool jump_elimination_allowed);
+ int32_t branch_offset21_compact(Label* L, bool jump_elimination_allowed);
int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
int32_t o = branch_offset(L, jump_elimination_allowed);
ASSERT((o & 3) == 0); // Assert the offset is aligned.
return o >> 2;
}
+ int32_t shifted_branch_offset_compact(Label* L,
+ bool jump_elimination_allowed) {
+ int32_t o = branch_offset_compact(L, jump_elimination_allowed);
+ ASSERT((o & 3) == 0); // Assert the offset is aligned.
+ return o >> 2;
+ }
uint64_t jump_address(Label* L);
// Puts a labels target address at the given position.
beq(rs, rt, branch_offset(L, false) >> 2);
}
void bgez(Register rs, int16_t offset);
+ void bgezc(Register rt, int16_t offset);
+ void bgezc(Register rt, Label* L) {
+ bgezc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void bgeuc(Register rs, Register rt, int16_t offset);
+ void bgeuc(Register rs, Register rt, Label* L) {
+ bgeuc(rs, rt, branch_offset_compact(L, false)>>2);
+ }
+ void bgec(Register rs, Register rt, int16_t offset);
+ void bgec(Register rs, Register rt, Label* L) {
+ bgec(rs, rt, branch_offset_compact(L, false)>>2);
+ }
void bgezal(Register rs, int16_t offset);
+ void bgezalc(Register rt, int16_t offset);
+ void bgezalc(Register rt, Label* L) {
+ bgezalc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void bgezall(Register rs, int16_t offset);
+ void bgezall(Register rs, Label* L) {
+ bgezall(rs, branch_offset(L, false)>>2);
+ }
void bgtz(Register rs, int16_t offset);
+ void bgtzc(Register rt, int16_t offset);
+ void bgtzc(Register rt, Label* L) {
+ bgtzc(rt, branch_offset_compact(L, false)>>2);
+ }
void blez(Register rs, int16_t offset);
+ void blezc(Register rt, int16_t offset);
+ void blezc(Register rt, Label* L) {
+ blezc(rt, branch_offset_compact(L, false)>>2);
+ }
void bltz(Register rs, int16_t offset);
+ void bltzc(Register rt, int16_t offset);
+ void bltzc(Register rt, Label* L) {
+ bltzc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void bltuc(Register rs, Register rt, int16_t offset);
+ void bltuc(Register rs, Register rt, Label* L) {
+ bltuc(rs, rt, branch_offset_compact(L, false)>>2);
+ }
+ void bltc(Register rs, Register rt, int16_t offset);
+ void bltc(Register rs, Register rt, Label* L) {
+ bltc(rs, rt, branch_offset_compact(L, false)>>2);
+ }
+
void bltzal(Register rs, int16_t offset);
+ void blezalc(Register rt, int16_t offset);
+ void blezalc(Register rt, Label* L) {
+ blezalc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void bltzalc(Register rt, int16_t offset);
+ void bltzalc(Register rt, Label* L) {
+ bltzalc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void bgtzalc(Register rt, int16_t offset);
+ void bgtzalc(Register rt, Label* L) {
+ bgtzalc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void beqzalc(Register rt, int16_t offset);
+ void beqzalc(Register rt, Label* L) {
+ beqzalc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void beqc(Register rs, Register rt, int16_t offset);
+ void beqc(Register rs, Register rt, Label* L) {
+ beqc(rs, rt, branch_offset_compact(L, false)>>2);
+ }
+ void beqzc(Register rs, int32_t offset);
+ void beqzc(Register rs, Label* L) {
+ beqzc(rs, branch_offset21_compact(L, false)>>2);
+ }
+ void bnezalc(Register rt, int16_t offset);
+ void bnezalc(Register rt, Label* L) {
+ bnezalc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void bnec(Register rs, Register rt, int16_t offset);
+ void bnec(Register rs, Register rt, Label* L) {
+ bnec(rs, rt, branch_offset_compact(L, false)>>2);
+ }
+ void bnezc(Register rt, int32_t offset);
+ void bnezc(Register rt, Label* L) {
+ bnezc(rt, branch_offset21_compact(L, false)>>2);
+ }
void bne(Register rs, Register rt, int16_t offset);
void bne(Register rs, Register rt, Label* L) {
bne(rs, rt, branch_offset(L, false)>>2);
}
+ void bovc(Register rs, Register rt, int16_t offset);
+ void bovc(Register rs, Register rt, Label* L) {
+ bovc(rs, rt, branch_offset_compact(L, false)>>2);
+ }
+ void bnvc(Register rs, Register rt, int16_t offset);
+ void bnvc(Register rs, Register rt, Label* L) {
+ bnvc(rs, rt, branch_offset_compact(L, false)>>2);
+ }
// Never use the int16_t b(l)cond version with a branch offset
// instead of using the Label* version.
// Arithmetic.
void addu(Register rd, Register rs, Register rt);
void subu(Register rd, Register rs, Register rt);
- void mult(Register rs, Register rt);
- void multu(Register rs, Register rt);
+
void div(Register rs, Register rt);
void divu(Register rs, Register rt);
+ void ddiv(Register rs, Register rt);
+ void ddivu(Register rs, Register rt);
+ void div(Register rd, Register rs, Register rt);
+ void divu(Register rd, Register rs, Register rt);
+ void ddiv(Register rd, Register rs, Register rt);
+ void ddivu(Register rd, Register rs, Register rt);
+ void mod(Register rd, Register rs, Register rt);
+ void modu(Register rd, Register rs, Register rt);
+ void dmod(Register rd, Register rs, Register rt);
+ void dmodu(Register rd, Register rs, Register rt);
+
void mul(Register rd, Register rs, Register rt);
+ void muh(Register rd, Register rs, Register rt);
+ void mulu(Register rd, Register rs, Register rt);
+ void muhu(Register rd, Register rs, Register rt);
+ void mult(Register rs, Register rt);
+ void multu(Register rs, Register rt);
+ void dmul(Register rd, Register rs, Register rt);
+ void dmuh(Register rd, Register rs, Register rt);
+ void dmulu(Register rd, Register rs, Register rt);
+ void dmuhu(Register rd, Register rs, Register rt);
void daddu(Register rd, Register rs, Register rt);
void dsubu(Register rd, Register rs, Register rt);
void dmult(Register rs, Register rt);
void dmultu(Register rs, Register rt);
- void ddiv(Register rs, Register rt);
- void ddivu(Register rs, Register rt);
void addiu(Register rd, Register rs, int32_t j);
void daddiu(Register rd, Register rs, int32_t j);
void ori(Register rd, Register rs, int32_t j);
void xori(Register rd, Register rs, int32_t j);
void lui(Register rd, int32_t j);
+ void aui(Register rs, Register rt, int32_t j);
+ void daui(Register rs, Register rt, int32_t j);
+ void dahi(Register rs, int32_t j);
+ void dati(Register rs, int32_t j);
// Shifts.
// Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
void movt(Register rd, Register rs, uint16_t cc = 0);
void movf(Register rd, Register rs, uint16_t cc = 0);
+ void sel(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs, uint8_t sel);
+ void seleqz(Register rs, Register rt, Register rd);
+ void seleqz(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs);
+ void selnez(Register rs, Register rt, Register rd);
+ void selnez(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs);
+
// Bit twiddling.
void clz(Register rd, Register rs);
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
void ceil_l_s(FPURegister fd, FPURegister fs);
void ceil_l_d(FPURegister fd, FPURegister fs);
+ void min(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
+ void mina(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
+ void max(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
+ void maxa(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
+
void cvt_s_w(FPURegister fd, FPURegister fs);
void cvt_s_l(FPURegister fd, FPURegister fs);
void cvt_s_d(FPURegister fd, FPURegister fs);
void cvt_d_l(FPURegister fd, FPURegister fs);
void cvt_d_s(FPURegister fd, FPURegister fs);
- // Conditions and branches.
+ // Conditions and branches for MIPSr6.
+ void cmp(FPUCondition cond, SecondaryField fmt,
+ FPURegister fd, FPURegister ft, FPURegister fs);
+
+ void bc1eqz(int16_t offset, FPURegister ft);
+ void bc1eqz(Label* L, FPURegister ft) {
+ bc1eqz(branch_offset(L, false)>>2, ft);
+ }
+ void bc1nez(int16_t offset, FPURegister ft);
+ void bc1nez(Label* L, FPURegister ft) {
+ bc1nez(branch_offset(L, false)>>2, ft);
+ }
+
+ // Conditions and branches for non MIPSr6.
void c(FPUCondition cond, SecondaryField fmt,
FPURegister ft, FPURegister fs, uint16_t cc = 0);
void bc1f(int16_t offset, uint16_t cc = 0);
- void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
+ void bc1f(Label* L, uint16_t cc = 0) {
+ bc1f(branch_offset(L, false)>>2, cc);
+ }
void bc1t(int16_t offset, uint16_t cc = 0);
- void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
+ void bc1t(Label* L, uint16_t cc = 0) {
+ bc1t(branch_offset(L, false)>>2, cc);
+ }
void fcmp(FPURegister src1, const double src2, FPUCondition cond);
// Check the code size generated from label to here.
// Check if LESS condition is satisfied. If true, move conditionally
// result to v0.
- __ c(OLT, D, f12, f14);
- __ Movt(v0, a4);
- // Use previous check to store conditionally to v0 oposite condition
- // (GREATER). If rhs is equal to lhs, this will be corrected in next
- // check.
- __ Movf(v0, a5);
- // Check if EQUAL condition is satisfied. If true, move conditionally
- // result to v0.
- __ c(EQ, D, f12, f14);
- __ Movt(v0, a6);
+ if (kArchVariant != kMips64r6) {
+ __ c(OLT, D, f12, f14);
+ __ Movt(v0, a4);
+ // Use the previous check to conditionally store the opposite condition
+ // (GREATER) to v0. If rhs equals lhs, this will be corrected by the next
+ // check.
+ __ Movf(v0, a5);
+ // Check if EQUAL condition is satisfied. If true, move conditionally
+ // result to v0.
+ __ c(EQ, D, f12, f14);
+ __ Movt(v0, a6);
+ } else {
+ Label skip;
+ __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
+ __ mov(v0, a4); // Return LESS as result.
+ __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
+ __ mov(v0, a6); // Return EQUAL as result.
+
+ __ mov(v0, a5); // Return GREATER as result.
+ __ bind(&skip);
+ }
__ Ret();
__ bind(&nan);
case COP1: // Coprocessor instructions.
switch (RsFieldRawNoAssert()) {
case BC1: // Branch on coprocessor condition.
+ case BC1EQZ:
+ case BC1NEZ:
return kImmediateType;
default:
return kRegisterType;
case BNEL:
case BLEZL:
case BGTZL:
+ case BEQZC:
+ case BNEZC:
case LB:
case LH:
case LWL:
#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
enum ArchVariants {
- kMips32r2,
- kMips32r1,
- kLoongson,
- kMips64r2
+ kMips64r2,
+ kMips64r6
};
#ifdef _MIPS_ARCH_MIPS64R2
static const ArchVariants kArchVariant = kMips64r2;
-#elif _MIPS_ARCH_LOONGSON
-// The loongson flag refers to the LOONGSON architectures based on MIPS-III,
-// which predates (and is a subset of) the mips32r2 and r1 architectures.
- static const ArchVariants kArchVariant = kLoongson;
+#elif _MIPS_ARCH_MIPS64R6
+ static const ArchVariants kArchVariant = kMips64r6;
#else
- static const ArchVariants kArchVariant = kMips64r1;
+ static const ArchVariants kArchVariant = kMips64r2;
#endif
const int kImm16Shift = 0;
const int kImm16Bits = 16;
+const int kImm21Shift = 0;
+const int kImm21Bits = 21;
const int kImm26Shift = 0;
const int kImm26Bits = 26;
const int kImm28Shift = 0;
ANDI = ((1 << 3) + 4) << kOpcodeShift,
ORI = ((1 << 3) + 5) << kOpcodeShift,
XORI = ((1 << 3) + 6) << kOpcodeShift,
- LUI = ((1 << 3) + 7) << kOpcodeShift,
+ LUI = ((1 << 3) + 7) << kOpcodeShift, // LUI/AUI family.
+ DAUI = ((3 << 3) + 5) << kOpcodeShift,
+ BEQC = ((2 << 3) + 0) << kOpcodeShift,
COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
BEQL = ((2 << 3) + 4) << kOpcodeShift,
BNEL = ((2 << 3) + 5) << kOpcodeShift,
BLEZL = ((2 << 3) + 6) << kOpcodeShift,
BGTZL = ((2 << 3) + 7) << kOpcodeShift,
- DADDI = ((3 << 3) + 0) << kOpcodeShift,
+ DADDI = ((3 << 3) + 0) << kOpcodeShift, // This is also BNEC.
DADDIU = ((3 << 3) + 1) << kOpcodeShift,
LDL = ((3 << 3) + 2) << kOpcodeShift,
LDR = ((3 << 3) + 3) << kOpcodeShift,
LWC1 = ((6 << 3) + 1) << kOpcodeShift,
LLD = ((6 << 3) + 4) << kOpcodeShift,
LDC1 = ((6 << 3) + 5) << kOpcodeShift,
+ BEQZC = ((6 << 3) + 6) << kOpcodeShift,
LD = ((6 << 3) + 7) << kOpcodeShift,
PREF = ((6 << 3) + 3) << kOpcodeShift,
SWC1 = ((7 << 3) + 1) << kOpcodeShift,
SCD = ((7 << 3) + 4) << kOpcodeShift,
SDC1 = ((7 << 3) + 5) << kOpcodeShift,
+ BNEZC = ((7 << 3) + 6) << kOpcodeShift,
SD = ((7 << 3) + 7) << kOpcodeShift,
COP1X = ((1 << 4) + 3) << kOpcodeShift
BREAK = ((1 << 3) + 5),
MFHI = ((2 << 3) + 0),
+ CLZ_R6 = ((2 << 3) + 0),
+ CLO_R6 = ((2 << 3) + 1),
MFLO = ((2 << 3) + 2),
DSLLV = ((2 << 3) + 4),
DSRLV = ((2 << 3) + 6),
TLT = ((6 << 3) + 2),
TLTU = ((6 << 3) + 3),
TEQ = ((6 << 3) + 4),
+ SELEQZ_S = ((6 << 3) + 5),
TNE = ((6 << 3) + 6),
+ SELNEZ_S = ((6 << 3) + 7),
DSLL = ((7 << 3) + 0),
DSRL = ((7 << 3) + 2),
DSLL32 = ((7 << 3) + 4),
DSRL32 = ((7 << 3) + 6),
DSRA32 = ((7 << 3) + 7),
+
+ // Multiply integers in r6.
+ MUL_MUH = ((3 << 3) + 0), // MUL, MUH.
+ MUL_MUH_U = ((3 << 3) + 1), // MUL_U, MUH_U.
+ D_MUL_MUH = ((7 << 2) + 0), // DMUL, DMUH.
+ D_MUL_MUH_U = ((7 << 2) + 1), // DMUL_U, DMUH_U.
+
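+ // sa-field discriminators within each pair: MUL_OP/DIV_OP select the low
+ // word / quotient, MUH_OP/MOD_OP the high word / remainder.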
+ MUL_OP = ((0 << 3) + 2),
+ MUH_OP = ((0 << 3) + 3),
+ DIV_OP = ((0 << 3) + 2),
+ MOD_OP = ((0 << 3) + 3),
+
+ DIV_MOD = ((3 << 3) + 2),
+ DIV_MOD_U = ((3 << 3) + 3),
+ D_DIV_MOD = ((3 << 3) + 6),
+ D_DIV_MOD_U = ((3 << 3) + 7),
+
// drotr in special4?
// SPECIAL2 Encoding of Function Field.
BGEZ = ((0 << 3) + 1) << 16,
BLTZAL = ((2 << 3) + 0) << 16,
BGEZAL = ((2 << 3) + 1) << 16,
+ BGEZALL = ((2 << 3) + 3) << 16,
+ DAHI = ((0 << 3) + 6) << 16,
+ DATI = ((3 << 3) + 6) << 16,
// COP1 Encoding of rs Field.
MFC1 = ((0 << 3) + 0) << 21,
TRUNC_W_D = ((1 << 3) + 5),
CEIL_W_D = ((1 << 3) + 6),
FLOOR_W_D = ((1 << 3) + 7),
+ MIN = ((3 << 3) + 4),
+ MINA = ((3 << 3) + 5),
+ MAX = ((3 << 3) + 6),
+ MAXA = ((3 << 3) + 7),
CVT_S_D = ((4 << 3) + 0),
CVT_W_D = ((4 << 3) + 4),
CVT_L_D = ((4 << 3) + 5),
CVT_D_W = ((4 << 3) + 1),
CVT_S_L = ((4 << 3) + 0),
CVT_D_L = ((4 << 3) + 1),
+ BC1EQZ = ((2 << 2) + 1) << 21,
+ BC1NEZ = ((3 << 2) + 1) << 21,
+ // COP1 CMP positive predicates (bits 5..4 == 00).
+ CMP_AF = ((0 << 3) + 0),
+ CMP_UN = ((0 << 3) + 1),
+ CMP_EQ = ((0 << 3) + 2),
+ CMP_UEQ = ((0 << 3) + 3),
+ CMP_LT = ((0 << 3) + 4),
+ CMP_ULT = ((0 << 3) + 5),
+ CMP_LE = ((0 << 3) + 6),
+ CMP_ULE = ((0 << 3) + 7),
+ CMP_SAF = ((1 << 3) + 0),
+ CMP_SUN = ((1 << 3) + 1),
+ CMP_SEQ = ((1 << 3) + 2),
+ CMP_SUEQ = ((1 << 3) + 3),
+ CMP_SSLT = ((1 << 3) + 4),
+ CMP_SSULT = ((1 << 3) + 5),
+ CMP_SLE = ((1 << 3) + 6),
+ CMP_SULE = ((1 << 3) + 7),
+ // COP1 CMP negative predicates (bits 5..4 == 01).
+ CMP_AT = ((2 << 3) + 0), // Reserved, not implemented.
+ CMP_OR = ((2 << 3) + 1),
+ CMP_UNE = ((2 << 3) + 2),
+ CMP_NE = ((2 << 3) + 3),
+ CMP_UGE = ((2 << 3) + 4), // Reserved, not implemented.
+ CMP_OGE = ((2 << 3) + 5), // Reserved, not implemented.
+ CMP_UGT = ((2 << 3) + 6), // Reserved, not implemented.
+ CMP_OGT = ((2 << 3) + 7), // Reserved, not implemented.
+ CMP_SAT = ((3 << 3) + 0), // Reserved, not implemented.
+ CMP_SOR = ((3 << 3) + 1),
+ CMP_SUNE = ((3 << 3) + 2),
+ CMP_SNE = ((3 << 3) + 3),
+ CMP_SUGE = ((3 << 3) + 4), // Reserved, not implemented.
+ CMP_SOGE = ((3 << 3) + 5), // Reserved, not implemented.
+ CMP_SUGT = ((3 << 3) + 6), // Reserved, not implemented.
+ CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented.
+
+ SEL = ((2 << 3) + 0),
+ SELEQZ_C = ((2 << 3) + 4), // COP1 on FPR registers.
+ SELNEZ_C = ((2 << 3) + 7), // COP1 on FPR registers.
+
// COP1 Encoding of Function Field When rs=PS.
// COP1X Encoding of Function Field.
MADD_D = ((4 << 3) + 1),
// ----- Emulated conditions.
-// On MIPS we use this enum to abstract from conditionnal branch instructions.
+// On MIPS we use this enum to abstract from conditional branch instructions.
// The 'U' prefix is used to specify unsigned comparisons.
-// Oppposite conditions must be paired as odd/even numbers
+// Opposite conditions must be paired as odd/even numbers
// because 'NegateCondition' function flips LSB to negate condition.
enum Condition {
// Any value < 0 is considered no_condition.
return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
}
+ inline int32_t Imm21Value() const {
+ ASSERT(InstructionType() == kImmediateType);
+ return Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift);
+ }
+
inline int32_t Imm26Value() const {
ASSERT(InstructionType() == kJumpType);
return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
void PrintUImm16(Instruction* instr);
void PrintSImm16(Instruction* instr);
void PrintXImm16(Instruction* instr);
+ void PrintXImm21(Instruction* instr);
void PrintXImm26(Instruction* instr);
void PrintCode(Instruction* instr); // For break and trap instructions.
// Printing of instruction name.
}
+// Print 21-bit immediate value.
+void Decoder::PrintXImm21(Instruction* instr) {
+ uint32_t imm = instr->Imm21Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+}
+
+
// Print 26-bit immediate value.
void Decoder::PrintXImm26(Instruction* instr) {
uint32_t imm = instr->Imm26Value() << kImmFieldShift;
PrintXImm16(instr);
}
return 6;
- } else {
+ } else if (format[3] == '2' && format[4] == '1') {
+ ASSERT(STRING_STARTS_WITH(format, "imm21x"));
+ PrintXImm21(instr);
+ return 6;
+ } else if (format[3] == '2' && format[4] == '6') {
ASSERT(STRING_STARTS_WITH(format, "imm26x"));
PrintXImm26(instr);
return 6;
switch (instr->OpcodeFieldRaw()) {
case COP1: // Coprocessor instructions.
switch (instr->RsFieldRaw()) {
- case BC1: // bc1 handled in DecodeTypeImmediate.
- UNREACHABLE();
- break;
case MFC1:
Format(instr, "mfc1 'rt, 'fs");
break;
break;
}
break;
- case S:
- UNIMPLEMENTED_MIPS();
- break;
case W:
switch (instr->FunctionFieldRaw()) {
- case CVT_S_W: // Convert word to float (single).
- Format(instr, "cvt.s.w 'fd, 'fs");
- break;
case CVT_D_W: // Convert word to double.
Format(instr, "cvt.d.w 'fd, 'fs");
break;
case CVT_S_L:
Format(instr, "cvt.s.l 'fd, 'fs");
break;
+ case CMP_UN:
+ Format(instr, "cmp.un.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_EQ:
+ Format(instr, "cmp.eq.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_UEQ:
+ Format(instr, "cmp.ueq.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_LT:
+ Format(instr, "cmp.lt.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_ULT:
+ Format(instr, "cmp.ult.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_LE:
+ Format(instr, "cmp.le.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_ULE:
+ Format(instr, "cmp.ule.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_OR:
+ Format(instr, "cmp.or.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_UNE:
+ Format(instr, "cmp.une.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_NE:
+ Format(instr, "cmp.ne.d 'fd, 'fs, 'ft");
+ break;
default:
UNREACHABLE();
}
break;
- case PS:
- UNIMPLEMENTED_MIPS();
- break;
default:
UNREACHABLE();
}
Format(instr, "jalr 'rs");
break;
case SLL:
- if ( 0x0 == static_cast<int>(instr->InstructionBits()))
+ if (0x0 == static_cast<int>(instr->InstructionBits()))
Format(instr, "nop");
else
Format(instr, "sll 'rd, 'rt, 'sa");
break;
case DSLL:
- Format(instr, "dsll 'rd, 'rt, 'sa");
+ Format(instr, "dsll 'rd, 'rt, 'sa");
+ break;
+ case D_MUL_MUH: // Equals to DMUL.
+ if (kArchVariant != kMips64r6) {
+ Format(instr, "dmult 'rs, 'rt");
+ } else {
+ if (instr->SaValue() == MUL_OP) {
+ Format(instr, "dmul 'rd, 'rs, 'rt");
+ } else {
+ Format(instr, "dmuh 'rd, 'rs, 'rt");
+ }
+ }
break;
case DSLL32:
Format(instr, "dsll32 'rd, 'rt, 'sa");
Format(instr, "dsrav 'rd, 'rt, 'rs");
break;
case MFHI:
- Format(instr, "mfhi 'rd");
+ if (instr->Bits(25, 16) == 0) {
+ Format(instr, "mfhi 'rd");
+ } else {
+ if ((instr->FunctionFieldRaw() == CLZ_R6)
+ && (instr->FdValue() == 1)) {
+ Format(instr, "clz 'rd, 'rs");
+ } else if ((instr->FunctionFieldRaw() == CLO_R6)
+ && (instr->FdValue() == 1)) {
+ Format(instr, "clo 'rd, 'rs");
+ }
+ }
break;
case MFLO:
Format(instr, "mflo 'rd");
break;
- case MULT:
- Format(instr, "mult 'rs, 'rt");
- break;
- case DMULT:
- Format(instr, "dmult 'rs, 'rt");
+ case D_MUL_MUH_U: // Equals to DMULTU.
+ if (kArchVariant != kMips64r6) {
+ Format(instr, "dmultu 'rs, 'rt");
+ } else {
+ if (instr->SaValue() == MUL_OP) {
+ Format(instr, "dmulu 'rd, 'rs, 'rt");
+ } else {
+ Format(instr, "dmuhu 'rd, 'rs, 'rt");
+ }
+ }
break;
- case MULTU:
- Format(instr, "multu 'rs, 'rt");
+ case MULT: // @Mips64r6 == MUL_MUH.
+ if (kArchVariant != kMips64r6) {
+ Format(instr, "mult 'rs, 'rt");
+ } else {
+ if (instr->SaValue() == MUL_OP) {
+ Format(instr, "mul 'rd, 'rs, 'rt");
+ } else {
+ Format(instr, "muh 'rd, 'rs, 'rt");
+ }
+ }
break;
- case DMULTU:
- Format(instr, "dmultu 'rs, 'rt");
+ case MULTU: // @Mips64r6 == MUL_MUH_U.
+ if (kArchVariant != kMips64r6) {
+ Format(instr, "multu 'rs, 'rt");
+ } else {
+ if (instr->SaValue() == MUL_OP) {
+ Format(instr, "mulu 'rd, 'rs, 'rt");
+ } else {
+ Format(instr, "muhu 'rd, 'rs, 'rt");
+ }
+ }
+
break;
- case DIV:
- Format(instr, "div 'rs, 'rt");
+ case DIV: // @Mips64r6 == DIV_MOD.
+ if (kArchVariant != kMips64r6) {
+ Format(instr, "div 'rs, 'rt");
+ } else {
+ if (instr->SaValue() == DIV_OP) {
+ Format(instr, "div 'rd, 'rs, 'rt");
+ } else {
+ Format(instr, "mod 'rd, 'rs, 'rt");
+ }
+ }
break;
- case DDIV:
- Format(instr, "ddiv 'rs, 'rt");
+ case DDIV: // @Mips64r6 == D_DIV_MOD.
+ if (kArchVariant != kMips64r6) {
+ Format(instr, "ddiv 'rs, 'rt");
+ } else {
+ if (instr->SaValue() == DIV_OP) {
+ Format(instr, "ddiv 'rd, 'rs, 'rt");
+ } else {
+ Format(instr, "dmod 'rd, 'rs, 'rt");
+ }
+ }
break;
- case DIVU:
- Format(instr, "divu 'rs, 'rt");
+ case DIVU: // @Mips64r6 == DIV_MOD_U.
+ if (kArchVariant != kMips64r6) {
+ Format(instr, "divu 'rs, 'rt");
+ } else {
+ if (instr->SaValue() == DIV_OP) {
+ Format(instr, "divu 'rd, 'rs, 'rt");
+ } else {
+ Format(instr, "modu 'rd, 'rs, 'rt");
+ }
+ }
break;
- case DDIVU:
- Format(instr, "ddivu 'rs, 'rt");
+ case DDIVU: // @Mips64r6 == D_DIV_MOD_U.
+ if (kArchVariant != kMips64r6) {
+ Format(instr, "ddivu 'rs, 'rt");
+ } else {
+ if (instr->SaValue() == DIV_OP) {
+ Format(instr, "ddivu 'rd, 'rs, 'rt");
+ } else {
+ Format(instr, "dmodu 'rd, 'rs, 'rt");
+ }
+ }
break;
case ADD:
Format(instr, "add 'rd, 'rs, 'rt");
Format(instr, "movf 'rd, 'rs, 'bc");
}
break;
+ case SELEQZ_S:
+ Format(instr, "seleqz 'rd, 'rs, 'rt");
+ break;
+ case SELNEZ_S:
+ Format(instr, "selnez 'rd, 'rs, 'rt");
+ break;
default:
UNREACHABLE();
}
Format(instr, "mul 'rd, 'rs, 'rt");
break;
case CLZ:
- Format(instr, "clz 'rd, 'rs");
+ if (kArchVariant != kMips64r6) {
+ Format(instr, "clz 'rd, 'rs");
+ }
break;
default:
UNREACHABLE();
case SPECIAL3:
switch (instr->FunctionFieldRaw()) {
case INS: {
- if (kArchVariant == kMips64r2) {
- Format(instr, "ins 'rt, 'rs, 'sa, 'ss2");
- } else {
- Unknown(instr);
- }
+ Format(instr, "ins 'rt, 'rs, 'sa, 'ss2");
break;
}
case EXT: {
- if (kArchVariant == kMips64r2) {
- Format(instr, "ext 'rt, 'rs, 'sa, 'ss1");
- } else {
- Unknown(instr);
- }
+ Format(instr, "ext 'rt, 'rs, 'sa, 'ss1");
break;
}
default:
void Decoder::DecodeTypeImmediate(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
- // ------------- REGIMM class.
case COP1:
switch (instr->RsFieldRaw()) {
case BC1:
Format(instr, "bc1f 'bc, 'imm16u");
}
break;
+ case BC1EQZ:
+ Format(instr, "bc1eqz 'ft, 'imm16u");
+ break;
+ case BC1NEZ:
+ Format(instr, "bc1nez 'ft, 'imm16u");
+ break;
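+ // r6 CMP.S/CMP.D write an all-zeros or all-ones mask to fd; the rs field
+ // carries the operand format (W for single, L for double).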
+ case W: // CMP.S instruction.
+ switch (instr->FunctionValue()) {
+ case CMP_AF:
+ Format(instr, "cmp.af.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_UN:
+ Format(instr, "cmp.un.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_EQ:
+ Format(instr, "cmp.eq.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_UEQ:
+ Format(instr, "cmp.ueq.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_LT:
+ Format(instr, "cmp.lt.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_ULT:
+ Format(instr, "cmp.ult.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_LE:
+ Format(instr, "cmp.le.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_ULE:
+ Format(instr, "cmp.ule.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_OR:
+ Format(instr, "cmp.or.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_UNE:
+ Format(instr, "cmp.une.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_NE:
+ Format(instr, "cmp.ne.S 'ft, 'fs, 'fd");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case L: // CMP.D instruction.
+ switch (instr->FunctionValue()) {
+ case CMP_AF:
+ Format(instr, "cmp.af.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_UN:
+ Format(instr, "cmp.un.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_EQ:
+ Format(instr, "cmp.eq.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_UEQ:
+ Format(instr, "cmp.ueq.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_LT:
+ Format(instr, "cmp.lt.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_ULT:
+ Format(instr, "cmp.ult.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_LE:
+ Format(instr, "cmp.le.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_ULE:
+ Format(instr, "cmp.ule.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_OR:
+ Format(instr, "cmp.or.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_UNE:
+ Format(instr, "cmp.une.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_NE:
+ Format(instr, "cmp.ne.D 'ft, 'fs, 'fd");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case S:
+ switch (instr->FunctionValue()) {
+ case SEL:
+ Format(instr, "sel.S 'ft, 'fs, 'fd");
+ break;
+ case SELEQZ_C:
+ Format(instr, "seleqz.S 'ft, 'fs, 'fd");
+ break;
+ case SELNEZ_C:
+ Format(instr, "selnez.S 'ft, 'fs, 'fd");
+ break;
+ case MIN:
+ Format(instr, "min.S 'ft, 'fs, 'fd");
+ break;
+ case MINA:
+ Format(instr, "mina.S 'ft, 'fs, 'fd");
+ break;
+ case MAX:
+ Format(instr, "max.S 'ft, 'fs, 'fd");
+ break;
+ case MAXA:
+ Format(instr, "maxa.S 'ft, 'fs, 'fd");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case D:
+ switch (instr->FunctionValue()) {
+ case SEL:
+ Format(instr, "sel.D 'ft, 'fs, 'fd");
+ break;
+ case SELEQZ_C:
+ Format(instr, "seleqz.D 'ft, 'fs, 'fd");
+ break;
+ case SELNEZ_C:
+ Format(instr, "selnez.D 'ft, 'fs, 'fd");
+ break;
+ case MIN:
+ Format(instr, "min.D 'ft, 'fs, 'fd");
+ break;
+ case MINA:
+ Format(instr, "mina.D 'ft, 'fs, 'fd");
+ break;
+ case MAX:
+ Format(instr, "max.D 'ft, 'fs, 'fd");
+ break;
+ case MAXA:
+ Format(instr, "maxa.D 'ft, 'fs, 'fd");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
default:
UNREACHABLE();
}
+
break; // Case COP1.
+ // ------------- REGIMM class.
case REGIMM:
switch (instr->RtFieldRaw()) {
case BLTZ:
case BGEZAL:
Format(instr, "bgezal 'rs, 'imm16u");
break;
+ case BGEZALL:
+ Format(instr, "bgezall 'rs, 'imm16u");
+ break;
+ case DAHI:
+ Format(instr, "dahi 'rs, 'imm16u");
+ break;
+ case DATI:
+ Format(instr, "dati 'rs, 'imm16u");
+ break;
default:
UNREACHABLE();
}
Format(instr, "bne 'rs, 'rt, 'imm16u");
break;
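+ // On r6 the BLEZ/BGTZ/BLEZL/BGTZL opcodes are overlaid with compact
+ // branches; the rs/rt field patterns (zero, equal, or distinct) select
+ // the actual instruction.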
case BLEZ:
- Format(instr, "blez 'rs, 'imm16u");
+ if ((instr->RtFieldRaw() == 0)
+ && (instr->RsFieldRaw() != 0)) {
+ Format(instr, "blez 'rs, 'imm16u");
+ } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
+ && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bgeuc 'rs, 'rt, 'imm16u");
+ } else if ((instr->RtFieldRaw() == instr->RsFieldRaw())
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bgezalc 'rs, 'imm16u");
+ } else if ((instr->RsFieldRaw() == 0)
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "blezalc 'rs, 'imm16u");
+ } else {
+ UNREACHABLE();
+ }
break;
case BGTZ:
- Format(instr, "bgtz 'rs, 'imm16u");
+ if ((instr->RtFieldRaw() == 0)
+ && (instr->RsFieldRaw() != 0)) {
+ Format(instr, "bgtz 'rs, 'imm16u");
+ } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
+ && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bltuc 'rs, 'rt, 'imm16u");
+ } else if ((instr->RtFieldRaw() == instr->RsFieldRaw())
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bltzalc 'rt, 'imm16u");
+ } else if ((instr->RsFieldRaw() == 0)
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bgtzalc 'rt, 'imm16u");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case BLEZL:
+ if ((instr->RtFieldRaw() == instr->RsFieldRaw())
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bgezc 'rt, 'imm16u");
+ } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
+ && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bgec 'rs, 'rt, 'imm16u");
+ } else if ((instr->RsFieldRaw() == 0)
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "blezc 'rt, 'imm16u");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case BGTZL:
+ if ((instr->RtFieldRaw() == instr->RsFieldRaw())
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bltzc 'rt, 'imm16u");
+ } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
+ && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bltc 'rs, 'rt, 'imm16u");
+ } else if ((instr->RsFieldRaw() == 0)
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bgtzc 'rt, 'imm16u");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case BEQZC:
+ if (instr->RsFieldRaw() != 0) {
+ Format(instr, "beqzc 'rs, 'imm21x");
+ }
+ break;
+ case BNEZC:
+ if (instr->RsFieldRaw() != 0) {
+ Format(instr, "bnezc 'rs, 'imm21x");
+ }
break;
// ------------- Arithmetic instructions.
case ADDI:
- Format(instr, "addi 'rt, 'rs, 'imm16s");
+ if (kArchVariant != kMips64r6) {
+ Format(instr, "addi 'rt, 'rs, 'imm16s");
+ } else {
+ // Check if BOVC or BEQC instruction.
+ if (instr->RsFieldRaw() >= instr->RtFieldRaw()) {
+ Format(instr, "bovc 'rs, 'rt, 'imm16s");
+ } else if (instr->RsFieldRaw() < instr->RtFieldRaw()) {
+ Format(instr, "beqc 'rs, 'rt, 'imm16s");
+ } else {
+ UNREACHABLE();
+ }
+ }
break;
case DADDI:
- Format(instr, "daddi 'rt, 'rs, 'imm16s");
+ if (kArchVariant != kMips64r6) {
+ Format(instr, "daddi 'rt, 'rs, 'imm16s");
+ } else {
+ // Check if BNVC or BNEC instruction.
+ if (instr->RsFieldRaw() >= instr->RtFieldRaw()) {
+ Format(instr, "bnvc 'rs, 'rt, 'imm16s");
+ } else if (instr->RsFieldRaw() < instr->RtFieldRaw()) {
+ Format(instr, "bnec 'rs, 'rt, 'imm16s");
+ } else {
+ UNREACHABLE();
+ }
+ }
break;
case ADDIU:
Format(instr, "addiu 'rt, 'rs, 'imm16s");
Format(instr, "xori 'rt, 'rs, 'imm16x");
break;
case LUI:
- Format(instr, "lui 'rt, 'imm16x");
+ if (kArchVariant != kMips64r6) {
+ Format(instr, "lui 'rt, 'imm16x");
+ } else {
+ if (instr->RsValue() != 0) {
+ Format(instr, "aui 'rt, 'imm16x");
+ } else {
+ Format(instr, "lui 'rt, 'imm16x");
+ }
+ }
+ break;
+ case DAUI:
+ Format(instr, "daui 'rt, 'imm16x");
break;
// ------------- Memory instructions.
case LB:
__ BranchOnOverflow(&stub_call, scratch1);
break;
case Token::MUL: {
- __ SmiUntag(scratch1, right);
- __ Dmult(left, scratch1);
- __ mflo(scratch1);
- __ mfhi(scratch2);
- __ dsra32(scratch1, scratch1, 31);
- __ Branch(&stub_call, ne, scratch1, Operand(scratch2));
- __ mflo(v0);
- __ Branch(&done, ne, v0, Operand(zero_reg));
+ __ Dmulh(v0, left, right);
+ __ dsra32(scratch2, v0, 0);
+ __ sra(scratch1, v0, 31);
+ __ Branch(USE_DELAY_SLOT, &stub_call, ne, scratch2, Operand(scratch1));
+ __ SmiTag(v0);
+ __ Branch(USE_DELAY_SLOT, &done, ne, v0, Operand(zero_reg));
__ Daddu(scratch2, right, left);
__ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
ASSERT(Smi::FromInt(0) == 0);
__ ld(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ Dsubu(string_length, string_length, Operand(scratch1));
__ SmiUntag(scratch1);
- __ Dmult(array_length, scratch1);
+ __ Dmul(scratch2, array_length, scratch1);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
// zero.
- __ mfhi(scratch2);
+ __ dsra32(scratch1, scratch2, 0);
__ Branch(&bailout, ne, scratch2, Operand(zero_reg));
- __ mflo(scratch2);
__ SmiUntag(string_length);
__ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3);
__ BranchOnOverflow(&bailout, scratch3);
const Register result_reg = ToRegister(instr->result());
// div runs in the background while we check for special cases.
- __ ddiv(left_reg, right_reg);
+ __ Dmod(result_reg, left_reg, right_reg);
Label done;
// Check for x % 0, we have to deopt in this case because we can't return a
}
// If we care about -0, test if the dividend is <0 and the result is 0.
- __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
- __ mfhi(result_reg);
+ __ Branch(&done, ge, left_reg, Operand(zero_reg));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
// On MIPS div is asynchronous - it will run in the background while we
// check for special cases.
- __ ddiv(dividend, divisor);
+ __ Ddiv(result, dividend, divisor);
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
}
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- __ mfhi(result);
- DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
- __ mflo(result);
- } else {
- __ mflo(result);
+ // Calculate remainder.
+ Register remainder = ToRegister(instr->temp());
+ if (kArchVariant != kMips64r6) {
+ __ mfhi(remainder);
+ } else {
+ __ dmod(remainder, dividend, divisor);
+ }
+ DeoptimizeIf(ne, instr->environment(), remainder, Operand(zero_reg));
}
}
// On MIPS div is asynchronous - it will run in the background while we
// check for special cases.
- __ ddiv(dividend, divisor);
+ __ Ddiv(result, dividend, divisor);
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
// We performed a truncating division. Correct the result if necessary.
Label done;
Register remainder = scratch0();
- __ mfhi(remainder);
- __ mflo(result);
+ if (kArchVariant != kMips64r6) {
+ __ mfhi(remainder);
+ } else {
+ __ dmod(remainder, dividend, divisor);
+ }
__ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
__ Xor(remainder, remainder, Operand(divisor));
__ Branch(&done, ge, remainder, Operand(zero_reg));
if (overflow) {
// hi:lo = left * right.
if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiUntag(result, left);
- __ dmult(result, right);
- __ mfhi(scratch);
- __ mflo(result);
+ __ Dmulh(result, left, right);
} else {
- __ dmult(left, right);
- __ mfhi(scratch);
- __ mflo(result);
+ __ Dmul(result, left, right);
}
- __ dsra32(at, result, 31);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
- if (!instr->hydrogen()->representation().IsSmi()) {
- DeoptimizeIf(gt, instr->environment(), result, Operand(kMaxInt));
- DeoptimizeIf(lt, instr->environment(), result, Operand(kMinInt));
+ __ dsra32(scratch, result, 0);
+ __ sra(at, result, 31);
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiTag(result);
}
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
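+ // Reserve a temp to hold the remainder for the deoptimization check; when
+ // all uses truncate, the check is skipped and no temp is needed.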
+ LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
+ ? NULL : TempRegister();
LInstruction* result =
- DefineAsRegister(new(zone()) LDivI(dividend, divisor));
+ DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
(instr->CheckFlag(HValue::kCanOverflow) &&
};
-class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LDivI(LOperand* dividend, LOperand* divisor) {
+ LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
inputs_[0] = dividend;
inputs_[1] = divisor;
+ temps_[0] = temp;
}
LOperand* dividend() { return inputs_[0]; }
LOperand* divisor() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- if (kArchVariant == kLoongson) {
+ mul(rd, rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ mul(rd, rs, at);
+ }
+}
+
+
+void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (kArchVariant != kMips64r6) {
mult(rs, rt.rm());
- mflo(rd);
+ mfhi(rd);
} else {
- mul(rd, rs, rt.rm());
+ muh(rd, rs, rt.rm());
}
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
- if (kArchVariant == kLoongson) {
+ if (kArchVariant != kMips64r6) {
mult(rs, at);
- mflo(rd);
+ mfhi(rd);
} else {
- mul(rd, rs, at);
+ muh(rd, rs, at);
}
}
}
void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- if (kArchVariant == kLoongson) {
- dmult(rs, rt.rm());
- mflo(rd);
+ if (kArchVariant == kMips64r6) {
+ dmul(rd, rs, rt.rm());
} else {
- // TODO(yuyin):
- // dmul(rd, rs, rt.rm());
dmult(rs, rt.rm());
mflo(rd);
}
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
- if (kArchVariant == kLoongson) {
+ if (kArchVariant == kMips64r6) {
+ dmul(rd, rs, at);
+ } else {
dmult(rs, at);
mflo(rd);
+ }
+ }
+}
+
+
+void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (kArchVariant == kMips64r6) {
+ dmuh(rd, rs, rt.rm());
+ } else {
+ dmult(rs, rt.rm());
+ mfhi(rd);
+ }
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ if (kArchVariant == kMips64r6) {
+ dmuh(rd, rs, at);
} else {
- // TODO(yuyin):
- // dmul(rd, rs, at);
dmult(rs, at);
- mflo(rd);
+ mfhi(rd);
}
}
}
}
+void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
+ if (kArchVariant != kMips64r6) {
+ if (rt.is_reg()) {
+ ddiv(rs, rt.rm());
+ mflo(rd);
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ ddiv(rs, at);
+ mflo(rd);
+ }
+ } else {
+ if (rt.is_reg()) {
+ ddiv(rd, rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ ddiv(rd, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::Divu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
divu(rs, rt.rm());
}
+void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
+ if (kArchVariant != kMips64r6) {
+ if (rt.is_reg()) {
+ ddiv(rs, rt.rm());
+ mfhi(rd);
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ ddiv(rs, at);
+ mfhi(rd);
+ }
+ } else {
+ if (rt.is_reg()) {
+ dmod(rd, rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ dmod(rd, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
and_(rd, rs, rt.rm());
void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
- if (kArchVariant == kLoongson) {
- lw(zero_reg, rs);
- } else {
pref(hint, rs);
- }
}
ASSERT(nan || target);
// Check for unordered (NaN) cases.
if (nan) {
- c(UN, D, cmp1, cmp2);
- bc1t(nan);
- }
-
- if (target) {
- // Here NaN cases were either handled by this function or are assumed to
- // have been handled by the caller.
- // Unsigned conditions are treated as their signed counterpart.
- switch (cc) {
- case lt:
- c(OLT, D, cmp1, cmp2);
- bc1t(target);
- break;
- case gt:
- c(ULE, D, cmp1, cmp2);
- bc1f(target);
- break;
- case ge:
- c(ULT, D, cmp1, cmp2);
- bc1f(target);
- break;
- case le:
- c(OLE, D, cmp1, cmp2);
- bc1t(target);
- break;
- case eq:
- c(EQ, D, cmp1, cmp2);
- bc1t(target);
- break;
- case ueq:
- c(UEQ, D, cmp1, cmp2);
- bc1t(target);
- break;
- case ne:
- c(EQ, D, cmp1, cmp2);
- bc1f(target);
- break;
- case nue:
- c(UEQ, D, cmp1, cmp2);
- bc1f(target);
- break;
- default:
- CHECK(0);
+ if (kArchVariant != kMips64r6) {
+ c(UN, D, cmp1, cmp2);
+ bc1t(nan);
+ } else {
+      // Use f31 for the comparison result. It must be unavailable to the
+      // Lithium register allocator.
+ ASSERT(!cmp1.is(f31) && !cmp2.is(f31));
+ cmp(UN, L, f31, cmp1, cmp2);
+ bc1nez(nan, f31);
+ }
+ }
+
+ if (kArchVariant != kMips64r6) {
+ if (target) {
+ // Here NaN cases were either handled by this function or are assumed to
+ // have been handled by the caller.
+ switch (cc) {
+ case lt:
+ c(OLT, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case gt:
+ c(ULE, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case ge:
+ c(ULT, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case le:
+ c(OLE, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case eq:
+ c(EQ, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case ueq:
+ c(UEQ, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case ne:
+ c(EQ, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case nue:
+ c(UEQ, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ default:
+ CHECK(0);
+ }
+ }
+ } else {
+ if (target) {
+ // Here NaN cases were either handled by this function or are assumed to
+ // have been handled by the caller.
+      // Unsigned conditions are treated as their signed counterparts.
+      // Use f31 for the comparison result; it is valid in fp64 (FR = 1)
+      // mode.
+ ASSERT(!cmp1.is(f31) && !cmp2.is(f31));
+ switch (cc) {
+ case lt:
+ cmp(OLT, L, f31, cmp1, cmp2);
+ bc1nez(target, f31);
+ break;
+ case gt:
+ cmp(ULE, L, f31, cmp1, cmp2);
+ bc1eqz(target, f31);
+ break;
+ case ge:
+ cmp(ULT, L, f31, cmp1, cmp2);
+ bc1eqz(target, f31);
+ break;
+ case le:
+ cmp(OLE, L, f31, cmp1, cmp2);
+ bc1nez(target, f31);
+ break;
+ case eq:
+ cmp(EQ, L, f31, cmp1, cmp2);
+ bc1nez(target, f31);
+ break;
+ case ueq:
+ cmp(UEQ, L, f31, cmp1, cmp2);
+ bc1nez(target, f31);
+ break;
+ case ne:
+ cmp(EQ, L, f31, cmp1, cmp2);
+ bc1eqz(target, f31);
+ break;
+ case nue:
+ cmp(UEQ, L, f31, cmp1, cmp2);
+ bc1eqz(target, f31);
+ break;
+ default:
+ CHECK(0);
+ }
}
}
void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
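+  // r6 removes movz, so emulate it by branching over the move when the
+  // condition register is non-zero.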
- if (kArchVariant == kLoongson) {
+ if (kArchVariant == kMips64r6) {
Label done;
Branch(&done, ne, rt, Operand(zero_reg));
mov(rd, rs);
void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
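+  // r6 removes movn, so emulate it by branching over the move when the
+  // condition register is zero.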
- if (kArchVariant == kLoongson) {
+ if (kArchVariant == kMips64r6) {
Label done;
Branch(&done, eq, rt, Operand(zero_reg));
mov(rd, rs);
// Signed comparison.
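+      // r6 keeps bgezal/bltzal only as plain bal/nal, so the link is made
+      // unconditional on every variant: an inverted branch skips over the
+      // delay-slot nop and the bal when the condition fails.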
case greater:
+ // rs > rt
slt(scratch, r2, rs);
- daddiu(scratch, scratch, -1);
- bgezal(scratch, offset);
+ beq(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
break;
case greater_equal:
+ // rs >= rt
slt(scratch, rs, r2);
- daddiu(scratch, scratch, -1);
- bltzal(scratch, offset);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
break;
case less:
+ // rs < r2
slt(scratch, rs, r2);
- daddiu(scratch, scratch, -1);
- bgezal(scratch, offset);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
break;
case less_equal:
+ // rs <= r2
slt(scratch, r2, rs);
- daddiu(scratch, scratch, -1);
- bltzal(scratch, offset);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
break;
+
// Unsigned comparison.
case Ugreater:
+ // rs > rt
sltu(scratch, r2, rs);
- daddiu(scratch, scratch, -1);
- bgezal(scratch, offset);
+ beq(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
break;
case Ugreater_equal:
+ // rs >= rt
sltu(scratch, rs, r2);
- daddiu(scratch, scratch, -1);
- bltzal(scratch, offset);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
break;
case Uless:
+ // rs < r2
sltu(scratch, rs, r2);
- daddiu(scratch, scratch, -1);
- bgezal(scratch, offset);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
break;
case Uless_equal:
+ // rs <= r2
sltu(scratch, r2, rs);
- daddiu(scratch, scratch, -1);
- bltzal(scratch, offset);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
break;
-
default:
UNREACHABLE();
}
// Signed comparison.
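+      // As above: skip the unconditional bal with an inverted branch when
+      // the condition does not hold.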
case greater:
+ // rs > rt
slt(scratch, r2, rs);
- daddiu(scratch, scratch, -1);
+ beq(scratch, zero_reg, 2);
+ nop();
offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
+ bal(offset);
break;
case greater_equal:
+ // rs >= rt
slt(scratch, rs, r2);
- daddiu(scratch, scratch, -1);
+ bne(scratch, zero_reg, 2);
+ nop();
offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
+ bal(offset);
break;
case less:
+ // rs < r2
slt(scratch, rs, r2);
- daddiu(scratch, scratch, -1);
+ bne(scratch, zero_reg, 2);
+ nop();
offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
+ bal(offset);
break;
case less_equal:
+ // rs <= r2
slt(scratch, r2, rs);
- daddiu(scratch, scratch, -1);
+ bne(scratch, zero_reg, 2);
+ nop();
offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
+ bal(offset);
break;
+
// Unsigned comparison.
case Ugreater:
+ // rs > rt
sltu(scratch, r2, rs);
- daddiu(scratch, scratch, -1);
+ beq(scratch, zero_reg, 2);
+ nop();
offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
+ bal(offset);
break;
case Ugreater_equal:
+ // rs >= rt
sltu(scratch, rs, r2);
- daddiu(scratch, scratch, -1);
+ bne(scratch, zero_reg, 2);
+ nop();
offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
+ bal(offset);
break;
case Uless:
+ // rs < r2
sltu(scratch, rs, r2);
- daddiu(scratch, scratch, -1);
+ bne(scratch, zero_reg, 2);
+ nop();
offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
+ bal(offset);
break;
case Uless_equal:
+ // rs <= r2
sltu(scratch, r2, rs);
- daddiu(scratch, scratch, -1);
+ bne(scratch, zero_reg, 2);
+ nop();
offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
+ bal(offset);
break;
default:
int mask,
Condition cc,
Label* condition_met) {
- // TODO(plind): Fix li() so we can use constant embedded inside And().
- // And(scratch, object, Operand(~Page::kPageAlignmentMask));
- li(at, Operand(~Page::kPageAlignmentMask), CONSTANT_SIZE); // plind HACK
- And(scratch, object, at);
+ And(scratch, object, Operand(~Page::kPageAlignmentMask));
ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask));
Branch(condition_met, cc, scratch, Operand(zero_reg));
ASSERT(!result.is(at));
MultiplierAndShift ms(divisor);
li(at, Operand(ms.multiplier()));
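+  // Signed division by a constant: the quotient is derived from the high
+  // word of dividend * multiplier (multiply-high division).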
- Mult(dividend, Operand(at));
- mfhi(result);
+ Mulh(result, dividend, Operand(at));
if (divisor > 0 && ms.multiplier() < 0) {
Addu(result, result, Operand(dividend));
}
DEFINE_INSTRUCTION(Addu);
DEFINE_INSTRUCTION(Daddu);
+ DEFINE_INSTRUCTION(Ddiv);
DEFINE_INSTRUCTION(Subu);
DEFINE_INSTRUCTION(Dsubu);
+ DEFINE_INSTRUCTION(Dmod);
DEFINE_INSTRUCTION(Mul);
+ DEFINE_INSTRUCTION(Mulh);
DEFINE_INSTRUCTION(Dmul);
+ DEFINE_INSTRUCTION(Dmulh);
DEFINE_INSTRUCTION2(Mult);
DEFINE_INSTRUCTION2(Dmult);
DEFINE_INSTRUCTION2(Multu);
switch (op) {
case COP1: // Coprocessor instructions.
switch (instr->RsFieldRaw()) {
- case BC1: // Handled in DecodeTypeImmed, should never come here.
- UNREACHABLE();
- break;
case CFC1:
// At the moment only FCSR is supported.
ASSERT(fs_reg == kFCSRRegister);
case MTC1:
case DMTC1:
case MTHC1:
- // Do the store in the execution step.
- break;
case S:
case D:
case W:
// Do everything in the execution step.
break;
default:
- UNIMPLEMENTED_MIPS();
+        // BC1, BC1EQZ and BC1NEZ are handled in DecodeTypeImmed and should
+        // never come here.
+ UNREACHABLE();
}
break;
case COP1X:
case DSRAV:
*alu_out = rt >> rs;
break;
- case MFHI:
- *alu_out = get_register(HI);
+ case MFHI: // MFHI == CLZ on R6.
+ if (kArchVariant != kMips64r6) {
+ ASSERT(instr->SaValue() == 0);
+ *alu_out = get_register(HI);
+ } else {
+ // MIPS spec: If no bits were set in GPR rs, the result written to
+ // GPR rd is 32.
+ // GCC __builtin_clz: If input is 0, the result is undefined.
+ ASSERT(instr->SaValue() == 1);
+ *alu_out =
+ rs_u == 0 ? 32 : CompilerIntrinsics::CountLeadingZeros(rs_u);
+ }
break;
case MFLO:
*alu_out = get_register(LO);
break;
- case MULT:
+ case MULT: // MULT == D_MUL_MUH.
// TODO(plind) - Unify MULT/DMULT with single set of 64-bit HI/Lo
// regs.
// TODO(plind) - make the 32-bit MULT ops conform to spec regarding
case MULTU:
*u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
break;
- case DMULT:
- *i128resultH = MultiplyHighSigned(rs, rt);
- *i128resultL = rs * rt;
+ case DMULT: // DMULT == D_MUL_MUH.
+ if (kArchVariant != kMips64r6) {
+ *i128resultH = MultiplyHighSigned(rs, rt);
+ *i128resultL = rs * rt;
+ } else {
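+            // On r6 the DMULT encoding is reused: the sa field selects
+            // whether the low (DMUL) or high (DMUH) half of the product is
+            // produced, and the result goes to rd instead of HI/LO.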
+ switch (instr->SaValue()) {
+ case MUL_OP:
+ *i128resultL = rs * rt;
+ break;
+ case MUH_OP:
+ *i128resultH = MultiplyHighSigned(rs, rt);
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ }
break;
case DMULTU:
UNIMPLEMENTED_MIPS();
case COP1:
switch (instr->RsFieldRaw()) {
case BC1: // Branch on coprocessor condition.
+ case BC1EQZ:
+ case BC1NEZ:
UNREACHABLE();
break;
case CFC1:
f = get_fpu_register_float(fs_reg);
set_fpu_register_double(fd_reg, static_cast<double>(f));
break;
- case CVT_W_S:
- case CVT_L_S:
- case TRUNC_W_S:
- case TRUNC_L_S:
- case ROUND_W_S:
- case ROUND_L_S:
- case FLOOR_W_S:
- case FLOOR_L_S:
- case CEIL_W_S:
- case CEIL_L_S:
- case CVT_PS_S:
- UNIMPLEMENTED_MIPS();
- break;
default:
+        // CVT_W_S CVT_L_S TRUNC_W_S TRUNC_L_S ROUND_W_S ROUND_L_S FLOOR_W_S
+        // FLOOR_L_S CEIL_W_S CEIL_L_S CVT_PS_S are unimplemented.
UNREACHABLE();
}
break;
alu_out = get_fpu_register_signed_word(fs_reg);
set_fpu_register_double(fd_reg, static_cast<double>(alu_out));
break;
- default:
+          default:  // Mips64r6 CMP.S instructions are unimplemented.
UNREACHABLE();
}
break;
case L:
+ fs = get_fpu_register_double(fs_reg);
+ ft = get_fpu_register_double(ft_reg);
switch (instr->FunctionFieldRaw()) {
- case CVT_D_L: // Mips32r2 instruction.
- i64 = get_fpu_register(fs_reg);
- set_fpu_register_double(fd_reg, static_cast<double>(i64));
- break;
+ case CVT_D_L: // Mips32r2 instruction.
+ i64 = get_fpu_register(fs_reg);
+ set_fpu_register_double(fd_reg, static_cast<double>(i64));
+ break;
case CVT_S_L:
UNIMPLEMENTED_MIPS();
break;
- default:
+ case CMP_AF: // Mips64r6 CMP.D instructions.
+ UNIMPLEMENTED_MIPS();
+ break;
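+          // The r6 CMP.cond.D instructions write all ones to fd when the
+          // condition holds and all zeros when it does not; bc1eqz/bc1nez
+          // later test bit 0 of that FPR.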
+ case CMP_UN:
+ if (std::isnan(fs) || std::isnan(ft)) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_EQ:
+ if (fs == ft) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_UEQ:
+ if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_LT:
+ if (fs < ft) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_ULT:
+ if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_LE:
+ if (fs <= ft) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_ULE:
+ if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+          default:  // CMP_OR, CMP_UNE and CMP_NE are unimplemented.
UNREACHABLE();
}
break;
- case PS:
- break;
default:
UNREACHABLE();
}
}
// Instructions using HI and LO registers.
case MULT:
- set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
- set_register(HI, static_cast<int32_t>(i64hilo >> 32));
+ if (kArchVariant != kMips64r6) {
+ set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
+ set_register(HI, static_cast<int32_t>(i64hilo >> 32));
+ } else {
+ switch (instr->SaValue()) {
+ case MUL_OP:
+ set_register(rd_reg,
+ static_cast<int32_t>(i64hilo & 0xffffffff));
+ break;
+ case MUH_OP:
+ set_register(rd_reg, static_cast<int32_t>(i64hilo >> 32));
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ }
break;
case MULTU:
set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
set_register(HI, static_cast<int32_t>(u64hilo >> 32));
break;
- case DMULT:
- set_register(LO, static_cast<int64_t>(i128resultL));
- set_register(HI, static_cast<int64_t>(i128resultH));
+ case DMULT: // DMULT == D_MUL_MUH.
+ if (kArchVariant != kMips64r6) {
+ set_register(LO, static_cast<int64_t>(i128resultL));
+ set_register(HI, static_cast<int64_t>(i128resultH));
+ } else {
+ switch (instr->SaValue()) {
+ case MUL_OP:
+ set_register(rd_reg, static_cast<int64_t>(i128resultL));
+ break;
+ case MUH_OP:
+ set_register(rd_reg, static_cast<int64_t>(i128resultH));
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ }
break;
case DMULTU:
UNIMPLEMENTED_MIPS();
break;
+ case DSLL:
+ set_register(rd_reg, alu_out);
+ break;
case DIV:
case DDIV:
- // Divide by zero and overflow was not checked in the configuration
- // step - div and divu do not raise exceptions. On division by 0
- // the result will be UNPREDICTABLE. On overflow (INT_MIN/-1),
- // return INT_MIN which is what the hardware does.
- if (rs == INT_MIN && rt == -1) {
- set_register(LO, INT_MIN);
- set_register(HI, 0);
- } else if (rt != 0) {
- set_register(LO, rs / rt);
- set_register(HI, rs % rt);
+ switch (kArchVariant) {
+ case kMips64r2:
+ // Divide by zero and overflow was not checked in the
+ // configuration step - div and divu do not raise exceptions. On
+ // division by 0 the result will be UNPREDICTABLE. On overflow
+ // (INT_MIN/-1), return INT_MIN which is what the hardware does.
+ if (rs == INT_MIN && rt == -1) {
+ set_register(LO, INT_MIN);
+ set_register(HI, 0);
+ } else if (rt != 0) {
+ set_register(LO, rs / rt);
+ set_register(HI, rs % rt);
+ }
+ break;
+ case kMips64r6:
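+            // On r6 the sa field selects DIV (quotient) or MOD (remainder);
+            // the result is written straight to rd, as HI/LO no longer
+            // exist.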
+ switch (instr->SaValue()) {
+ case DIV_OP:
+ if (rs == INT_MIN && rt == -1) {
+ set_register(rd_reg, INT_MIN);
+ } else if (rt != 0) {
+ set_register(rd_reg, rs / rt);
+ }
+ break;
+ case MOD_OP:
+ if (rs == INT_MIN && rt == -1) {
+ set_register(rd_reg, 0);
+ } else if (rt != 0) {
+ set_register(rd_reg, rs % rt);
+ }
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ break;
+ default:
+ break;
}
break;
case DIVU:
int16_t imm16 = instr->Imm16Value();
int32_t ft_reg = instr->FtValue(); // Destination register.
+ int64_t ft = get_fpu_register(ft_reg);
// Zero extended immediate.
uint32_t oe_imm16 = 0xffff & imm16;
next_pc = current_pc + kBranchReturnOffset;
}
break;
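+    // The r6 bc1eqz/bc1nez branches test bit 0 of FPR ft rather than an
+    // FCSR condition flag.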
+ case BC1EQZ:
+      do_branch = (ft & 0x1) == 0;
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ break;
+ case BC1NEZ:
+      do_branch = (ft & 0x1) != 0;
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ break;
default:
UNREACHABLE();
}
uint64_t stored_bits = DoubleToBits(stored_number);
// Check if quiet nan (bits 51..62 all set).
#if (defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)) && \
- !defined(USE_SIMULATOR)
+ !defined(_MIPS_ARCH_MIPS64R6) && !defined(USE_SIMULATOR)
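+  // (r6 is excluded: release 6 mandates the IEEE 754-2008 NaN encoding, in
+  // which the quiet bit is set rather than clear.)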
// Most significant fraction bit for quiet nan is set to 0
// on MIPS architecture. Allowed by IEEE-754.
CHECK_EQ(0xffe, static_cast<int>((stored_bits >> 51) & 0xfff));
uint64_t stored_bits = DoubleToBits(stored_date);
// Check if quiet nan (bits 51..62 all set).
#if (defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)) && \
- !defined(USE_SIMULATOR)
+ !defined(_MIPS_ARCH_MIPS64R6) && !defined(USE_SIMULATOR)
// Most significant fraction bit for quiet nan is set to 0
// on MIPS architecture. Allowed by IEEE-754.
CHECK_EQ(0xffe, static_cast<int>((stored_bits >> 51) & 0xfff));
__ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
__ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
- __ c(UN, D, f4, f6);
- __ bc1f(&neither_is_nan);
+ if (kArchVariant != kMips64r6) {
+ __ c(UN, D, f4, f6);
+ __ bc1f(&neither_is_nan);
+ } else {
+ __ cmp(UN, L, f2, f4, f6);
+ __ bc1eqz(&neither_is_nan, f2);
+ }
__ nop();
__ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
__ Branch(&outa_here);
__ bind(&neither_is_nan);
- if (kArchVariant == kLoongson) {
- __ c(OLT, D, f6, f4);
- __ bc1t(&less_than);
+ if (kArchVariant == kMips64r6) {
+ __ cmp(OLT, L, f2, f6, f4);
+ __ bc1nez(&less_than, f2);
} else {
__ c(OLT, D, f6, f4, 2);
__ bc1t(&less_than, 2);
}
+
__ nop();
__ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
__ Branch(&outa_here);
TEST(MIPS11) {
- // Test LWL, LWR, SWL and SWR instructions.
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope scope(isolate);
-
- typedef struct {
- int32_t reg_init;
- int32_t mem_init;
- int32_t lwl_0;
- int32_t lwl_1;
- int32_t lwl_2;
- int32_t lwl_3;
- int32_t lwr_0;
- int32_t lwr_1;
- int32_t lwr_2;
- int32_t lwr_3;
- int32_t swl_0;
- int32_t swl_1;
- int32_t swl_2;
- int32_t swl_3;
- int32_t swr_0;
- int32_t swr_1;
- int32_t swr_2;
- int32_t swr_3;
- } T;
- T t;
-
- Assembler assm(isolate, NULL, 0);
+ // Do not run test on MIPS64r6, as these instructions are removed.
+ if (kArchVariant != kMips64r6) {
+ // Test LWL, LWR, SWL and SWR instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ int32_t reg_init;
+ int32_t mem_init;
+ int32_t lwl_0;
+ int32_t lwl_1;
+ int32_t lwl_2;
+ int32_t lwl_3;
+ int32_t lwr_0;
+ int32_t lwr_1;
+ int32_t lwr_2;
+ int32_t lwr_3;
+ int32_t swl_0;
+ int32_t swl_1;
+ int32_t swl_2;
+ int32_t swl_3;
+ int32_t swr_0;
+ int32_t swr_1;
+ int32_t swr_2;
+ int32_t swr_3;
+ } T;
+ T t;
+
+ Assembler assm(isolate, NULL, 0);
+
+ // Test all combinations of LWL and vAddr.
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ lwl(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
+ __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwl_0)) );
+
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ lwl(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1) );
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwl_1)) );
+
+ __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ lwl(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2) );
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwl_2)) );
+
+ __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ lwl(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3) );
+ __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwl_3)) );
+
+ // Test all combinations of LWR and vAddr.
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ lwr(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
+ __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwr_0)) );
+
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ lwr(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1) );
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwr_1)) );
+
+ __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ lwr(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2) );
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwr_2)) );
+
+ __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ lwr(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3) );
+ __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwr_3)) );
+
+ // Test all combinations of SWL and vAddr.
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
+ __ sw(a4, MemOperand(a0, OFFSET_OF(T, swl_0)) );
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ swl(a4, MemOperand(a0, OFFSET_OF(T, swl_0)) );
+
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)) );
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, swl_1)) );
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ swl(a5, MemOperand(a0, OFFSET_OF(T, swl_1) + 1) );
+
+ __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)) );
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, swl_2)) );
+ __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ swl(a6, MemOperand(a0, OFFSET_OF(T, swl_2) + 2) );
+
+ __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)) );
+ __ sw(a7, MemOperand(a0, OFFSET_OF(T, swl_3)) );
+ __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ swl(a7, MemOperand(a0, OFFSET_OF(T, swl_3) + 3) );
+
+ // Test all combinations of SWR and vAddr.
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
+ __ sw(a4, MemOperand(a0, OFFSET_OF(T, swr_0)) );
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ swr(a4, MemOperand(a0, OFFSET_OF(T, swr_0)) );
+
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)) );
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, swr_1)) );
+ __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ swr(a5, MemOperand(a0, OFFSET_OF(T, swr_1) + 1) );
+
+ __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)) );
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, swr_2)) );
+ __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ swr(a6, MemOperand(a0, OFFSET_OF(T, swr_2) + 2) );
+
+ __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)) );
+ __ sw(a7, MemOperand(a0, OFFSET_OF(T, swr_3)) );
+ __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
+ __ swr(a7, MemOperand(a0, OFFSET_OF(T, swr_3) + 3) );
- // Test all combinations of LWL and vAddr.
- __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ lwl(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
- __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwl_0)) );
-
- __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ lwl(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1) );
- __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwl_1)) );
-
- __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ lwl(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2) );
- __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwl_2)) );
-
- __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ lwl(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3) );
- __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwl_3)) );
-
- // Test all combinations of LWR and vAddr.
- __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ lwr(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
- __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwr_0)) );
-
- __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ lwr(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1) );
- __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwr_1)) );
-
- __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ lwr(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2) );
- __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwr_2)) );
-
- __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ lwr(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3) );
- __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwr_3)) );
-
- // Test all combinations of SWL and vAddr.
- __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
- __ sw(a4, MemOperand(a0, OFFSET_OF(T, swl_0)) );
- __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ swl(a4, MemOperand(a0, OFFSET_OF(T, swl_0)) );
-
- __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)) );
- __ sw(a5, MemOperand(a0, OFFSET_OF(T, swl_1)) );
- __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ swl(a5, MemOperand(a0, OFFSET_OF(T, swl_1) + 1) );
-
- __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)) );
- __ sw(a6, MemOperand(a0, OFFSET_OF(T, swl_2)) );
- __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ swl(a6, MemOperand(a0, OFFSET_OF(T, swl_2) + 2) );
-
- __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)) );
- __ sw(a7, MemOperand(a0, OFFSET_OF(T, swl_3)) );
- __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ swl(a7, MemOperand(a0, OFFSET_OF(T, swl_3) + 3) );
-
- // Test all combinations of SWR and vAddr.
- __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)) );
- __ sw(a4, MemOperand(a0, OFFSET_OF(T, swr_0)) );
- __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ swr(a4, MemOperand(a0, OFFSET_OF(T, swr_0)) );
-
- __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)) );
- __ sw(a5, MemOperand(a0, OFFSET_OF(T, swr_1)) );
- __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ swr(a5, MemOperand(a0, OFFSET_OF(T, swr_1) + 1) );
-
- __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)) );
- __ sw(a6, MemOperand(a0, OFFSET_OF(T, swr_2)) );
- __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ swr(a6, MemOperand(a0, OFFSET_OF(T, swr_2) + 2) );
-
- __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)) );
- __ sw(a7, MemOperand(a0, OFFSET_OF(T, swr_3)) );
- __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)) );
- __ swr(a7, MemOperand(a0, OFFSET_OF(T, swr_3) + 3) );
-
- __ jr(ra);
- __ nop();
+ __ jr(ra);
+ __ nop();
- CodeDesc desc;
- assm.GetCode(&desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
- t.reg_init = 0xaabbccdd;
- t.mem_init = 0x11223344;
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ t.reg_init = 0xaabbccdd;
+ t.mem_init = 0x11223344;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
- USE(dummy);
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
- CHECK_EQ(0x44bbccdd, t.lwl_0);
- CHECK_EQ(0x3344ccdd, t.lwl_1);
- CHECK_EQ(0x223344dd, t.lwl_2);
- CHECK_EQ(0x11223344, t.lwl_3);
-
- CHECK_EQ(0x11223344, t.lwr_0);
- CHECK_EQ(0xaa112233, t.lwr_1);
- CHECK_EQ(0xaabb1122, t.lwr_2);
- CHECK_EQ(0xaabbcc11, t.lwr_3);
-
- CHECK_EQ(0x112233aa, t.swl_0);
- CHECK_EQ(0x1122aabb, t.swl_1);
- CHECK_EQ(0x11aabbcc, t.swl_2);
- CHECK_EQ(0xaabbccdd, t.swl_3);
-
- CHECK_EQ(0xaabbccdd, t.swr_0);
- CHECK_EQ(0xbbccdd44, t.swr_1);
- CHECK_EQ(0xccdd3344, t.swr_2);
- CHECK_EQ(0xdd223344, t.swr_3);
+ CHECK_EQ(0x44bbccdd, t.lwl_0);
+ CHECK_EQ(0x3344ccdd, t.lwl_1);
+ CHECK_EQ(0x223344dd, t.lwl_2);
+ CHECK_EQ(0x11223344, t.lwl_3);
+
+ CHECK_EQ(0x11223344, t.lwr_0);
+ CHECK_EQ(0xaa112233, t.lwr_1);
+ CHECK_EQ(0xaabb1122, t.lwr_2);
+ CHECK_EQ(0xaabbcc11, t.lwr_3);
+
+ CHECK_EQ(0x112233aa, t.swl_0);
+ CHECK_EQ(0x1122aabb, t.swl_1);
+ CHECK_EQ(0x11aabbcc, t.swl_2);
+ CHECK_EQ(0xaabbccdd, t.swl_3);
+
+ CHECK_EQ(0xaabbccdd, t.swr_0);
+ CHECK_EQ(0xbbccdd44, t.swr_1);
+ CHECK_EQ(0xccdd3344, t.swr_2);
+ CHECK_EQ(0xdd223344, t.swr_3);
+ }
}
COMPARE(dsubu(v0, v1, s0),
"0070102f dsubu v0, v1, s0");
- COMPARE(mult(a0, a1),
- "00850018 mult a0, a1");
- COMPARE(dmult(a0, a1),
- "0085001c dmult a0, a1");
- COMPARE(mult(a6, a7),
- "014b0018 mult a6, a7");
- COMPARE(dmult(a6, a7),
- "014b001c dmult a6, a7");
- COMPARE(mult(v0, v1),
- "00430018 mult v0, v1");
- COMPARE(dmult(v0, v1),
- "0043001c dmult v0, v1");
-
- COMPARE(multu(a0, a1),
- "00850019 multu a0, a1");
- COMPARE(dmultu(a0, a1),
- "0085001d dmultu a0, a1");
- COMPARE(multu(a6, a7),
- "014b0019 multu a6, a7");
- COMPARE(dmultu(a6, a7),
- "014b001d dmultu a6, a7");
- COMPARE(multu(v0, v1),
- "00430019 multu v0, v1");
- COMPARE(dmultu(v0, v1),
- "0043001d dmultu v0, v1");
-
- COMPARE(div(a0, a1),
- "0085001a div a0, a1");
- COMPARE(div(a6, a7),
- "014b001a div a6, a7");
- COMPARE(div(v0, v1),
- "0043001a div v0, v1");
- COMPARE(ddiv(a0, a1),
- "0085001e ddiv a0, a1");
- COMPARE(ddiv(a6, a7),
- "014b001e ddiv a6, a7");
- COMPARE(ddiv(v0, v1),
- "0043001e ddiv v0, v1");
-
- COMPARE(divu(a0, a1),
- "0085001b divu a0, a1");
- COMPARE(divu(a6, a7),
- "014b001b divu a6, a7");
- COMPARE(divu(v0, v1),
- "0043001b divu v0, v1");
- COMPARE(ddivu(a0, a1),
- "0085001f ddivu a0, a1");
- COMPARE(ddivu(a6, a7),
- "014b001f ddivu a6, a7");
- COMPARE(ddivu(v0, v1),
- "0043001f ddivu v0, v1");
-
- if (kArchVariant != kLoongson) {
+ if (kArchVariant != kMips64r6) {
+ COMPARE(mult(a0, a1),
+ "00850018 mult a0, a1");
+ COMPARE(dmult(a0, a1),
+ "0085001c dmult a0, a1");
+ COMPARE(mult(a6, a7),
+ "014b0018 mult a6, a7");
+ COMPARE(dmult(a6, a7),
+ "014b001c dmult a6, a7");
+ COMPARE(mult(v0, v1),
+ "00430018 mult v0, v1");
+ COMPARE(dmult(v0, v1),
+ "0043001c dmult v0, v1");
+
+ COMPARE(multu(a0, a1),
+ "00850019 multu a0, a1");
+ COMPARE(dmultu(a0, a1),
+ "0085001d dmultu a0, a1");
+ COMPARE(multu(a6, a7),
+ "014b0019 multu a6, a7");
+ COMPARE(dmultu(a6, a7),
+ "014b001d dmultu a6, a7");
+ COMPARE(multu(v0, v1),
+ "00430019 multu v0, v1");
+ COMPARE(dmultu(v0, v1),
+ "0043001d dmultu v0, v1");
+
+ COMPARE(div(a0, a1),
+ "0085001a div a0, a1");
+ COMPARE(div(a6, a7),
+ "014b001a div a6, a7");
+ COMPARE(div(v0, v1),
+ "0043001a div v0, v1");
+ COMPARE(ddiv(a0, a1),
+ "0085001e ddiv a0, a1");
+ COMPARE(ddiv(a6, a7),
+ "014b001e ddiv a6, a7");
+ COMPARE(ddiv(v0, v1),
+ "0043001e ddiv v0, v1");
+
+ COMPARE(divu(a0, a1),
+ "0085001b divu a0, a1");
+ COMPARE(divu(a6, a7),
+ "014b001b divu a6, a7");
+ COMPARE(divu(v0, v1),
+ "0043001b divu v0, v1");
+ COMPARE(ddivu(a0, a1),
+ "0085001f ddivu a0, a1");
+ COMPARE(ddivu(a6, a7),
+ "014b001f ddivu a6, a7");
+ COMPARE(ddivu(v0, v1),
+ "0043001f ddivu v0, v1");
COMPARE(mul(a0, a1, a2),
"70a62002 mul a0, a1, a2");
COMPARE(mul(a6, a7, t0),
"716c5002 mul a6, a7, t0");
COMPARE(mul(v0, v1, s0),
"70701002 mul v0, v1, s0");
+ } else { // MIPS64r6.
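+    // r6 re-encodes the multiply/divide family as three-operand SPECIAL
+    // instructions, so the expected machine words differ from the r2 forms
+    // above.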
+ COMPARE(mul(a0, a1, a2),
+ "00a62098 mul a0, a1, a2");
+ COMPARE(muh(a0, a1, a2),
+ "00a620d8 muh a0, a1, a2");
+ COMPARE(dmul(a0, a1, a2),
+ "00a6209c dmul a0, a1, a2");
+ COMPARE(dmuh(a0, a1, a2),
+ "00a620dc dmuh a0, a1, a2");
+ COMPARE(mul(a5, a6, a7),
+ "014b4898 mul a5, a6, a7");
+ COMPARE(muh(a5, a6, a7),
+ "014b48d8 muh a5, a6, a7");
+ COMPARE(dmul(a5, a6, a7),
+ "014b489c dmul a5, a6, a7");
+ COMPARE(dmuh(a5, a6, a7),
+ "014b48dc dmuh a5, a6, a7");
+ COMPARE(mul(v0, v1, a0),
+ "00641098 mul v0, v1, a0");
+ COMPARE(muh(v0, v1, a0),
+ "006410d8 muh v0, v1, a0");
+ COMPARE(dmul(v0, v1, a0),
+ "0064109c dmul v0, v1, a0");
+ COMPARE(dmuh(v0, v1, a0),
+ "006410dc dmuh v0, v1, a0");
+
+ COMPARE(mulu(a0, a1, a2),
+ "00a62099 mulu a0, a1, a2");
+ COMPARE(muhu(a0, a1, a2),
+ "00a620d9 muhu a0, a1, a2");
+ COMPARE(dmulu(a0, a1, a2),
+ "00a6209d dmulu a0, a1, a2");
+ COMPARE(dmuhu(a0, a1, a2),
+ "00a620dd dmuhu a0, a1, a2");
+ COMPARE(mulu(a5, a6, a7),
+ "014b4899 mulu a5, a6, a7");
+ COMPARE(muhu(a5, a6, a7),
+ "014b48d9 muhu a5, a6, a7");
+ COMPARE(dmulu(a5, a6, a7),
+ "014b489d dmulu a5, a6, a7");
+ COMPARE(dmuhu(a5, a6, a7),
+ "014b48dd dmuhu a5, a6, a7");
+ COMPARE(mulu(v0, v1, a0),
+ "00641099 mulu v0, v1, a0");
+ COMPARE(muhu(v0, v1, a0),
+ "006410d9 muhu v0, v1, a0");
+ COMPARE(dmulu(v0, v1, a0),
+ "0064109d dmulu v0, v1, a0");
+ COMPARE(dmuhu(v0, v1, a0),
+ "006410dd dmuhu v0, v1, a0");
+
+ COMPARE(div(a0, a1, a2),
+ "00a6209a div a0, a1, a2");
+ COMPARE(mod(a0, a1, a2),
+ "00a620da mod a0, a1, a2");
+ COMPARE(ddiv(a0, a1, a2),
+ "00a6209e ddiv a0, a1, a2");
+ COMPARE(dmod(a0, a1, a2),
+ "00a620de dmod a0, a1, a2");
+ COMPARE(div(a5, a6, a7),
+ "014b489a div a5, a6, a7");
+ COMPARE(mod(a5, a6, a7),
+ "014b48da mod a5, a6, a7");
+ COMPARE(ddiv(a5, a6, a7),
+ "014b489e ddiv a5, a6, a7");
+ COMPARE(dmod(a5, a6, a7),
+ "014b48de dmod a5, a6, a7");
+ COMPARE(div(v0, v1, a0),
+ "0064109a div v0, v1, a0");
+ COMPARE(mod(v0, v1, a0),
+ "006410da mod v0, v1, a0");
+ COMPARE(ddiv(v0, v1, a0),
+ "0064109e ddiv v0, v1, a0");
+ COMPARE(dmod(v0, v1, a0),
+ "006410de dmod v0, v1, a0");
+
+ COMPARE(divu(a0, a1, a2),
+ "00a6209b divu a0, a1, a2");
+ COMPARE(modu(a0, a1, a2),
+ "00a620db modu a0, a1, a2");
+ COMPARE(ddivu(a0, a1, a2),
+ "00a6209f ddivu a0, a1, a2");
+ COMPARE(dmodu(a0, a1, a2),
+ "00a620df dmodu a0, a1, a2");
+ COMPARE(divu(a5, a6, a7),
+ "014b489b divu a5, a6, a7");
+ COMPARE(modu(a5, a6, a7),
+ "014b48db modu a5, a6, a7");
+ COMPARE(ddivu(a5, a6, a7),
+ "014b489f ddivu a5, a6, a7");
+ COMPARE(dmodu(a5, a6, a7),
+ "014b48df dmodu a5, a6, a7");
+ COMPARE(divu(v0, v1, a0),
+ "0064109b divu v0, v1, a0");
+ COMPARE(modu(v0, v1, a0),
+ "006410db modu v0, v1, a0");
+ COMPARE(ddivu(v0, v1, a0),
+ "0064109f ddivu v0, v1, a0");
+ COMPARE(dmodu(v0, v1, a0),
+ "006410df dmodu v0, v1, a0");
+
+ COMPARE(bovc(a0, a0, static_cast<int16_t>(0)),
+ "20840000 bovc a0, a0, 0");
+ COMPARE(bovc(a1, a0, static_cast<int16_t>(0)),
+ "20a40000 bovc a1, a0, 0");
+ COMPARE(bovc(a1, a0, 32767),
+ "20a47fff bovc a1, a0, 32767");
+ COMPARE(bovc(a1, a0, -32768),
+ "20a48000 bovc a1, a0, -32768");
+
+ COMPARE(bnvc(a0, a0, static_cast<int16_t>(0)),
+ "60840000 bnvc a0, a0, 0");
+ COMPARE(bnvc(a1, a0, static_cast<int16_t>(0)),
+ "60a40000 bnvc a1, a0, 0");
+ COMPARE(bnvc(a1, a0, 32767),
+ "60a47fff bnvc a1, a0, 32767");
+ COMPARE(bnvc(a1, a0, -32768),
+ "60a48000 bnvc a1, a0, -32768");
+
+ COMPARE(beqzc(a0, 0),
+ "d8800000 beqzc a0, 0x0");
+ COMPARE(beqzc(a0, 0xfffff), // 0x0fffff == 1048575.
+ "d88fffff beqzc a0, 0xfffff");
+ COMPARE(beqzc(a0, 0x100000), // 0x100000 == -1048576.
+ "d8900000 beqzc a0, 0x100000");
+
+ COMPARE(bnezc(a0, 0),
+ "f8800000 bnezc a0, 0x0");
+ COMPARE(bnezc(a0, 0xfffff), // 0x0fffff == 1048575.
+ "f88fffff bnezc a0, 0xfffff");
+ COMPARE(bnezc(a0, 0x100000), // 0x100000 == -1048576.
+ "f8900000 bnezc a0, 0x100000");
}
COMPARE(addiu(a0, a1, 0x0),
"2d6a8000 sltiu a6, a7, -32768");
COMPARE(sltiu(v0, v1, -1),
"2c62ffff sltiu v0, v1, -1");
-
- if (kArchVariant != kLoongson) {
- COMPARE(movz(a0, a1, a2),
- "00a6200a movz a0, a1, a2");
- COMPARE(movz(s0, s1, s2),
- "0232800a movz s0, s1, s2");
- COMPARE(movz(a6, a7, t0),
- "016c500a movz a6, a7, t0");
- COMPARE(movz(v0, v1, a2),
- "0066100a movz v0, v1, a2");
- COMPARE(movn(a0, a1, a2),
- "00a6200b movn a0, a1, a2");
- COMPARE(movn(s0, s1, s2),
- "0232800b movn s0, s1, s2");
- COMPARE(movn(a6, a7, t0),
- "016c500b movn a6, a7, t0");
- COMPARE(movn(v0, v1, a2),
- "0066100b movn v0, v1, a2");
-
- COMPARE(movt(a0, a1, 1),
- "00a52001 movt a0, a1, 1");
- COMPARE(movt(s0, s1, 2),
- "02298001 movt s0, s1, 2");
- COMPARE(movt(a6, a7, 3),
- "016d5001 movt a6, a7, 3");
- COMPARE(movt(v0, v1, 7),
- "007d1001 movt v0, v1, 7");
- COMPARE(movf(a0, a1, 0),
- "00a02001 movf a0, a1, 0");
- COMPARE(movf(s0, s1, 4),
- "02308001 movf s0, s1, 4");
- COMPARE(movf(a6, a7, 5),
- "01745001 movf a6, a7, 5");
- COMPARE(movf(v0, v1, 6),
- "00781001 movf v0, v1, 6");
-
+ COMPARE(movz(a0, a1, a2),
+ "00a6200a movz a0, a1, a2");
+ COMPARE(movz(s0, s1, s2),
+ "0232800a movz s0, s1, s2");
+ COMPARE(movz(a6, a7, t0),
+ "016c500a movz a6, a7, t0");
+ COMPARE(movz(v0, v1, a2),
+ "0066100a movz v0, v1, a2");
+ COMPARE(movn(a0, a1, a2),
+ "00a6200b movn a0, a1, a2");
+ COMPARE(movn(s0, s1, s2),
+ "0232800b movn s0, s1, s2");
+ COMPARE(movn(a6, a7, t0),
+ "016c500b movn a6, a7, t0");
+ COMPARE(movn(v0, v1, a2),
+ "0066100b movn v0, v1, a2");
+
+ COMPARE(movt(a0, a1, 1),
+ "00a52001 movt a0, a1, 1");
+ COMPARE(movt(s0, s1, 2),
+ "02298001 movt s0, s1, 2");
+ COMPARE(movt(a6, a7, 3),
+ "016d5001 movt a6, a7, 3");
+ COMPARE(movt(v0, v1, 7),
+ "007d1001 movt v0, v1, 7");
+ COMPARE(movf(a0, a1, 0),
+ "00a02001 movf a0, a1, 0");
+ COMPARE(movf(s0, s1, 4),
+ "02308001 movf s0, s1, 4");
+ COMPARE(movf(a6, a7, 5),
+ "01745001 movf a6, a7, 5");
+ COMPARE(movf(v0, v1, 6),
+ "00781001 movf v0, v1, 6");
+
+ if (kArchVariant == kMips64r6) {
+ COMPARE(clz(a0, a1),
+ "00a02050 clz a0, a1");
+ COMPARE(clz(s6, s7),
+ "02e0b050 clz s6, s7");
+ COMPARE(clz(v0, v1),
+ "00601050 clz v0, v1");
+ } else {
COMPARE(clz(a0, a1),
"70a42020 clz a0, a1");
    COMPARE(clz(s6, s7),
            "72f6b020 clz s6, s7");
    COMPARE(clz(v0, v1),
            "70621020 clz v0, v1");
}
- if (kArchVariant == kMips64r2) {
- COMPARE(ins_(a0, a1, 31, 1),
- "7ca4ffc4 ins a0, a1, 31, 1");
- COMPARE(ins_(s6, s7, 30, 2),
- "7ef6ff84 ins s6, s7, 30, 2");
- COMPARE(ins_(v0, v1, 0, 32),
- "7c62f804 ins v0, v1, 0, 32");
- COMPARE(ext_(a0, a1, 31, 1),
- "7ca407c0 ext a0, a1, 31, 1");
- COMPARE(ext_(s6, s7, 30, 2),
- "7ef60f80 ext s6, s7, 30, 2");
- COMPARE(ext_(v0, v1, 0, 32),
- "7c62f800 ext v0, v1, 0, 32");
- }
+ COMPARE(ins_(a0, a1, 31, 1),
+ "7ca4ffc4 ins a0, a1, 31, 1");
+ COMPARE(ins_(s6, s7, 30, 2),
+ "7ef6ff84 ins s6, s7, 30, 2");
+ COMPARE(ins_(v0, v1, 0, 32),
+ "7c62f804 ins v0, v1, 0, 32");
+ COMPARE(ext_(a0, a1, 31, 1),
+ "7ca407c0 ext a0, a1, 31, 1");
+ COMPARE(ext_(s6, s7, 30, 2),
+ "7ef60f80 ext s6, s7, 30, 2");
+ COMPARE(ext_(v0, v1, 0, 32),
+ "7c62f800 ext v0, v1, 0, 32");
VERIFY_RUN();
}
# Currently always deopt on minus zero
'math-floor-of-div-minus-zero': [SKIP],
+
+ # BUG(v8:3457).
+ 'deserialize-reference': [SKIP],
}], # 'arch == mips64el'
['arch == mips64el and simulator_run == False', {