# Default arch variant for MIPS.
'mips_arch_variant%': 'r2',
+ # Possible values fp32, fp64, fpxx.
+ # fp32 - 32 32-bit FPU registers are available, doubles are placed in
+ # register pairs.
+ # fp64 - 32 64-bit FPU registers are available.
+ # fpxx - compatibility mode, it chooses fp32 or fp64 depending on runtime
+ # detection
+ 'mips_fpu_mode%': 'fp32',
+
'v8_enable_backtrace%': 0,
# Enable profiling support. Only required on Windows.
'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
+ ['mips_fpu_mode=="fp64"', {
+ 'cflags': ['-mfp64'],
+ }],
+ ['mips_fpu_mode=="fpxx"', {
+ 'cflags': ['-mfpxx'],
+ }],
+ ['mips_fpu_mode=="fp32"', {
+ 'cflags': ['-mfp32'],
+ }],
+ ['mips_arch_variant=="r6"', {
+ 'cflags!': ['-mfp32'],
+ 'cflags': ['-mips32r6', '-Wa,-mips32r6'],
+ 'ldflags': [
+ '-mips32r6',
+ '-Wl,--dynamic-linker=$(LDSO_PATH)',
+ '-Wl,--rpath=$(LD_R_PATH)',
+ ],
+ }],
['mips_arch_variant=="r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
}],
['mips_arch_variant=="r1"', {
+ 'cflags!': ['-mfp64'],
+ 'cflags': ['-mips32', '-Wa,-mips32'],
+ }],
+ ['mips_arch_variant=="rx"', {
+ 'cflags!': ['-mfp64'],
'cflags': ['-mips32', '-Wa,-mips32'],
}],
],
'__mips_soft_float=1'
],
}],
+ ['mips_arch_variant=="rx"', {
+ 'defines': ['_MIPS_ARCH_MIPS32RX',],
+ 'defines': ['FPU_MODE_FPXX',],
+ }],
+ ['mips_arch_variant=="r6"', {
+ 'defines': ['_MIPS_ARCH_MIPS32R6', 'FPU_MODE_FP64',],
+ }],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS32R2',],
+ 'conditions': [
+ ['mips_fpu_mode=="fp64"', {
+ 'defines': ['FPU_MODE_FP64',],
+ }],
+ ['mips_fpu_mode=="fpxx"', {
+ 'defines': ['FPU_MODE_FPXX',],
+ }],
+ ['mips_fpu_mode=="fp32"', {
+ 'defines': ['FPU_MODE_FP32',],
+ }],
+ ],
+ }],
+ ['mips_arch_variant=="r1"', {
+ 'defines': ['FPU_MODE_FP32',],
}],
],
}], # v8_target_arch=="mips"
'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
+ ['mips_fpu_mode=="fp64"', {
+ 'cflags': ['-mfp64'],
+ }],
+ ['mips_fpu_mode=="fpxx"', {
+ 'cflags': ['-mfpxx'],
+ }],
+ ['mips_fpu_mode=="fp32"', {
+ 'cflags': ['-mfp32'],
+ }],
+ ['mips_arch_variant=="r6"', {
+ 'cflags!': ['-mfp32'],
+ 'cflags': ['-mips32r6', '-Wa,-mips32r6'],
+ 'ldflags': [
+ '-mips32r6',
+ '-Wl,--dynamic-linker=$(LDSO_PATH)',
+ '-Wl,--rpath=$(LD_R_PATH)',
+ ],
+ }],
['mips_arch_variant=="r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
}],
['mips_arch_variant=="r1"', {
+ 'cflags!': ['-mfp64'],
+ 'cflags': ['-mips32', '-Wa,-mips32'],
+ }],
+ ['mips_arch_variant=="rx"', {
+ 'cflags!': ['-mfp64'],
'cflags': ['-mips32', '-Wa,-mips32'],
- }],
+ }],
['mips_arch_variant=="loongson"', {
+ 'cflags!': ['-mfp64'],
'cflags': ['-mips3', '-Wa,-mips3'],
}],
],
'__mips_soft_float=1'
],
}],
+ ['mips_arch_variant=="rx"', {
+ 'defines': ['_MIPS_ARCH_MIPS32RX',],
+ 'defines': ['FPU_MODE_FPXX',],
+ }],
+ ['mips_arch_variant=="r6"', {
+ 'defines': ['_MIPS_ARCH_MIPS32R6', 'FPU_MODE_FP64',],
+ }],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS32R2',],
+ 'conditions': [
+ ['mips_fpu_mode=="fp64"', {
+ 'defines': ['FPU_MODE_FP64',],
+ }],
+ ['mips_fpu_mode=="fpxx"', {
+ 'defines': ['FPU_MODE_FPXX',],
+ }],
+ ['mips_fpu_mode=="fp32"', {
+ 'defines': ['FPU_MODE_FP32',],
+ }],
+ ],
+ }],
+ ['mips_arch_variant=="r1"', {
+ 'defines': ['FPU_MODE_FP32',],
}],
['mips_arch_variant=="loongson"', {
- 'defines': ['_MIPS_ARCH_LOONGSON',],
+ # Merged into one list: a duplicate 'defines' key in the same dict would
+ # have silently dropped _MIPS_ARCH_LOONGSON.
+ 'defines': ['_MIPS_ARCH_LOONGSON', 'FPU_MODE_FP32',],
}],
],
}], # v8_target_arch=="mipsel"
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
- "ll %0, %5\n" // prev = *ptr
- "bne %0, %3, 2f\n" // if (prev != old_value) goto 2
- "move %2, %4\n" // tmp = new_value
- "sc %2, %1\n" // *ptr = tmp (with atomic check)
- "beqz %2, 1b\n" // start again on atomic error
+ "ll %0, 0(%4)\n" // prev = *ptr
+ "bne %0, %2, 2f\n" // if (prev != old_value) goto 2
+ "move %1, %3\n" // tmp = new_value
+ "sc %1, 0(%4)\n" // *ptr = tmp (with atomic check)
+ "beqz %1, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
"2:\n"
".set pop\n"
- : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
- : "Ir" (old_value), "r" (new_value), "m" (*ptr)
+ : "=&r" (prev), "=&r" (tmp)
+ : "Ir" (old_value), "r" (new_value), "r" (ptr)
: "memory");
return prev;
}
Atomic32 temp, old;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
+ ".set at\n"
"1:\n"
- "ll %1, %2\n" // old = *ptr
- "move %0, %3\n" // temp = new_value
- "sc %0, %2\n" // *ptr = temp (with atomic check)
+ "ll %1, 0(%3)\n" // old = *ptr
+ "move %0, %2\n" // temp = new_value
+ "sc %0, 0(%3)\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
".set pop\n"
- : "=&r" (temp), "=&r" (old), "=m" (*ptr)
- : "r" (new_value), "m" (*ptr)
+ : "=&r" (temp), "=&r" (old)
+ : "r" (new_value), "r" (ptr)
: "memory");
return old;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
- "ll %0, %2\n" // temp = *ptr
- "addu %1, %0, %3\n" // temp2 = temp + increment
- "sc %1, %2\n" // *ptr = temp2 (with atomic check)
+ "ll %0, 0(%3)\n" // temp = *ptr
+ "addu %1, %0, %2\n" // temp2 = temp + increment
+ "sc %1, 0(%3)\n" // *ptr = temp2 (with atomic check)
"beqz %1, 1b\n" // start again on atomic error
- "addu %1, %0, %3\n" // temp2 = temp + increment
+ "addu %1, %0, %2\n" // temp2 = temp + increment
".set pop\n"
- : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
- : "Ir" (increment), "m" (*ptr)
+ : "=&r" (temp), "=&r" (temp2)
+ : "Ir" (increment), "r" (ptr)
: "memory");
// temp2 now holds the final value.
return temp2;
#endif // V8_HOST_ARCH_ARM
+#if V8_HOST_ARCH_MIPS
+// Runtime probe for the FPU register model: returns 1 when the CPU runs
+// with 64-bit FPU registers (FR=1 / fp64 mode), 0 when registers are
+// paired 32-bit (FR=0 / fp32 mode).
+// NOTE(review): a leading "__" is reserved for the implementation in C;
+// consider renaming to detect_fp64_mode.
+int __detect_fp64_mode(void) {
+ double result = 0;
+ // Bit representation of (double)1 is 0x3FF0000000000000.
+ // Writing 0x3FF0 via $f1: in fp32 mode $f0/$f1 form one 64-bit pair, so
+ // the upper word of `result`'s image is set and it reads back as 1.0; in
+ // fp64 mode $f1 is an independent register and `result` stays 0.
+ asm(
+ "lui $t0, 0x3FF0\n\t"
+ "ldc1 $f0, %0\n\t"
+ "mtc1 $t0, $f1\n\t"
+ "sdc1 $f0, %0\n\t"
+ : "+m" (result)
+ : : "t0", "$f0", "$f1", "memory");
+
+ return !(result == 1);
+}
+
+
+// Returns the MIPS architecture revision of the host CPU. Currently a
+// stub that always reports revision 1 (see TODO below).
+// NOTE(review): "__" prefix is reserved for the implementation; consider
+// renaming to detect_mips_arch_revision.
+int __detect_mips_arch_revision(void) {
+ // TODO(dusmil): Do the specific syscall as soon as it is implemented in mips
+ // kernel. Currently fail-back to the least common denominator which is
+ // mips32 revision 1.
+ return 1;
+}
+#endif
+
// Extract the information exposed by the kernel via /proc/cpuinfo.
class CPUInfo V8_FINAL {
public:
char* cpu_model = cpu_info.ExtractField("cpu model");
has_fpu_ = HasListItem(cpu_model, "FPU");
delete[] cpu_model;
+#ifdef V8_HOST_ARCH_MIPS
+ is_fp64_mode_ = __detect_fp64_mode();
+ architecture_ = __detect_mips_arch_revision();
+#endif
#elif V8_HOST_ARCH_ARM64
bool has_vfp3() const { return has_vfp3_; }
bool has_vfp3_d32() const { return has_vfp3_d32_; }
+ // mips features
+ bool is_fp64_mode() const { return is_fp64_mode_; }
+
private:
char vendor_[13];
int stepping_;
bool has_vfp_;
bool has_vfp3_;
bool has_vfp3_d32_;
+ bool is_fp64_mode_;
};
} } // namespace v8::base
MOVW_MOVT_IMMEDIATE_LOADS,
VFP32DREGS,
NEON,
- // MIPS
+ // MIPS, MIPS64
FPU,
+ FP64,
+ MIPSr1,
+ MIPSr2,
+ MIPSr6,
// ARM64
ALWAYS_ALIGN_CSP,
NUMBER_OF_CPU_FEATURES
#ifndef __mips__
// For the simulator build, use FPU.
supported_ |= 1u << FPU;
+#if defined(_MIPS_ARCH_MIPS32R6)
+ // FP64 mode is implied on r6.
+ supported_ |= 1u << FP64;
+#endif
+#if defined(FPU_MODE_FP64)
+ supported_ |= 1u << FP64;
+#endif
#else
// Probe for additional features at runtime.
base::CPU cpu;
if (cpu.has_fpu()) supported_ |= 1u << FPU;
+#if defined(FPU_MODE_FPXX)
+ if (cpu.is_fp64_mode()) supported_ |= 1u << FP64;
+#elif defined(FPU_MODE_FP64)
+ supported_ |= 1u << FP64;
+#endif
+#if defined(_MIPS_ARCH_MIPS32RX)
+ if (cpu.architecture() == 6) {
+ supported_ |= 1u << MIPSr6;
+ } else if (cpu.architecture() == 2) {
+ supported_ |= 1u << MIPSr1;
+ supported_ |= 1u << MIPSr2;
+ } else {
+ supported_ |= 1u << MIPSr1;
+ }
+#endif
#endif
}
opcode == BGTZL ||
(opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
rt_field == BLTZAL || rt_field == BGEZAL)) ||
- (opcode == COP1 && rs_field == BC1); // Coprocessor branch.
+ (opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
+ (opcode == COP1 && rs_field == BC1EQZ) ||
+ (opcode == COP1 && rs_field == BC1NEZ);
}
bool Assembler::IsJr(Instr instr) {
- return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
+ if (!IsMipsArchVariant(kMips32r6)) {
+ return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
+ } else {
+ return GetOpcodeField(instr) == SPECIAL &&
+ GetRdField(instr) == 0 && GetFunctionField(instr) == JALR;
+ }
}
bool Assembler::IsJalr(Instr instr) {
- return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
+ return GetOpcodeField(instr) == SPECIAL &&
+ GetRdField(instr) != 0 && GetFunctionField(instr) == JALR;
}
}
+// Byte offset from the current position to label L for r6 compact
+// branches. Compact branches have no delay slot, so the offset is taken
+// relative to pc_offset() rather than pc + kBranchPCOffset as in
+// branch_offset(). Unbound labels are linked into the label chain and
+// kEndOfChain is returned; the offset is patched in at bind time.
+int32_t Assembler::branch_offset_compact(Label* L,
+ bool jump_elimination_allowed) {
+ int32_t target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ // Label already has a chain of uses; append this site to it.
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
+ }
+ }
+
+ int32_t offset = target_pos - pc_offset();
+ DCHECK((offset & 3) == 0);
+ DCHECK(is_int16(offset >> 2));
+
+ return offset;
+}
+
+
+// Byte offset to label L for branches with a 21-bit word-offset field
+// (bc1eqz/bc1nez). As with branch_offset(), the offset is relative to the
+// delay-slot pc (pc_offset() + kBranchPCOffset). Unbound labels are linked
+// into the chain and kEndOfChain is returned.
+int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
+ int32_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ // Append this use to the label's existing chain.
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
+ }
+ }
+
+ int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
+ DCHECK((offset & 3) == 0);
+ // Must fit the *signed* 21-bit word-offset field. The previous check,
+ // ((offset >> 2) & 0xFFE00000) == 0, rejected every backward (negative)
+ // branch because the arithmetic shift sign-extends.
+ DCHECK(offset >= -(1 << 22) && offset <= (1 << 22) - 4);
+
+ return offset;
+}
+
+
+// Like branch_offset_compact() but for the 21-bit word-offset field of the
+// compact branches beqzc/bnezc; no delay slot, so the offset is relative
+// to pc_offset(). Unbound labels are linked and kEndOfChain is returned.
+int32_t Assembler::branch_offset21_compact(Label* L,
+ bool jump_elimination_allowed) {
+ int32_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ // Append this use to the label's existing chain.
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
+ }
+ }
+
+ int32_t offset = target_pos - pc_offset();
+ DCHECK((offset & 3) == 0);
+ // Must fit the *signed* 21-bit word-offset field. The previous check,
+ // ((offset >> 2) & 0xFFe00000) == 0, rejected every backward (negative)
+ // branch because the arithmetic shift sign-extends.
+ DCHECK(offset >= -(1 << 22) && offset <= (1 << 22) - 4);
+
+ return offset;
+}
+
+
void Assembler::label_at_put(Label* L, int at_offset) {
int target_pos;
if (L->is_bound()) {
}
+// Compact branch (r6, no delay slot): branch if rt >= 0.
+// Encoded in the BLEZL major-opcode slot with rs == rt != 0; r6 reuses
+// the retired branch-likely opcodes for the compact branch family.
+void Assembler::bgezc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BLEZL, rt, rt, offset);
+}
+
+
+void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs.code() != rt.code());
+ GenInstrImmediate(BLEZ, rs, rt, offset);
+}
+
+
+void Assembler::bgec(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs.code() != rt.code());
+ GenInstrImmediate(BLEZL, rs, rt, offset);
+}
+
+
void Assembler::bgezal(Register rs, int16_t offset) {
+ DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
BlockTrampolinePoolScope block_trampoline_pool(this);
positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
}
+void Assembler::bgtzc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BGTZL, zero_reg, rt, offset);
+}
+
+
void Assembler::blez(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BLEZ, rs, zero_reg, offset);
}
+void Assembler::blezc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BLEZL, zero_reg, rt, offset);
+}
+
+
+void Assembler::bltzc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BGTZL, rt, rt, offset);
+}
+
+
+void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs.code() != rt.code());
+ GenInstrImmediate(BGTZ, rs, rt, offset);
+}
+
+
+void Assembler::bltc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs.code() != rt.code());
+ GenInstrImmediate(BGTZL, rs, rt, offset);
+}
+
+
void Assembler::bltz(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BLTZ, offset);
void Assembler::bltzal(Register rs, int16_t offset) {
+ DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
BlockTrampolinePoolScope block_trampoline_pool(this);
positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
}
+// Compact branch on signed-add overflow of rs + rt (r6). Encoded in the
+// ADDI major-opcode slot, which r6 repurposes for BOVC; the encoding
+// requires rs.code() >= rt.code().
+void Assembler::bovc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(rs.code() >= rt.code());
+ GenInstrImmediate(ADDI, rs, rt, offset);
+}
+
+
+void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(rs.code() >= rt.code());
+ GenInstrImmediate(DADDI, rs, rt, offset);
+}
+
+
+void Assembler::blezalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BLEZ, zero_reg, rt, offset);
+}
+
+
+void Assembler::bgezalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BLEZ, rt, rt, offset);
+}
+
+
+// Branch-and-link if rs >= 0, branch-LIKELY variant (delay slot annulled
+// when not taken). Branch-likely encodings were removed in MIPS32r6, so
+// this must never be emitted for r6 -- the original DCHECK asserted the
+// exact opposite (IsMipsArchVariant(kMips32r6)).
+void Assembler::bgezall(Register rs, int16_t offset) {
+ DCHECK(!IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
+}
+
+
+void Assembler::bltzalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BGTZ, rt, rt, offset);
+}
+
+
+void Assembler::bgtzalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BGTZ, zero_reg, rt, offset);
+}
+
+
+void Assembler::beqzalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(ADDI, zero_reg, rt, offset);
+}
+
+
+void Assembler::bnezalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(DADDI, zero_reg, rt, offset);
+}
+
+
+void Assembler::beqc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(rs.code() < rt.code());
+ GenInstrImmediate(ADDI, rs, rt, offset);
+}
+
+
+// Compact branch (r6): branch if rs == 0, 21-bit word offset, no delay
+// slot.
+void Assembler::beqzc(Register rs, int32_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ // Mask to the 21-bit immediate field: a negative (backward) offset is
+ // sign-extended in `offset`, and OR-ing it unmasked would corrupt the
+ // rs and opcode bits of the instruction word.
+ Instr instr = BEQZC | (rs.code() << kRsShift) | (offset & ((1 << 21) - 1));
+ emit(instr);
+}
+
+
+void Assembler::bnec(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(rs.code() < rt.code());
+ GenInstrImmediate(DADDI, rs, rt, offset);
+}
+
+
+// Compact branch (r6): branch if rs != 0, 21-bit word offset, no delay
+// slot.
+void Assembler::bnezc(Register rs, int32_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ // Mask to the 21-bit immediate field: a negative (backward) offset is
+ // sign-extended in `offset`, and OR-ing it unmasked would corrupt the
+ // rs and opcode bits of the instruction word.
+ Instr instr = BNEZC | (rs.code() << kRsShift) | (offset & ((1 << 21) - 1));
+ emit(instr);
+}
+
+
void Assembler::j(int32_t target) {
#if DEBUG
// Get pc of delay slot.
void Assembler::jr(Register rs) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (rs.is(ra)) {
- positions_recorder()->WriteRecordedPositions();
+ if (!IsMipsArchVariant(kMips32r6)) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (rs.is(ra)) {
+ positions_recorder()->WriteRecordedPositions();
+ }
+ GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
+ } else {
+ jalr(rs, zero_reg);
}
- GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
- BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::mul(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
+ } else {
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
+ }
+}
+
+
+void Assembler::mulu(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
+}
+
+
+void Assembler::muh(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
+}
+
+
+void Assembler::muhu(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
+}
+
+
+void Assembler::mod(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
+}
+
+
+void Assembler::modu(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}
}
+void Assembler::div(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
+}
+
+
void Assembler::divu(Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}
+void Assembler::divu(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
+}
+
+
// Logical.
void Assembler::and_(Register rd, Register rs, Register rt) {
void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
// Should be called via MacroAssembler::Ror.
DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
emit(instr);
void Assembler::rotrv(Register rd, Register rt, Register rs) {
// Should be called via MacroAssembler::Ror.
DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
emit(instr);
}
+void Assembler::aui(Register rs, Register rt, int32_t j) {
+ // This instruction uses same opcode as 'lui'. The difference in encoding is
+ // 'lui' has zero reg. for rs field.
+ DCHECK(is_uint16(j));
+ GenInstrImmediate(LUI, rs, rt, j);
+}
+
+
// -------------Misc-instructions--------------
// Break / Trap instructions.
// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
- // Clz instr requires same GPR number in 'rd' and 'rt' fields.
- GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ // Clz instr requires same GPR number in 'rd' and 'rt' fields.
+ GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
+ } else {
+ GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
+ }
}
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ins.
// Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ext.
// Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}
void Assembler::pref(int32_t hint, const MemOperand& rs) {
- DCHECK(kArchVariant != kLoongson);
+ DCHECK(!IsMipsArchVariant(kLoongson));
DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
| (rs.offset_);
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// load to two 32-bit loads.
+ // NOTE(review): the fp64 path clobbers the assembler scratch register
+ // 'at' (mthc1 moves the exponent word into the high half of fd);
+ // callers must not rely on 'at' being live across this load.
- GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
- Register::kMantissaOffset);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
- Register::kExponentOffset);
+ if (IsFp64Mode()) {
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
+ GenInstrImmediate(LW, src.rm(), at, src.offset_ +
+ Register::kExponentOffset);
+ mthc1(at, fd);
+ } else {
+ // fp32 mode: even/odd register pair holds the double; load the
+ // exponent word into the odd partner register directly.
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
+ Register::kExponentOffset);
+ }
}
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// store to two 32-bit stores.
+ // NOTE(review): the fp64 path clobbers the assembler scratch register
+ // 'at' (mfhc1 copies the high half of fd into 'at' before the store);
+ // callers must not rely on 'at' being live across this store.
- GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
- Register::kMantissaOffset);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
- Register::kExponentOffset);
+ if (IsFp64Mode()) {
+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
+ mfhc1(at, fd);
+ GenInstrImmediate(SW, src.rm(), at, src.offset_ +
+ Register::kExponentOffset);
+ } else {
+ // fp32 mode: even/odd register pair holds the double; store the
+ // exponent word from the odd partner register directly.
+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
+ Register::kExponentOffset);
+ }
}
}
+void Assembler::mthc1(Register rt, FPURegister fs) {
+ GenInstrRegister(COP1, MTHC1, rt, fs, f0);
+}
+
+
void Assembler::mfc1(Register rt, FPURegister fs) {
GenInstrRegister(COP1, MFC1, rt, fs, f0);
}
+void Assembler::mfhc1(Register rt, FPURegister fs) {
+ GenInstrRegister(COP1, MFHC1, rt, fs, f0);
+}
+
+
void Assembler::ctc1(Register rt, FPUControlRegister fs) {
GenInstrRegister(COP1, CTC1, rt, fs);
}
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}
}
+void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
+}
+
+
+void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
+}
+
+
+void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
+}
+
+
+void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
+}
+
+
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
}
-// Conditions.
+// Conditions for >= MIPSr6.
+// r6 floating-point compare: CMP.cond.fmt fd, fs, ft. Replaces the pre-r6
+// c.cond.fmt/FCSR condition codes; the result lands in FPU register fd
+// (consumed by bc1eqz/bc1nez or sel).
+void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
+ FPURegister fd, FPURegister fs, FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ // The fmt field occupies the rs slot of the instruction word, so it may
+ // only carry bits inside that 5-bit field.
+ DCHECK((fmt & ~(31 << kRsShift)) == 0);
+ Instr instr = COP1 | fmt | ft.code() << kFtShift |
+ fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
+ emit(instr);
+}
+
+
+void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
+ emit(instr);
+}
+
+
+void Assembler::bc1nez(int16_t offset, FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
+ emit(instr);
+}
+
+
+// Conditions for < MIPSr6.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
FPURegister fs, FPURegister ft, uint16_t cc) {
DCHECK(is_uint3(cc));
// lui rt, upper-16.
// ori rt rt, lower-16.
*p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
- *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
+ *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
// The following code is an optimization for the common case of Call()
// or Jump() which is load to register, and jump through register:
if (IsJalr(instr3)) {
// Try to convert JALR to JAL.
if (in_range && GetRt(instr2) == GetRs(instr3)) {
- *(p+2) = JAL | target_field;
+ *(p + 2) = JAL | target_field;
patched_jump = true;
}
} else if (IsJr(instr3)) {
// Try to convert JR to J, skip returns (jr ra).
bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
- *(p+2) = J | target_field;
+ *(p + 2) = J | target_field;
patched_jump = true;
}
} else if (IsJal(instr3)) {
if (in_range) {
// We are patching an already converted JAL.
- *(p+2) = JAL | target_field;
+ *(p + 2) = JAL | target_field;
} else {
// Patch JAL, but out of range, revert to JALR.
// JALR rs reg is the rt reg specified in the ORI instruction.
} else if (IsJ(instr3)) {
if (in_range) {
// We are patching an already converted J (jump).
- *(p+2) = J | target_field;
+ *(p + 2) = J | target_field;
} else {
// Trying patch J, but out of range, just go back to JR.
// JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
uint32_t rs_field = GetRt(instr2) << kRsShift;
- *(p+2) = SPECIAL | rs_field | JR;
+ if (IsMipsArchVariant(kMips32r6)) {
+ *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
+ } else {
+ *(p + 2) = SPECIAL | rs_field | JR;
+ }
}
patched_jump = true;
}
uint32_t rs_field = GetRt(instr2) << kRsShift;
uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
- *(p+2) = SPECIAL | rs_field | rd_field | JALR;
+ *(p + 2) = SPECIAL | rs_field | rd_field | JALR;
patched = true;
} else if (IsJ(instr3)) {
DCHECK(GetOpcodeField(instr1) == LUI);
DCHECK(GetOpcodeField(instr2) == ORI);
uint32_t rs_field = GetRt(instr2) << kRsShift;
- *(p+2) = SPECIAL | rs_field | JR;
+ if (IsMipsArchVariant(kMips32r6)) {
+ *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
+ } else {
+ *(p + 2) = SPECIAL | rs_field | JR;
+ }
patched = true;
}
if (patched) {
- CpuFeatures::FlushICache(pc+2, sizeof(Address));
+ CpuFeatures::FlushICache(pc + 2, sizeof(Address));
}
}
#define kLithiumScratchReg2 s4
#define kLithiumScratchDouble f30
#define kDoubleRegZero f28
+// Used on mips32r6 for compare operations.
+#define kDoubleCompareReg f31
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
// position. Links the label to the current position if it is still unbound.
// Manages the jump elimination optimization if the second parameter is true.
int32_t branch_offset(Label* L, bool jump_elimination_allowed);
+ int32_t branch_offset_compact(Label* L, bool jump_elimination_allowed);
+ int32_t branch_offset21(Label* L, bool jump_elimination_allowed);
+ int32_t branch_offset21_compact(Label* L, bool jump_elimination_allowed);
int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
int32_t o = branch_offset(L, jump_elimination_allowed);
DCHECK((o & 3) == 0); // Assert the offset is aligned.
return o >> 2;
}
+ int32_t shifted_branch_offset_compact(Label* L,
+ bool jump_elimination_allowed) {
+ int32_t o = branch_offset_compact(L, jump_elimination_allowed);
+ DCHECK((o & 3) == 0); // Assert the offset is aligned.
+ return o >> 2;
+ }
uint32_t jump_address(Label* L);
// Puts a labels target address at the given position.
beq(rs, rt, branch_offset(L, false) >> 2);
}
void bgez(Register rs, int16_t offset);
+ void bgezc(Register rt, int16_t offset);
+ void bgezc(Register rt, Label* L) {
+ bgezc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void bgeuc(Register rs, Register rt, int16_t offset);
+ void bgeuc(Register rs, Register rt, Label* L) {
+ bgeuc(rs, rt, branch_offset_compact(L, false)>>2);
+ }
+ void bgec(Register rs, Register rt, int16_t offset);
+ void bgec(Register rs, Register rt, Label* L) {
+ bgec(rs, rt, branch_offset_compact(L, false)>>2);
+ }
void bgezal(Register rs, int16_t offset);
+ void bgezalc(Register rt, int16_t offset);
+ void bgezalc(Register rt, Label* L) {
+ bgezalc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void bgezall(Register rs, int16_t offset);
+ void bgezall(Register rs, Label* L) {
+ bgezall(rs, branch_offset(L, false)>>2);
+ }
void bgtz(Register rs, int16_t offset);
+ void bgtzc(Register rt, int16_t offset);
+ void bgtzc(Register rt, Label* L) {
+ bgtzc(rt, branch_offset_compact(L, false)>>2);
+ }
void blez(Register rs, int16_t offset);
+ void blezc(Register rt, int16_t offset);
+ void blezc(Register rt, Label* L) {
+ blezc(rt, branch_offset_compact(L, false)>>2);
+ }
void bltz(Register rs, int16_t offset);
+ void bltzc(Register rt, int16_t offset);
+ void bltzc(Register rt, Label* L) {
+ bltzc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void bltuc(Register rs, Register rt, int16_t offset);
+ void bltuc(Register rs, Register rt, Label* L) {
+ bltuc(rs, rt, branch_offset_compact(L, false)>>2);
+ }
+ void bltc(Register rs, Register rt, int16_t offset);
+ void bltc(Register rs, Register rt, Label* L) {
+ bltc(rs, rt, branch_offset_compact(L, false)>>2);
+ }
void bltzal(Register rs, int16_t offset);
+ void blezalc(Register rt, int16_t offset);
+ void blezalc(Register rt, Label* L) {
+ blezalc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void bltzalc(Register rt, int16_t offset);
+ void bltzalc(Register rt, Label* L) {
+ bltzalc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void bgtzalc(Register rt, int16_t offset);
+ void bgtzalc(Register rt, Label* L) {
+ bgtzalc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void beqzalc(Register rt, int16_t offset);
+ void beqzalc(Register rt, Label* L) {
+ beqzalc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void beqc(Register rs, Register rt, int16_t offset);
+ void beqc(Register rs, Register rt, Label* L) {
+ beqc(rs, rt, branch_offset_compact(L, false)>>2);
+ }
+ void beqzc(Register rs, int32_t offset);
+ void beqzc(Register rs, Label* L) {
+ beqzc(rs, branch_offset21_compact(L, false)>>2);
+ }
+ void bnezalc(Register rt, int16_t offset);
+ void bnezalc(Register rt, Label* L) {
+ bnezalc(rt, branch_offset_compact(L, false)>>2);
+ }
+ void bnec(Register rs, Register rt, int16_t offset);
+ void bnec(Register rs, Register rt, Label* L) {
+ bnec(rs, rt, branch_offset_compact(L, false)>>2);
+ }
+ void bnezc(Register rt, int32_t offset);
+ void bnezc(Register rt, Label* L) {
+ bnezc(rt, branch_offset21_compact(L, false)>>2);
+ }
void bne(Register rs, Register rt, int16_t offset);
void bne(Register rs, Register rt, Label* L) {
bne(rs, rt, branch_offset(L, false)>>2);
}
+ // bovc/bnvc -- MIPS32r6 compact branches; presumably branch on signed
+ // addition overflow / no-overflow of rs + rt. NOTE(review): confirm exact
+ // semantics against the MIPS32r6 ISA manual.
+ void bovc(Register rs, Register rt, int16_t offset);
+ void bovc(Register rs, Register rt, Label* L) {
+ bovc(rs, rt, branch_offset_compact(L, false)>>2);
+ }
+ void bnvc(Register rs, Register rt, int16_t offset);
+ void bnvc(Register rs, Register rt, Label* L) {
+ bnvc(rs, rt, branch_offset_compact(L, false)>>2);
+ }
// Never use the int16_t b(l)cond version with a branch offset
// instead of using the Label* version.
void multu(Register rs, Register rt);
void div(Register rs, Register rt);
void divu(Register rs, Register rt);
+ void div(Register rd, Register rs, Register rt);
+ void divu(Register rd, Register rs, Register rt);
+ void mod(Register rd, Register rs, Register rt);
+ void modu(Register rd, Register rs, Register rt);
void mul(Register rd, Register rs, Register rt);
+ void muh(Register rd, Register rs, Register rt);
+ void mulu(Register rd, Register rs, Register rt);
+ void muhu(Register rd, Register rs, Register rt);
void addiu(Register rd, Register rs, int32_t j);
void ori(Register rd, Register rs, int32_t j);
void xori(Register rd, Register rs, int32_t j);
void lui(Register rd, int32_t j);
+ void aui(Register rs, Register rt, int32_t j);
// Shifts.
// Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
void movt(Register rd, Register rs, uint16_t cc = 0);
void movf(Register rd, Register rs, uint16_t cc = 0);
+ void sel(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs, uint8_t sel);
+ void seleqz(Register rs, Register rt, Register rd);
+ void seleqz(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs);
+ void selnez(Register rs, Register rt, Register rd);
+ void selnez(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs);
+
// Bit twiddling.
void clz(Register rd, Register rs);
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
void sdc1(FPURegister fs, const MemOperand& dst);
void mtc1(Register rt, FPURegister fs);
+ void mthc1(Register rt, FPURegister fs);
+
void mfc1(Register rt, FPURegister fs);
+ void mfhc1(Register rt, FPURegister fs);
void ctc1(Register rt, FPUControlRegister fs);
void cfc1(Register rt, FPUControlRegister fs);
void ceil_l_s(FPURegister fd, FPURegister fs);
void ceil_l_d(FPURegister fd, FPURegister fs);
+ void min(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
+ void mina(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
+ void max(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
+ void maxa(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
+
void cvt_s_w(FPURegister fd, FPURegister fs);
void cvt_s_l(FPURegister fd, FPURegister fs);
void cvt_s_d(FPURegister fd, FPURegister fs);
void cvt_d_l(FPURegister fd, FPURegister fs);
void cvt_d_s(FPURegister fd, FPURegister fs);
- // Conditions and branches.
+ // Conditions and branches for MIPSr6.
+ void cmp(FPUCondition cond, SecondaryField fmt,
+ FPURegister fd, FPURegister ft, FPURegister fs);
+
+ void bc1eqz(int16_t offset, FPURegister ft);
+ void bc1eqz(Label* L, FPURegister ft) {
+ bc1eqz(branch_offset(L, false)>>2, ft);
+ }
+ void bc1nez(int16_t offset, FPURegister ft);
+ void bc1nez(Label* L, FPURegister ft) {
+ bc1nez(branch_offset(L, false)>>2, ft);
+ }
+
+ // Conditions and branches for non MIPSr6.
void c(FPUCondition cond, SecondaryField fmt,
FPURegister ft, FPURegister fs, uint16_t cc = 0);
// Check if LESS condition is satisfied. If true, move conditionally
// result to v0.
- __ c(OLT, D, f12, f14);
- __ Movt(v0, t0);
- // Use previous check to store conditionally to v0 oposite condition
- // (GREATER). If rhs is equal to lhs, this will be corrected in next
- // check.
- __ Movf(v0, t1);
- // Check if EQUAL condition is satisfied. If true, move conditionally
- // result to v0.
- __ c(EQ, D, f12, f14);
- __ Movt(v0, t2);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ __ c(OLT, D, f12, f14);
+ __ Movt(v0, t0);
+ // Use previous check to store conditionally to v0 oposite condition
+ // (GREATER). If rhs is equal to lhs, this will be corrected in next
+ // check.
+ __ Movf(v0, t1);
+ // Check if EQUAL condition is satisfied. If true, move conditionally
+ // result to v0.
+ __ c(EQ, D, f12, f14);
+ __ Movt(v0, t2);
+ } else {
+ Label skip;
+ __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
+ __ mov(v0, t0); // Return LESS as result.
+
+ __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
+ __ mov(v0, t2); // Return EQUAL as result.
+
+ __ mov(v0, t1); // Return GREATER as result.
+ __ bind(&skip);
+ }
__ Ret();
#if defined(V8_HOST_ARCH_MIPS)
MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
-#if defined(USE_SIMULATOR)
+#if defined(USE_SIMULATOR) || defined(_MIPS_ARCH_MIPS32R6) || \
+ defined(_MIPS_ARCH_MIPS32RX)
return stub;
#else
size_t actual_size;
case COP1: // Coprocessor instructions.
switch (RsFieldRawNoAssert()) {
case BC1: // Branch on coprocessor condition.
+ case BC1EQZ:
+ case BC1NEZ:
return kImmediateType;
default:
return kRegisterType;
case BLEZ:
case BGTZ:
case ADDI:
+ case DADDI:
case ADDIU:
case SLTI:
case SLTIU:
case BNEL:
case BLEZL:
case BGTZL:
+ case BEQZC:
+ case BNEZC:
case LB:
case LH:
case LWL:
#ifndef V8_MIPS_CONSTANTS_H_
#define V8_MIPS_CONSTANTS_H_
-
+#include "src/globals.h"
// UNIMPLEMENTED_ macro for MIPS.
#ifdef DEBUG
#define UNIMPLEMENTED_MIPS() \
#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
enum ArchVariants {
- kMips32r2,
- kMips32r1,
+ // Enumerator values are pinned to the v8::internal::MIPSr* constants so
+ // that, in FPXX/RX builds, IsMipsArchVariant(check) can forward the same
+ // value straight to CpuFeatures::IsSupported(check).
+ kMips32r1 = v8::internal::MIPSr1,
+ kMips32r2 = v8::internal::MIPSr2,
+ kMips32r6 = v8::internal::MIPSr6,
  kLoongson
};
#ifdef _MIPS_ARCH_MIPS32R2
static const ArchVariants kArchVariant = kMips32r2;
+#elif _MIPS_ARCH_MIPS32R6
+ static const ArchVariants kArchVariant = kMips32r6;
#elif _MIPS_ARCH_LOONGSON
// The loongson flag refers to the LOONGSON architectures based on MIPS-III,
// which predates (and is a subset of) the mips32r2 and r1 architectures.
static const ArchVariants kArchVariant = kLoongson;
+#elif _MIPS_ARCH_MIPS32RX
+// This flags referred to compatibility mode that creates universal code that
+// can run on any MIPS32 architecture revision. The dynamically generated code
+// by v8 is specialized for the MIPS host detected in runtime probing.
+ static const ArchVariants kArchVariant = kMips32r1;
#else
static const ArchVariants kArchVariant = kMips32r1;
#endif
#error Unknown endianness
#endif
+// FPU register model the generated code assumes. Selected at build time by
+// the FPU_MODE_* define (set from the gyp variable mips_fpu_mode):
+//   kFP32 - 32 32-bit FPU registers; doubles occupy register pairs.
+//   kFP64 - 32 64-bit FPU registers.
+//   kFPXX - compatibility mode; resolved by runtime probing (see
+//           IsFp64Mode() below).
+enum FpuMode {
+ kFP32,
+ kFP64,
+ kFPXX
+};
+
+#if defined(FPU_MODE_FP32)
+ static const FpuMode kFpuMode = kFP32;
+#elif defined(FPU_MODE_FP64)
+ static const FpuMode kFpuMode = kFP64;
+#elif defined(FPU_MODE_FPXX)
+ static const FpuMode kFpuMode = kFPXX;
+#else
+ // No FPU_MODE_* define given: default to the conservative FP32 model.
+ static const FpuMode kFpuMode = kFP32;
+#endif
+
#if(defined(__mips_hard_float) && __mips_hard_float != 0)
// Use floating-point coprocessor instructions. This flag is raised when
// -mhard-float is passed to the compiler.
#error Unknown endianness
#endif
+// IsFp64Mode(): true when generated code may assume 64-bit FPU registers.
+// Compile-time constant except in FPXX builds, where it is a runtime probe.
+#ifndef FPU_MODE_FPXX
+#define IsFp64Mode() \
+ (kFpuMode == kFP64)
+#else
+#define IsFp64Mode() \
+ (CpuFeatures::IsSupported(FP64))
+#endif
+
+// IsMipsArchVariant(check): architecture-revision test. Static comparison
+// against kArchVariant normally; a CpuFeatures runtime probe in RX
+// (run-anywhere) builds. Relies on ArchVariants enumerators sharing values
+// with the corresponding CpuFeature constants.
+// NOTE(review): the macro argument is not parenthesized in the expansion
+// ((kArchVariant == check)); fine for plain enumerator arguments, but worth
+// confirming no caller passes a compound expression.
+#ifndef _MIPS_ARCH_MIPS32RX
+#define IsMipsArchVariant(check) \
+ (kArchVariant == check)
+#else
+#define IsMipsArchVariant(check) \
+ (CpuFeatures::IsSupported(check))
+#endif
+
+
+// NOTE(review): __STDC_FORMAT_MACROS must be defined before the FIRST
+// inclusion of <inttypes.h> in a translation unit; defining it here,
+// mid-header, only works if no earlier header already pulled inttypes.h in.
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
+
// Defines constants and accessor classes to assemble, disassemble and
// simulate MIPS32 instructions.
//
const int kFCSRRegister = 31;
const int kInvalidFPUControlRegister = -1;
const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
+const uint64_t kFPU64InvalidResult =
+ static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1;
// FCSR constants.
const uint32_t kFCSRInexactFlagBit = 2;
const int kImm16Shift = 0;
const int kImm16Bits = 16;
+const int kImm21Shift = 0;
+const int kImm21Bits = 21;
const int kImm26Shift = 0;
const int kImm26Bits = 26;
const int kImm28Shift = 0;
const int kImm28Bits = 28;
+const int kImm32Shift = 0;
+const int kImm32Bits = 32;
// In branches and jumps immediate fields point to words, not bytes,
// and are therefore shifted by 2.
ANDI = ((1 << 3) + 4) << kOpcodeShift,
ORI = ((1 << 3) + 5) << kOpcodeShift,
XORI = ((1 << 3) + 6) << kOpcodeShift,
- LUI = ((1 << 3) + 7) << kOpcodeShift,
+ LUI = ((1 << 3) + 7) << kOpcodeShift, // LUI/AUI family.
+ BEQC = ((2 << 3) + 0) << kOpcodeShift,
COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
BEQL = ((2 << 3) + 4) << kOpcodeShift,
BNEL = ((2 << 3) + 5) << kOpcodeShift,
BLEZL = ((2 << 3) + 6) << kOpcodeShift,
BGTZL = ((2 << 3) + 7) << kOpcodeShift,
+ DADDI = ((3 << 3) + 0) << kOpcodeShift, // This is also BNEC.
SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift,
LWC1 = ((6 << 3) + 1) << kOpcodeShift,
LDC1 = ((6 << 3) + 5) << kOpcodeShift,
+ BEQZC = ((6 << 3) + 6) << kOpcodeShift,
PREF = ((6 << 3) + 3) << kOpcodeShift,
SWC1 = ((7 << 3) + 1) << kOpcodeShift,
SDC1 = ((7 << 3) + 5) << kOpcodeShift,
+ BNEZC = ((7 << 3) + 6) << kOpcodeShift,
COP1X = ((1 << 4) + 3) << kOpcodeShift
};
BREAK = ((1 << 3) + 5),
MFHI = ((2 << 3) + 0),
+ CLZ_R6 = ((2 << 3) + 0),
+ CLO_R6 = ((2 << 3) + 1),
MFLO = ((2 << 3) + 2),
MULT = ((3 << 3) + 0),
TLT = ((6 << 3) + 2),
TLTU = ((6 << 3) + 3),
TEQ = ((6 << 3) + 4),
+ SELEQZ_S = ((6 << 3) + 5),
TNE = ((6 << 3) + 6),
+ SELNEZ_S = ((6 << 3) + 7),
+
+ // Multiply integers in r6.
+ MUL_MUH = ((3 << 3) + 0), // MUL, MUH.
+ MUL_MUH_U = ((3 << 3) + 1), // MUL_U, MUH_U.
+
+ MUL_OP = ((0 << 3) + 2),
+ MUH_OP = ((0 << 3) + 3),
+ DIV_OP = ((0 << 3) + 2),
+ MOD_OP = ((0 << 3) + 3),
+
+ DIV_MOD = ((3 << 3) + 2),
+ DIV_MOD_U = ((3 << 3) + 3),
// SPECIAL2 Encoding of Function Field.
MUL = ((0 << 3) + 2),
BGEZ = ((0 << 3) + 1) << 16,
BLTZAL = ((2 << 3) + 0) << 16,
BGEZAL = ((2 << 3) + 1) << 16,
+ BGEZALL = ((2 << 3) + 3) << 16,
// COP1 Encoding of rs Field.
MFC1 = ((0 << 3) + 0) << 21,
TRUNC_W_D = ((1 << 3) + 5),
CEIL_W_D = ((1 << 3) + 6),
FLOOR_W_D = ((1 << 3) + 7),
+ MIN = ((3 << 3) + 4),
+ MINA = ((3 << 3) + 5),
+ MAX = ((3 << 3) + 6),
+ MAXA = ((3 << 3) + 7),
CVT_S_D = ((4 << 3) + 0),
CVT_W_D = ((4 << 3) + 4),
CVT_L_D = ((4 << 3) + 5),
CVT_D_W = ((4 << 3) + 1),
CVT_S_L = ((4 << 3) + 0),
CVT_D_L = ((4 << 3) + 1),
+ BC1EQZ = ((2 << 2) + 1) << 21,
+ BC1NEZ = ((3 << 2) + 1) << 21,
+ // COP1 CMP positive predicates Bit 5..4 = 00.
+ CMP_AF = ((0 << 3) + 0),
+ CMP_UN = ((0 << 3) + 1),
+ CMP_EQ = ((0 << 3) + 2),
+ CMP_UEQ = ((0 << 3) + 3),
+ CMP_LT = ((0 << 3) + 4),
+ CMP_ULT = ((0 << 3) + 5),
+ CMP_LE = ((0 << 3) + 6),
+ CMP_ULE = ((0 << 3) + 7),
+ CMP_SAF = ((1 << 3) + 0),
+ CMP_SUN = ((1 << 3) + 1),
+ CMP_SEQ = ((1 << 3) + 2),
+ CMP_SUEQ = ((1 << 3) + 3),
+ CMP_SSLT = ((1 << 3) + 4),
+ CMP_SSULT = ((1 << 3) + 5),
+ CMP_SLE = ((1 << 3) + 6),
+ CMP_SULE = ((1 << 3) + 7),
+ // COP1 CMP negative predicates Bit 5..4 = 01.
+ CMP_AT = ((2 << 3) + 0), // Reserved, not implemented.
+ CMP_OR = ((2 << 3) + 1),
+ CMP_UNE = ((2 << 3) + 2),
+ CMP_NE = ((2 << 3) + 3),
+ CMP_UGE = ((2 << 3) + 4), // Reserved, not implemented.
+ CMP_OGE = ((2 << 3) + 5), // Reserved, not implemented.
+ CMP_UGT = ((2 << 3) + 6), // Reserved, not implemented.
+ CMP_OGT = ((2 << 3) + 7), // Reserved, not implemented.
+ CMP_SAT = ((3 << 3) + 0), // Reserved, not implemented.
+ CMP_SOR = ((3 << 3) + 1),
+ CMP_SUNE = ((3 << 3) + 2),
+ CMP_SNE = ((3 << 3) + 3),
+ CMP_SUGE = ((3 << 3) + 4), // Reserved, not implemented.
+ CMP_SOGE = ((3 << 3) + 5), // Reserved, not implemented.
+ CMP_SUGT = ((3 << 3) + 6), // Reserved, not implemented.
+ CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented.
+
+ SEL = ((2 << 3) + 0),
+ SELEQZ_C = ((2 << 3) + 4), // COP1 on FPR registers.
+ SELNEZ_C = ((2 << 3) + 7), // COP1 on FPR registers.
// COP1 Encoding of Function Field When rs=PS.
// COP1X Encoding of Function Field.
MADD_D = ((4 << 3) + 1),
return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
}
+ // Returns the raw 21-bit immediate field (bits 20..0) of an immediate-type
+ // instruction, zero-extended into an int32_t.
+ // NOTE(review): the value is NOT sign-extended here even though compact
+ // branch offsets are signed -- confirm callers perform the extension.
+ inline int32_t Imm21Value() const {
+ DCHECK(InstructionType() == kImmediateType);
+ return Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift);
+ }
+
inline int32_t Imm26Value() const {
DCHECK(InstructionType() == kJumpType);
return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
void PrintUImm16(Instruction* instr);
void PrintSImm16(Instruction* instr);
void PrintXImm16(Instruction* instr);
+ void PrintXImm21(Instruction* instr);
void PrintXImm26(Instruction* instr);
void PrintCode(Instruction* instr); // For break and trap instructions.
// Printing of instruction name.
}
+// Append the instruction's 21-bit immediate field to the output buffer,
+// formatted as a hexadecimal value.
+void Decoder::PrintXImm21(Instruction* instr) {
+  const uint32_t imm21 = static_cast<uint32_t>(instr->Imm21Value());
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm21);
+}
+
+
// Print 26-bit immediate value.
void Decoder::PrintXImm26(Instruction* instr) {
uint32_t imm = instr->Imm26Value() << kImmFieldShift;
PrintXImm16(instr);
}
return 6;
- } else {
+ } else if (format[3] == '2' && format[4] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "imm21x"));
+ PrintXImm21(instr);
+ return 6;
+ } else if (format[3] == '2' && format[4] == '6') {
DCHECK(STRING_STARTS_WITH(format, "imm26x"));
PrintXImm26(instr);
return 6;
case CVT_W_D:
Format(instr, "cvt.w.d 'fd, 'fs");
break;
- case CVT_L_D: {
- if (kArchVariant == kMips32r2) {
- Format(instr, "cvt.l.d 'fd, 'fs");
- } else {
- Unknown(instr);
- }
+ case CVT_L_D:
+ Format(instr, "cvt.l.d 'fd, 'fs");
break;
- }
case TRUNC_W_D:
Format(instr, "trunc.w.d 'fd, 'fs");
break;
- case TRUNC_L_D: {
- if (kArchVariant == kMips32r2) {
- Format(instr, "trunc.l.d 'fd, 'fs");
- } else {
- Unknown(instr);
- }
+ case TRUNC_L_D:
+ Format(instr, "trunc.l.d 'fd, 'fs");
break;
- }
case ROUND_W_D:
Format(instr, "round.w.d 'fd, 'fs");
break;
break;
case L:
switch (instr->FunctionFieldRaw()) {
- case CVT_D_L: {
- if (kArchVariant == kMips32r2) {
- Format(instr, "cvt.d.l 'fd, 'fs");
- } else {
- Unknown(instr);
- }
+ case CVT_D_L:
+ Format(instr, "cvt.d.l 'fd, 'fs");
break;
- }
- case CVT_S_L: {
- if (kArchVariant == kMips32r2) {
- Format(instr, "cvt.s.l 'fd, 'fs");
- } else {
- Unknown(instr);
- }
+ case CVT_S_L:
+ Format(instr, "cvt.s.l 'fd, 'fs");
+ break;
+ case CMP_UN:
+ Format(instr, "cmp.un.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_EQ:
+ Format(instr, "cmp.eq.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_UEQ:
+ Format(instr, "cmp.ueq.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_LT:
+ Format(instr, "cmp.lt.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_ULT:
+ Format(instr, "cmp.ult.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_LE:
+ Format(instr, "cmp.le.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_ULE:
+ Format(instr, "cmp.ule.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_OR:
+ Format(instr, "cmp.or.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_UNE:
+ Format(instr, "cmp.une.d 'fd, 'fs, 'ft");
+ break;
+ case CMP_NE:
+ Format(instr, "cmp.ne.d 'fd, 'fs, 'ft");
break;
- }
default:
UNREACHABLE();
}
if (instr->RsValue() == 0) {
Format(instr, "srl 'rd, 'rt, 'sa");
} else {
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2)) {
Format(instr, "rotr 'rd, 'rt, 'sa");
} else {
Unknown(instr);
if (instr->SaValue() == 0) {
Format(instr, "srlv 'rd, 'rt, 'rs");
} else {
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2)) {
Format(instr, "rotrv 'rd, 'rt, 'rs");
} else {
Unknown(instr);
Format(instr, "srav 'rd, 'rt, 'rs");
break;
case MFHI:
- Format(instr, "mfhi 'rd");
+ if (instr->Bits(25, 16) == 0) {
+ Format(instr, "mfhi 'rd");
+ } else {
+ if ((instr->FunctionFieldRaw() == CLZ_R6)
+ && (instr->FdValue() == 1)) {
+ Format(instr, "clz 'rd, 'rs");
+ } else if ((instr->FunctionFieldRaw() == CLO_R6)
+ && (instr->FdValue() == 1)) {
+ Format(instr, "clo 'rd, 'rs");
+ }
+ }
break;
case MFLO:
Format(instr, "mflo 'rd");
break;
- case MULT:
- Format(instr, "mult 'rs, 'rt");
+ case MULT: // @Mips32r6 == MUL_MUH.
+ if (!IsMipsArchVariant(kMips32r6)) {
+ Format(instr, "mult 'rs, 'rt");
+ } else {
+ if (instr->SaValue() == MUL_OP) {
+ Format(instr, "mul 'rd, 'rs, 'rt");
+ } else {
+ Format(instr, "muh 'rd, 'rs, 'rt");
+ }
+ }
break;
- case MULTU:
- Format(instr, "multu 'rs, 'rt");
+ case MULTU: // @Mips32r6 == MUL_MUH_U.
+ if (!IsMipsArchVariant(kMips32r6)) {
+ Format(instr, "multu 'rs, 'rt");
+ } else {
+ if (instr->SaValue() == MUL_OP) {
+ Format(instr, "mulu 'rd, 'rs, 'rt");
+ } else {
+ Format(instr, "muhu 'rd, 'rs, 'rt");
+ }
+ }
break;
- case DIV:
- Format(instr, "div 'rs, 'rt");
+ case DIV: // @Mips32r6 == DIV_MOD.
+ if (!IsMipsArchVariant(kMips32r6)) {
+ Format(instr, "div 'rs, 'rt");
+ } else {
+ if (instr->SaValue() == DIV_OP) {
+ Format(instr, "div 'rd, 'rs, 'rt");
+ } else {
+ Format(instr, "mod 'rd, 'rs, 'rt");
+ }
+ }
break;
- case DIVU:
- Format(instr, "divu 'rs, 'rt");
+ case DIVU: // @Mips32r6 == DIV_MOD_U.
+ if (!IsMipsArchVariant(kMips32r6)) {
+ Format(instr, "divu 'rs, 'rt");
+ } else {
+ if (instr->SaValue() == DIV_OP) {
+ Format(instr, "divu 'rd, 'rs, 'rt");
+ } else {
+ Format(instr, "modu 'rd, 'rs, 'rt");
+ }
+ }
break;
case ADD:
Format(instr, "add 'rd, 'rs, 'rt");
Format(instr, "movf 'rd, 'rs, 'bc");
}
break;
+ case SELEQZ_S:
+ Format(instr, "seleqz 'rd, 'rs, 'rt");
+ break;
+ case SELNEZ_S:
+ Format(instr, "selnez 'rd, 'rs, 'rt");
+ break;
default:
UNREACHABLE();
}
Format(instr, "mul 'rd, 'rs, 'rt");
break;
case CLZ:
- Format(instr, "clz 'rd, 'rs");
+ if (!IsMipsArchVariant(kMips32r6)) {
+ Format(instr, "clz 'rd, 'rs");
+ }
break;
default:
UNREACHABLE();
case SPECIAL3:
switch (instr->FunctionFieldRaw()) {
case INS: {
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2)) {
Format(instr, "ins 'rt, 'rs, 'sa, 'ss2");
} else {
Unknown(instr);
break;
}
case EXT: {
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2)) {
Format(instr, "ext 'rt, 'rs, 'sa, 'ss1");
} else {
Unknown(instr);
void Decoder::DecodeTypeImmediate(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
- // ------------- REGIMM class.
case COP1:
switch (instr->RsFieldRaw()) {
case BC1:
Format(instr, "bc1f 'bc, 'imm16u");
}
break;
+ case BC1EQZ:
+ Format(instr, "bc1eqz 'ft, 'imm16u");
+ break;
+ case BC1NEZ:
+ Format(instr, "bc1nez 'ft, 'imm16u");
+ break;
+ case W: // CMP.S instruction.
+ switch (instr->FunctionValue()) {
+ case CMP_AF:
+ Format(instr, "cmp.af.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_UN:
+ Format(instr, "cmp.un.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_EQ:
+ Format(instr, "cmp.eq.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_UEQ:
+ Format(instr, "cmp.ueq.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_LT:
+ Format(instr, "cmp.lt.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_ULT:
+ Format(instr, "cmp.ult.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_LE:
+ Format(instr, "cmp.le.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_ULE:
+ Format(instr, "cmp.ule.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_OR:
+ Format(instr, "cmp.or.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_UNE:
+ Format(instr, "cmp.une.S 'ft, 'fs, 'fd");
+ break;
+ case CMP_NE:
+ Format(instr, "cmp.ne.S 'ft, 'fs, 'fd");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case L: // CMP.D instruction.
+ switch (instr->FunctionValue()) {
+ case CMP_AF:
+ Format(instr, "cmp.af.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_UN:
+ Format(instr, "cmp.un.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_EQ:
+ Format(instr, "cmp.eq.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_UEQ:
+ Format(instr, "cmp.ueq.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_LT:
+ Format(instr, "cmp.lt.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_ULT:
+ Format(instr, "cmp.ult.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_LE:
+ Format(instr, "cmp.le.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_ULE:
+ Format(instr, "cmp.ule.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_OR:
+ Format(instr, "cmp.or.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_UNE:
+ Format(instr, "cmp.une.D 'ft, 'fs, 'fd");
+ break;
+ case CMP_NE:
+ Format(instr, "cmp.ne.D 'ft, 'fs, 'fd");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case S:
+ switch (instr->FunctionValue()) {
+ case SEL:
+ Format(instr, "sel.S 'ft, 'fs, 'fd");
+ break;
+ case SELEQZ_C:
+ Format(instr, "seleqz.S 'ft, 'fs, 'fd");
+ break;
+ case SELNEZ_C:
+ Format(instr, "selnez.S 'ft, 'fs, 'fd");
+ break;
+ case MIN:
+ Format(instr, "min.S 'ft, 'fs, 'fd");
+ break;
+ case MINA:
+ Format(instr, "mina.S 'ft, 'fs, 'fd");
+ break;
+ case MAX:
+ Format(instr, "max.S 'ft, 'fs, 'fd");
+ break;
+ case MAXA:
+ Format(instr, "maxa.S 'ft, 'fs, 'fd");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case D:
+ switch (instr->FunctionValue()) {
+ case SEL:
+ Format(instr, "sel.D 'ft, 'fs, 'fd");
+ break;
+ case SELEQZ_C:
+ Format(instr, "seleqz.D 'ft, 'fs, 'fd");
+ break;
+ case SELNEZ_C:
+ Format(instr, "selnez.D 'ft, 'fs, 'fd");
+ break;
+ case MIN:
+ Format(instr, "min.D 'ft, 'fs, 'fd");
+ break;
+ case MINA:
+ Format(instr, "mina.D 'ft, 'fs, 'fd");
+ break;
+ case MAX:
+ Format(instr, "max.D 'ft, 'fs, 'fd");
+ break;
+ case MAXA:
+ Format(instr, "maxa.D 'ft, 'fs, 'fd");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
default:
UNREACHABLE();
}
+
break; // Case COP1.
+ // ------------- REGIMM class.
case REGIMM:
switch (instr->RtFieldRaw()) {
case BLTZ:
case BGEZAL:
Format(instr, "bgezal 'rs, 'imm16u");
break;
+ case BGEZALL:
+ Format(instr, "bgezall 'rs, 'imm16u");
+ break;
default:
UNREACHABLE();
}
Format(instr, "bne 'rs, 'rt, 'imm16u");
break;
case BLEZ:
- Format(instr, "blez 'rs, 'imm16u");
+ if ((instr->RtFieldRaw() == 0)
+ && (instr->RsFieldRaw() != 0)) {
+ Format(instr, "blez 'rs, 'imm16u");
+ } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
+ && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bgeuc 'rs, 'rt, 'imm16u");
+ } else if ((instr->RtFieldRaw() == instr->RsFieldRaw())
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bgezalc 'rs, 'imm16u");
+ } else if ((instr->RsFieldRaw() == 0)
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "blezalc 'rs, 'imm16u");
+ } else {
+ UNREACHABLE();
+ }
break;
case BGTZ:
- Format(instr, "bgtz 'rs, 'imm16u");
+ if ((instr->RtFieldRaw() == 0)
+ && (instr->RsFieldRaw() != 0)) {
+ Format(instr, "bgtz 'rs, 'imm16u");
+ } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
+ && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bltuc 'rs, 'rt, 'imm16u");
+ } else if ((instr->RtFieldRaw() == instr->RsFieldRaw())
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bltzalc 'rt, 'imm16u");
+ } else if ((instr->RsFieldRaw() == 0)
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bgtzalc 'rt, 'imm16u");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case BLEZL:
+ if ((instr->RtFieldRaw() == instr->RsFieldRaw())
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bgezc 'rt, 'imm16u");
+ } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
+ && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bgec 'rs, 'rt, 'imm16u");
+ } else if ((instr->RsFieldRaw() == 0)
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "blezc 'rt, 'imm16u");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case BGTZL:
+ if ((instr->RtFieldRaw() == instr->RsFieldRaw())
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bltzc 'rt, 'imm16u");
+ } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
+ && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bltc 'rs, 'rt, 'imm16u");
+ } else if ((instr->RsFieldRaw() == 0)
+ && (instr->RtFieldRaw() != 0)) {
+ Format(instr, "bgtzc 'rt, 'imm16u");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case BEQZC:
+ if (instr->RsFieldRaw() != 0) {
+ Format(instr, "beqzc 'rs, 'imm21x");
+ }
+ break;
+ case BNEZC:
+ if (instr->RsFieldRaw() != 0) {
+ Format(instr, "bnezc 'rs, 'imm21x");
+ }
break;
// ------------- Arithmetic instructions.
case ADDI:
- Format(instr, "addi 'rt, 'rs, 'imm16s");
+ if (!IsMipsArchVariant(kMips32r6)) {
+ Format(instr, "addi 'rt, 'rs, 'imm16s");
+ } else {
+ // Check if BOVC or BEQC instruction.
+ if (instr->RsFieldRaw() >= instr->RtFieldRaw()) {
+ Format(instr, "bovc 'rs, 'rt, 'imm16s");
+ } else if (instr->RsFieldRaw() < instr->RtFieldRaw()) {
+ Format(instr, "beqc 'rs, 'rt, 'imm16s");
+ } else {
+ UNREACHABLE();
+ }
+ }
+ break;
+ case DADDI:
+ if (IsMipsArchVariant(kMips32r6)) {
+ // Check if BNVC or BNEC instruction.
+ if (instr->RsFieldRaw() >= instr->RtFieldRaw()) {
+ Format(instr, "bnvc 'rs, 'rt, 'imm16s");
+ } else if (instr->RsFieldRaw() < instr->RtFieldRaw()) {
+ Format(instr, "bnec 'rs, 'rt, 'imm16s");
+ } else {
+ UNREACHABLE();
+ }
+ }
break;
case ADDIU:
Format(instr, "addiu 'rt, 'rs, 'imm16s");
Format(instr, "xori 'rt, 'rs, 'imm16x");
break;
case LUI:
- Format(instr, "lui 'rt, 'imm16x");
+ if (!IsMipsArchVariant(kMips32r6)) {
+ Format(instr, "lui 'rt, 'imm16x");
+ } else {
+ if (instr->RsValue() != 0) {
+ Format(instr, "aui 'rt, 'imm16x");
+ } else {
+ Format(instr, "lui 'rt, 'imm16x");
+ }
+ }
break;
// ------------- Memory instructions.
case LB:
Format(instr, "sdc1 'ft, 'imm16s('rs)");
break;
default:
+ printf("a 0x%x \n", instr->OpcodeFieldRaw());
UNREACHABLE();
break;
}
break;
case Token::MUL: {
__ SmiUntag(scratch1, right);
- __ Mult(left, scratch1);
- __ mflo(scratch1);
- __ mfhi(scratch2);
- __ sra(scratch1, scratch1, 31);
+ __ Mul(scratch2, v0, left, scratch1);
+ __ sra(scratch1, v0, 31);
__ Branch(&stub_call, ne, scratch1, Operand(scratch2));
- __ mflo(v0);
__ Branch(&done, ne, v0, Operand(zero_reg));
__ Addu(scratch2, right, left);
__ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
// smi but the other values are, so the result is a smi.
__ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ Subu(string_length, string_length, Operand(scratch1));
- __ Mult(array_length, scratch1);
+ __ Mul(scratch3, scratch2, array_length, scratch1);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
// zero.
- __ mfhi(scratch2);
- __ Branch(&bailout, ne, scratch2, Operand(zero_reg));
- __ mflo(scratch2);
+ __ Branch(&bailout, ne, scratch3, Operand(zero_reg));
__ And(scratch3, scratch2, Operand(0x80000000));
__ Branch(&bailout, ne, scratch3, Operand(zero_reg));
__ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3);
const Register result_reg = ToRegister(instr->result());
// div runs in the background while we check for special cases.
- __ div(left_reg, right_reg);
+ __ Mod(result_reg, left_reg, right_reg);
Label done;
// Check for x % 0, we have to deopt in this case because we can't return a
}
// If we care about -0, test if the dividend is <0 and the result is 0.
- __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
- __ mfhi(result_reg);
+ __ Branch(&done, ge, left_reg, Operand(zero_reg));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
}
Register dividend = ToRegister(instr->dividend());
Register divisor = ToRegister(instr->divisor());
const Register result = ToRegister(instr->result());
+ Register remainder = ToRegister(instr->temp());
// On MIPS div is asynchronous - it will run in the background while we
// check for special cases.
- __ div(dividend, divisor);
+ __ Div(remainder, result, dividend, divisor);
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
}
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- __ mfhi(result);
- DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
- __ mflo(result);
- } else {
- __ mflo(result);
+ DeoptimizeIf(ne, instr->environment(), remainder, Operand(zero_reg));
}
}
Register dividend = ToRegister(instr->dividend());
Register divisor = ToRegister(instr->divisor());
const Register result = ToRegister(instr->result());
-
+ Register remainder = scratch0();
// On MIPS div is asynchronous - it will run in the background while we
// check for special cases.
- __ div(dividend, divisor);
+ __ Div(remainder, result, dividend, divisor);
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
// We performed a truncating division. Correct the result if necessary.
Label done;
- Register remainder = scratch0();
- __ mfhi(remainder);
- __ mflo(result);
__ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
__ Xor(remainder, remainder, Operand(divisor));
__ Branch(&done, ge, remainder, Operand(zero_reg));
// hi:lo = left * right.
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
- __ mult(result, right);
- __ mfhi(scratch);
- __ mflo(result);
+ __ Mul(scratch, result, result, right);
} else {
- __ mult(left, right);
- __ mfhi(scratch);
- __ mflo(result);
+ __ Mul(scratch, result, left, right);
}
__ sra(at, result, 31);
DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
// Test for -0.
Label done;
__ Branch(&done, ne, result, Operand(zero_reg));
- __ mfc1(scratch1, input.high());
+ __ Mfhc1(scratch1, input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
__ bind(&done);
Label done, check_sign_on_zero;
// Extract exponent bits.
- __ mfc1(result, input.high());
+ __ Mfhc1(result, input);
__ Ext(scratch,
result,
HeapNumber::kExponentShift,
// Check sign of the result: if the sign changed, the input
// value was in ]0.5, 0[ and the result should be -0.
- __ mfc1(result, double_scratch0().high());
+ __ Mfhc1(result, double_scratch0());
__ Xor(result, result, Operand(scratch));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// ARM uses 'mi' here, which is 'lt'
// Test for -0.
__ Branch(&done, ne, result, Operand(zero_reg));
__ bind(&check_sign_on_zero);
- __ mfc1(scratch, input.high());
+ __ Mfhc1(scratch, input);
__ And(scratch, scratch, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
}
if (deoptimize_on_minus_zero) {
__ mfc1(at, result_reg.low());
__ Branch(&done, ne, at, Operand(zero_reg));
- __ mfc1(scratch, result_reg.high());
+ __ Mfhc1(scratch, result_reg);
DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
}
__ Branch(&done);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Branch(&done, ne, input_reg, Operand(zero_reg));
- __ mfc1(scratch1, double_scratch.high());
+ __ Mfhc1(scratch1, double_scratch);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ Branch(&done, ne, result_reg, Operand(zero_reg));
- __ mfc1(scratch1, double_input.high());
+ __ Mfhc1(scratch1, double_input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
__ bind(&done);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ Branch(&done, ne, result_reg, Operand(zero_reg));
- __ mfc1(scratch1, double_input.high());
+ __ Mfhc1(scratch1, double_input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
__ bind(&done);
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = TempRegister();
LInstruction* result =
- DefineAsRegister(new(zone()) LDivI(dividend, divisor));
+ DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
(instr->CheckFlag(HValue::kCanOverflow) &&
return DefineAsRegister(mul);
} else if (instr->representation().IsDouble()) {
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
if (instr->HasOneUse() && instr->uses().value()->IsAdd()) {
HAdd* add = HAdd::cast(instr->uses().value());
if (instr == add->left()) {
LInstruction* result = DefineAsRegister(add);
return result;
} else if (instr->representation().IsDouble()) {
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
if (instr->left()->IsMul())
return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
};
-class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LDivI(LOperand* dividend, LOperand* divisor) {
+ LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
inputs_[0] = dividend;
inputs_[1] = divisor;
+ temps_[0] = temp;
}
LOperand* dividend() { return inputs_[0]; }
LOperand* divisor() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
mult(rs, rt.rm());
mflo(rd);
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
li(at, rt);
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
mult(rs, at);
mflo(rd);
} else {
}
+// Full 32x32 -> 64-bit signed multiply: the low word of the product is
+// written to rd_lo and the high word to rd_hi.
+void MacroAssembler::Mul(Register rd_hi, Register rd_lo,
+ Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ mult(rs, rt.rm());
+ mflo(rd_lo);
+ mfhi(rd_hi);
+ } else {
+ // r6 drops HI/LO; use the mul/muh pair. When rd_lo aliases rs, emit
+ // muh first so the mul write does not clobber rs before its read.
+ if (rd_lo.is(rs)) {
+ DCHECK(!rd_hi.is(rs));
+ DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
+ muh(rd_hi, rs, rt.rm());
+ mul(rd_lo, rs, rt.rm());
+ } else {
+ DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
+ mul(rd_lo, rs, rt.rm());
+ muh(rd_hi, rs, rt.rm());
+ }
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ mult(rs, at);
+ mflo(rd_lo);
+ mfhi(rd_hi);
+ } else {
+ // Same aliasing precaution as the register-operand path above.
+ if (rd_lo.is(rs)) {
+ DCHECK(!rd_hi.is(rs));
+ DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
+ muh(rd_hi, rs, at);
+ mul(rd_lo, rs, at);
+ } else {
+ DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
+ mul(rd_lo, rs, at);
+ muh(rd_hi, rs, at);
+ }
+ }
+ }
+}
+
+
+// Writes the high 32 bits of the 64-bit signed product rs * rt to rd.
+void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ mult(rs, rt.rm());
+ mfhi(rd);
+ } else {
+ // r6 has a dedicated high-word multiply instruction.
+ muh(rd, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ mult(rs, at);
+ mfhi(rd);
+ } else {
+ muh(rd, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::Mult(Register rs, const Operand& rt) {
if (rt.is_reg()) {
mult(rs, rt.rm());
}
+// Signed 32-bit division: quotient is written to res, remainder to rem.
+void MacroAssembler::Div(Register rem, Register res,
+ Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, rt.rm());
+ mflo(res);
+ mfhi(rem);
+ } else {
+ // r6 drops HI/LO: issue separate div (quotient) and mod (remainder).
+ div(res, rs, rt.rm());
+ mod(rem, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, at);
+ mflo(res);
+ mfhi(rem);
+ } else {
+ div(res, rs, at);
+ mod(rem, rs, at);
+ }
+ }
+}
+
+
+// Writes the remainder of the signed division rs / rt to rd.
+void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, rt.rm());
+ mfhi(rd);
+ } else {
+ // r6 has a dedicated remainder instruction.
+ mod(rd, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, at);
+ mfhi(rd);
+ } else {
+ mod(rd, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::Divu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
divu(rs, rt.rm());
void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
if (rt.is_reg()) {
rotrv(rd, rs, rt.rm());
} else {
void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
lw(zero_reg, rs);
} else {
pref(hint, rs);
DCHECK(pos < 32);
DCHECK(pos + size < 33);
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
ext_(rt, rs, pos, size);
} else {
// Move rs to rt and shift it left then right to get the
DCHECK(pos + size <= 32);
DCHECK(size != 0);
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
ins_(rt, rs, pos, size);
} else {
DCHECK(!rt.is(t8) && !rs.is(t8));
// Load 2^31 into f20 as its float representation.
li(at, 0x41E00000);
- mtc1(at, FPURegister::from_code(scratch.code() + 1));
mtc1(zero_reg, scratch);
+ Mthc1(at, scratch);
// Add it to fd.
add_d(fd, fd, scratch);
void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
- if (kArchVariant == kLoongson && fd.is(fs)) {
- mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+ Mfhc1(t8, fs);
trunc_w_d(fd, fs);
- mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ Mthc1(t8, fs);
} else {
trunc_w_d(fd, fs);
}
void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
- if (kArchVariant == kLoongson && fd.is(fs)) {
- mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+ Mfhc1(t8, fs);
round_w_d(fd, fs);
- mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ Mthc1(t8, fs);
} else {
round_w_d(fd, fs);
}
void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
- if (kArchVariant == kLoongson && fd.is(fs)) {
- mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+ Mfhc1(t8, fs);
floor_w_d(fd, fs);
- mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ Mthc1(t8, fs);
} else {
floor_w_d(fd, fs);
}
void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
- if (kArchVariant == kLoongson && fd.is(fs)) {
- mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+ Mfhc1(t8, fs);
ceil_w_d(fd, fs);
- mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ Mthc1(t8, fs);
} else {
ceil_w_d(fd, fs);
}
// Load 2^31 into scratch as its float representation.
li(at, 0x41E00000);
- mtc1(at, FPURegister::from_code(scratch.code() + 1));
mtc1(zero_reg, scratch);
+ Mthc1(at, scratch);
// Test if scratch > fd.
// If fd < 2^31 we can convert it normally.
Label simple_convert;
}
+// Moves rt into the high 32 bits of the double held in fs.
+// FP64 mode: mthc1 writes the upper half of the 64-bit register.
+// FP32 mode: the high half lives in the odd register of the even/odd pair.
+void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
+ if (IsFp64Mode()) {
+ mthc1(rt, fs);
+ } else {
+ mtc1(rt, fs.high());
+ }
+}
+
+
+// Moves the high 32 bits of the double held in fs into rt.
+// FP64 mode: mfhc1 reads the upper half of the 64-bit register.
+// FP32 mode: the high half lives in the odd register of the even/odd pair.
+void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
+ if (IsFp64Mode()) {
+ mfhc1(rt, fs);
+ } else {
+ mfc1(rt, fs.high());
+ }
+}
+
+
void MacroAssembler::BranchF(Label* target,
Label* nan,
Condition cc,
DCHECK(nan || target);
// Check for unordered (NaN) cases.
if (nan) {
- c(UN, D, cmp1, cmp2);
- bc1t(nan);
- }
-
- if (target) {
- // Here NaN cases were either handled by this function or are assumed to
- // have been handled by the caller.
- // Unsigned conditions are treated as their signed counterpart.
- switch (cc) {
- case lt:
- c(OLT, D, cmp1, cmp2);
- bc1t(target);
- break;
- case gt:
- c(ULE, D, cmp1, cmp2);
- bc1f(target);
- break;
- case ge:
- c(ULT, D, cmp1, cmp2);
- bc1f(target);
- break;
- case le:
- c(OLE, D, cmp1, cmp2);
- bc1t(target);
- break;
- case eq:
- c(EQ, D, cmp1, cmp2);
- bc1t(target);
- break;
- case ueq:
- c(UEQ, D, cmp1, cmp2);
- bc1t(target);
- break;
- case ne:
- c(EQ, D, cmp1, cmp2);
- bc1f(target);
- break;
- case nue:
- c(UEQ, D, cmp1, cmp2);
- bc1f(target);
- break;
- default:
- CHECK(0);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ c(UN, D, cmp1, cmp2);
+ bc1t(nan);
+ } else {
+ // Use kDoubleCompareReg for comparison result. It has to be unavailable
+ // to lithium register allocator.
+ DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
+ cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(nan, kDoubleCompareReg);
+ }
+ }
+
+ if (!IsMipsArchVariant(kMips32r6)) {
+ if (target) {
+ // Here NaN cases were either handled by this function or are assumed to
+ // have been handled by the caller.
+ switch (cc) {
+ case lt:
+ c(OLT, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case gt:
+ c(ULE, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case ge:
+ c(ULT, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case le:
+ c(OLE, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case eq:
+ c(EQ, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case ueq:
+ c(UEQ, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case ne:
+ c(EQ, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case nue:
+ c(UEQ, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ default:
+ CHECK(0);
+ }
+ }
+ } else {
+ if (target) {
+ // Here NaN cases were either handled by this function or are assumed to
+ // have been handled by the caller.
+ // Unsigned conditions are treated as their signed counterpart.
+ // Use kDoubleCompareReg for comparison result, it is
+ // valid in fp64 (FR = 1) mode which is implied for mips32r6.
+ DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
+ switch (cc) {
+ case lt:
+ cmp(OLT, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
+ break;
+ case gt:
+ cmp(ULE, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
+ break;
+ case ge:
+ cmp(ULT, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
+ break;
+ case le:
+ cmp(OLE, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
+ break;
+ case eq:
+ cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
+ break;
+ case ueq:
+ cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
+ break;
+ case ne:
+ cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
+ break;
+ case nue:
+ cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
+ break;
+ default:
+ CHECK(0);
+ }
}
}
// register of FPU register pair.
if (hi != 0) {
li(at, Operand(hi));
- mtc1(at, dst.high());
+ Mthc1(at, dst);
} else {
- mtc1(zero_reg, dst.high());
+ Mthc1(zero_reg, dst);
}
}
}
void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
Label done;
Branch(&done, ne, rt, Operand(zero_reg));
mov(rd, rs);
void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
Label done;
Branch(&done, eq, rt, Operand(zero_reg));
mov(rd, rs);
void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
// Tests an FP condition code and then conditionally move rs to rd.
// We do not currently use any FPU cc bit other than bit 0.
DCHECK(cc == 0);
void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
// Tests an FP condition code and then conditionally move rs to rd.
// We do not currently use any FPU cc bit other than bit 0.
DCHECK(cc == 0);
void MacroAssembler::Clz(Register rd, Register rs) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
Register mask = t8;
Register scratch = t9;
li(r2, rt);
}
- {
+ if (!IsMipsArchVariant(kMips32r6)) {
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
default:
UNREACHABLE();
}
+ } else {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ switch (cond) {
+ case cc_always:
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, r2, 2);
+ nop();
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, r2, 2);
+ nop();
+ bal(offset);
+ break;
+
+ // Signed comparison.
+ case greater:
+ // rs > rt
+ slt(scratch, r2, rs);
+ beq(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case greater_equal:
+ // rs >= rt
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case less:
+ // rs < r2
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case less_equal:
+ // rs <= r2
+ slt(scratch, r2, rs);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+
+
+ // Unsigned comparison.
+ case Ugreater:
+ // rs > rt
+ sltu(scratch, r2, rs);
+ beq(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case Ugreater_equal:
+ // rs >= rt
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case Uless:
+ // rs < r2
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case Uless_equal:
+ // rs <= r2
+ sltu(scratch, r2, rs);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
li(r2, rt);
}
- {
+ if (!IsMipsArchVariant(kMips32r6)) {
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
bltzal(scratch, offset);
break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ switch (cond) {
+ case cc_always:
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, r2, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, r2, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+
+ // Signed comparison.
+ case greater:
+ // rs > rt
+ slt(scratch, r2, rs);
+ beq(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case greater_equal:
+ // rs >= rt
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case less:
+ // rs < r2
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case less_equal:
+ // rs <= r2
+ slt(scratch, r2, rs);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+
+
+ // Unsigned comparison.
+ case Ugreater:
+ // rs > rt
+ sltu(scratch, r2, rs);
+ beq(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case Ugreater_equal:
+ // rs >= rt
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case Uless:
+ // rs < r2
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case Uless_equal:
+ // rs <= r2
+ sltu(scratch, r2, rs);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+
default:
UNREACHABLE();
}
}
+
// Check that offset could actually hold on an int16_t.
DCHECK(is_int16(offset));
DCHECK(!result.is(at));
MultiplierAndShift ms(divisor);
li(at, Operand(ms.multiplier()));
- Mult(dividend, Operand(at));
- mfhi(result);
+ Mulh(result, dividend, Operand(at));
if (divisor > 0 && ms.multiplier() < 0) {
Addu(result, result, Operand(dividend));
}
inline void Move(Register dst_low, Register dst_high, FPURegister src) {
mfc1(dst_low, src);
- mfc1(dst_high, FPURegister::from_code(src.code() + 1));
+ Mfhc1(dst_high, src);
}
inline void FmoveHigh(Register dst_high, FPURegister src) {
- mfc1(dst_high, FPURegister::from_code(src.code() + 1));
+ Mfhc1(dst_high, src);
}
inline void FmoveLow(Register dst_low, FPURegister src) {
inline void Move(FPURegister dst, Register src_low, Register src_high) {
mtc1(src_low, dst);
- mtc1(src_high, FPURegister::from_code(dst.code() + 1));
+ Mthc1(src_high, dst);
}
// Conditional move.
instr(rs, Operand(j)); \
}
+#define DEFINE_INSTRUCTION3(instr) \
+ void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt); \
+ void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) { \
+ instr(rd_hi, rd_lo, rs, Operand(rt)); \
+ } \
+ void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) { \
+ instr(rd_hi, rd_lo, rs, Operand(j)); \
+ }
+
DEFINE_INSTRUCTION(Addu);
DEFINE_INSTRUCTION(Subu);
DEFINE_INSTRUCTION(Mul);
+ DEFINE_INSTRUCTION(Mod);
+ DEFINE_INSTRUCTION(Mulh);
DEFINE_INSTRUCTION2(Mult);
DEFINE_INSTRUCTION2(Multu);
DEFINE_INSTRUCTION2(Div);
DEFINE_INSTRUCTION2(Divu);
+ DEFINE_INSTRUCTION3(Div);
+ DEFINE_INSTRUCTION3(Mul);
+
DEFINE_INSTRUCTION(And);
DEFINE_INSTRUCTION(Or);
DEFINE_INSTRUCTION(Xor);
void Round_w_d(FPURegister fd, FPURegister fs);
void Floor_w_d(FPURegister fd, FPURegister fs);
void Ceil_w_d(FPURegister fd, FPURegister fs);
+
+ // FP32 mode: Move the general purpose register into
+ // the high part of the double-register pair.
+ // FP64 mode: Move the general-purpose register into
+ // the higher 32 bits of the 64-bit coprocessor register,
+ // while leaving the low bits unchanged.
+ void Mthc1(Register rt, FPURegister fs);
+
+ // FP32 mode: move the high part of the double-register pair into
+ // general purpose register.
+ // FP64 mode: Move the higher 32 bits of the 64-bit coprocessor register into
+ // general-purpose register.
+ void Mfhc1(Register rt, FPURegister fs);
+
// Wrapper function for the different cmp/branch types.
void BranchF(Label* target,
Label* nan,
Simulator* sim_;
int32_t GetRegisterValue(int regnum);
- int32_t GetFPURegisterValueInt(int regnum);
- int64_t GetFPURegisterValueLong(int regnum);
+ int32_t GetFPURegisterValue32(int regnum);
+ int64_t GetFPURegisterValue64(int regnum);
float GetFPURegisterValueFloat(int regnum);
double GetFPURegisterValueDouble(int regnum);
bool GetValue(const char* desc, int32_t* value);
+ bool GetValue(const char* desc, int64_t* value);
// Set or delete a breakpoint. Returns true if successful.
bool SetBreakpoint(Instruction* breakpc);
}
+// Reads the low-word (32-bit) view of FPU register regnum; the
+// kNumFPURegisters sentinel maps to the simulated PC instead.
-int32_t MipsDebugger::GetFPURegisterValueInt(int regnum) {
+int32_t MipsDebugger::GetFPURegisterValue32(int regnum) {
if (regnum == kNumFPURegisters) {
return sim_->get_pc();
} else {
- return sim_->get_fpu_register(regnum);
+ return sim_->get_fpu_register_word(regnum);
}
}
+// Reads the full 64-bit value of FPU register regnum; the
+// kNumFPURegisters sentinel maps to the simulated PC instead.
-int64_t MipsDebugger::GetFPURegisterValueLong(int regnum) {
+int64_t MipsDebugger::GetFPURegisterValue64(int regnum) {
if (regnum == kNumFPURegisters) {
return sim_->get_pc();
} else {
- return sim_->get_fpu_register_long(regnum);
+ return sim_->get_fpu_register(regnum);
}
}
*value = GetRegisterValue(regnum);
return true;
} else if (fpuregnum != kInvalidFPURegister) {
- *value = GetFPURegisterValueInt(fpuregnum);
+ *value = GetFPURegisterValue32(fpuregnum);
return true;
} else if (strncmp(desc, "0x", 2) == 0) {
return SScanF(desc, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
}
+// 64-bit overload of GetValue: resolves desc as a GPR name, an FPU
+// register name, a hex literal ("0x..."), or a decimal literal, in that
+// order. Returns false if none of the forms parse.
+bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
+ int regnum = Registers::Number(desc);
+ int fpuregnum = FPURegisters::Number(desc);
+
+ if (regnum != kInvalidRegister) {
+ *value = GetRegisterValue(regnum);
+ return true;
+ } else if (fpuregnum != kInvalidFPURegister) {
+ *value = GetFPURegisterValue64(fpuregnum);
+ return true;
+ } else if (strncmp(desc, "0x", 2) == 0) {
+ // Skip the "0x" prefix; scan the rest as unsigned 64-bit hex.
+ return SScanF(desc + 2, "%" SCNx64,
+ reinterpret_cast<uint64_t*>(value)) == 1;
+ } else {
+ return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
+ }
+ return false;
+}
+
+
bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
// Check if a breakpoint can be set. If not return without any side-effects.
if (sim_->break_pc_ != NULL) {
void MipsDebugger::PrintAllRegsIncludingFPU() {
-#define FPU_REG_INFO(n) FPURegisters::Name(n), FPURegisters::Name(n+1), \
- GetFPURegisterValueInt(n+1), \
- GetFPURegisterValueInt(n), \
- GetFPURegisterValueDouble(n)
+#define FPU_REG_INFO32(n) FPURegisters::Name(n), FPURegisters::Name(n+1), \
+ GetFPURegisterValue32(n+1), \
+ GetFPURegisterValue32(n), \
+ GetFPURegisterValueDouble(n)
+
+#define FPU_REG_INFO64(n) FPURegisters::Name(n), \
+ GetFPURegisterValue64(n), \
+ GetFPURegisterValueDouble(n)
PrintAllRegs();
PrintF("\n\n");
// f0, f1, f2, ... f31.
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(0) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(2) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(4) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(6) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(8) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(10));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(12));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(14));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(16));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(18));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(20));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(22));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(24));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(26));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(28));
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(30));
+ // This must be a compile-time switch,
+ // compiler will throw out warnings otherwise.
+ if (kFpuMode == kFP64) {
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(0) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(1) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(2) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(3) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(4) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(5) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(6) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(7) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(8) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(9) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(10));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(11));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(12));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(13));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(14));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(15));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(16));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(17));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(18));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(19));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(20));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(21));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(22));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(23));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(24));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(25));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(26));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(27));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(28));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(29));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(30));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(31));
+ } else {
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(0) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(2) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(4) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(6) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(8) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(10));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(12));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(14));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(16));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(18));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(20));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(22));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(24));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(26));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(28));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(30));
+ }
#undef REG_INFO
-#undef FPU_REG_INFO
+#undef FPU_REG_INFO32
+#undef FPU_REG_INFO64
}
done = true;
} else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
if (argc == 2) {
- int32_t value;
- float fvalue;
if (strcmp(arg1, "all") == 0) {
PrintAllRegs();
} else if (strcmp(arg1, "allf") == 0) {
int fpuregnum = FPURegisters::Number(arg1);
if (regnum != kInvalidRegister) {
+ int32_t value;
value = GetRegisterValue(regnum);
PrintF("%s: 0x%08x %d \n", arg1, value, value);
} else if (fpuregnum != kInvalidFPURegister) {
- if (fpuregnum % 2 == 1) {
- value = GetFPURegisterValueInt(fpuregnum);
- fvalue = GetFPURegisterValueFloat(fpuregnum);
- PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
+ if (IsFp64Mode()) {
+ int64_t value;
+ double dvalue;
+ value = GetFPURegisterValue64(fpuregnum);
+ dvalue = GetFPURegisterValueDouble(fpuregnum);
+ PrintF("%3s: 0x%016llx %16.4e\n",
+ FPURegisters::Name(fpuregnum), value, dvalue);
} else {
- double dfvalue;
- int32_t lvalue1 = GetFPURegisterValueInt(fpuregnum);
- int32_t lvalue2 = GetFPURegisterValueInt(fpuregnum + 1);
- dfvalue = GetFPURegisterValueDouble(fpuregnum);
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n",
- FPURegisters::Name(fpuregnum+1),
- FPURegisters::Name(fpuregnum),
- lvalue1,
- lvalue2,
- dfvalue);
+ if (fpuregnum % 2 == 1) {
+ int32_t value;
+ float fvalue;
+ value = GetFPURegisterValue32(fpuregnum);
+ fvalue = GetFPURegisterValueFloat(fpuregnum);
+ PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
+ } else {
+ double dfvalue;
+ int32_t lvalue1 = GetFPURegisterValue32(fpuregnum);
+ int32_t lvalue2 = GetFPURegisterValue32(fpuregnum + 1);
+ dfvalue = GetFPURegisterValueDouble(fpuregnum);
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n",
+ FPURegisters::Name(fpuregnum+1),
+ FPURegisters::Name(fpuregnum),
+ lvalue1,
+ lvalue2,
+ dfvalue);
+ }
}
} else {
PrintF("%s unrecognized\n", arg1);
int fpuregnum = FPURegisters::Number(arg1);
if (fpuregnum != kInvalidFPURegister) {
- value = GetFPURegisterValueInt(fpuregnum);
+ value = GetFPURegisterValue32(fpuregnum);
fvalue = GetFPURegisterValueFloat(fpuregnum);
PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
} else {
next_arg++;
}
- int32_t words;
- if (argc == next_arg) {
- words = 10;
+ // TODO(palfia): optimize this.
+ if (IsFp64Mode()) {
+ int64_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!GetValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
} else {
- if (!GetValue(argv[next_arg], &words)) {
+ int32_t words;
+ if (argc == next_arg) {
words = 10;
+ } else {
+ if (!GetValue(argv[next_arg], &words)) {
+ words = 10;
+ }
}
+ end = cur + words;
}
- end = cur + words;
while (cur < end) {
PrintF(" 0x%08x: 0x%08x %10d",
}
-void Simulator::set_fpu_register(int fpureg, int32_t value) {
+// Writes a full 64-bit value into an FPU register; only legal in FP64 mode
+// (enforced by the DCHECK) since FP32 mode treats registers as 32-bit.
+void Simulator::set_fpu_register(int fpureg, int64_t value) {
+ DCHECK(IsFp64Mode());
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
FPUregisters_[fpureg] = value;
}
+// Overwrites only the low 32-bit word of the 64-bit FPU register slot.
+// NOTE(review): the in-place int32_t* write assumes little-endian layout
+// of FPUregisters_ -- see the TODO below.
+void Simulator::set_fpu_register_word(int fpureg, int32_t value) {
+ // Set ONLY lower 32-bits, leaving upper bits untouched.
+ // TODO(plind): big endian issue.
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ int32_t *pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+ *pword = value;
+}
+
+
+// Overwrites only the high 32-bit word of the 64-bit FPU register slot.
+// NOTE(review): like set_fpu_register_word, assumes little-endian layout.
+void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
+ // Set ONLY upper 32-bits, leaving lower bits untouched.
+ // TODO(plind): big endian issue.
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ int32_t *phiword = (reinterpret_cast<int32_t*>(&FPUregisters_[fpureg])) + 1;
+ *phiword = value;
+}
+
+
void Simulator::set_fpu_register_float(int fpureg, float value) {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
*BitCast<float*>(&FPUregisters_[fpureg]) = value;
void Simulator::set_fpu_register_double(int fpureg, double value) {
- DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- *BitCast<double*>(&FPUregisters_[fpureg]) = value;
+ if (IsFp64Mode()) {
+ // FP64: any single register can hold a full double.
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ *BitCast<double*>(&FPUregisters_[fpureg]) = value;
+ } else {
+ // FP32: doubles occupy an even/odd register pair; split the bit pattern
+ // into the low word (even register) and high word (odd register).
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
+ int64_t i64 = BitCast<int64_t>(value);
+ set_fpu_register_word(fpureg, i64 & 0xffffffff);
+ set_fpu_register_word(fpureg + 1, i64 >> 32);
+ }
}
double Simulator::get_double_from_register_pair(int reg) {
+ // TODO(plind): bad ABI stuff, refactor or remove.
DCHECK((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0));
double dm_val = 0.0;
}
-int32_t Simulator::get_fpu_register(int fpureg) const {
+// Reads the full 64-bit value of an FPU register; only legal in FP64 mode
+// (enforced by the DCHECK) since FP32 mode treats registers as 32-bit.
+int64_t Simulator::get_fpu_register(int fpureg) const {
+ DCHECK(IsFp64Mode());
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
return FPUregisters_[fpureg];
}
-int64_t Simulator::get_fpu_register_long(int fpureg) const {
- DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- return *BitCast<int64_t*>(
- const_cast<int32_t*>(&FPUregisters_[fpureg]));
+// Returns the low 32 bits of the FPU register as a signed word.
+int32_t Simulator::get_fpu_register_word(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<int32_t>(FPUregisters_[fpureg] & 0xffffffff);
+}
+
+
+// Identical to get_fpu_register_word; kept as a separate name so call
+// sites can make the signed reading explicit.
+int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<int32_t>(FPUregisters_[fpureg] & 0xffffffff);
+}
+
+
+// Returns the high 32 bits of the FPU register as a signed word.
+int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<int32_t>((FPUregisters_[fpureg] >> 32) & 0xffffffff);
+}
float Simulator::get_fpu_register_float(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
return *BitCast<float*>(
- const_cast<int32_t*>(&FPUregisters_[fpureg]));
+ const_cast<int64_t*>(&FPUregisters_[fpureg]));
}
double Simulator::get_fpu_register_double(int fpureg) const {
- DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- return *BitCast<double*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
+ if (IsFp64Mode()) {
+ // FP64: the whole double lives in a single 64-bit register.
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return *BitCast<double*>(&FPUregisters_[fpureg]);
+ } else {
+ // FP32: reassemble the double from the even/odd register pair
+ // (low word in the even register, high word in the odd one).
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
+ int64_t i64;
+ i64 = static_cast<uint32_t>(get_fpu_register_word(fpureg));
+ i64 |= static_cast<uint64_t>(get_fpu_register_word(fpureg + 1)) << 32;
+ return BitCast<double>(i64);
+ }
}
*y = get_fpu_register_double(14);
*z = get_register(a2);
} else {
+ // TODO(plind): bad ABI stuff, refactor or remove.
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
char buffer[sizeof(*x)];
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round_error(double original, double rounded) {
bool ret = false;
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
if (!std::isfinite(original) || !std::isfinite(rounded)) {
set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
ret = true;
}
- if (rounded > INT_MAX || rounded < INT_MIN) {
+ if (rounded > max_int32 || rounded < min_int32) {
set_fcsr_bit(kFCSROverflowFlagBit, true);
// The reference is not really clear but it seems this is required:
set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
switch (redirection->type()) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
- arg0 = get_fpu_register(f12);
- arg1 = get_fpu_register(f13);
- arg2 = get_fpu_register(f14);
- arg3 = get_fpu_register(f15);
+ if (IsFp64Mode()) {
+ arg0 = get_fpu_register_word(f12);
+ arg1 = get_fpu_register_hi_word(f12);
+ arg2 = get_fpu_register_word(f14);
+ arg3 = get_fpu_register_hi_word(f14);
+ } else {
+ arg0 = get_fpu_register_word(f12);
+ arg1 = get_fpu_register_word(f13);
+ arg2 = get_fpu_register_word(f14);
+ arg3 = get_fpu_register_word(f15);
+ }
break;
case ExternalReference::BUILTIN_FP_CALL:
- arg0 = get_fpu_register(f12);
- arg1 = get_fpu_register(f13);
+ if (IsFp64Mode()) {
+ arg0 = get_fpu_register_word(f12);
+ arg1 = get_fpu_register_hi_word(f12);
+ } else {
+ arg0 = get_fpu_register_word(f12);
+ arg1 = get_fpu_register_word(f13);
+ }
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
- arg0 = get_fpu_register(f12);
- arg1 = get_fpu_register(f13);
+ if (IsFp64Mode()) {
+ arg0 = get_fpu_register_word(f12);
+ arg1 = get_fpu_register_hi_word(f12);
+ } else {
+ arg0 = get_fpu_register_word(f12);
+ arg1 = get_fpu_register_word(f13);
+ }
arg2 = get_register(a2);
break;
default:
switch (op) {
case COP1: // Coprocessor instructions.
switch (instr->RsFieldRaw()) {
- case BC1: // Handled in DecodeTypeImmed, should never come here.
- UNREACHABLE();
- break;
case CFC1:
// At the moment only FCSR is supported.
DCHECK(fs_reg == kFCSRRegister);
*alu_out = FCSR_;
break;
case MFC1:
- *alu_out = get_fpu_register(fs_reg);
+ *alu_out = get_fpu_register_word(fs_reg);
break;
case MFHC1:
- UNIMPLEMENTED_MIPS();
+ *alu_out = get_fpu_register_hi_word(fs_reg);
break;
case CTC1:
case MTC1:
case MTHC1:
- // Do the store in the execution step.
- break;
case S:
case D:
case W:
// Do everything in the execution step.
break;
default:
- UNIMPLEMENTED_MIPS();
+ // BC1 BC1EQZ BC1NEZ handled in DecodeTypeImmed, should never come here.
+ UNREACHABLE();
}
break;
case COP1X:
case SRAV:
*alu_out = rt >> rs;
break;
- case MFHI:
- *alu_out = get_register(HI);
+ case MFHI: // MFHI == CLZ on R6.
+ if (!IsMipsArchVariant(kMips32r6)) {
+ DCHECK(instr->SaValue() == 0);
+ *alu_out = get_register(HI);
+ } else {
+ // MIPS spec: If no bits were set in GPR rs, the result written to
+ // GPR rd is 32.
+ // GCC __builtin_clz: If input is 0, the result is undefined.
+ DCHECK(instr->SaValue() == 1);
+ *alu_out =
+ rs_u == 0 ? 32 : CompilerIntrinsics::CountLeadingZeros(rs_u);
+ }
break;
case MFLO:
*alu_out = get_register(LO);
break;
- case MULT:
- *i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
+ case MULT: // MULT == MUL_MUH.
+ if (!IsMipsArchVariant(kMips32r6)) {
+ *i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
+ } else {
+ switch (instr->SaValue()) {
+ case MUL_OP:
+ case MUH_OP:
+ *i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ }
break;
- case MULTU:
- *u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
+ case MULTU: // MULTU == MUL_MUH_U.
+ if (!IsMipsArchVariant(kMips32r6)) {
+ *u64hilo = static_cast<uint64_t>(rs_u) *
+ static_cast<uint64_t>(rt_u);
+ } else {
+ switch (instr->SaValue()) {
+ case MUL_OP:
+ case MUH_OP:
+ *u64hilo = static_cast<uint64_t>(rs_u) *
+ static_cast<uint64_t>(rt_u);
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ }
break;
case ADD:
if (HaveSameSign(rs, rt)) {
switch (op) {
case COP1:
switch (instr->RsFieldRaw()) {
- case BC1: // Branch on coprocessor condition.
- UNREACHABLE();
- break;
case CFC1:
set_register(rt_reg, alu_out);
+ break;
case MFC1:
set_register(rt_reg, alu_out);
break;
case MFHC1:
- UNIMPLEMENTED_MIPS();
+ set_register(rt_reg, alu_out);
break;
case CTC1:
// At the moment only FCSR is supported.
FCSR_ = registers_[rt_reg];
break;
case MTC1:
- FPUregisters_[fs_reg] = registers_[rt_reg];
+ // Hardware writes upper 32-bits to zero on mtc1.
+ set_fpu_register_hi_word(fs_reg, 0);
+ set_fpu_register_word(fs_reg, registers_[rt_reg]);
break;
case MTHC1:
- UNIMPLEMENTED_MIPS();
+ set_fpu_register_hi_word(fs_reg, registers_[rt_reg]);
break;
case S:
float f;
f = get_fpu_register_float(fs_reg);
set_fpu_register_double(fd_reg, static_cast<double>(f));
break;
- case CVT_W_S:
- case CVT_L_S:
- case TRUNC_W_S:
- case TRUNC_L_S:
- case ROUND_W_S:
- case ROUND_L_S:
- case FLOOR_W_S:
- case FLOOR_L_S:
- case CEIL_W_S:
- case CEIL_L_S:
- case CVT_PS_S:
- UNIMPLEMENTED_MIPS();
- break;
default:
+            // CVT_W_S CVT_L_S TRUNC_W_S TRUNC_L_S ROUND_W_S ROUND_L_S
+            // FLOOR_W_S FLOOR_L_S CEIL_W_S CEIL_L_S CVT_PS_S are unimplemented.
UNREACHABLE();
}
break;
// round to the even one.
result--;
}
- set_fpu_register(fd_reg, result);
+ set_fpu_register_word(fd_reg, result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register(fd_reg, kFPUInvalidResult);
}
{
double rounded = trunc(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register(fd_reg, result);
+ set_fpu_register_word(fd_reg, result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register(fd_reg, kFPUInvalidResult);
}
{
double rounded = std::floor(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register(fd_reg, result);
+ set_fpu_register_word(fd_reg, result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register(fd_reg, kFPUInvalidResult);
}
{
double rounded = std::ceil(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register(fd_reg, result);
+ set_fpu_register_word(fd_reg, result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register(fd_reg, kFPUInvalidResult);
}
case CVT_L_D: { // Mips32r2: Truncate double to 64-bit long-word.
double rounded = trunc(fs);
i64 = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg, i64 & 0xffffffff);
- set_fpu_register(fd_reg + 1, i64 >> 32);
+ if (IsFp64Mode()) {
+ set_fpu_register(fd_reg, i64);
+ } else {
+ set_fpu_register_word(fd_reg, i64 & 0xffffffff);
+ set_fpu_register_word(fd_reg + 1, i64 >> 32);
+ }
break;
}
case TRUNC_L_D: { // Mips32r2 instruction.
double rounded = trunc(fs);
i64 = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg, i64 & 0xffffffff);
- set_fpu_register(fd_reg + 1, i64 >> 32);
+ if (IsFp64Mode()) {
+ set_fpu_register(fd_reg, i64);
+ } else {
+ set_fpu_register_word(fd_reg, i64 & 0xffffffff);
+ set_fpu_register_word(fd_reg + 1, i64 >> 32);
+ }
break;
}
case ROUND_L_D: { // Mips32r2 instruction.
double rounded =
fs > 0 ? std::floor(fs + 0.5) : std::ceil(fs - 0.5);
i64 = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg, i64 & 0xffffffff);
- set_fpu_register(fd_reg + 1, i64 >> 32);
+ if (IsFp64Mode()) {
+ set_fpu_register(fd_reg, i64);
+ } else {
+ set_fpu_register_word(fd_reg, i64 & 0xffffffff);
+ set_fpu_register_word(fd_reg + 1, i64 >> 32);
+ }
break;
}
case FLOOR_L_D: // Mips32r2 instruction.
i64 = static_cast<int64_t>(std::floor(fs));
- set_fpu_register(fd_reg, i64 & 0xffffffff);
- set_fpu_register(fd_reg + 1, i64 >> 32);
+ if (IsFp64Mode()) {
+ set_fpu_register(fd_reg, i64);
+ } else {
+ set_fpu_register_word(fd_reg, i64 & 0xffffffff);
+ set_fpu_register_word(fd_reg + 1, i64 >> 32);
+ }
break;
case CEIL_L_D: // Mips32r2 instruction.
i64 = static_cast<int64_t>(std::ceil(fs));
- set_fpu_register(fd_reg, i64 & 0xffffffff);
- set_fpu_register(fd_reg + 1, i64 >> 32);
+ if (IsFp64Mode()) {
+ set_fpu_register(fd_reg, i64);
+ } else {
+ set_fpu_register_word(fd_reg, i64 & 0xffffffff);
+ set_fpu_register_word(fd_reg + 1, i64 >> 32);
+ }
break;
case C_F_D:
UNIMPLEMENTED_MIPS();
case W:
switch (instr->FunctionFieldRaw()) {
case CVT_S_W: // Convert word to float (single).
- alu_out = get_fpu_register(fs_reg);
+ alu_out = get_fpu_register_signed_word(fs_reg);
set_fpu_register_float(fd_reg, static_cast<float>(alu_out));
break;
case CVT_D_W: // Convert word to double.
- alu_out = get_fpu_register(fs_reg);
+ alu_out = get_fpu_register_signed_word(fs_reg);
set_fpu_register_double(fd_reg, static_cast<double>(alu_out));
break;
- default:
+          default:  // Mips32r6 CMP.S instructions unimplemented.
UNREACHABLE();
}
break;
case L:
+ fs = get_fpu_register_double(fs_reg);
+ ft = get_fpu_register_double(ft_reg);
switch (instr->FunctionFieldRaw()) {
case CVT_D_L: // Mips32r2 instruction.
// Watch the signs here, we want 2 32-bit vals
// to make a sign-64.
- i64 = static_cast<uint32_t>(get_fpu_register(fs_reg));
- i64 |= static_cast<int64_t>(get_fpu_register(fs_reg + 1)) << 32;
+ if (IsFp64Mode()) {
+ i64 = get_fpu_register(fs_reg);
+ } else {
+ i64 = static_cast<uint32_t>(get_fpu_register_word(fs_reg));
+ i64 |= static_cast<int64_t>(
+ get_fpu_register_word(fs_reg + 1)) << 32;
+ }
set_fpu_register_double(fd_reg, static_cast<double>(i64));
break;
case CVT_S_L:
UNIMPLEMENTED_MIPS();
break;
- default:
+            case CMP_AF:  // Mips32r6 CMP.D instructions.
+ UNIMPLEMENTED_MIPS();
+ break;
+ case CMP_UN:
+ if (std::isnan(fs) || std::isnan(ft)) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_EQ:
+ if (fs == ft) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_UEQ:
+ if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_LT:
+ if (fs < ft) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_ULT:
+ if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_LE:
+ if (fs <= ft) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_ULE:
+ if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ default: // CMP_OR CMP_UNE CMP_NE UNIMPLEMENTED.
UNREACHABLE();
}
break;
- case PS:
- break;
default:
UNREACHABLE();
}
}
// Instructions using HI and LO registers.
case MULT:
- set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
- set_register(HI, static_cast<int32_t>(i64hilo >> 32));
+ if (!IsMipsArchVariant(kMips32r6)) {
+ set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
+ set_register(HI, static_cast<int32_t>(i64hilo >> 32));
+ } else {
+ switch (instr->SaValue()) {
+ case MUL_OP:
+ set_register(rd_reg,
+ static_cast<int32_t>(i64hilo & 0xffffffff));
+ break;
+ case MUH_OP:
+ set_register(rd_reg, static_cast<int32_t>(i64hilo >> 32));
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ }
break;
case MULTU:
- set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
- set_register(HI, static_cast<int32_t>(u64hilo >> 32));
+ if (!IsMipsArchVariant(kMips32r6)) {
+ set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
+ set_register(HI, static_cast<int32_t>(u64hilo >> 32));
+ } else {
+ switch (instr->SaValue()) {
+ case MUL_OP:
+ set_register(rd_reg,
+ static_cast<int32_t>(u64hilo & 0xffffffff));
+ break;
+ case MUH_OP:
+ set_register(rd_reg, static_cast<int32_t>(u64hilo >> 32));
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ }
break;
case DIV:
- // Divide by zero and overflow was not checked in the configuration
- // step - div and divu do not raise exceptions. On division by 0
- // the result will be UNPREDICTABLE. On overflow (INT_MIN/-1),
- // return INT_MIN which is what the hardware does.
- if (rs == INT_MIN && rt == -1) {
- set_register(LO, INT_MIN);
- set_register(HI, 0);
- } else if (rt != 0) {
- set_register(LO, rs / rt);
- set_register(HI, rs % rt);
+ if (IsMipsArchVariant(kMips32r6)) {
+ switch (instr->SaValue()) {
+ case DIV_OP:
+ if (rs == INT_MIN && rt == -1) {
+ set_register(rd_reg, INT_MIN);
+ } else if (rt != 0) {
+ set_register(rd_reg, rs / rt);
+ }
+ break;
+ case MOD_OP:
+ if (rs == INT_MIN && rt == -1) {
+ set_register(rd_reg, 0);
+ } else if (rt != 0) {
+ set_register(rd_reg, rs % rt);
+ }
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ } else {
+ // Divide by zero and overflow was not checked in the
+ // configuration step - div and divu do not raise exceptions. On
+ // division by 0 the result will be UNPREDICTABLE. On overflow
+ // (INT_MIN/-1), return INT_MIN which is what the hardware does.
+ if (rs == INT_MIN && rt == -1) {
+ set_register(LO, INT_MIN);
+ set_register(HI, 0);
+ } else if (rt != 0) {
+ set_register(LO, rs / rt);
+ set_register(HI, rs % rt);
+ }
}
break;
case DIVU:
- if (rt_u != 0) {
- set_register(LO, rs_u / rt_u);
- set_register(HI, rs_u % rt_u);
+ if (IsMipsArchVariant(kMips32r6)) {
+ switch (instr->SaValue()) {
+ case DIV_OP:
+ if (rt_u != 0) {
+ set_register(rd_reg, rs_u / rt_u);
+ }
+ break;
+ case MOD_OP:
+ if (rt_u != 0) {
+ set_register(rd_reg, rs_u % rt_u);
+ }
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ } else {
+ if (rt_u != 0) {
+ set_register(LO, rs_u / rt_u);
+ set_register(HI, rs_u % rt_u);
+ }
}
break;
// Break and trap instructions.
int16_t imm16 = instr->Imm16Value();
int32_t ft_reg = instr->FtValue(); // Destination register.
+ int64_t ft;
// Zero extended immediate.
uint32_t oe_imm16 = 0xffff & imm16;
next_pc = current_pc + kBranchReturnOffset;
}
break;
+ case BC1EQZ:
+ ft = get_fpu_register(ft_reg);
+ do_branch = (ft & 0x1) ? false : true;
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ break;
+ case BC1NEZ:
+ ft = get_fpu_register(ft_reg);
+ do_branch = (ft & 0x1) ? true : false;
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ break;
default:
UNREACHABLE();
}
WriteW(addr, mem_value, instr);
break;
case LWC1:
- set_fpu_register(ft_reg, alu_out);
+ set_fpu_register_hi_word(ft_reg, 0);
+ set_fpu_register_word(ft_reg, alu_out);
break;
case LDC1:
set_fpu_register_double(ft_reg, fp_out);
break;
case SWC1:
addr = rs + se_imm16;
- WriteW(addr, get_fpu_register(ft_reg), instr);
+ WriteW(addr, get_fpu_register_word(ft_reg), instr);
break;
case SDC1:
addr = rs + se_imm16;
int32_t get_register(int reg) const;
double get_double_from_register_pair(int reg);
// Same for FPURegisters.
- void set_fpu_register(int fpureg, int32_t value);
+ void set_fpu_register(int fpureg, int64_t value);
+ void set_fpu_register_word(int fpureg, int32_t value);
+ void set_fpu_register_hi_word(int fpureg, int32_t value);
void set_fpu_register_float(int fpureg, float value);
void set_fpu_register_double(int fpureg, double value);
- int32_t get_fpu_register(int fpureg) const;
- int64_t get_fpu_register_long(int fpureg) const;
+ int64_t get_fpu_register(int fpureg) const;
+ int32_t get_fpu_register_word(int fpureg) const;
+ int32_t get_fpu_register_signed_word(int fpureg) const;
+ int32_t get_fpu_register_hi_word(int fpureg) const;
float get_fpu_register_float(int fpureg) const;
double get_fpu_register_double(int fpureg) const;
void set_fcsr_bit(uint32_t cc, bool value);
// Registers.
int32_t registers_[kNumSimuRegisters];
// Coprocessor Registers.
- int32_t FPUregisters_[kNumFPURegisters];
+ // Note: FP32 mode uses only the lower 32-bit part of each element,
+ // the upper 32-bit is unpredictable.
+ int64_t FPUregisters_[kNumFPURegisters];
// FPU control register.
uint32_t FCSR_;
__ Branch(&error, ne, v0, Operand(0x1));
__ nop();
__ sltu(v0, t7, t3);
- __ Branch(&error, ne, v0, Operand(0x0));
+ __ Branch(&error, ne, v0, Operand(zero_reg));
__ nop();
// End of SPECIAL class.
__ slti(v0, t1, 0x00002000); // 0x1
__ slti(v0, v0, 0xffff8000); // 0x0
- __ Branch(&error, ne, v0, Operand(0x0));
+ __ Branch(&error, ne, v0, Operand(zero_reg));
__ nop();
__ sltiu(v0, t1, 0x00002000); // 0x1
__ sltiu(v0, v0, 0x00008000); // 0x1
__ sdc1(f14, MemOperand(a0, OFFSET_OF(T, g)) );
// g = sqrt(f) = 10.97451593465515908537
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2)) {
__ ldc1(f4, MemOperand(a0, OFFSET_OF(T, h)) );
__ ldc1(f6, MemOperand(a0, OFFSET_OF(T, i)) );
__ madd_d(f14, f6, f4, f6);
CHECK_EQ(1.8066e16, t.e);
CHECK_EQ(120.44, t.f);
CHECK_EQ(10.97451593465515908537, t.g);
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2)) {
CHECK_EQ(6.875, t.h);
}
}
__ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
// Swap f4 and f6, by using four integer registers, t0-t3.
- __ mfc1(t0, f4);
- __ mfc1(t1, f5);
- __ mfc1(t2, f6);
- __ mfc1(t3, f7);
-
- __ mtc1(t0, f6);
- __ mtc1(t1, f7);
- __ mtc1(t2, f4);
- __ mtc1(t3, f5);
-
+ if (!IsFp64Mode()) {
+ __ mfc1(t0, f4);
+ __ mfc1(t1, f5);
+ __ mfc1(t2, f6);
+ __ mfc1(t3, f7);
+
+ __ mtc1(t0, f6);
+ __ mtc1(t1, f7);
+ __ mtc1(t2, f4);
+ __ mtc1(t3, f5);
+ } else {
+ DCHECK(!IsMipsArchVariant(kMips32r1) && !IsMipsArchVariant(kLoongson));
+ __ mfc1(t0, f4);
+ __ mfhc1(t1, f4);
+ __ mfc1(t2, f6);
+ __ mfhc1(t3, f6);
+
+ __ mtc1(t0, f6);
+ __ mthc1(t1, f6);
+ __ mtc1(t2, f4);
+ __ mthc1(t3, f4);
+ }
// Store the swapped f4 and f5 back to memory.
__ sdc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
__ sdc1(f6, MemOperand(a0, OFFSET_OF(T, c)) );
__ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
__ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
+ if (!IsMipsArchVariant(kMips32r6)) {
__ c(UN, D, f4, f6);
__ bc1f(&neither_is_nan);
+ } else {
+ __ cmp(UN, L, f2, f4, f6);
+ __ bc1eqz(&neither_is_nan, f2);
+ }
__ nop();
__ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
__ Branch(&outa_here);
__ bind(&neither_is_nan);
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
__ c(OLT, D, f6, f4);
__ bc1t(&less_than);
+ } else if (IsMipsArchVariant(kMips32r6)) {
+ __ cmp(OLT, L, f2, f6, f4);
+ __ bc1nez(&less_than, f2);
} else {
__ c(OLT, D, f6, f4, 2);
__ bc1t(&less_than, 2);
}
+
__ nop();
__ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
__ Branch(&outa_here);
MacroAssembler assm(isolate, NULL, 0);
Label exit, exit2, exit3;
- __ Branch(&exit, ge, a0, Operand(0x00000000));
+ __ Branch(&exit, ge, a0, Operand(zero_reg));
__ Branch(&exit2, ge, a0, Operand(0x00001FFF));
__ Branch(&exit3, ge, a0, Operand(0x0001FFFF));
Assembler assm(isolate, NULL, 0);
Label L, C;
- if (kArchVariant == kMips32r2) {
- // Load all structure elements to registers.
- __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, a)));
-
- // Save the raw bits of the double.
- __ mfc1(t0, f0);
- __ mfc1(t1, f1);
- __ sw(t0, MemOperand(a0, OFFSET_OF(T, dbl_mant)));
- __ sw(t1, MemOperand(a0, OFFSET_OF(T, dbl_exp)));
-
- // Convert double in f0 to long, save hi/lo parts.
- __ cvt_w_d(f0, f0);
- __ mfc1(t0, f0); // f0 has a 32-bits word.
- __ sw(t0, MemOperand(a0, OFFSET_OF(T, word)));
-
- // Convert the b long integers to double b.
- __ lw(t0, MemOperand(a0, OFFSET_OF(T, b_word)));
- __ mtc1(t0, f8); // f8 has a 32-bits word.
- __ cvt_d_w(f10, f8);
- __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, b)));
-
- __ jr(ra);
- __ nop();
-
- CodeDesc desc;
- assm.GetCode(&desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
- t.a = 2.147483646e+09; // 0x7FFFFFFE -> 0xFF80000041DFFFFF as double.
- t.b_word = 0x0ff00ff0; // 0x0FF00FF0 -> 0x as double.
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
- USE(dummy);
-
- CHECK_EQ(0x41DFFFFF, t.dbl_exp);
- CHECK_EQ(0xFF800000, t.dbl_mant);
- CHECK_EQ(0X7FFFFFFE, t.word);
- // 0x0FF00FF0 -> 2.6739096+e08
- CHECK_EQ(2.6739096e08, t.b);
- }
+ if (!IsMipsArchVariant(kMips32r2)) return;
+
+ // Load all structure elements to registers.
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, a)));
+
+ // Save the raw bits of the double.
+ __ mfc1(t0, f0);
+ __ mfc1(t1, f1);
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, dbl_mant)));
+ __ sw(t1, MemOperand(a0, OFFSET_OF(T, dbl_exp)));
+
+ // Convert double in f0 to long, save hi/lo parts.
+ __ cvt_w_d(f0, f0);
+ __ mfc1(t0, f0); // f0 has a 32-bits word.
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, word)));
+
+ // Convert the b long integers to double b.
+ __ lw(t0, MemOperand(a0, OFFSET_OF(T, b_word)));
+ __ mtc1(t0, f8); // f8 has a 32-bits word.
+ __ cvt_d_w(f10, f8);
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, b)));
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ t.a = 2.147483646e+09; // 0x7FFFFFFE -> 0xFF80000041DFFFFF as double.
+  t.b_word = 0x0ff00ff0;  // 0x0FF00FF0 -> 2.6739096e+08 as double.
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(0x41DFFFFF, t.dbl_exp);
+ CHECK_EQ(0xFF800000, t.dbl_mant);
+ CHECK_EQ(0X7FFFFFFE, t.word);
+ // 0x0FF00FF0 -> 2.6739096+e08
+ CHECK_EQ(2.6739096e08, t.b);
}
TEST(MIPS11) {
+ // Do not run test on MIPS32r6, as these instructions are removed.
+ if (IsMipsArchVariant(kMips32r6)) return;
// Test LWL, LWR, SWL and SWR instructions.
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
COMPARE(subu(v0, v1, s0),
"00701023 subu v0, v1, s0");
- COMPARE(mult(a0, a1),
- "00850018 mult a0, a1");
- COMPARE(mult(t2, t3),
- "014b0018 mult t2, t3");
- COMPARE(mult(v0, v1),
- "00430018 mult v0, v1");
-
- COMPARE(multu(a0, a1),
- "00850019 multu a0, a1");
- COMPARE(multu(t2, t3),
- "014b0019 multu t2, t3");
- COMPARE(multu(v0, v1),
- "00430019 multu v0, v1");
-
- COMPARE(div(a0, a1),
- "0085001a div a0, a1");
- COMPARE(div(t2, t3),
- "014b001a div t2, t3");
- COMPARE(div(v0, v1),
- "0043001a div v0, v1");
-
- COMPARE(divu(a0, a1),
- "0085001b divu a0, a1");
- COMPARE(divu(t2, t3),
- "014b001b divu t2, t3");
- COMPARE(divu(v0, v1),
- "0043001b divu v0, v1");
-
- if (kArchVariant != kLoongson) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ COMPARE(mult(a0, a1),
+ "00850018 mult a0, a1");
+ COMPARE(mult(t2, t3),
+ "014b0018 mult t2, t3");
+ COMPARE(mult(v0, v1),
+ "00430018 mult v0, v1");
+
+ COMPARE(multu(a0, a1),
+ "00850019 multu a0, a1");
+ COMPARE(multu(t2, t3),
+ "014b0019 multu t2, t3");
+ COMPARE(multu(v0, v1),
+ "00430019 multu v0, v1");
+
+ COMPARE(div(a0, a1),
+ "0085001a div a0, a1");
+ COMPARE(div(t2, t3),
+ "014b001a div t2, t3");
+ COMPARE(div(v0, v1),
+ "0043001a div v0, v1");
+
+ COMPARE(divu(a0, a1),
+ "0085001b divu a0, a1");
+ COMPARE(divu(t2, t3),
+ "014b001b divu t2, t3");
+ COMPARE(divu(v0, v1),
+ "0043001b divu v0, v1");
+
+ if (!IsMipsArchVariant(kLoongson)) {
+ COMPARE(mul(a0, a1, a2),
+ "70a62002 mul a0, a1, a2");
+ COMPARE(mul(t2, t3, t4),
+ "716c5002 mul t2, t3, t4");
+ COMPARE(mul(v0, v1, s0),
+ "70701002 mul v0, v1, s0");
+ }
+ } else { // MIPS32r6.
COMPARE(mul(a0, a1, a2),
- "70a62002 mul a0, a1, a2");
- COMPARE(mul(t2, t3, t4),
- "716c5002 mul t2, t3, t4");
- COMPARE(mul(v0, v1, s0),
- "70701002 mul v0, v1, s0");
+ "00a62098 mul a0, a1, a2");
+ COMPARE(muh(a0, a1, a2),
+ "00a620d8 muh a0, a1, a2");
+ COMPARE(mul(t1, t2, t3),
+ "014b4898 mul t1, t2, t3");
+ COMPARE(muh(t1, t2, t3),
+ "014b48d8 muh t1, t2, t3");
+ COMPARE(mul(v0, v1, a0),
+ "00641098 mul v0, v1, a0");
+ COMPARE(muh(v0, v1, a0),
+ "006410d8 muh v0, v1, a0");
+
+ COMPARE(mulu(a0, a1, a2),
+ "00a62099 mulu a0, a1, a2");
+ COMPARE(muhu(a0, a1, a2),
+ "00a620d9 muhu a0, a1, a2");
+ COMPARE(mulu(t1, t2, t3),
+ "014b4899 mulu t1, t2, t3");
+ COMPARE(muhu(t1, t2, t3),
+ "014b48d9 muhu t1, t2, t3");
+ COMPARE(mulu(v0, v1, a0),
+ "00641099 mulu v0, v1, a0");
+ COMPARE(muhu(v0, v1, a0),
+ "006410d9 muhu v0, v1, a0");
+
+ COMPARE(div(a0, a1, a2),
+ "00a6209a div a0, a1, a2");
+ COMPARE(mod(a0, a1, a2),
+ "00a620da mod a0, a1, a2");
+ COMPARE(div(t1, t2, t3),
+ "014b489a div t1, t2, t3");
+ COMPARE(mod(t1, t2, t3),
+ "014b48da mod t1, t2, t3");
+ COMPARE(div(v0, v1, a0),
+ "0064109a div v0, v1, a0");
+ COMPARE(mod(v0, v1, a0),
+ "006410da mod v0, v1, a0");
+
+ COMPARE(divu(a0, a1, a2),
+ "00a6209b divu a0, a1, a2");
+ COMPARE(modu(a0, a1, a2),
+ "00a620db modu a0, a1, a2");
+ COMPARE(divu(t1, t2, t3),
+ "014b489b divu t1, t2, t3");
+ COMPARE(modu(t1, t2, t3),
+ "014b48db modu t1, t2, t3");
+ COMPARE(divu(v0, v1, a0),
+ "0064109b divu v0, v1, a0");
+ COMPARE(modu(v0, v1, a0),
+ "006410db modu v0, v1, a0");
+
+ COMPARE(bovc(a0, a0, static_cast<int16_t>(0)),
+ "20840000 bovc a0, a0, 0");
+ COMPARE(bovc(a1, a0, static_cast<int16_t>(0)),
+ "20a40000 bovc a1, a0, 0");
+ COMPARE(bovc(a1, a0, 32767),
+ "20a47fff bovc a1, a0, 32767");
+ COMPARE(bovc(a1, a0, -32768),
+ "20a48000 bovc a1, a0, -32768");
+
+ COMPARE(bnvc(a0, a0, static_cast<int16_t>(0)),
+ "60840000 bnvc a0, a0, 0");
+ COMPARE(bnvc(a1, a0, static_cast<int16_t>(0)),
+ "60a40000 bnvc a1, a0, 0");
+ COMPARE(bnvc(a1, a0, 32767),
+ "60a47fff bnvc a1, a0, 32767");
+ COMPARE(bnvc(a1, a0, -32768),
+ "60a48000 bnvc a1, a0, -32768");
+
+ COMPARE(beqzc(a0, 0),
+ "d8800000 beqzc a0, 0x0");
+ COMPARE(beqzc(a0, 0xfffff), // 0x0fffff == 1048575.
+ "d88fffff beqzc a0, 0xfffff");
+ COMPARE(beqzc(a0, 0x100000), // 0x100000 == -1048576.
+ "d8900000 beqzc a0, 0x100000");
+
+ COMPARE(bnezc(a0, 0),
+ "f8800000 bnezc a0, 0x0");
+ COMPARE(bnezc(a0, 0xfffff), // 0x0fffff == 1048575.
+ "f88fffff bnezc a0, 0xfffff");
+ COMPARE(bnezc(a0, 0x100000), // 0x100000 == -1048576.
+ "f8900000 bnezc a0, 0x100000");
}
COMPARE(addiu(a0, a1, 0x0),
COMPARE(srav(v0, v1, fp),
"03c31007 srav v0, v1, fp");
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2)) {
COMPARE(rotr(a0, a1, 0),
"00252002 rotr a0, a1, 0");
COMPARE(rotr(s0, s1, 8),
COMPARE(sltiu(v0, v1, -1),
"2c62ffff sltiu v0, v1, -1");
- if (kArchVariant != kLoongson) {
+ if (!IsMipsArchVariant(kLoongson)) {
COMPARE(movz(a0, a1, a2),
"00a6200a movz a0, a1, a2");
COMPARE(movz(s0, s1, s2),
COMPARE(movf(v0, v1, 6),
"00781001 movf v0, v1, 6");
- COMPARE(clz(a0, a1),
- "70a42020 clz a0, a1");
- COMPARE(clz(s6, s7),
- "72f6b020 clz s6, s7");
- COMPARE(clz(v0, v1),
- "70621020 clz v0, v1");
+ if (IsMipsArchVariant(kMips32r6)) {
+ COMPARE(clz(a0, a1),
+ "00a02050 clz a0, a1");
+ COMPARE(clz(s6, s7),
+ "02e0b050 clz s6, s7");
+ COMPARE(clz(v0, v1),
+ "00601050 clz v0, v1");
+ } else {
+ COMPARE(clz(a0, a1),
+ "70a42020 clz a0, a1");
+ COMPARE(clz(s6, s7),
+ "72f6b020 clz s6, s7");
+ COMPARE(clz(v0, v1),
+ "70621020 clz v0, v1");
+ }
}
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2)) {
COMPARE(ins_(a0, a1, 31, 1),
"7ca4ffc4 ins a0, a1, 31, 1");
COMPARE(ins_(s6, s7, 30, 2),