}
+void Assembler::bitswap(Register rd, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BITSWAP);
+}
+
+
void Assembler::pref(int32_t hint, const MemOperand& rs) {
DCHECK(!IsMipsArchVariant(kLoongson));
DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
}
+void Assembler::class_s(FPURegister fd, FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
+}
+
+
+void Assembler::class_d(FPURegister fd, FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
+}
+
+
void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
DCHECK(IsMipsArchVariant(kMips32r6));
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
}
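+// Note: in the r6 CMP.cond.fmt encoding, the single- and double-precision
+// compares reuse the W and L values in the fmt field, hence cmp(cond, W, ...)
+// for cmp_s and cmp(cond, L, ...) for cmp_d below.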
+void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
+ cmp(cond, W, fd, fs, ft);
+}
+
+
+void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
+ cmp(cond, L, fd, fs, ft);
+}
+
+
void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
DCHECK(IsMipsArchVariant(kMips32r6));
Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
void Assembler::c(FPUCondition cond, SecondaryField fmt,
FPURegister fs, FPURegister ft, uint16_t cc) {
DCHECK(is_uint3(cc));
+ DCHECK(fmt == S || fmt == D);
DCHECK((fmt & ~(31 << kRsShift)) == 0);
Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
| cc << 8 | 3 << 4 | cond;
}
+void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
+ uint16_t cc) {
+ c(cond, S, fs, ft, cc);
+}
+
+
+void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
+ uint16_t cc) {
+ c(cond, D, fs, ft, cc);
+}
+
+
void Assembler::fcmp(FPURegister src1, const double src2,
FPUCondition cond) {
DCHECK(src2 == 0.0);
void clz(Register rd, Register rs);
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void bitswap(Register rd, Register rt);
// --------Coprocessor-instructions----------------
void ceil_l_s(FPURegister fd, FPURegister fs);
void ceil_l_d(FPURegister fd, FPURegister fs);
+ void class_s(FPURegister fd, FPURegister fs);
+ void class_d(FPURegister fd, FPURegister fs);
+
void min(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
void mina(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
void max(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
// Conditions and branches for MIPSr6.
void cmp(FPUCondition cond, SecondaryField fmt,
FPURegister fd, FPURegister ft, FPURegister fs);
+ void cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
+ void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
void bc1eqz(int16_t offset, FPURegister ft);
void bc1eqz(Label* L, FPURegister ft) {
// Conditions and branches for non MIPSr6.
void c(FPUCondition cond, SecondaryField fmt,
FPURegister ft, FPURegister fs, uint16_t cc = 0);
+ void c_s(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
+ void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
void bc1f(int16_t offset, uint16_t cc = 0);
void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
switch (FunctionFieldRaw()) {
case INS:
case EXT:
+ case BITSWAP:
return kRegisterType;
default:
return kUnsupported;
// SPECIAL3 Encoding of Function Field.
EXT = ((0 << 3) + 0),
INS = ((0 << 3) + 4),
+ BITSWAP = ((4 << 3) + 0),
// REGIMM encoding of rt Field.
BLTZ = ((0 << 3) + 0) << 16,
L = ((2 << 3) + 5) << 21,
PS = ((2 << 3) + 6) << 21,
// COP1 Encoding of Function Field When rs=S.
ADD_S = ((0 << 3) + 0),
SUB_S = ((0 << 3) + 1),
MUL_S = ((0 << 3) + 2),
FLOOR_W_S = ((1 << 3) + 7),
RECIP_S = ((2 << 3) + 5),
RSQRT_S = ((2 << 3) + 6),
+ CLASS_S = ((3 << 3) + 3),
CVT_D_S = ((4 << 3) + 1),
CVT_W_S = ((4 << 3) + 4),
CVT_L_S = ((4 << 3) + 5),
CVT_PS_S = ((4 << 3) + 6),
+
// COP1 Encoding of Function Field When rs=D.
ADD_D = ((0 << 3) + 0),
SUB_D = ((0 << 3) + 1),
FLOOR_W_D = ((1 << 3) + 7),
RECIP_D = ((2 << 3) + 5),
RSQRT_D = ((2 << 3) + 6),
+ CLASS_D = ((3 << 3) + 3),
+ MIN = ((3 << 3) + 4),
+ MINA = ((3 << 3) + 5),
+ MAX = ((3 << 3) + 6),
+ MAXA = ((3 << 3) + 7),
CVT_S_D = ((4 << 3) + 0),
CVT_W_D = ((4 << 3) + 4),
CVT_L_D = ((4 << 3) + 5),
C_ULT_D = ((6 << 3) + 5),
C_OLE_D = ((6 << 3) + 6),
C_ULE_D = ((6 << 3) + 7),
+
// COP1 Encoding of Function Field When rs=W or L.
CVT_S_W = ((4 << 3) + 0),
CVT_D_W = ((4 << 3) + 1),
CMP_SUGT = ((3 << 3) + 6), // Reserved, not implemented.
CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented.
- MIN = ((3 << 3) + 4),
- MINA = ((3 << 3) + 5),
- MAX = ((3 << 3) + 6),
- MAXA = ((3 << 3) + 7),
SEL = ((2 << 3) + 0),
MOVZ_C = ((2 << 3) + 2),
MOVN_C = ((2 << 3) + 3),
enum FPUCondition {
kNoFPUCondition = -1,
- F = 0, // False.
- UN = 1, // Unordered.
- EQ = 2, // Equal.
- UEQ = 3, // Unordered or Equal.
- OLT = 4, // Ordered or Less Than.
- ULT = 5, // Unordered or Less Than.
- OLE = 6, // Ordered or Less Than or Equal.
- ULE = 7 // Unordered or Less Than or Equal.
+ F = 0x00, // False.
+ UN = 0x01, // Unordered.
+ EQ = 0x02, // Equal.
+ UEQ = 0x03, // Unordered or Equal.
+ OLT = 0x04, // Ordered and Less Than, on Mips release < 6.
+ LT = 0x04, // Ordered and Less Than, on Mips release >= 6.
+ ULT = 0x05, // Unordered or Less Than.
+ OLE = 0x06, // Ordered and Less Than or Equal, on Mips release < 6.
+ LE = 0x06, // Ordered and Less Than or Equal, on Mips release >= 6.
+ ULE = 0x07, // Unordered or Less Than or Equal.
+
+ // The following constants are available on Mips release >= 6 only.
+ ORD = 0x11, // Ordered.
+ UNE = 0x12, // Unordered or Not Equal.
+ NE = 0x13, // Ordered Greater Than or Less Than (ordered and not equal).
};
case CEIL_W_D:
Format(instr, "ceil.w.'t 'fd, 'fs");
break;
+ case CLASS_D:
+ Format(instr, "class.'t 'fd, 'fs");
+ break;
case CEIL_L_D:
Format(instr, "ceil.l.'t 'fd, 'fs");
break;
case CVT_S_L:
Format(instr, "cvt.s.l 'fd, 'fs");
break;
+ case CMP_AF:
+ Format(instr, "cmp.af.d 'fd, 'fs, 'ft");
+ break;
case CMP_UN:
Format(instr, "cmp.un.d 'fd, 'fs, 'ft");
break;
}
break;
}
+ case BITSWAP: {
+ if (IsMipsArchVariant(kMips32r6)) {
+ Format(instr, "bitswap 'rd, 'rt");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
default:
UNREACHABLE();
}
}
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round_error(double original, double rounded) {
bool ret = false;
double max_int32 = std::numeric_limits<int32_t>::max();
}
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round_error(float original, float rounded) {
bool ret = false;
double max_int32 = std::numeric_limits<int32_t>::max();
}
+void Simulator::round_according_to_fcsr(float toRound, float& rounded,
+ int32_t& rounded_int, float fs) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one. Behave like round_w_d.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result. Behave like trunc_w_d.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up. Behave like ceil_w_d.
+
+ // 3 RD (round down, or toward −infinity): Round a result to
+ // the next representable value down. Behave like floor_w_d.
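+ //
+ // For example, with kRoundToNearest, 2.5 rounds to 2 and 3.5 rounds
+ // to 4, since ties go to the even neighbor.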
+ switch (get_fcsr_rounding_mode()) {
+ case kRoundToNearest:
+ rounded = std::floor(fs + 0.5);
+ rounded_int = static_cast<int32_t>(rounded);
+ if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ rounded_int--;
+ }
+ break;
+ case kRoundToZero:
+ rounded = trunc(fs);
+ rounded_int = static_cast<int32_t>(rounded);
+ break;
+ case kRoundToPlusInf:
+ rounded = std::ceil(fs);
+ rounded_int = static_cast<int32_t>(rounded);
+ break;
+ case kRoundToMinusInf:
+ rounded = std::floor(fs);
+ rounded_int = static_cast<int32_t>(rounded);
+ break;
+ }
+}
+
+
+void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
+ int64_t& rounded_int, double fs) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one. Behave like round_w_d.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result. Behave like trunc_w_d.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up. Behave like ceil_w_d.
+
+ // 3 RD (round down, or toward −infinity): Round a result to
+ // the next representable value down. Behave like floor_w_d.
+ switch (get_fcsr_rounding_mode()) {
+ case kRoundToNearest:
+ rounded = std::floor(fs + 0.5);
+ rounded_int = static_cast<int64_t>(rounded);
+ if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ rounded_int--;
+ }
+ break;
+ case kRoundToZero:
+ rounded = trunc(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ case kRoundToPlusInf:
+ rounded = std::ceil(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ case kRoundToMinusInf:
+ rounded = std::floor(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ }
+}
+
+
+void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
+ int64_t& rounded_int, float fs) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one. Behave like round_w_d.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result. Behave like trunc_w_d.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up. Behave like ceil_w_d.
+
+ // 3 RD (round down, or toward −infinity): Round a result to
+ // the next representable value down. Behave like floor_w_d.
+ switch (get_fcsr_rounding_mode()) {
+ case kRoundToNearest:
+ rounded = std::floor(fs + 0.5);
+ rounded_int = static_cast<int64_t>(rounded);
+ if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ rounded_int--;
+ }
+ break;
+ case kRoundToZero:
+ rounded = trunc(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ case kRoundToPlusInf:
+ rounded = std::ceil(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ case kRoundToMinusInf:
+ rounded = std::floor(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ }
+}
+
+
// Raw access to the PC register.
void Simulator::set_pc(int32_t value) {
pc_modified_ = true;
*alu_out = (rs_u & (mask << lsb)) >> lsb;
break;
}
+ case BITSWAP: { // Mips32r6 instruction
+ uint32_t input = static_cast<uint32_t>(rt);
+ uint32_t output = 0;
+ uint8_t i_byte, o_byte;
+
+ // Reverse the bits within each individual byte
+ for (int i = 0; i < 4; i++) {
+ output = output >> 8;
+ i_byte = input & 0xff;
+
+ // Fast way to reverse bits in byte
+ // Devised by Sean Anderson, July 13, 2001
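+ // The two multiplies make shifted copies of the byte, the masks keep
+ // one bit from each copy so the kept bits appear in reverse order, and
+ // the final multiply-and-shift packs them back into a single byte.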
+ o_byte = static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
+ (i_byte * 0x8020LU & 0x88440LU)) *
+ 0x10101LU >>
+ 16);
+
+ output = output | (static_cast<uint32_t>(o_byte) << 24);
+ input = input >> 8;
+ }
+
+ *alu_out = static_cast<int32_t>(output);
+ break;
+ }
default:
UNREACHABLE();
}
set_fpu_register_float(fd_reg, static_cast<float>(fs));
break;
case CVT_L_D: { // Mips32r2: Truncate double to 64-bit long-word.
- double rounded = trunc(fs);
- i64 = static_cast<int64_t>(rounded);
if (IsFp64Mode()) {
- set_fpu_register(fd_reg, i64);
+ int64_t result;
+ double rounded;
+ round64_according_to_fcsr(fs, rounded, result, fs);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
} else {
UNSUPPORTED();
}
break;
}
case TRUNC_L_D: { // Mips32r2 instruction.
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
}
break;
}
- case C_F_D:
- UNIMPLEMENTED_MIPS();
+ case CLASS_D: { // Mips32r6 instruction
+ // Convert double input to uint64_t for easier bit manipulation
+ uint64_t classed = bit_cast<uint64_t>(fs);
+
+ // Extracting sign, exponent and mantissa from the input double
+ uint32_t sign = (classed >> 63) & 1;
+ uint32_t exponent = (classed >> 52) & 0x00000000000007ff;
+ uint64_t mantissa = classed & 0x000fffffffffffff;
+ uint64_t result;
+ double dResult;
+
+ // Setting flags if input double is negative infinity,
+ // positive infinity, negative zero or positive zero
+ bool negInf = (classed == 0xFFF0000000000000);
+ bool posInf = (classed == 0x7FF0000000000000);
+ bool negZero = (classed == 0x8000000000000000);
+ bool posZero = (classed == 0x0000000000000000);
+
+ bool signalingNan;
+ bool quietNan;
+ bool negSubnorm;
+ bool posSubnorm;
+ bool negNorm;
+ bool posNorm;
+
+ // Setting flags if double is NaN
+ signalingNan = false;
+ quietNan = false;
+ if (!negInf && !posInf && exponent == 0x7ff) {
+ quietNan = ((mantissa & 0x0008000000000000) != 0) &&
+ ((mantissa & (0x0008000000000000 - 1)) == 0);
+ signalingNan = !quietNan;
+ }
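+ // Note: only the canonical quiet-NaN encoding (top mantissa bit set,
+ // all payload bits clear) is classified as quiet here; NaNs carrying
+ // extra payload bits are reported as signaling.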
+
+ // Setting flags if double is subnormal number
+ posSubnorm = false;
+ negSubnorm = false;
+ if ((exponent == 0) && (mantissa != 0)) {
+ DCHECK(sign == 0 || sign == 1);
+ posSubnorm = (sign == 0);
+ negSubnorm = (sign == 1);
+ }
+
+ // Setting flags if double is normal number
+ posNorm = false;
+ negNorm = false;
+ if (!posSubnorm && !negSubnorm && !posInf && !negInf && !signalingNan &&
+ !quietNan && !negZero && !posZero) {
+ DCHECK(sign == 0 || sign == 1);
+ posNorm = (sign == 0);
+ negNorm = (sign == 1);
+ }
+
+ // Calculating result according to description of CLASS.D instruction
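+ // (bit 0: sNaN, bit 1: qNaN, bit 2: -inf, bit 3: -normal,
+ // bit 4: -subnormal, bit 5: -zero, bit 6: +inf, bit 7: +normal,
+ // bit 8: +subnormal, bit 9: +zero)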
+ result = (posZero << 9) | (posSubnorm << 8) | (posNorm << 7) |
+ (posInf << 6) | (negZero << 5) | (negSubnorm << 4) |
+ (negNorm << 3) | (negInf << 2) | (quietNan << 1) | signalingNan;
+
+ DCHECK(result != 0);
+
+ dResult = bit_cast<double>(result);
+ set_fpu_register_double(fd_reg, dResult);
+
+ break;
+ }
+ case C_F_D: {
+ set_fcsr_bit(fcsr_cc, false);
break;
+ }
default:
UNREACHABLE();
}
void Simulator::DecodeTypeRegisterWRsType(Instruction* instr, int32_t& alu_out,
const int32_t& fd_reg,
- const int32_t& fs_reg) {
+ const int32_t& fs_reg,
+ const int32_t& ft_reg) {
+ float fs = get_fpu_register_float(fs_reg);
+ float ft = get_fpu_register_float(ft_reg);
switch (instr->FunctionFieldRaw()) {
case CVT_S_W: // Convert word to float (single).
alu_out = get_fpu_register_signed_word(fs_reg);
alu_out = get_fpu_register_signed_word(fs_reg);
set_fpu_register_double(fd_reg, static_cast<double>(alu_out));
break;
- default: // Mips64r6 CMP.S instructions unimplemented.
+ case CMP_AF:
+ set_fpu_register_word(fd_reg, 0);
+ break;
+ case CMP_UN:
+ if (std::isnan(fs) || std::isnan(ft)) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_EQ:
+ if (fs == ft) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_UEQ:
+ if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_LT:
+ if (fs < ft) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_ULT:
+ if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_LE:
+ if (fs <= ft) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_ULE:
+ if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_OR:
+ if (!std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_UNE:
+ if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_NE:
+ if (fs != ft && (!std::isnan(fs) && !std::isnan(ft))) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ default:
UNREACHABLE();
}
}
set_fpu_register_float(fd_reg, result);
break;
}
+ case C_F_D:
+ set_fcsr_bit(fcsr_cc, false);
+ break;
case C_UN_D:
set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
break;
DCHECK(IsMipsArchVariant(kMips32r6));
set_fpu_register_float(fd_reg, (fd_int & 0x1) == 0 ? fs : ft);
break;
+ case CLASS_S: { // Mips32r6 instruction
+ // Convert float input to uint32_t for easier bit manipulation
+ float fs = get_fpu_register_float(fs_reg);
+ uint32_t classed = bit_cast<uint32_t>(fs);
+
+ // Extracting sign, exponent and mantissa from the input float
+ uint32_t sign = (classed >> 31) & 1;
+ uint32_t exponent = (classed >> 23) & 0x000000ff;
+ uint32_t mantissa = classed & 0x007fffff;
+ uint32_t result;
+ float fResult;
+
+ // Setting flags if input float is negative infinity,
+ // positive infinity, negative zero or positive zero
+ bool negInf = (classed == 0xFF800000);
+ bool posInf = (classed == 0x7F800000);
+ bool negZero = (classed == 0x80000000);
+ bool posZero = (classed == 0x00000000);
+
+ bool signalingNan;
+ bool quietNan;
+ bool negSubnorm;
+ bool posSubnorm;
+ bool negNorm;
+ bool posNorm;
+
+ // Setting flags if float is NaN
+ signalingNan = false;
+ quietNan = false;
+ if (!negInf && !posInf && (exponent == 0xff)) {
+ quietNan = ((mantissa & 0x00200000) == 0) &&
+ ((mantissa & (0x00200000 - 1)) == 0);
+ signalingNan = !quietNan;
+ }
+
+ // Setting flags if float is subnormal number
+ posSubnorm = false;
+ negSubnorm = false;
+ if ((exponent == 0) && (mantissa != 0)) {
+ DCHECK(sign == 0 || sign == 1);
+ posSubnorm = (sign == 0);
+ negSubnorm = (sign == 1);
+ }
+
+ // Setting flags if float is normal number
+ posNorm = false;
+ negNorm = false;
+ if (!posSubnorm && !negSubnorm && !posInf && !negInf && !signalingNan &&
+ !quietNan && !negZero && !posZero) {
+ DCHECK(sign == 0 || sign == 1);
+ posNorm = (sign == 0);
+ negNorm = (sign == 1);
+ }
+
+ // Calculating result according to description of CLASS.S instruction
+ result = (posZero << 9) | (posSubnorm << 8) | (posNorm << 7) |
+ (posInf << 6) | (negZero << 5) | (negSubnorm << 4) |
+ (negNorm << 3) | (negInf << 2) | (quietNan << 1) | signalingNan;
+
+ DCHECK(result != 0);
+
+ fResult = bit_cast<float>(result);
+ set_fpu_register_float(fd_reg, fResult);
+
+ break;
+ }
case SELEQZ_C:
DCHECK(IsMipsArchVariant(kMips32r6));
set_fpu_register_float(
set_fpu_register_float(fd_reg, result);
}
break;
+ case CVT_L_S: {
+ if (IsFp64Mode()) {
+ int64_t result;
+ float rounded;
+ round64_according_to_fcsr(fs, rounded, result, fs);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ case CVT_W_S: {
+ float rounded;
+ int32_t result;
+ round_according_to_fcsr(fs, rounded, result, fs);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
default:
- // CVT_W_S CVT_L_S ROUND_W_S ROUND_L_S FLOOR_W_S FLOOR_L_S
+ // ROUND_W_S ROUND_L_S FLOOR_W_S FLOOR_L_S
// CEIL_W_S CEIL_L_S CVT_PS_S are unimplemented.
set_fpu_register_double(fd_reg, static_cast<double>(i64));
break;
case CVT_S_L:
- UNIMPLEMENTED_MIPS();
+ if (IsFp64Mode()) {
+ i64 = get_fpu_register(fs_reg);
+ } else {
+ i64 = static_cast<uint32_t>(get_fpu_register_word(fs_reg));
+ i64 |= static_cast<int64_t>(get_fpu_register_word(fs_reg + 1)) << 32;
+ }
+ set_fpu_register_float(fd_reg, static_cast<float>(i64));
break;
- case CMP_AF: // Mips64r6 CMP.D instructions.
+ case CMP_AF: // Mips32r6 CMP.D instructions.
- UNIMPLEMENTED_MIPS();
+ set_fpu_register(fd_reg, 0);
break;
case CMP_UN:
if (std::isnan(fs) || std::isnan(ft)) {
set_fpu_register(fd_reg, 0);
}
break;
- default: // CMP_OR CMP_UNE CMP_NE UNIMPLEMENTED.
+ case CMP_OR:
+ if (!std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_UNE:
+ if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_NE:
+ if (fs != ft && (!std::isnan(fs) && !std::isnan(ft))) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ default:
UNREACHABLE();
}
}
DecodeTypeRegisterDRsType(instr, fr_reg, fs_reg, ft_reg, fd_reg);
break;
case W:
- DecodeTypeRegisterWRsType(instr, alu_out, fd_reg, fs_reg);
+ DecodeTypeRegisterWRsType(instr, alu_out, fd_reg, fs_reg, ft_reg);
break;
case L:
DecodeTypeRegisterLRsType(instr, ft_reg, fs_reg, fd_reg);
void Simulator::DecodeTypeRegisterSPECIAL3(Instruction* instr,
const int32_t& rt_reg,
+ const int32_t& rd_reg,
int32_t& alu_out) {
switch (instr->FunctionFieldRaw()) {
case INS:
// Ext instr leaves result in Rt, rather than Rd.
set_register(rt_reg, alu_out);
break;
+ case BITSWAP:
+ set_register(rd_reg, alu_out);
+ break;
default:
UNREACHABLE();
}
DecodeTypeRegisterSPECIAL2(instr, rd_reg, alu_out);
break;
case SPECIAL3:
- DecodeTypeRegisterSPECIAL3(instr, rt_reg, alu_out);
+ DecodeTypeRegisterSPECIAL3(instr, rt_reg, rd_reg, alu_out);
break;
// Unimplemented opcodes raised an error in the configuration step before,
// so we can use the default here to set the destination register in common
bool set_fcsr_round64_error(float original, float rounded);
void round_according_to_fcsr(double toRound, double& rounded,
int32_t& rounded_int, double fs);
+ void round_according_to_fcsr(float toRound, float& rounded,
+ int32_t& rounded_int, float fs);
+ void round64_according_to_fcsr(double toRound, double& rounded,
+ int64_t& rounded_int, double fs);
+ void round64_according_to_fcsr(float toRound, float& rounded,
+ int64_t& rounded_int, float fs);
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
int32_t get_pc() const;
const int32_t& fs_reg, const int32_t& ft_reg,
const int32_t& fd_reg);
void DecodeTypeRegisterWRsType(Instruction* instr, int32_t& alu_out,
- const int32_t& fd_reg, const int32_t& fs_reg);
+ const int32_t& fd_reg, const int32_t& fs_reg,
+ const int32_t& ft_reg);
void DecodeTypeRegisterSRsType(Instruction* instr, const int32_t& ft_reg,
const int32_t& fs_reg, const int32_t& fd_reg);
void DecodeTypeRegisterLRsType(Instruction* instr, const int32_t& ft_reg,
int32_t& alu_out);
void DecodeTypeRegisterSPECIAL3(Instruction* instr, const int32_t& rt_reg,
- int32_t& alu_out);
+ const int32_t& rd_reg, int32_t& alu_out);
// Helper function for DecodeTypeRegister.
void ConfigureTypeRegister(Instruction* instr,
}
+void Assembler::bitswap(Register rd, Register rt) {
+ DCHECK(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BITSWAP);
+}
+
+
+void Assembler::dbitswap(Register rd, Register rt) {
+ DCHECK(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, DBITSWAP);
+}
+
+
void Assembler::pref(int32_t hint, const MemOperand& rs) {
DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
}
+void Assembler::class_s(FPURegister fd, FPURegister fs) {
+ DCHECK(kArchVariant == kMips64r6);
+ GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
+}
+
+
+void Assembler::class_d(FPURegister fd, FPURegister fs) {
+ DCHECK(kArchVariant == kMips64r6);
+ GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
+}
+
+
void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
DCHECK(kArchVariant == kMips64r6);
}
+void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
+ cmp(cond, W, fd, fs, ft);
+}
+
+
+void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
+ cmp(cond, L, fd, fs, ft);
+}
+
+
void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
DCHECK(kArchVariant == kMips64r6);
Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
}
+void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
+ uint16_t cc) {
+ c(cond, S, fs, ft, cc);
+}
+
+
+void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
+ uint16_t cc) {
+ c(cond, D, fs, ft, cc);
+}
+
+
void Assembler::fcmp(FPURegister src1, const double src2,
FPUCondition cond) {
DCHECK(src2 == 0.0);
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
void dext_(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void bitswap(Register rd, Register rt);
+ void dbitswap(Register rd, Register rt);
// --------Coprocessor-instructions----------------
void ceil_l_s(FPURegister fd, FPURegister fs);
void ceil_l_d(FPURegister fd, FPURegister fs);
+ void class_s(FPURegister fd, FPURegister fs);
+ void class_d(FPURegister fd, FPURegister fs);
+
void min(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
void mina(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
void max(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
// Conditions and branches for MIPSr6.
void cmp(FPUCondition cond, SecondaryField fmt,
FPURegister fd, FPURegister ft, FPURegister fs);
+ void cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
+ void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
void bc1eqz(int16_t offset, FPURegister ft);
void bc1eqz(Label* L, FPURegister ft) {
// Conditions and branches for non MIPSr6.
void c(FPUCondition cond, SecondaryField fmt,
FPURegister ft, FPURegister fs, uint16_t cc = 0);
+ void c_s(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
+ void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
void bc1f(int16_t offset, uint16_t cc = 0);
void bc1f(Label* L, uint16_t cc = 0) {
case INS:
case EXT:
case DEXT:
+ case BITSWAP:
+ case DBITSWAP:
return kRegisterType;
default:
return kUnsupported;
DINSU = ((0 << 3) + 6),
DINS = ((0 << 3) + 7),
+ BITSWAP = ((4 << 3) + 0),
+ DBITSWAP = ((4 << 3) + 4),
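+ // Note: DBITSWAP shares its function field with DSBH; the sa field
+ // (DBITSWAP_SA below) disambiguates the two encodings.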
DSBH = ((4 << 3) + 4),
+ // SPECIAL3 Encoding of sa Field.
+ DBITSWAP_SA = ((0 << 3) + 0) << kSaShift,
+
// REGIMM encoding of rt Field.
BLTZ = ((0 << 3) + 0) << 16,
BGEZ = ((0 << 3) + 1) << 16,
L = ((2 << 3) + 5) << 21,
PS = ((2 << 3) + 6) << 21,
// COP1 Encoding of Function Field When rs=S.
ADD_S = ((0 << 3) + 0),
SUB_S = ((0 << 3) + 1),
MUL_S = ((0 << 3) + 2),
FLOOR_W_S = ((1 << 3) + 7),
RECIP_S = ((2 << 3) + 5),
RSQRT_S = ((2 << 3) + 6),
+ CLASS_S = ((3 << 3) + 3),
CVT_D_S = ((4 << 3) + 1),
CVT_W_S = ((4 << 3) + 4),
CVT_L_S = ((4 << 3) + 5),
FLOOR_W_D = ((1 << 3) + 7),
RECIP_D = ((2 << 3) + 5),
RSQRT_D = ((2 << 3) + 6),
+ CLASS_D = ((3 << 3) + 3),
+ MIN = ((3 << 3) + 4),
+ MINA = ((3 << 3) + 5),
+ MAX = ((3 << 3) + 6),
+ MAXA = ((3 << 3) + 7),
CVT_S_D = ((4 << 3) + 0),
CVT_W_D = ((4 << 3) + 4),
CVT_L_D = ((4 << 3) + 5),
C_ULT_D = ((6 << 3) + 5),
C_OLE_D = ((6 << 3) + 6),
C_ULE_D = ((6 << 3) + 7),
+
// COP1 Encoding of Function Field When rs=W or L.
CVT_S_W = ((4 << 3) + 0),
CVT_D_W = ((4 << 3) + 1),
CMP_SUGT = ((3 << 3) + 6), // Reserved, not implemented.
CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented.
- MIN = ((3 << 3) + 4),
- MINA = ((3 << 3) + 5),
- MAX = ((3 << 3) + 6),
- MAXA = ((3 << 3) + 7),
SEL = ((2 << 3) + 0),
MOVF = ((2 << 3) + 1), // Function field for MOVT.fmt and MOVF.fmt
MOVZ_C = ((2 << 3) + 2), // COP1 on FPR registers.
enum FPUCondition {
kNoFPUCondition = -1,
- F = 0, // False.
- UN = 1, // Unordered.
- EQ = 2, // Equal.
- UEQ = 3, // Unordered or Equal.
- OLT = 4, // Ordered or Less Than.
- ULT = 5, // Unordered or Less Than.
- OLE = 6, // Ordered or Less Than or Equal.
- ULE = 7 // Unordered or Less Than or Equal.
+ F = 0x00, // False.
+ UN = 0x01, // Unordered.
+ EQ = 0x02, // Equal.
+ UEQ = 0x03, // Unordered or Equal.
+ OLT = 0x04, // Ordered and Less Than, on Mips release < 6.
+ LT = 0x04, // Ordered and Less Than, on Mips release >= 6.
+ ULT = 0x05, // Unordered or Less Than.
+ OLE = 0x06, // Ordered and Less Than or Equal, on Mips release < 6.
+ LE = 0x06, // Ordered and Less Than or Equal, on Mips release >= 6.
+ ULE = 0x07, // Unordered or Less Than or Equal.
+
+ // The following constants are available on Mips release >= 6 only.
+ ORD = 0x11, // Ordered.
+ UNE = 0x12, // Unordered or Not Equal.
+ NE = 0x13, // Ordered Greater Than or Less Than (ordered and not equal).
};
case CEIL_L_D:
Format(instr, "ceil.l.'t 'fd, 'fs");
break;
+ case CLASS_D:
+ Format(instr, "class.'t 'fd, 'fs");
+ break;
case CVT_S_D:
Format(instr, "cvt.s.'t 'fd, 'fs");
break;
case CVT_S_L:
Format(instr, "cvt.s.l 'fd, 'fs");
break;
+ case CMP_AF:
+ Format(instr, "cmp.af.d 'fd, 'fs, 'ft");
+ break;
case CMP_UN:
Format(instr, "cmp.un.d 'fd, 'fs, 'ft");
break;
Format(instr, "dext 'rt, 'rs, 'sa, 'ss1");
break;
}
+ case BITSWAP: {
+ Format(instr, "bitswap 'rd, 'rt");
+ break;
+ }
+ case DBITSWAP: {
+ switch (instr->SaFieldRaw()) {
+ case DBITSWAP_SA:
+ Format(instr, "dbitswap 'rd, 'rt");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
default:
UNREACHABLE();
}
}
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round_error(float original, float rounded) {
bool ret = false;
double max_int32 = std::numeric_limits<int32_t>::max();
}
-// for cvt instructions only
+// For cvt instructions only
void Simulator::round_according_to_fcsr(double toRound, double& rounded,
int32_t& rounded_int, double fs) {
// 0 RN (round to nearest): Round a result to the nearest
}
+// For cvt instructions only
+void Simulator::round_according_to_fcsr(float toRound, float& rounded,
+ int32_t& rounded_int, float fs) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one. Behave like round_w_d.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result. Behave like trunc_w_d.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up. Behave like ceil_w_d.
+
+ // 3 RD (round down, or toward −infinity): Round a result to
+ // the next representable value down. Behave like floor_w_d.
+ switch (get_fcsr_rounding_mode()) {
+ case kRoundToNearest:
+ rounded = std::floor(fs + 0.5);
+ rounded_int = static_cast<int32_t>(rounded);
+ if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ rounded_int--;
+ }
+ break;
+ case kRoundToZero:
+ rounded = trunc(fs);
+ rounded_int = static_cast<int32_t>(rounded);
+ break;
+ case kRoundToPlusInf:
+ rounded = std::ceil(fs);
+ rounded_int = static_cast<int32_t>(rounded);
+ break;
+ case kRoundToMinusInf:
+ rounded = std::floor(fs);
+ rounded_int = static_cast<int32_t>(rounded);
+ break;
+ }
+}
+
+
+void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
+ int64_t& rounded_int, float fs) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one. Behave like round_w_d.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result. Behave like trunc_w_d.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up. Behave like ceil_w_d.
+
+ // 3 RD (round down, or toward −infinity): Round a result to
+ // the next representable value down. Behave like floor_w_d.
+ switch (get_fcsr_rounding_mode()) {
+ case kRoundToNearest:
+ rounded = std::floor(fs + 0.5);
+ rounded_int = static_cast<int64_t>(rounded);
+ if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ rounded_int--;
+ }
+ break;
+ case kRoundToZero:
+ rounded = trunc(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ case kRoundToPlusInf:
+ rounded = std::ceil(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ case kRoundToMinusInf:
+ rounded = std::floor(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ }
+}
+
+
// Raw access to the PC register.
void Simulator::set_pc(int64_t value) {
pc_modified_ = true;
*alu_out = static_cast<int64_t>((rs_u & (mask << lsb)) >> lsb);
break;
}
+ case BITSWAP: { // Mips64r6 instruction (32-bit word variant)
+ uint32_t input = static_cast<uint32_t>(rt);
+ uint32_t output = 0;
+ uint8_t i_byte, o_byte;
+
+ // Reverse the bits within each individual byte
+ for (int i = 0; i < 4; i++) {
+ output = output >> 8;
+ i_byte = input & 0xff;
+
+ // Fast way to reverse bits in byte
+ // Devised by Sean Anderson, July 13, 2001
+ o_byte = static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
+ (i_byte * 0x8020LU & 0x88440LU)) *
+ 0x10101LU >>
+ 16);
+
+ output = output | (static_cast<uint32_t>(o_byte) << 24);
+ input = input >> 8;
+ }
+
+ *alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
+ break;
+ }
+ case DBITSWAP: {
+ switch (instr->SaFieldRaw()) {
+ case DBITSWAP_SA: { // Mips64r6
+ uint64_t input = static_cast<uint64_t>(rt);
+ uint64_t output = 0;
+ uint8_t i_byte, o_byte;
+
+ // Reverse the bits within each individual byte
+ for (int i = 0; i < 8; i++) {
+ output = output >> 8;
+ i_byte = input & 0xff;
+
+ // Fast way to reverse bits in byte
+ // Devised by Sean Anderson, July 13, 2001
+ o_byte =
+ static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
+ (i_byte * 0x8020LU & 0x88440LU)) *
+ 0x10101LU >>
+ 16);
+
+ output = output | ((static_cast<uint64_t>(o_byte) << 56));
+ input = input >> 8;
+ }
+
+ *alu_out = static_cast<int64_t>(output);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
default:
UNREACHABLE();
}
set_fpu_register_float(fd_reg, result);
break;
}
+ case C_F_D:
+ set_fcsr_bit(fcsr_cc, false);
+ break;
case C_UN_D:
set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
break;
case CVT_D_S:
set_fpu_register_double(fd_reg, static_cast<double>(fs));
break;
+ case CLASS_S: { // Mips64r6 instruction
+ // Convert float input to uint32_t for easier bit manipulation
+ uint32_t classed = bit_cast<uint32_t>(fs);
+
+ // Extracting sign, exponent and mantissa from the input float
+ uint32_t sign = (classed >> 31) & 1;
+ uint32_t exponent = (classed >> 23) & 0x000000ff;
+ uint32_t mantissa = classed & 0x007fffff;
+ uint32_t result;
+ float fResult;
+
+ // Setting flags if input float is negative infinity,
+ // positive infinity, negative zero or positive zero
+ bool negInf = (classed == 0xFF800000);
+ bool posInf = (classed == 0x7F800000);
+ bool negZero = (classed == 0x80000000);
+ bool posZero = (classed == 0x00000000);
+
+ bool signalingNan;
+ bool quietNan;
+ bool negSubnorm;
+ bool posSubnorm;
+ bool negNorm;
+ bool posNorm;
+
+ // Setting flags if float is NaN
+ signalingNan = false;
+ quietNan = false;
+ if (!negInf && !posInf && (exponent == 0xff)) {
+ quietNan = ((mantissa & 0x00200000) == 0) &&
+ ((mantissa & (0x00200000 - 1)) == 0);
+ signalingNan = !quietNan;
+ }
+
+ // Setting flags if float is subnormal number
+ posSubnorm = false;
+ negSubnorm = false;
+ if ((exponent == 0) && (mantissa != 0)) {
+ DCHECK(sign == 0 || sign == 1);
+ posSubnorm = (sign == 0);
+ negSubnorm = (sign == 1);
+ }
+
+ // Setting flags if float is normal number
+ posNorm = false;
+ negNorm = false;
+ if (!posSubnorm && !negSubnorm && !posInf && !negInf && !signalingNan &&
+ !quietNan && !negZero && !posZero) {
+ DCHECK(sign == 0 || sign == 1);
+ posNorm = (sign == 0);
+ negNorm = (sign == 1);
+ }
+
+ // Calculating result according to description of CLASS.S instruction
+ result = (posZero << 9) | (posSubnorm << 8) | (posNorm << 7) |
+ (posInf << 6) | (negZero << 5) | (negSubnorm << 4) |
+ (negNorm << 3) | (negInf << 2) | (quietNan << 1) | signalingNan;
+
+ DCHECK(result != 0);
+
+ fResult = bit_cast<float>(result);
+ set_fpu_register_float(fd_reg, fResult);
+
+ break;
+ }
+ case CVT_L_S: {
+ float rounded;
+ int64_t result;
+ round64_according_to_fcsr(fs, rounded, result, fs);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ break;
+ }
+ case CVT_W_S: {
+ float rounded;
+ int32_t result;
+ round_according_to_fcsr(fs, rounded, result, fs);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
case TRUNC_W_S: { // Truncate single to word (round towards 0).
float rounded = trunc(fs);
int32_t result = static_cast<int32_t>(rounded);
break;
}
default:
- // CVT_W_S CVT_L_S TRUNC_W_S ROUND_W_S ROUND_L_S FLOOR_W_S FLOOR_L_S
+ // ROUND_W_S ROUND_L_S FLOOR_W_S FLOOR_L_S
// CEIL_W_S CEIL_L_S CVT_PS_S are unimplemented.
UNREACHABLE();
}
round64_according_to_fcsr(fs, rounded, result, fs);
set_fpu_register(fd_reg, result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPUInvalidResult);
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
}
break;
}
}
break;
}
- case C_F_D:
- UNIMPLEMENTED_MIPS();
+ case CLASS_D: { // Mips64r6 instruction
+ // Convert double input to uint64_t for easier bit manipulation
+ uint64_t classed = bit_cast<uint64_t>(fs);
+
+ // Extracting sign, exponent and mantissa from the input double
+ uint32_t sign = (classed >> 63) & 1;
+ uint32_t exponent = (classed >> 52) & 0x00000000000007ff;
+ uint64_t mantissa = classed & 0x000fffffffffffff;
+ uint64_t result;
+ double dResult;
+
+ // Setting flags if input double is negative infinity,
+ // positive infinity, negative zero or positive zero
+ bool negInf = (classed == 0xFFF0000000000000);
+ bool posInf = (classed == 0x7FF0000000000000);
+ bool negZero = (classed == 0x8000000000000000);
+ bool posZero = (classed == 0x0000000000000000);
+
+ bool signalingNan;
+ bool quietNan;
+ bool negSubnorm;
+ bool posSubnorm;
+ bool negNorm;
+ bool posNorm;
+
+ // Setting flags if double is NaN
+ signalingNan = false;
+ quietNan = false;
+ if (!negInf && !posInf && exponent == 0x7ff) {
+ quietNan = ((mantissa & 0x0008000000000000) != 0) &&
+ ((mantissa & (0x0008000000000000 - 1)) == 0);
+ signalingNan = !quietNan;
+ }
+
+ // Setting flags if double is subnormal number
+ posSubnorm = false;
+ negSubnorm = false;
+ if ((exponent == 0) && (mantissa != 0)) {
+ DCHECK(sign == 0 || sign == 1);
+ posSubnorm = (sign == 0);
+ negSubnorm = (sign == 1);
+ }
+
+ // Setting flags if double is normal number
+ posNorm = false;
+ negNorm = false;
+ if (!posSubnorm && !negSubnorm && !posInf && !negInf && !signalingNan &&
+ !quietNan && !negZero && !posZero) {
+ DCHECK(sign == 0 || sign == 1);
+ posNorm = (sign == 0);
+ negNorm = (sign == 1);
+ }
+
+ // Calculating result according to description of CLASS.D instruction
+ result = (posZero << 9) | (posSubnorm << 8) | (posNorm << 7) |
+ (posInf << 6) | (negZero << 5) | (negSubnorm << 4) |
+ (negNorm << 3) | (negInf << 2) | (quietNan << 1) | signalingNan;
+
+ DCHECK(result != 0);
+
+ dResult = bit_cast<double>(result);
+ set_fpu_register_double(fd_reg, dResult);
+
+ break;
+ }
+ case C_F_D: {
+ set_fcsr_bit(fcsr_cc, false);
break;
+ }
default:
UNREACHABLE();
}
void Simulator::DecodeTypeRegisterWRsType(Instruction* instr,
const int32_t& fs_reg,
const int32_t& fd_reg,
+ const int32_t& ft_reg,
int64_t& alu_out) {
+ float fs = get_fpu_register_float(fs_reg);
+ float ft = get_fpu_register_float(ft_reg);
switch (instr->FunctionFieldRaw()) {
case CVT_S_W: // Convert word to float (single).
alu_out = get_fpu_register_signed_word(fs_reg);
alu_out = get_fpu_register_signed_word(fs_reg);
set_fpu_register_double(fd_reg, static_cast<double>(alu_out));
break;
- default: // Mips64r6 CMP.S instructions unimplemented.
+ case CMP_AF:
+ set_fpu_register_word(fd_reg, 0);
+ break;
+ case CMP_UN:
+ if (std::isnan(fs) || std::isnan(ft)) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_EQ:
+ if (fs == ft) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_UEQ:
+ if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_LT:
+ if (fs < ft) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_ULT:
+ if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_LE:
+ if (fs <= ft) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_ULE:
+ if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_OR:
+ if (!std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_UNE:
+ if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_NE:
+ if (fs != ft && (!std::isnan(fs) && !std::isnan(ft))) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ default:
UNREACHABLE();
}
}
set_fpu_register_double(fd_reg, static_cast<double>(i64));
break;
case CVT_S_L:
- UNIMPLEMENTED_MIPS();
+ i64 = get_fpu_register(fs_reg);
+ set_fpu_register_float(fd_reg, static_cast<float>(i64));
break;
- case CMP_AF: // Mips64r6 CMP.D instructions.
- UNIMPLEMENTED_MIPS();
+ case CMP_AF:
+ set_fpu_register(fd_reg, 0);
break;
case CMP_UN:
if (std::isnan(fs) || std::isnan(ft)) {
set_fpu_register(fd_reg, 0);
}
break;
- default: // CMP_OR CMP_UNE CMP_NE UNIMPLEMENTED
+ case CMP_OR:
+ if (!std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_UNE:
+ if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_NE:
+ if (fs != ft && (!std::isnan(fs) && !std::isnan(ft))) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ default:
UNREACHABLE();
}
}
DecodeTypeRegisterDRsType(instr, fs_reg, ft_reg, fd_reg);
break;
case W:
- DecodeTypeRegisterWRsType(instr, fs_reg, fd_reg, alu_out);
+ DecodeTypeRegisterWRsType(instr, fs_reg, fd_reg, ft_reg, alu_out);
break;
case L:
DecodeTypeRegisterLRsType(instr, fs_reg, fd_reg, ft_reg);
void Simulator::DecodeTypeRegisterSPECIAL3(Instruction* instr,
const int64_t& rt_reg,
+ const int64_t& rd_reg,
int64_t& alu_out) {
switch (instr->FunctionFieldRaw()) {
case INS:
set_register(rt_reg, alu_out);
TraceRegWr(alu_out);
break;
+ case BITSWAP:
+ case DBITSWAP:
+ set_register(rd_reg, alu_out);
+ TraceRegWr(alu_out);
+ break;
default:
UNREACHABLE();
}
DecodeTypeRegisterSPECIAL2(instr, rd_reg, alu_out);
break;
case SPECIAL3:
- DecodeTypeRegisterSPECIAL3(instr, rt_reg, alu_out);
+ DecodeTypeRegisterSPECIAL3(instr, rt_reg, rd_reg, alu_out);
break;
// Unimplemented opcodes raised an error in the configuration step before,
// so we can use the default here to set the destination register in common
int32_t& rounded_int, double fs);
void round64_according_to_fcsr(double toRound, double& rounded,
int64_t& rounded_int, double fs);
+ void round_according_to_fcsr(float toRound, float& rounded,
+ int32_t& rounded_int, float fs);
+ void round64_according_to_fcsr(float toRound, float& rounded,
+ int64_t& rounded_int, float fs);
void set_fcsr_rounding_mode(FPURoundingMode mode);
unsigned int get_fcsr_rounding_mode();
// Special case of set_register and get_register to access the raw PC value.
int64_t& alu_out);
void DecodeTypeRegisterSPECIAL3(Instruction* instr, const int64_t& rt_reg,
- int64_t& alu_out);
+ const int64_t& rd_reg, int64_t& alu_out);
void DecodeTypeRegisterSRsType(Instruction* instr, const int32_t& fs_reg,
const int32_t& ft_reg, const int32_t& fd_reg);
const int32_t& ft_reg, const int32_t& fd_reg);
void DecodeTypeRegisterWRsType(Instruction* instr, const int32_t& fs_reg,
- const int32_t& fd_reg, int64_t& alu_out);
+ const int32_t& fd_reg, const int32_t& ft_reg,
+ int64_t& alu_out);
void DecodeTypeRegisterLRsType(Instruction* instr, const int32_t& fs_reg,
const int32_t& fd_reg, const int32_t& ft_reg);
int64_t c; // a trunc result
int64_t d; // b trunc result
}Test;
- const int tableLength = 16;
+ const int tableLength = 15;
double inputs_D[tableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<double>::quiet_NaN(),
- std::numeric_limits<double>::infinity(),
- 9223372036854775808.0
+ std::numeric_limits<double>::infinity()
};
float inputs_S[tableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<float>::quiet_NaN(),
- std::numeric_limits<float>::infinity(),
- 9223372036854775808.0
+ std::numeric_limits<float>::infinity()
};
double outputs[tableLength] = {
2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
2147483648.0, dFPU64InvalidResult,
- dFPU64InvalidResult, dFPU64InvalidResult};
+ dFPU64InvalidResult};
__ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
__ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, b)) );
int64_t c;
int64_t d;
}Test;
- const int tableLength = 16;
+ const int tableLength = 15;
double inputs_D[tableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<double>::quiet_NaN(),
- std::numeric_limits<double>::infinity(),
- 9223372036854775808.0
+ std::numeric_limits<double>::infinity()
};
float inputs_S[tableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<float>::quiet_NaN(),
- std::numeric_limits<float>::infinity(),
- 9223372036854775808.0
+ std::numeric_limits<float>::infinity()
};
double outputs[tableLength] = {
2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
-2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
2147483648.0, dFPU64InvalidResult,
- dFPU64InvalidResult, dFPU64InvalidResult};
+ dFPU64InvalidResult};
__ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
__ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, b)) );
int64_t c;
int64_t d;
}Test;
- const int tableLength = 16;
+ const int tableLength = 15;
double inputs_D[tableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<double>::quiet_NaN(),
- std::numeric_limits<double>::infinity(),
- 9223372036854775808.0
+ std::numeric_limits<double>::infinity()
};
float inputs_S[tableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<float>::quiet_NaN(),
- std::numeric_limits<float>::infinity(),
- 9223372036854775808.0
+ std::numeric_limits<float>::infinity()
};
double outputs[tableLength] = {
2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
2147483648.0, dFPU64InvalidResult,
- dFPU64InvalidResult, dFPU64InvalidResult};
+ dFPU64InvalidResult};
__ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
__ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, b)) );
int64_t c;
int64_t d;
}Test;
- const int tableLength = 16;
+ const int tableLength = 15;
double inputs_D[tableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<double>::quiet_NaN(),
- std::numeric_limits<double>::infinity(),
- 9223372036854775808.0
+ std::numeric_limits<double>::infinity()
};
float inputs_S[tableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<float>::quiet_NaN(),
- std::numeric_limits<float>::infinity(),
- 9223372036854775808.0
+ std::numeric_limits<float>::infinity()
};
double outputs[tableLength] = {
3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
2147483648.0, dFPU64InvalidResult,
- dFPU64InvalidResult, dFPU64InvalidResult};
+ dFPU64InvalidResult};
__ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
__ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, b)) );
}
+TEST(BITSWAP) {
+ // Test BITSWAP
+ if (IsMipsArchVariant(kMips32r6)) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ int32_t r1;
+ int32_t r2;
+ int32_t r3;
+ int32_t r4;
+ } T;
+ T t;
+
+ Assembler assm(isolate, NULL, 0);
+
+ __ lw(a2, MemOperand(a0, OFFSET_OF(T, r1)));
+ __ nop();
+ __ bitswap(a1, a2);
+ __ sw(a1, MemOperand(a0, OFFSET_OF(T, r1)));
+
+ __ lw(a2, MemOperand(a0, OFFSET_OF(T, r2)));
+ __ nop();
+ __ bitswap(a1, a2);
+ __ sw(a1, MemOperand(a0, OFFSET_OF(T, r2)));
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ t.r1 = 0x781A15C3;
+ t.r2 = 0x8B71FCDE;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+
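+ // BITSWAP reverses the bits within each byte: 0x78->0x1E, 0x1A->0x58,
+ // 0x15->0xA8, 0xC3->0xC3 (palindromic), so 0x781A15C3 -> 0x1E58A8C3.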
+ CHECK_EQ(static_cast<int32_t>(0x1E58A8C3), t.r1);
+ CHECK_EQ(static_cast<int32_t>(0xD18E3F7B), t.r2);
+ }
+}
+
+
+TEST(class_fmt) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ // Test CLASS.fmt instruction.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ double dSignalingNan;
+ double dQuietNan;
+ double dNegInf;
+ double dNegNorm;
+ double dNegSubnorm;
+ double dNegZero;
+ double dPosInf;
+ double dPosNorm;
+ double dPosSubnorm;
+ double dPosZero;
+ float fSignalingNan;
+ float fQuietNan;
+ float fNegInf;
+ float fNegNorm;
+ float fNegSubnorm;
+ float fNegZero;
+ float fPosInf;
+ float fPosNorm;
+ float fPosSubnorm;
+ float fPosZero;
+ } T;
+ T t;
+
+ // Create a function that accepts &t, classifies each double and float
+ // field of T, and stores the CLASS.fmt result back into that field.
+ MacroAssembler assm(isolate, NULL, 0);
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, dSignalingNan)));
+ __ class_d(f6, f4);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, dSignalingNan)));
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, dQuietNan)));
+ __ class_d(f6, f4);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, dQuietNan)));
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, dNegInf)));
+ __ class_d(f6, f4);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, dNegInf)));
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, dNegNorm)));
+ __ class_d(f6, f4);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, dNegNorm)));
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, dNegSubnorm)));
+ __ class_d(f6, f4);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, dNegSubnorm)));
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, dNegZero)));
+ __ class_d(f6, f4);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, dNegZero)));
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, dPosInf)));
+ __ class_d(f6, f4);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, dPosInf)));
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, dPosNorm)));
+ __ class_d(f6, f4);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, dPosNorm)));
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, dPosSubnorm)));
+ __ class_d(f6, f4);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, dPosSubnorm)));
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, dPosZero)));
+ __ class_d(f6, f4);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, dPosZero)));
+
+ // Testing instruction CLASS.S
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(T, fSignalingNan)));
+ __ class_s(f6, f4);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(T, fSignalingNan)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(T, fQuietNan)));
+ __ class_s(f6, f4);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(T, fQuietNan)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(T, fNegInf)));
+ __ class_s(f6, f4);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(T, fNegInf)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(T, fNegNorm)));
+ __ class_s(f6, f4);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(T, fNegNorm)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(T, fNegSubnorm)));
+ __ class_s(f6, f4);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(T, fNegSubnorm)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(T, fNegZero)));
+ __ class_s(f6, f4);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(T, fNegZero)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(T, fPosInf)));
+ __ class_s(f6, f4);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(T, fPosInf)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(T, fPosNorm)));
+ __ class_s(f6, f4);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(T, fPosNorm)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(T, fPosSubnorm)));
+ __ class_s(f6, f4);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(T, fPosSubnorm)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(T, fPosZero)));
+ __ class_s(f6, f4);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(T, fPosZero)));
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ t.dSignalingNan = std::numeric_limits<double>::signaling_NaN();
+ t.dQuietNan = std::numeric_limits<double>::quiet_NaN();
+ t.dNegInf = -1.0 / 0.0;
+ t.dNegNorm = -5.0;
+ t.dNegSubnorm = -DBL_MIN / 2.0;
+ t.dNegZero = -0.0;
+ t.dPosInf = 2.0 / 0.0;
+ t.dPosNorm = 275.35;
+ t.dPosSubnorm = DBL_MIN / 2.0;
+ t.dPosZero = +0.0;
+ // Float test values
+
+ t.fSignalingNan = std::numeric_limits<float>::signaling_NaN();
+ t.fQuietNan = std::numeric_limits<float>::quiet_NaN();
+ t.fNegInf = -0.5/0.0;
+ t.fNegNorm = -FLT_MIN;
+ t.fNegSubnorm = -FLT_MIN / 1.5;
+ t.fNegZero = -0.0;
+ t.fPosInf = 100000.0 / 0.0;
+ t.fPosNorm = FLT_MAX;
+ t.fPosSubnorm = FLT_MIN / 20.0;
+ t.fPosZero = +0.0;
+
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+ // Expected double results.
+ CHECK_EQ(bit_cast<uint64_t>(t.dSignalingNan), 0x001);
+ CHECK_EQ(bit_cast<uint64_t>(t.dQuietNan), 0x002);
+ CHECK_EQ(bit_cast<uint64_t>(t.dNegInf), 0x004);
+ CHECK_EQ(bit_cast<uint64_t>(t.dNegNorm), 0x008);
+ CHECK_EQ(bit_cast<uint64_t>(t.dNegSubnorm), 0x010);
+ CHECK_EQ(bit_cast<uint64_t>(t.dNegZero), 0x020);
+ CHECK_EQ(bit_cast<uint64_t>(t.dPosInf), 0x040);
+ CHECK_EQ(bit_cast<uint64_t>(t.dPosNorm), 0x080);
+ CHECK_EQ(bit_cast<uint64_t>(t.dPosSubnorm), 0x100);
+ CHECK_EQ(bit_cast<uint64_t>(t.dPosZero), 0x200);
+
+ // Expected float results.
+ CHECK_EQ(bit_cast<uint32_t>(t.fSignalingNan), 0x001);
+ CHECK_EQ(bit_cast<uint32_t>(t.fQuietNan), 0x002);
+ CHECK_EQ(bit_cast<uint32_t>(t.fNegInf), 0x004);
+ CHECK_EQ(bit_cast<uint32_t>(t.fNegNorm), 0x008);
+ CHECK_EQ(bit_cast<uint32_t>(t.fNegSubnorm), 0x010);
+ CHECK_EQ(bit_cast<uint32_t>(t.fNegZero), 0x020);
+ CHECK_EQ(bit_cast<uint32_t>(t.fPosInf), 0x040);
+ CHECK_EQ(bit_cast<uint32_t>(t.fPosNorm), 0x080);
+ CHECK_EQ(bit_cast<uint32_t>(t.fPosSubnorm), 0x100);
+ CHECK_EQ(bit_cast<uint32_t>(t.fPosZero), 0x200);
+ }
+}
+
+
+TEST(ABS) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test_float {
+ int64_t fir;
+ double a;
+ float b;
+ double fcsr;
+ } TestFloat;
+
+ TestFloat test;
+
+ // Save FCSR.
+ __ cfc1(a1, FCSR);
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, a)));
+ __ abs_d(f10, f4);
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, a)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, b)));
+ __ abs_s(f10, f4);
+ __ swc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, b)));
+
+ // Restore FCSR.
+ __ ctc1(a1, FCSR);
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ test.a = -2.0;
+ test.b = -2.0;
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.a, 2.0);
+ CHECK_EQ(test.b, 2.0);
+
+ test.a = 2.0;
+ test.b = 2.0;
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.a, 2.0);
+ CHECK_EQ(test.b, 2.0);
+
+ // Testing biggest positive number
+ test.a = std::numeric_limits<double>::max();
+ test.b = std::numeric_limits<float>::max();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.a, std::numeric_limits<double>::max());
+ CHECK_EQ(test.b, std::numeric_limits<float>::max());
+
+ // Testing smallest negative number
+  test.a = std::numeric_limits<double>::lowest();
+  test.b = std::numeric_limits<float>::lowest();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.a, std::numeric_limits<double>::max());
+ CHECK_EQ(test.b, std::numeric_limits<float>::max());
+
+ // Testing smallest positive number
+ test.a = -std::numeric_limits<double>::min();
+ test.b = -std::numeric_limits<float>::min();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.a, std::numeric_limits<double>::min());
+ CHECK_EQ(test.b, std::numeric_limits<float>::min());
+
+ // Testing infinity
+ test.a = -std::numeric_limits<double>::max()
+ / std::numeric_limits<double>::min();
+ test.b = -std::numeric_limits<float>::max()
+ / std::numeric_limits<float>::min();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.a, std::numeric_limits<double>::max()
+ / std::numeric_limits<double>::min());
+ CHECK_EQ(test.b, std::numeric_limits<float>::max()
+ / std::numeric_limits<float>::min());
+
+ test.a = std::numeric_limits<double>::quiet_NaN();
+ test.b = std::numeric_limits<float>::quiet_NaN();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(std::isnan(test.a), true);
+ CHECK_EQ(std::isnan(test.b), true);
+
+ test.a = std::numeric_limits<double>::signaling_NaN();
+ test.b = std::numeric_limits<float>::signaling_NaN();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(std::isnan(test.a), true);
+ CHECK_EQ(std::isnan(test.b), true);
+}
+
+
+TEST(ADD_FMT) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test_float {
+ double a;
+ double b;
+ double c;
+ float fa;
+ float fb;
+ float fc;
+ } TestFloat;
+
+ TestFloat test;
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, a)));
+ __ ldc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, b)));
+ __ add_d(f10, f8, f4);
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, c)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, fa)));
+ __ lwc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, fb)));
+ __ add_s(f10, f8, f4);
+ __ swc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, fc)));
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ test.a = 2.0;
+ test.b = 3.0;
+ test.fa = 2.0;
+ test.fb = 3.0;
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.c, 5.0);
+ CHECK_EQ(test.fc, 5.0);
+
+ test.a = std::numeric_limits<double>::max();
+ test.b = std::numeric_limits<double>::lowest();
+ test.fa = std::numeric_limits<float>::max();
+ test.fb = std::numeric_limits<float>::lowest();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.c, 0.0);
+ CHECK_EQ(test.fc, 0.0);
+
+ test.a = std::numeric_limits<double>::max();
+ test.b = std::numeric_limits<double>::max();
+ test.fa = std::numeric_limits<float>::max();
+ test.fb = std::numeric_limits<float>::max();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(std::isfinite(test.c), false);
+ CHECK_EQ(std::isfinite(test.fc), false);
+
+ test.a = 5.0;
+ test.b = std::numeric_limits<double>::signaling_NaN();
+ test.fa = 5.0;
+ test.fb = std::numeric_limits<float>::signaling_NaN();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(std::isnan(test.c), true);
+ CHECK_EQ(std::isnan(test.fc), true);
+}
+
+
+TEST(C_COND_FMT) {
+  if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kMips32r2)) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test_float {
+ double dOp1;
+ double dOp2;
+ uint32_t dF;
+ uint32_t dUn;
+ uint32_t dEq;
+ uint32_t dUeq;
+ uint32_t dOlt;
+ uint32_t dUlt;
+ uint32_t dOle;
+ uint32_t dUle;
+ float fOp1;
+ float fOp2;
+ uint32_t fF;
+ uint32_t fUn;
+ uint32_t fEq;
+ uint32_t fUeq;
+ uint32_t fOlt;
+ uint32_t fUlt;
+ uint32_t fOle;
+ uint32_t fUle;
+ } TestFloat;
+
+ TestFloat test;
+
+ __ li(t1, 1);
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, dOp1)));
+ __ ldc1(f6, MemOperand(a0, OFFSET_OF(TestFloat, dOp2)));
+
+ __ lwc1(f14, MemOperand(a0, OFFSET_OF(TestFloat, fOp1)));
+ __ lwc1(f16, MemOperand(a0, OFFSET_OF(TestFloat, fOp2)));
+
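+    // Each block below exercises one condition: c.<cond>.<fmt> sets FCSR
+    // condition code cc, and movt(rd, rs, cc) copies rs into rd only when
+    // that bit is set, so t2/t3 record the double/single results as 1 or 0.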
+ __ mov(t2, zero_reg);
+ __ mov(t3, zero_reg);
+ __ c_d(F, f4, f6, 0);
+ __ c_s(F, f14, f16, 2);
+ __ movt(t2, t1, 0);
+ __ movt(t3, t1, 2);
+ __ sw(t2, MemOperand(a0, OFFSET_OF(TestFloat, dF)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(TestFloat, fF)) );
+
+ __ mov(t2, zero_reg);
+ __ mov(t3, zero_reg);
+ __ c_d(UN, f4, f6, 2);
+ __ c_s(UN, f14, f16, 4);
+ __ movt(t2, t1, 2);
+ __ movt(t3, t1, 4);
+ __ sw(t2, MemOperand(a0, OFFSET_OF(TestFloat, dUn)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(TestFloat, fUn)) );
+
+ __ mov(t2, zero_reg);
+ __ mov(t3, zero_reg);
+ __ c_d(EQ, f4, f6, 4);
+ __ c_s(EQ, f14, f16, 6);
+ __ movt(t2, t1, 4);
+ __ movt(t3, t1, 6);
+ __ sw(t2, MemOperand(a0, OFFSET_OF(TestFloat, dEq)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(TestFloat, fEq)) );
+
+ __ mov(t2, zero_reg);
+ __ mov(t3, zero_reg);
+ __ c_d(UEQ, f4, f6, 6);
+ __ c_s(UEQ, f14, f16, 0);
+ __ movt(t2, t1, 6);
+ __ movt(t3, t1, 0);
+ __ sw(t2, MemOperand(a0, OFFSET_OF(TestFloat, dUeq)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(TestFloat, fUeq)) );
+
+ __ mov(t2, zero_reg);
+ __ mov(t3, zero_reg);
+ __ c_d(OLT, f4, f6, 0);
+ __ c_s(OLT, f14, f16, 2);
+ __ movt(t2, t1, 0);
+ __ movt(t3, t1, 2);
+ __ sw(t2, MemOperand(a0, OFFSET_OF(TestFloat, dOlt)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(TestFloat, fOlt)) );
+
+ __ mov(t2, zero_reg);
+ __ mov(t3, zero_reg);
+ __ c_d(ULT, f4, f6, 2);
+ __ c_s(ULT, f14, f16, 4);
+ __ movt(t2, t1, 2);
+ __ movt(t3, t1, 4);
+ __ sw(t2, MemOperand(a0, OFFSET_OF(TestFloat, dUlt)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(TestFloat, fUlt)) );
+
+ __ mov(t2, zero_reg);
+ __ mov(t3, zero_reg);
+ __ c_d(OLE, f4, f6, 4);
+ __ c_s(OLE, f14, f16, 6);
+ __ movt(t2, t1, 4);
+ __ movt(t3, t1, 6);
+ __ sw(t2, MemOperand(a0, OFFSET_OF(TestFloat, dOle)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(TestFloat, fOle)) );
+
+ __ mov(t2, zero_reg);
+ __ mov(t3, zero_reg);
+ __ c_d(ULE, f4, f6, 6);
+ __ c_s(ULE, f14, f16, 0);
+ __ movt(t2, t1, 6);
+ __ movt(t3, t1, 0);
+ __ sw(t2, MemOperand(a0, OFFSET_OF(TestFloat, dUle)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(TestFloat, fUle)) );
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ test.dOp1 = 2.0;
+ test.dOp2 = 3.0;
+ test.fOp1 = 2.0;
+ test.fOp2 = 3.0;
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.dF, 0);
+ CHECK_EQ(test.dUn, 0);
+ CHECK_EQ(test.dEq, 0);
+ CHECK_EQ(test.dUeq, 0);
+ CHECK_EQ(test.dOlt, 1);
+ CHECK_EQ(test.dUlt, 1);
+ CHECK_EQ(test.dOle, 1);
+ CHECK_EQ(test.dUle, 1);
+ CHECK_EQ(test.fF, 0);
+ CHECK_EQ(test.fUn, 0);
+ CHECK_EQ(test.fEq, 0);
+ CHECK_EQ(test.fUeq, 0);
+ CHECK_EQ(test.fOlt, 1);
+ CHECK_EQ(test.fUlt, 1);
+ CHECK_EQ(test.fOle, 1);
+ CHECK_EQ(test.fUle, 1);
+
+ test.dOp1 = std::numeric_limits<double>::max();
+ test.dOp2 = std::numeric_limits<double>::min();
+ test.fOp1 = std::numeric_limits<float>::min();
+ test.fOp2 = std::numeric_limits<float>::lowest();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.dF, 0);
+ CHECK_EQ(test.dUn, 0);
+ CHECK_EQ(test.dEq, 0);
+ CHECK_EQ(test.dUeq, 0);
+ CHECK_EQ(test.dOlt, 0);
+ CHECK_EQ(test.dUlt, 0);
+ CHECK_EQ(test.dOle, 0);
+ CHECK_EQ(test.dUle, 0);
+ CHECK_EQ(test.fF, 0);
+ CHECK_EQ(test.fUn, 0);
+ CHECK_EQ(test.fEq, 0);
+ CHECK_EQ(test.fUeq, 0);
+ CHECK_EQ(test.fOlt, 0);
+ CHECK_EQ(test.fUlt, 0);
+ CHECK_EQ(test.fOle, 0);
+ CHECK_EQ(test.fUle, 0);
+
+ test.dOp1 = std::numeric_limits<double>::lowest();
+ test.dOp2 = std::numeric_limits<double>::lowest();
+ test.fOp1 = std::numeric_limits<float>::max();
+ test.fOp2 = std::numeric_limits<float>::max();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.dF, 0);
+ CHECK_EQ(test.dUn, 0);
+ CHECK_EQ(test.dEq, 1);
+ CHECK_EQ(test.dUeq, 1);
+ CHECK_EQ(test.dOlt, 0);
+ CHECK_EQ(test.dUlt, 0);
+ CHECK_EQ(test.dOle, 1);
+ CHECK_EQ(test.dUle, 1);
+ CHECK_EQ(test.fF, 0);
+ CHECK_EQ(test.fUn, 0);
+ CHECK_EQ(test.fEq, 1);
+ CHECK_EQ(test.fUeq, 1);
+ CHECK_EQ(test.fOlt, 0);
+ CHECK_EQ(test.fUlt, 0);
+ CHECK_EQ(test.fOle, 1);
+ CHECK_EQ(test.fUle, 1);
+
+ test.dOp1 = std::numeric_limits<double>::quiet_NaN();
+ test.dOp2 = 0.0;
+ test.fOp1 = std::numeric_limits<float>::quiet_NaN();
+ test.fOp2 = 0.0;
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.dF, 0);
+ CHECK_EQ(test.dUn, 1);
+ CHECK_EQ(test.dEq, 0);
+ CHECK_EQ(test.dUeq, 1);
+ CHECK_EQ(test.dOlt, 0);
+ CHECK_EQ(test.dUlt, 1);
+ CHECK_EQ(test.dOle, 0);
+ CHECK_EQ(test.dUle, 1);
+ CHECK_EQ(test.fF, 0);
+ CHECK_EQ(test.fUn, 1);
+ CHECK_EQ(test.fEq, 0);
+ CHECK_EQ(test.fUeq, 1);
+ CHECK_EQ(test.fOlt, 0);
+ CHECK_EQ(test.fUlt, 1);
+ CHECK_EQ(test.fOle, 0);
+ CHECK_EQ(test.fUle, 1);
+ }
+}
+
+
+TEST(CMP_COND_FMT) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test_float {
+ double dOp1;
+ double dOp2;
+ double dF;
+ double dUn;
+ double dEq;
+ double dUeq;
+ double dOlt;
+ double dUlt;
+ double dOle;
+ double dUle;
+ double dOr;
+ double dUne;
+ double dNe;
+ float fOp1;
+ float fOp2;
+ float fF;
+ float fUn;
+ float fEq;
+ float fUeq;
+ float fOlt;
+ float fUlt;
+ float fOle;
+ float fUle;
+ float fOr;
+ float fUne;
+ float fNe;
+ } TestFloat;
+
+ TestFloat test;
+
+ __ li(t1, 1);
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, dOp1)));
+ __ ldc1(f6, MemOperand(a0, OFFSET_OF(TestFloat, dOp2)));
+
+ __ lwc1(f14, MemOperand(a0, OFFSET_OF(TestFloat, fOp1)));
+ __ lwc1(f16, MemOperand(a0, OFFSET_OF(TestFloat, fOp2)));
+
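+    // On r6, cmp.<cond>.<fmt> writes all ones into the destination FPR when
+    // the condition holds and all zeros otherwise, so the stored values are
+    // compared as raw bit patterns against dTrue/dFalse and fTrue/fFalse.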
+ __ cmp_d(F, f2, f4, f6);
+ __ cmp_s(F, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dF)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fF)) );
+
+ __ cmp_d(UN, f2, f4, f6);
+ __ cmp_s(UN, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dUn)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fUn)) );
+
+ __ cmp_d(EQ, f2, f4, f6);
+ __ cmp_s(EQ, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dEq)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fEq)) );
+
+ __ cmp_d(UEQ, f2, f4, f6);
+ __ cmp_s(UEQ, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dUeq)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fUeq)) );
+
+ __ cmp_d(LT, f2, f4, f6);
+ __ cmp_s(LT, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dOlt)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fOlt)) );
+
+ __ cmp_d(ULT, f2, f4, f6);
+ __ cmp_s(ULT, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dUlt)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fUlt)) );
+
+ __ cmp_d(LE, f2, f4, f6);
+ __ cmp_s(LE, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dOle)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fOle)) );
+
+ __ cmp_d(ULE, f2, f4, f6);
+ __ cmp_s(ULE, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dUle)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fUle)) );
+
+ __ cmp_d(ORD, f2, f4, f6);
+ __ cmp_s(ORD, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dOr)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fOr)) );
+
+ __ cmp_d(UNE, f2, f4, f6);
+ __ cmp_s(UNE, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dUne)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fUne)) );
+
+ __ cmp_d(NE, f2, f4, f6);
+ __ cmp_s(NE, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dNe)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fNe)) );
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ uint64_t dTrue = 0xFFFFFFFFFFFFFFFF;
+ uint64_t dFalse = 0x0000000000000000;
+ uint32_t fTrue = 0xFFFFFFFF;
+ uint32_t fFalse = 0x00000000;
+
+ test.dOp1 = 2.0;
+ test.dOp2 = 3.0;
+ test.fOp1 = 2.0;
+ test.fOp2 = 3.0;
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUn), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dEq), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUeq), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOlt), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUlt), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOle), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUle), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOr), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUne), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dNe), dTrue);
+ CHECK_EQ(bit_cast<uint32_t>(test.fF), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUn), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fEq), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUeq), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fOlt), fTrue);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUlt), fTrue);
+ CHECK_EQ(bit_cast<uint32_t>(test.fOle), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fUle), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fOr), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fUne), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fNe), fTrue);
+
+ test.dOp1 = std::numeric_limits<double>::max();
+ test.dOp2 = std::numeric_limits<double>::min();
+ test.fOp1 = std::numeric_limits<float>::min();
+ test.fOp2 = std::numeric_limits<float>::lowest();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUn), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dEq), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUeq), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOlt), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUlt), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOle), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUle), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOr), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUne), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dNe), dTrue);
+ CHECK_EQ(bit_cast<uint32_t>(test.fF), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUn), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fEq), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUeq), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fOlt), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUlt), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fOle), fFalse);
+    CHECK_EQ(bit_cast<uint32_t>(test.fUle), fFalse);
+    CHECK_EQ(bit_cast<uint32_t>(test.fOr), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fUne), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fNe), fTrue);
+
+ test.dOp1 = std::numeric_limits<double>::lowest();
+ test.dOp2 = std::numeric_limits<double>::lowest();
+ test.fOp1 = std::numeric_limits<float>::max();
+ test.fOp2 = std::numeric_limits<float>::max();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUn), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dEq), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUeq), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOlt), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUlt), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOle), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUle), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOr), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUne), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dNe), dFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fF), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUn), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fEq), fTrue);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUeq), fTrue);
+ CHECK_EQ(bit_cast<uint32_t>(test.fOlt), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUlt), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fOle), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fUle), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fOr), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fUne), fFalse);
+    CHECK_EQ(bit_cast<uint32_t>(test.fNe), fFalse);
+
+ test.dOp1 = std::numeric_limits<double>::quiet_NaN();
+ test.dOp2 = 0.0;
+ test.fOp1 = std::numeric_limits<float>::quiet_NaN();
+ test.fOp2 = 0.0;
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUn), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dEq), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUeq), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOlt), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUlt), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOle), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUle), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOr), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUne), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dNe), dFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fF), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUn), fTrue);
+ CHECK_EQ(bit_cast<uint32_t>(test.fEq), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUeq), fTrue);
+ CHECK_EQ(bit_cast<uint32_t>(test.fOlt), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUlt), fTrue);
+ CHECK_EQ(bit_cast<uint32_t>(test.fOle), fFalse);
+    CHECK_EQ(bit_cast<uint32_t>(test.fUle), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fOr), fFalse);
+    CHECK_EQ(bit_cast<uint32_t>(test.fUne), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fNe), fFalse);
+ }
+}
+
+
+TEST(CVT) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test_float {
+ float cvt_d_s_in;
+ double cvt_d_s_out;
+ int32_t cvt_d_w_in;
+ double cvt_d_w_out;
+ int64_t cvt_d_l_in;
+ double cvt_d_l_out;
+
+ float cvt_l_s_in;
+ int64_t cvt_l_s_out;
+ double cvt_l_d_in;
+ int64_t cvt_l_d_out;
+
+ double cvt_s_d_in;
+ float cvt_s_d_out;
+ int32_t cvt_s_w_in;
+ float cvt_s_w_out;
+ int64_t cvt_s_l_in;
+ float cvt_s_l_out;
+
+ float cvt_w_s_in;
+ int32_t cvt_w_s_out;
+ double cvt_w_d_in;
+ int32_t cvt_w_d_out;
+ } TestFloat;
+
+ TestFloat test;
+
+ // Save FCSR.
+ __ cfc1(a1, FCSR);
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
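+  // With exceptions disabled, out-of-range conversions produce a default
+  // saturated result instead of trapping, which the checks below rely on.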
+
+#define GENERATE_CVT_TEST(x, y, z) \
+ __ y##c1(f0, MemOperand(a0, OFFSET_OF(TestFloat, x##_in))); \
+ __ x(f0, f0); \
+ __ nop(); \
+ __ z##c1(f0, MemOperand(a0, OFFSET_OF(TestFloat, x##_out)));
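+// E.g. GENERATE_CVT_TEST(cvt_d_s, lw, sd) emits lwc1 from cvt_d_s_in,
+// cvt_d_s(f0, f0), a nop, and sdc1 to cvt_d_s_out.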
+
+ GENERATE_CVT_TEST(cvt_d_s, lw, sd)
+ GENERATE_CVT_TEST(cvt_d_w, lw, sd)
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ GENERATE_CVT_TEST(cvt_d_l, ld, sd)
+ }
+
+ if (IsFp64Mode()) {
+ GENERATE_CVT_TEST(cvt_l_s, lw, sd)
+ GENERATE_CVT_TEST(cvt_l_d, ld, sd)
+ }
+
+ GENERATE_CVT_TEST(cvt_s_d, ld, sw)
+ GENERATE_CVT_TEST(cvt_s_w, lw, sw)
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ GENERATE_CVT_TEST(cvt_s_l, ld, sw)
+ }
+
+ GENERATE_CVT_TEST(cvt_w_s, lw, sw)
+ GENERATE_CVT_TEST(cvt_w_d, ld, sw)
+
+ // Restore FCSR.
+ __ ctc1(a1, FCSR);
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ test.cvt_d_s_in = -0.51;
+ test.cvt_d_w_in = -1;
+ test.cvt_d_l_in = -1;
+ test.cvt_l_s_in = -0.51;
+ test.cvt_l_d_in = -0.51;
+ test.cvt_s_d_in = -0.51;
+ test.cvt_s_w_in = -1;
+ test.cvt_s_l_in = -1;
+ test.cvt_w_s_in = -0.51;
+ test.cvt_w_d_in = -0.51;
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
+ CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
+ }
+ if (IsFp64Mode()) {
+ CHECK_EQ(test.cvt_l_s_out, -1);
+ CHECK_EQ(test.cvt_l_d_out, -1);
+ }
+ CHECK_EQ(test.cvt_s_d_out, static_cast<float>(test.cvt_s_d_in));
+ CHECK_EQ(test.cvt_s_w_out, static_cast<float>(test.cvt_s_w_in));
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ CHECK_EQ(test.cvt_s_l_out, static_cast<float>(test.cvt_s_l_in));
+ }
+ CHECK_EQ(test.cvt_w_s_out, -1);
+ CHECK_EQ(test.cvt_w_d_out, -1);
+
+
+ test.cvt_d_s_in = 0.49;
+ test.cvt_d_w_in = 1;
+ test.cvt_d_l_in = 1;
+ test.cvt_l_s_in = 0.49;
+ test.cvt_l_d_in = 0.49;
+ test.cvt_s_d_in = 0.49;
+ test.cvt_s_w_in = 1;
+ test.cvt_s_l_in = 1;
+ test.cvt_w_s_in = 0.49;
+ test.cvt_w_d_in = 0.49;
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
+ CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
+ }
+ if (IsFp64Mode()) {
+ CHECK_EQ(test.cvt_l_s_out, 0);
+ CHECK_EQ(test.cvt_l_d_out, 0);
+ }
+ CHECK_EQ(test.cvt_s_d_out, static_cast<float>(test.cvt_s_d_in));
+ CHECK_EQ(test.cvt_s_w_out, static_cast<float>(test.cvt_s_w_in));
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ CHECK_EQ(test.cvt_s_l_out, static_cast<float>(test.cvt_s_l_in));
+ }
+ CHECK_EQ(test.cvt_w_s_out, 0);
+ CHECK_EQ(test.cvt_w_d_out, 0);
+
+ test.cvt_d_s_in = std::numeric_limits<float>::max();
+ test.cvt_d_w_in = std::numeric_limits<int32_t>::max();
+ test.cvt_d_l_in = std::numeric_limits<int64_t>::max();
+ test.cvt_l_s_in = std::numeric_limits<float>::max();
+ test.cvt_l_d_in = std::numeric_limits<double>::max();
+ test.cvt_s_d_in = std::numeric_limits<double>::max();
+ test.cvt_s_w_in = std::numeric_limits<int32_t>::max();
+ test.cvt_s_l_in = std::numeric_limits<int64_t>::max();
+ test.cvt_w_s_in = std::numeric_limits<float>::max();
+ test.cvt_w_d_in = std::numeric_limits<double>::max();
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
+ CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
+ }
+ if (IsFp64Mode()) {
+ CHECK_EQ(test.cvt_l_s_out, std::numeric_limits<int64_t>::max());
+ CHECK_EQ(test.cvt_l_d_out, std::numeric_limits<int64_t>::max());
+ }
+ CHECK_EQ(test.cvt_s_d_out, static_cast<float>(test.cvt_s_d_in));
+ CHECK_EQ(test.cvt_s_w_out, static_cast<float>(test.cvt_s_w_in));
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ CHECK_EQ(test.cvt_s_l_out, static_cast<float>(test.cvt_s_l_in));
+ }
+ CHECK_EQ(test.cvt_w_s_out, std::numeric_limits<int32_t>::max());
+ CHECK_EQ(test.cvt_w_d_out, std::numeric_limits<int32_t>::max());
+
+
+ test.cvt_d_s_in = std::numeric_limits<float>::lowest();
+ test.cvt_d_w_in = std::numeric_limits<int32_t>::lowest();
+ test.cvt_d_l_in = std::numeric_limits<int64_t>::lowest();
+ test.cvt_l_s_in = std::numeric_limits<float>::lowest();
+ test.cvt_l_d_in = std::numeric_limits<double>::lowest();
+ test.cvt_s_d_in = std::numeric_limits<double>::lowest();
+ test.cvt_s_w_in = std::numeric_limits<int32_t>::lowest();
+ test.cvt_s_l_in = std::numeric_limits<int64_t>::lowest();
+ test.cvt_w_s_in = std::numeric_limits<float>::lowest();
+ test.cvt_w_d_in = std::numeric_limits<double>::lowest();
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
+ CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
+ }
+  // The value returned when converting from floating-point to fixed-point
+  // is not consistent between the board, the simulator and the specification
+  // in this test case, so either saturation value is accepted.
+ if (IsFp64Mode()) {
+ CHECK(test.cvt_l_s_out == std::numeric_limits<int64_t>::min() ||
+ test.cvt_l_s_out == std::numeric_limits<int64_t>::max());
+ CHECK(test.cvt_l_d_out == std::numeric_limits<int64_t>::min() ||
+ test.cvt_l_d_out == std::numeric_limits<int64_t>::max());
+ }
+ CHECK_EQ(test.cvt_s_d_out, static_cast<float>(test.cvt_s_d_in));
+ CHECK_EQ(test.cvt_s_w_out, static_cast<float>(test.cvt_s_w_in));
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ CHECK_EQ(test.cvt_s_l_out, static_cast<float>(test.cvt_s_l_in));
+ }
+ CHECK(test.cvt_w_s_out == std::numeric_limits<int32_t>::min() ||
+ test.cvt_w_s_out == std::numeric_limits<int32_t>::max());
+ CHECK(test.cvt_w_d_out == std::numeric_limits<int32_t>::min() ||
+ test.cvt_w_d_out == std::numeric_limits<int32_t>::max());
+
+
+ test.cvt_d_s_in = std::numeric_limits<float>::min();
+ test.cvt_d_w_in = std::numeric_limits<int32_t>::min();
+ test.cvt_d_l_in = std::numeric_limits<int64_t>::min();
+ test.cvt_l_s_in = std::numeric_limits<float>::min();
+ test.cvt_l_d_in = std::numeric_limits<double>::min();
+ test.cvt_s_d_in = std::numeric_limits<double>::min();
+ test.cvt_s_w_in = std::numeric_limits<int32_t>::min();
+ test.cvt_s_l_in = std::numeric_limits<int64_t>::min();
+ test.cvt_w_s_in = std::numeric_limits<float>::min();
+ test.cvt_w_d_in = std::numeric_limits<double>::min();
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
+ CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
+ }
+ if (IsFp64Mode()) {
+ CHECK_EQ(test.cvt_l_s_out, 0);
+ CHECK_EQ(test.cvt_l_d_out, 0);
+ }
+ CHECK_EQ(test.cvt_s_d_out, static_cast<float>(test.cvt_s_d_in));
+ CHECK_EQ(test.cvt_s_w_out, static_cast<float>(test.cvt_s_w_in));
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ CHECK_EQ(test.cvt_s_l_out, static_cast<float>(test.cvt_s_l_in));
+ }
+ CHECK_EQ(test.cvt_w_s_out, 0);
+ CHECK_EQ(test.cvt_w_d_out, 0);
+}
+
+
+TEST(DIV_FMT) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test {
+ double dOp1;
+ double dOp2;
+ double dRes;
+ float fOp1;
+ float fOp2;
+ float fRes;
+ } Test;
+
+ Test test;
+
+ // Save FCSR.
+ __ cfc1(a1, FCSR);
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, dOp1)) );
+ __ ldc1(f2, MemOperand(a0, OFFSET_OF(Test, dOp2)) );
+ __ nop();
+ __ div_d(f6, f4, f2);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(Test, dRes)) );
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(Test, fOp1)) );
+ __ lwc1(f2, MemOperand(a0, OFFSET_OF(Test, fOp2)) );
+ __ nop();
+ __ div_s(f6, f4, f2);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(Test, fRes)) );
+
+ // Restore FCSR.
+ __ ctc1(a1, FCSR);
+
+ __ jr(ra);
+ __ nop();
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+
+ const int test_size = 3;
+
+ double dOp1[test_size] = {
+ 5.0,
+ DBL_MAX,
+ DBL_MAX,
+ };
+ double dOp2[test_size] = {
+ 2.0,
+ 2.0,
+ -DBL_MAX,
+ };
+ double dRes[test_size] = {
+ 2.5,
+ DBL_MAX / 2.0,
+ -1.0,
+ };
+ float fOp1[test_size] = {
+ 5.0,
+ FLT_MAX,
+ FLT_MAX,
+ };
+ float fOp2[test_size] = {
+ 2.0,
+ 2.0,
+ -FLT_MAX,
+ };
+ float fRes[test_size] = {
+ 2.5,
+ FLT_MAX / 2.0,
+ -1.0,
+ };
+
+ for (int i = 0; i < test_size; i++) {
+ test.dOp1 = dOp1[i];
+ test.dOp2 = dOp2[i];
+ test.fOp1 = fOp1[i];
+ test.fOp2 = fOp2[i];
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.dRes, dRes[i]);
+ CHECK_EQ(test.fRes, fRes[i]);
+ }
+
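+  // A finite non-zero value divided by +-0.0 yields an infinity.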
+ test.dOp1 = DBL_MAX;
+ test.dOp2 = -0.0;
+ test.fOp1 = FLT_MAX;
+ test.fOp2 = -0.0;
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(false, std::isfinite(test.dRes));
+ CHECK_EQ(false, std::isfinite(test.fRes));
+
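+  // 0.0 / -0.0 is an IEEE 754 invalid operation and yields NaN.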
+ test.dOp1 = 0.0;
+ test.dOp2 = -0.0;
+ test.fOp1 = 0.0;
+ test.fOp2 = -0.0;
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(true, std::isnan(test.dRes));
+ CHECK_EQ(true, std::isnan(test.fRes));
+
+ test.dOp1 = std::numeric_limits<double>::quiet_NaN();
+ test.dOp2 = -5.0;
+ test.fOp1 = std::numeric_limits<float>::quiet_NaN();
+ test.fOp2 = -5.0;
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(true, std::isnan(test.dRes));
+ CHECK_EQ(true, std::isnan(test.fRes));
+}
+
+
#undef __
}
+TEST(BITSWAP) {
+ // Test BITSWAP
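+  // bitswap reverses the bit order within each byte of the low 32 bits of
+  // rt and sign-extends the 32-bit result (e.g. 0x78 = 0b01111000 becomes
+  // 0x1E = 0b00011110); dbitswap does the same for all eight bytes.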
+ if (kArchVariant == kMips64r6) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ int64_t r1;
+ int64_t r2;
+ int64_t r3;
+ int64_t r4;
+ int64_t r5;
+ int64_t r6;
+ } T;
+ T t;
+
+ Assembler assm(isolate, NULL, 0);
+
+ __ ld(a4, MemOperand(a0, OFFSET_OF(T, r1)));
+ __ nop();
+ __ bitswap(a6, a4);
+ __ sd(a6, MemOperand(a0, OFFSET_OF(T, r1)));
+
+ __ ld(a4, MemOperand(a0, OFFSET_OF(T, r2)));
+ __ nop();
+ __ bitswap(a6, a4);
+ __ sd(a6, MemOperand(a0, OFFSET_OF(T, r2)));
+
+ __ ld(a4, MemOperand(a0, OFFSET_OF(T, r3)));
+ __ nop();
+ __ bitswap(a6, a4);
+ __ sd(a6, MemOperand(a0, OFFSET_OF(T, r3)));
+
+ __ ld(a4, MemOperand(a0, OFFSET_OF(T, r4)));
+ __ nop();
+ __ bitswap(a6, a4);
+ __ sd(a6, MemOperand(a0, OFFSET_OF(T, r4)));
+
+ __ ld(a4, MemOperand(a0, OFFSET_OF(T, r5)));
+ __ nop();
+ __ dbitswap(a6, a4);
+ __ sd(a6, MemOperand(a0, OFFSET_OF(T, r5)));
+
+ __ ld(a4, MemOperand(a0, OFFSET_OF(T, r6)));
+ __ nop();
+ __ dbitswap(a6, a4);
+ __ sd(a6, MemOperand(a0, OFFSET_OF(T, r6)));
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ t.r1 = 0x00102100781A15C3;
+ t.r2 = 0x001021008B71FCDE;
+ t.r3 = 0xFF8017FF781A15C3;
+ t.r4 = 0xFF8017FF8B71FCDE;
+ t.r5 = 0x10C021098B71FCDE;
+ t.r6 = 0xFB8017FF781A15C3;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(static_cast<int64_t>(0x000000001E58A8C3L), t.r1);
+ CHECK_EQ(static_cast<int64_t>(0xFFFFFFFFD18E3F7BL), t.r2);
+ CHECK_EQ(static_cast<int64_t>(0x000000001E58A8C3L), t.r3);
+ CHECK_EQ(static_cast<int64_t>(0xFFFFFFFFD18E3F7BL), t.r4);
+ CHECK_EQ(static_cast<int64_t>(0x08038490D18E3F7BL), t.r5);
+ CHECK_EQ(static_cast<int64_t>(0xDF01E8FF1E58A8C3L), t.r6);
+ }
+}
+
+
+TEST(class_fmt) {
+ if (kArchVariant == kMips64r6) {
+ // Test CLASS.fmt instruction.
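+    // class.fmt writes a bit mask classifying its operand:
+    //   0x001 signaling NaN, 0x002 quiet NaN, 0x004 -infinity,
+    //   0x008 negative normal, 0x010 negative subnormal, 0x020 -0,
+    //   0x040 +infinity, 0x080 positive normal, 0x100 positive subnormal,
+    //   0x200 +0.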
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ double dSignalingNan;
+ double dQuietNan;
+ double dNegInf;
+ double dNegNorm;
+ double dNegSubnorm;
+ double dNegZero;
+ double dPosInf;
+ double dPosNorm;
+ double dPosSubnorm;
+ double dPosZero;
+ float fSignalingNan;
+ float fQuietNan;
+ float fNegInf;
+ float fNegNorm;
+ float fNegSubnorm;
+ float fNegZero;
+ float fPosInf;
+ float fPosNorm;
+ float fPosSubnorm;
+      float fPosZero;
+    } T;
+ T t;
+
+    // Create a function that accepts &t and loads, classifies, and stores
+    // the doubles and floats in t.
+ MacroAssembler assm(isolate, NULL, 0);
+
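+    // Testing instruction CLASS.D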
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, dSignalingNan)));
+ __ class_d(f6, f4);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, dSignalingNan)));
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, dQuietNan)));
+ __ class_d(f6, f4);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, dQuietNan)));
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, dNegInf)));
+ __ class_d(f6, f4);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, dNegInf)));
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, dNegNorm)));
+ __ class_d(f6, f4);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, dNegNorm)));
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, dNegSubnorm)));
+ __ class_d(f6, f4);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, dNegSubnorm)));
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, dNegZero)));
+ __ class_d(f6, f4);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, dNegZero)));
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, dPosInf)));
+ __ class_d(f6, f4);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, dPosInf)));
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, dPosNorm)));
+ __ class_d(f6, f4);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, dPosNorm)));
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, dPosSubnorm)));
+ __ class_d(f6, f4);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, dPosSubnorm)));
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, dPosZero)));
+ __ class_d(f6, f4);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, dPosZero)));
+
+ // Testing instruction CLASS.S
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(T, fSignalingNan)));
+ __ class_s(f6, f4);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(T, fSignalingNan)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(T, fQuietNan)));
+ __ class_s(f6, f4);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(T, fQuietNan)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(T, fNegInf)));
+ __ class_s(f6, f4);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(T, fNegInf)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(T, fNegNorm)));
+ __ class_s(f6, f4);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(T, fNegNorm)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(T, fNegSubnorm)));
+ __ class_s(f6, f4);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(T, fNegSubnorm)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(T, fNegZero)));
+ __ class_s(f6, f4);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(T, fNegZero)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(T, fPosInf)));
+ __ class_s(f6, f4);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(T, fPosInf)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(T, fPosNorm)));
+ __ class_s(f6, f4);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(T, fPosNorm)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(T, fPosSubnorm)));
+ __ class_s(f6, f4);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(T, fPosSubnorm)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(T, fPosZero)));
+ __ class_s(f6, f4);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(T, fPosZero)));
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ // Double test values.
+ t.dSignalingNan = std::numeric_limits<double>::signaling_NaN();
+ t.dQuietNan = std::numeric_limits<double>::quiet_NaN();
+ t.dNegInf = -1.0 / 0.0;
+ t.dNegNorm = -5.0;
+ t.dNegSubnorm = -DBL_MIN / 2.0;
+ t.dNegZero = -0.0;
+ t.dPosInf = 2.0 / 0.0;
+ t.dPosNorm = 275.35;
+ t.dPosSubnorm = DBL_MIN / 2.0;
+ t.dPosZero = +0.0;
+
+    // Float test values.
+ t.fSignalingNan = std::numeric_limits<float>::signaling_NaN();
+ t.fQuietNan = std::numeric_limits<float>::quiet_NaN();
+    t.fNegInf = -0.5 / 0.0;
+ t.fNegNorm = -FLT_MIN;
+ t.fNegSubnorm = -FLT_MIN / 1.5;
+ t.fNegZero = -0.0;
+ t.fPosInf = 100000.0 / 0.0;
+ t.fPosNorm = FLT_MAX;
+ t.fPosSubnorm = FLT_MIN / 20.0;
+ t.fPosZero = +0.0;
+
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+    // Expected double results.
+    CHECK_EQ(bit_cast<uint64_t>(t.dSignalingNan), 0x001);
+    CHECK_EQ(bit_cast<uint64_t>(t.dQuietNan), 0x002);
+ CHECK_EQ(bit_cast<uint64_t>(t.dNegInf), 0x004);
+ CHECK_EQ(bit_cast<uint64_t>(t.dNegNorm), 0x008);
+ CHECK_EQ(bit_cast<uint64_t>(t.dNegSubnorm), 0x010);
+ CHECK_EQ(bit_cast<uint64_t>(t.dNegZero), 0x020);
+ CHECK_EQ(bit_cast<uint64_t>(t.dPosInf), 0x040);
+ CHECK_EQ(bit_cast<uint64_t>(t.dPosNorm), 0x080);
+ CHECK_EQ(bit_cast<uint64_t>(t.dPosSubnorm), 0x100);
+ CHECK_EQ(bit_cast<uint64_t>(t.dPosZero), 0x200);
+
+    // Expected float results.
+    CHECK_EQ(bit_cast<uint32_t>(t.fSignalingNan), 0x001);
+    CHECK_EQ(bit_cast<uint32_t>(t.fQuietNan), 0x002);
+ CHECK_EQ(bit_cast<uint32_t>(t.fNegInf), 0x004);
+ CHECK_EQ(bit_cast<uint32_t>(t.fNegNorm), 0x008);
+ CHECK_EQ(bit_cast<uint32_t>(t.fNegSubnorm), 0x010);
+ CHECK_EQ(bit_cast<uint32_t>(t.fNegZero), 0x020);
+ CHECK_EQ(bit_cast<uint32_t>(t.fPosInf), 0x040);
+ CHECK_EQ(bit_cast<uint32_t>(t.fPosNorm), 0x080);
+ CHECK_EQ(bit_cast<uint32_t>(t.fPosSubnorm), 0x100);
+ CHECK_EQ(bit_cast<uint32_t>(t.fPosZero), 0x200);
+ }
+}
+
+
+TEST(ABS) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test_float {
+ int64_t fir;
+ double a;
+ float b;
+ double fcsr;
+ } TestFloat;
+
+ TestFloat test;
+
+  // Save FCSR.
+ __ cfc1(a1, FCSR);
+ __ sd(a1, MemOperand(a0, OFFSET_OF(TestFloat, fcsr)));
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, a)));
+ __ abs_d(f10, f4);
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, a)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, b)));
+ __ abs_s(f10, f4);
+ __ swc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, b)));
+
+ // Restore FCSR.
+ __ ctc1(a1, FCSR);
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ test.a = -2.0;
+ test.b = -2.0;
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.a, 2.0);
+ CHECK_EQ(test.b, 2.0);
+
+ test.a = 2.0;
+ test.b = 2.0;
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.a, 2.0);
+ CHECK_EQ(test.b, 2.0);
+
+ // Testing biggest positive number
+ test.a = std::numeric_limits<double>::max();
+ test.b = std::numeric_limits<float>::max();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.a, std::numeric_limits<double>::max());
+ CHECK_EQ(test.b, std::numeric_limits<float>::max());
+
+ // Testing smallest negative number
+  test.a = std::numeric_limits<double>::lowest();
+  test.b = std::numeric_limits<float>::lowest();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.a, std::numeric_limits<double>::max());
+ CHECK_EQ(test.b, std::numeric_limits<float>::max());
+
+ // Testing smallest positive number
+ test.a = -std::numeric_limits<double>::min();
+ test.b = -std::numeric_limits<float>::min();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.a, std::numeric_limits<double>::min());
+ CHECK_EQ(test.b, std::numeric_limits<float>::min());
+
+ // Testing infinity
+ test.a = -std::numeric_limits<double>::max()
+ / std::numeric_limits<double>::min();
+ test.b = -std::numeric_limits<float>::max()
+ / std::numeric_limits<float>::min();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.a, std::numeric_limits<double>::max()
+ / std::numeric_limits<double>::min());
+ CHECK_EQ(test.b, std::numeric_limits<float>::max()
+ / std::numeric_limits<float>::min());
+
+ test.a = std::numeric_limits<double>::quiet_NaN();
+ test.b = std::numeric_limits<float>::quiet_NaN();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(std::isnan(test.a), true);
+ CHECK_EQ(std::isnan(test.b), true);
+
+ test.a = std::numeric_limits<double>::signaling_NaN();
+ test.b = std::numeric_limits<float>::signaling_NaN();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(std::isnan(test.a), true);
+ CHECK_EQ(std::isnan(test.b), true);
+}
+
+
+TEST(ADD_FMT) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test_float {
+ double a;
+ double b;
+ double c;
+ float fa;
+ float fb;
+ float fc;
+ } TestFloat;
+
+ TestFloat test;
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, a)));
+ __ ldc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, b)));
+ __ add_d(f10, f8, f4);
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, c)));
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, fa)));
+ __ lwc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, fb)));
+ __ add_s(f10, f8, f4);
+ __ swc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, fc)));
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ test.a = 2.0;
+ test.b = 3.0;
+ test.fa = 2.0;
+ test.fb = 3.0;
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.c, 5.0);
+ CHECK_EQ(test.fc, 5.0);
+
+ test.a = std::numeric_limits<double>::max();
+ test.b = std::numeric_limits<double>::lowest();
+ test.fa = std::numeric_limits<float>::max();
+ test.fb = std::numeric_limits<float>::lowest();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.c, 0.0);
+ CHECK_EQ(test.fc, 0.0);
+
+ test.a = std::numeric_limits<double>::max();
+ test.b = std::numeric_limits<double>::max();
+ test.fa = std::numeric_limits<float>::max();
+ test.fb = std::numeric_limits<float>::max();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(std::isfinite(test.c), false);
+ CHECK_EQ(std::isfinite(test.fc), false);
+
+ test.a = 5.0;
+ test.b = std::numeric_limits<double>::signaling_NaN();
+ test.fa = 5.0;
+ test.fb = std::numeric_limits<float>::signaling_NaN();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(std::isnan(test.c), true);
+ CHECK_EQ(std::isnan(test.fc), true);
+}
+
+
+TEST(C_COND_FMT) {
+ if (kArchVariant == kMips64r2) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test_float {
+ double dOp1;
+ double dOp2;
+ uint32_t dF;
+ uint32_t dUn;
+ uint32_t dEq;
+ uint32_t dUeq;
+ uint32_t dOlt;
+ uint32_t dUlt;
+ uint32_t dOle;
+ uint32_t dUle;
+ float fOp1;
+ float fOp2;
+ uint32_t fF;
+ uint32_t fUn;
+ uint32_t fEq;
+ uint32_t fUeq;
+ uint32_t fOlt;
+ uint32_t fUlt;
+ uint32_t fOle;
+ uint32_t fUle;
+ } TestFloat;
+
+ TestFloat test;
+
+ __ li(t1, 1);
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, dOp1)));
+ __ ldc1(f6, MemOperand(a0, OFFSET_OF(TestFloat, dOp2)));
+
+ __ lwc1(f14, MemOperand(a0, OFFSET_OF(TestFloat, fOp1)));
+ __ lwc1(f16, MemOperand(a0, OFFSET_OF(TestFloat, fOp2)));
+
+ __ mov(t2, zero_reg);
+ __ mov(t3, zero_reg);
+ __ c_d(F, f4, f6, 0);
+ __ c_s(F, f14, f16, 2);
+ __ movt(t2, t1, 0);
+ __ movt(t3, t1, 2);
+ __ sw(t2, MemOperand(a0, OFFSET_OF(TestFloat, dF)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(TestFloat, fF)) );
+
+ __ mov(t2, zero_reg);
+ __ mov(t3, zero_reg);
+ __ c_d(UN, f4, f6, 2);
+ __ c_s(UN, f14, f16, 4);
+ __ movt(t2, t1, 2);
+ __ movt(t3, t1, 4);
+ __ sw(t2, MemOperand(a0, OFFSET_OF(TestFloat, dUn)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(TestFloat, fUn)) );
+
+ __ mov(t2, zero_reg);
+ __ mov(t3, zero_reg);
+ __ c_d(EQ, f4, f6, 4);
+ __ c_s(EQ, f14, f16, 6);
+ __ movt(t2, t1, 4);
+ __ movt(t3, t1, 6);
+ __ sw(t2, MemOperand(a0, OFFSET_OF(TestFloat, dEq)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(TestFloat, fEq)) );
+
+ __ mov(t2, zero_reg);
+ __ mov(t3, zero_reg);
+ __ c_d(UEQ, f4, f6, 6);
+ __ c_s(UEQ, f14, f16, 0);
+ __ movt(t2, t1, 6);
+ __ movt(t3, t1, 0);
+ __ sw(t2, MemOperand(a0, OFFSET_OF(TestFloat, dUeq)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(TestFloat, fUeq)) );
+
+ __ mov(t2, zero_reg);
+ __ mov(t3, zero_reg);
+ __ c_d(OLT, f4, f6, 0);
+ __ c_s(OLT, f14, f16, 2);
+ __ movt(t2, t1, 0);
+ __ movt(t3, t1, 2);
+ __ sw(t2, MemOperand(a0, OFFSET_OF(TestFloat, dOlt)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(TestFloat, fOlt)) );
+
+ __ mov(t2, zero_reg);
+ __ mov(t3, zero_reg);
+ __ c_d(ULT, f4, f6, 2);
+ __ c_s(ULT, f14, f16, 4);
+ __ movt(t2, t1, 2);
+ __ movt(t3, t1, 4);
+ __ sw(t2, MemOperand(a0, OFFSET_OF(TestFloat, dUlt)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(TestFloat, fUlt)) );
+
+ __ mov(t2, zero_reg);
+ __ mov(t3, zero_reg);
+ __ c_d(OLE, f4, f6, 4);
+ __ c_s(OLE, f14, f16, 6);
+ __ movt(t2, t1, 4);
+ __ movt(t3, t1, 6);
+ __ sw(t2, MemOperand(a0, OFFSET_OF(TestFloat, dOle)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(TestFloat, fOle)) );
+
+ __ mov(t2, zero_reg);
+ __ mov(t3, zero_reg);
+ __ c_d(ULE, f4, f6, 6);
+ __ c_s(ULE, f14, f16, 0);
+ __ movt(t2, t1, 6);
+ __ movt(t3, t1, 0);
+ __ sw(t2, MemOperand(a0, OFFSET_OF(TestFloat, dUle)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(TestFloat, fUle)) );
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ test.dOp1 = 2.0;
+ test.dOp2 = 3.0;
+ test.fOp1 = 2.0;
+ test.fOp2 = 3.0;
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.dF, 0);
+ CHECK_EQ(test.dUn, 0);
+ CHECK_EQ(test.dEq, 0);
+ CHECK_EQ(test.dUeq, 0);
+ CHECK_EQ(test.dOlt, 1);
+ CHECK_EQ(test.dUlt, 1);
+ CHECK_EQ(test.dOle, 1);
+ CHECK_EQ(test.dUle, 1);
+ CHECK_EQ(test.fF, 0);
+ CHECK_EQ(test.fUn, 0);
+ CHECK_EQ(test.fEq, 0);
+ CHECK_EQ(test.fUeq, 0);
+ CHECK_EQ(test.fOlt, 1);
+ CHECK_EQ(test.fUlt, 1);
+ CHECK_EQ(test.fOle, 1);
+ CHECK_EQ(test.fUle, 1);
+
+ test.dOp1 = std::numeric_limits<double>::max();
+ test.dOp2 = std::numeric_limits<double>::min();
+ test.fOp1 = std::numeric_limits<float>::min();
+ test.fOp2 = std::numeric_limits<float>::lowest();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.dF, 0);
+ CHECK_EQ(test.dUn, 0);
+ CHECK_EQ(test.dEq, 0);
+ CHECK_EQ(test.dUeq, 0);
+ CHECK_EQ(test.dOlt, 0);
+ CHECK_EQ(test.dUlt, 0);
+ CHECK_EQ(test.dOle, 0);
+ CHECK_EQ(test.dUle, 0);
+ CHECK_EQ(test.fF, 0);
+ CHECK_EQ(test.fUn, 0);
+ CHECK_EQ(test.fEq, 0);
+ CHECK_EQ(test.fUeq, 0);
+ CHECK_EQ(test.fOlt, 0);
+ CHECK_EQ(test.fUlt, 0);
+ CHECK_EQ(test.fOle, 0);
+ CHECK_EQ(test.fUle, 0);
+
+ test.dOp1 = std::numeric_limits<double>::lowest();
+ test.dOp2 = std::numeric_limits<double>::lowest();
+ test.fOp1 = std::numeric_limits<float>::max();
+ test.fOp2 = std::numeric_limits<float>::max();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.dF, 0);
+ CHECK_EQ(test.dUn, 0);
+ CHECK_EQ(test.dEq, 1);
+ CHECK_EQ(test.dUeq, 1);
+ CHECK_EQ(test.dOlt, 0);
+ CHECK_EQ(test.dUlt, 0);
+ CHECK_EQ(test.dOle, 1);
+ CHECK_EQ(test.dUle, 1);
+ CHECK_EQ(test.fF, 0);
+ CHECK_EQ(test.fUn, 0);
+ CHECK_EQ(test.fEq, 1);
+ CHECK_EQ(test.fUeq, 1);
+ CHECK_EQ(test.fOlt, 0);
+ CHECK_EQ(test.fUlt, 0);
+ CHECK_EQ(test.fOle, 1);
+ CHECK_EQ(test.fUle, 1);
+
+ test.dOp1 = std::numeric_limits<double>::quiet_NaN();
+ test.dOp2 = 0.0;
+ test.fOp1 = std::numeric_limits<float>::quiet_NaN();
+ test.fOp2 = 0.0;
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.dF, 0);
+ CHECK_EQ(test.dUn, 1);
+ CHECK_EQ(test.dEq, 0);
+ CHECK_EQ(test.dUeq, 1);
+ CHECK_EQ(test.dOlt, 0);
+ CHECK_EQ(test.dUlt, 1);
+ CHECK_EQ(test.dOle, 0);
+ CHECK_EQ(test.dUle, 1);
+ CHECK_EQ(test.fF, 0);
+ CHECK_EQ(test.fUn, 1);
+ CHECK_EQ(test.fEq, 0);
+ CHECK_EQ(test.fUeq, 1);
+ CHECK_EQ(test.fOlt, 0);
+ CHECK_EQ(test.fUlt, 1);
+ CHECK_EQ(test.fOle, 0);
+ CHECK_EQ(test.fUle, 1);
+ }
+}
+
+
+TEST(CMP_COND_FMT) {
+ if (kArchVariant == kMips64r6) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test_float {
+ double dOp1;
+ double dOp2;
+ double dF;
+ double dUn;
+ double dEq;
+ double dUeq;
+ double dOlt;
+ double dUlt;
+ double dOle;
+ double dUle;
+ double dOr;
+ double dUne;
+ double dNe;
+ float fOp1;
+ float fOp2;
+ float fF;
+ float fUn;
+ float fEq;
+ float fUeq;
+ float fOlt;
+ float fUlt;
+ float fOle;
+ float fUle;
+ float fOr;
+ float fUne;
+ float fNe;
+ } TestFloat;
+
+ TestFloat test;
+
+ __ li(t1, 1);
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, dOp1)));
+ __ ldc1(f6, MemOperand(a0, OFFSET_OF(TestFloat, dOp2)));
+
+ __ lwc1(f14, MemOperand(a0, OFFSET_OF(TestFloat, fOp1)));
+ __ lwc1(f16, MemOperand(a0, OFFSET_OF(TestFloat, fOp2)));
+
+ __ cmp_d(F, f2, f4, f6);
+ __ cmp_s(F, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dF)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fF)) );
+
+ __ cmp_d(UN, f2, f4, f6);
+ __ cmp_s(UN, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dUn)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fUn)) );
+
+ __ cmp_d(EQ, f2, f4, f6);
+ __ cmp_s(EQ, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dEq)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fEq)) );
+
+ __ cmp_d(UEQ, f2, f4, f6);
+ __ cmp_s(UEQ, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dUeq)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fUeq)) );
+
+ __ cmp_d(LT, f2, f4, f6);
+ __ cmp_s(LT, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dOlt)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fOlt)) );
+
+ __ cmp_d(ULT, f2, f4, f6);
+ __ cmp_s(ULT, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dUlt)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fUlt)) );
+
+ __ cmp_d(LE, f2, f4, f6);
+ __ cmp_s(LE, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dOle)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fOle)) );
+
+ __ cmp_d(ULE, f2, f4, f6);
+ __ cmp_s(ULE, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dUle)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fUle)) );
+
+ __ cmp_d(ORD, f2, f4, f6);
+ __ cmp_s(ORD, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dOr)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fOr)) );
+
+ __ cmp_d(UNE, f2, f4, f6);
+ __ cmp_s(UNE, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dUne)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fUne)) );
+
+ __ cmp_d(NE, f2, f4, f6);
+ __ cmp_s(NE, f12, f14, f16);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, dNe)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, fNe)) );
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ uint64_t dTrue = 0xFFFFFFFFFFFFFFFF;
+ uint64_t dFalse = 0x0000000000000000;
+ uint32_t fTrue = 0xFFFFFFFF;
+ uint32_t fFalse = 0x00000000;
+
+ test.dOp1 = 2.0;
+ test.dOp2 = 3.0;
+ test.fOp1 = 2.0;
+ test.fOp2 = 3.0;
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUn), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dEq), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUeq), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOlt), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUlt), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOle), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUle), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOr), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUne), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dNe), dTrue);
+ CHECK_EQ(bit_cast<uint32_t>(test.fF), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUn), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fEq), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUeq), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fOlt), fTrue);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUlt), fTrue);
+ CHECK_EQ(bit_cast<uint32_t>(test.fOle), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fUle), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fOr), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fUne), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fNe), fTrue);
+
+ test.dOp1 = std::numeric_limits<double>::max();
+ test.dOp2 = std::numeric_limits<double>::min();
+ test.fOp1 = std::numeric_limits<float>::min();
+ test.fOp2 = std::numeric_limits<float>::lowest();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUn), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dEq), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUeq), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOlt), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUlt), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOle), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUle), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOr), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUne), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dNe), dTrue);
+ CHECK_EQ(bit_cast<uint32_t>(test.fF), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUn), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fEq), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUeq), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fOlt), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUlt), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fOle), fFalse);
+    CHECK_EQ(bit_cast<uint32_t>(test.fUle), fFalse);
+    CHECK_EQ(bit_cast<uint32_t>(test.fOr), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fUne), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fNe), fTrue);
+
+ test.dOp1 = std::numeric_limits<double>::lowest();
+ test.dOp2 = std::numeric_limits<double>::lowest();
+ test.fOp1 = std::numeric_limits<float>::max();
+ test.fOp2 = std::numeric_limits<float>::max();
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUn), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dEq), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUeq), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOlt), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUlt), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOle), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUle), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOr), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUne), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dNe), dFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fF), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUn), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fEq), fTrue);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUeq), fTrue);
+ CHECK_EQ(bit_cast<uint32_t>(test.fOlt), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUlt), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fOle), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fUle), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fOr), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fUne), fFalse);
+    CHECK_EQ(bit_cast<uint32_t>(test.fNe), fFalse);
+
+ test.dOp1 = std::numeric_limits<double>::quiet_NaN();
+ test.dOp2 = 0.0;
+ test.fOp1 = std::numeric_limits<float>::quiet_NaN();
+ test.fOp2 = 0.0;
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUn), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dEq), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUeq), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOlt), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUlt), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOle), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUle), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dOr), dFalse);
+ CHECK_EQ(bit_cast<uint64_t>(test.dUne), dTrue);
+ CHECK_EQ(bit_cast<uint64_t>(test.dNe), dFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fF), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUn), fTrue);
+ CHECK_EQ(bit_cast<uint32_t>(test.fEq), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUeq), fTrue);
+ CHECK_EQ(bit_cast<uint32_t>(test.fOlt), fFalse);
+ CHECK_EQ(bit_cast<uint32_t>(test.fUlt), fTrue);
+ CHECK_EQ(bit_cast<uint32_t>(test.fOle), fFalse);
+    CHECK_EQ(bit_cast<uint32_t>(test.fUle), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fOr), fFalse);
+    CHECK_EQ(bit_cast<uint32_t>(test.fUne), fTrue);
+    CHECK_EQ(bit_cast<uint32_t>(test.fNe), fFalse);
+ }
+}
+
+
+TEST(CVT) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test_float {
+ float cvt_d_s_in;
+ double cvt_d_s_out;
+ int32_t cvt_d_w_in;
+ double cvt_d_w_out;
+ int64_t cvt_d_l_in;
+ double cvt_d_l_out;
+
+ float cvt_l_s_in;
+ int64_t cvt_l_s_out;
+ double cvt_l_d_in;
+ int64_t cvt_l_d_out;
+
+ double cvt_s_d_in;
+ float cvt_s_d_out;
+ int32_t cvt_s_w_in;
+ float cvt_s_w_out;
+ int64_t cvt_s_l_in;
+ float cvt_s_l_out;
+
+ float cvt_w_s_in;
+ int32_t cvt_w_s_out;
+ double cvt_w_d_in;
+ int32_t cvt_w_d_out;
+ } TestFloat;
+
+ TestFloat test;
+
+ // Save FCSR.
+ __ cfc1(a1, FCSR);
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
+
+#define GENERATE_CVT_TEST(x, y, z) \
+ __ y##c1(f0, MemOperand(a0, OFFSET_OF(TestFloat, x##_in))); \
+ __ x(f0, f0); \
+ __ nop(); \
+ __ z##c1(f0, MemOperand(a0, OFFSET_OF(TestFloat, x##_out)));
+
+ GENERATE_CVT_TEST(cvt_d_s, lw, sd)
+ GENERATE_CVT_TEST(cvt_d_w, lw, sd)
+ GENERATE_CVT_TEST(cvt_d_l, ld, sd)
+
+ GENERATE_CVT_TEST(cvt_l_s, lw, sd)
+ GENERATE_CVT_TEST(cvt_l_d, ld, sd)
+
+ GENERATE_CVT_TEST(cvt_s_d, ld, sw)
+ GENERATE_CVT_TEST(cvt_s_w, lw, sw)
+ GENERATE_CVT_TEST(cvt_s_l, ld, sw)
+
+ GENERATE_CVT_TEST(cvt_w_s, lw, sw)
+ GENERATE_CVT_TEST(cvt_w_d, ld, sw)
+
+ // Restore FCSR.
+ __ ctc1(a1, FCSR);
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ test.cvt_d_s_in = -0.51;
+ test.cvt_d_w_in = -1;
+ test.cvt_d_l_in = -1;
+ test.cvt_l_s_in = -0.51;
+ test.cvt_l_d_in = -0.51;
+ test.cvt_s_d_in = -0.51;
+ test.cvt_s_w_in = -1;
+ test.cvt_s_l_in = -1;
+ test.cvt_w_s_in = -0.51;
+ test.cvt_w_d_in = -0.51;
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
+ CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
+ CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
+ CHECK_EQ(test.cvt_l_s_out, -1);
+ CHECK_EQ(test.cvt_l_d_out, -1);
+ CHECK_EQ(test.cvt_s_d_out, static_cast<float>(test.cvt_s_d_in));
+ CHECK_EQ(test.cvt_s_w_out, static_cast<float>(test.cvt_s_w_in));
+ CHECK_EQ(test.cvt_s_l_out, static_cast<float>(test.cvt_s_l_in));
+ CHECK_EQ(test.cvt_w_s_out, -1);
+ CHECK_EQ(test.cvt_w_d_out, -1);
+
+
+ test.cvt_d_s_in = 0.49;
+ test.cvt_d_w_in = 1;
+ test.cvt_d_l_in = 1;
+ test.cvt_l_s_in = 0.49;
+ test.cvt_l_d_in = 0.49;
+ test.cvt_s_d_in = 0.49;
+ test.cvt_s_w_in = 1;
+ test.cvt_s_l_in = 1;
+ test.cvt_w_s_in = 0.49;
+ test.cvt_w_d_in = 0.49;
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
+ CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
+ CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
+ CHECK_EQ(test.cvt_l_s_out, 0);
+ CHECK_EQ(test.cvt_l_d_out, 0);
+ CHECK_EQ(test.cvt_s_d_out, static_cast<float>(test.cvt_s_d_in));
+ CHECK_EQ(test.cvt_s_w_out, static_cast<float>(test.cvt_s_w_in));
+ CHECK_EQ(test.cvt_s_l_out, static_cast<float>(test.cvt_s_l_in));
+ CHECK_EQ(test.cvt_w_s_out, 0);
+ CHECK_EQ(test.cvt_w_d_out, 0);
+
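+  // Sources above the destination's integer range are expected to saturate
+  // to the maximum representable value (checked below).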
+ test.cvt_d_s_in = std::numeric_limits<float>::max();
+ test.cvt_d_w_in = std::numeric_limits<int32_t>::max();
+ test.cvt_d_l_in = std::numeric_limits<int64_t>::max();
+ test.cvt_l_s_in = std::numeric_limits<float>::max();
+ test.cvt_l_d_in = std::numeric_limits<double>::max();
+ test.cvt_s_d_in = std::numeric_limits<double>::max();
+ test.cvt_s_w_in = std::numeric_limits<int32_t>::max();
+ test.cvt_s_l_in = std::numeric_limits<int64_t>::max();
+ test.cvt_w_s_in = std::numeric_limits<float>::max();
+ test.cvt_w_d_in = std::numeric_limits<double>::max();
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
+ CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
+ CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
+ CHECK_EQ(test.cvt_l_s_out, std::numeric_limits<int64_t>::max());
+ CHECK_EQ(test.cvt_l_d_out, std::numeric_limits<int64_t>::max());
+ CHECK_EQ(test.cvt_s_d_out, static_cast<float>(test.cvt_s_d_in));
+ CHECK_EQ(test.cvt_s_w_out, static_cast<float>(test.cvt_s_w_in));
+ CHECK_EQ(test.cvt_s_l_out, static_cast<float>(test.cvt_s_l_in));
+ CHECK_EQ(test.cvt_w_s_out, std::numeric_limits<int32_t>::max());
+ CHECK_EQ(test.cvt_w_d_out, std::numeric_limits<int32_t>::max());
+
+
+ test.cvt_d_s_in = std::numeric_limits<float>::lowest();
+ test.cvt_d_w_in = std::numeric_limits<int32_t>::lowest();
+ test.cvt_d_l_in = std::numeric_limits<int64_t>::lowest();
+ test.cvt_l_s_in = std::numeric_limits<float>::lowest();
+ test.cvt_l_d_in = std::numeric_limits<double>::lowest();
+ test.cvt_s_d_in = std::numeric_limits<double>::lowest();
+ test.cvt_s_w_in = std::numeric_limits<int32_t>::lowest();
+ test.cvt_s_l_in = std::numeric_limits<int64_t>::lowest();
+ test.cvt_w_s_in = std::numeric_limits<float>::lowest();
+ test.cvt_w_d_in = std::numeric_limits<double>::lowest();
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
+ CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
+ CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
+  // When converting an out-of-range value from floating-point to
+  // fixed-point, the result is not consistent between hardware, the
+  // simulator and the architecture specification, so accept either
+  // saturation value.
+ CHECK(test.cvt_l_s_out == std::numeric_limits<int64_t>::min() ||
+ test.cvt_l_s_out == std::numeric_limits<int64_t>::max());
+ CHECK(test.cvt_l_d_out == std::numeric_limits<int64_t>::min() ||
+ test.cvt_l_d_out == std::numeric_limits<int64_t>::max());
+ CHECK_EQ(test.cvt_s_d_out, static_cast<float>(test.cvt_s_d_in));
+ CHECK_EQ(test.cvt_s_w_out, static_cast<float>(test.cvt_s_w_in));
+ CHECK_EQ(test.cvt_s_l_out, static_cast<float>(test.cvt_s_l_in));
+ CHECK(test.cvt_w_s_out == std::numeric_limits<int32_t>::min() ||
+ test.cvt_w_s_out == std::numeric_limits<int32_t>::max());
+ CHECK(test.cvt_w_d_out == std::numeric_limits<int32_t>::min() ||
+ test.cvt_w_d_out == std::numeric_limits<int32_t>::max());
+
+
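+  // float/double min() is the smallest positive normal value, so the
+  // float-to-integer conversions round it to 0.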
+ test.cvt_d_s_in = std::numeric_limits<float>::min();
+ test.cvt_d_w_in = std::numeric_limits<int32_t>::min();
+ test.cvt_d_l_in = std::numeric_limits<int64_t>::min();
+ test.cvt_l_s_in = std::numeric_limits<float>::min();
+ test.cvt_l_d_in = std::numeric_limits<double>::min();
+ test.cvt_s_d_in = std::numeric_limits<double>::min();
+ test.cvt_s_w_in = std::numeric_limits<int32_t>::min();
+ test.cvt_s_l_in = std::numeric_limits<int64_t>::min();
+ test.cvt_w_s_in = std::numeric_limits<float>::min();
+ test.cvt_w_d_in = std::numeric_limits<double>::min();
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
+ CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
+ CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
+ CHECK_EQ(test.cvt_l_s_out, 0);
+ CHECK_EQ(test.cvt_l_d_out, 0);
+ CHECK_EQ(test.cvt_s_d_out, static_cast<float>(test.cvt_s_d_in));
+ CHECK_EQ(test.cvt_s_w_out, static_cast<float>(test.cvt_s_w_in));
+ CHECK_EQ(test.cvt_s_l_out, static_cast<float>(test.cvt_s_l_in));
+ CHECK_EQ(test.cvt_w_s_out, 0);
+ CHECK_EQ(test.cvt_w_d_out, 0);
+}
+
+
+TEST(DIV_FMT) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test {
+ double dOp1;
+ double dOp2;
+ double dRes;
+ float fOp1;
+ float fOp2;
+ float fRes;
+ } Test;
+
+ Test test;
+
+ // Save FCSR.
+ __ cfc1(a1, FCSR);
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, dOp1)) );
+ __ ldc1(f2, MemOperand(a0, OFFSET_OF(Test, dOp2)) );
+ __ nop();
+ __ div_d(f6, f4, f2);
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(Test, dRes)) );
+
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(Test, fOp1)) );
+ __ lwc1(f2, MemOperand(a0, OFFSET_OF(Test, fOp2)) );
+ __ nop();
+ __ div_s(f6, f4, f2);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(Test, fRes)) );
+
+ // Restore FCSR.
+ __ ctc1(a1, FCSR);
+
+ __ jr(ra);
+ __ nop();
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ const int test_size = 3;
+
+ double dOp1[test_size] = {
+ 5.0,
+ DBL_MAX,
+ DBL_MAX,
+ };
+ double dOp2[test_size] = {
+ 2.0,
+ 2.0,
+ -DBL_MAX,
+ };
+ double dRes[test_size] = {
+ 2.5,
+ DBL_MAX / 2.0,
+ -1.0,
+ };
+ float fOp1[test_size] = {
+ 5.0,
+ FLT_MAX,
+ FLT_MAX,
+ };
+ float fOp2[test_size] = {
+ 2.0,
+ 2.0,
+ -FLT_MAX,
+ };
+ float fRes[test_size] = {
+ 2.5,
+ FLT_MAX / 2.0,
+ -1.0,
+ };
+
+ for (int i = 0; i < test_size; i++) {
+ test.dOp1 = dOp1[i];
+ test.dOp2 = dOp2[i];
+ test.fOp1 = fOp1[i];
+ test.fOp2 = fOp2[i];
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.dRes, dRes[i]);
+ CHECK_EQ(test.fRes, fRes[i]);
+ }
+
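+  // A nonzero finite value divided by zero gives an infinity.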
+ test.dOp1 = DBL_MAX;
+ test.dOp2 = -0.0;
+ test.fOp1 = FLT_MAX;
+ test.fOp2 = -0.0;
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(false, std::isfinite(test.dRes));
+ CHECK_EQ(false, std::isfinite(test.fRes));
+
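+  // 0.0 / -0.0 is an invalid operation and must produce a NaN.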
+ test.dOp1 = 0.0;
+ test.dOp2 = -0.0;
+ test.fOp1 = 0.0;
+ test.fOp2 = -0.0;
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(true, std::isnan(test.dRes));
+ CHECK_EQ(true, std::isnan(test.fRes));
+
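+  // A NaN operand propagates to the result.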
+ test.dOp1 = std::numeric_limits<double>::quiet_NaN();
+ test.dOp2 = -5.0;
+ test.fOp1 = std::numeric_limits<float>::quiet_NaN();
+ test.fOp2 = -5.0;
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(true, std::isnan(test.dRes));
+ CHECK_EQ(true, std::isnan(test.fRes));
+}
+
+
#undef __
COMPARE(ext_(v0, v1, 0, 32),
"7c62f800 ext v0, v1, 0, 32");
}
+ COMPARE(add_s(f4, f6, f8), "46083100 add.s f4, f6, f8");
+ COMPARE(add_d(f12, f14, f16), "46307300 add.d f12, f14, f16");
+
+ if (IsMipsArchVariant(kMips32r6)) {
+ COMPARE(bitswap(a0, a1), "7c052020 bitswap a0, a1");
+ COMPARE(bitswap(t8, s0), "7c10c020 bitswap t8, s0");
+ }
+
+ COMPARE(abs_s(f6, f8), "46004185 abs.s f6, f8");
+ COMPARE(abs_d(f10, f12), "46206285 abs.d f10, f12");
+
+ COMPARE(div_s(f2, f4, f6), "46062083 div.s f2, f4, f6");
+ COMPARE(div_d(f2, f4, f6), "46262083 div.d f2, f4, f6");
VERIFY_RUN();
}
}
VERIFY_RUN();
}
+
+
+TEST(Type2) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ SET_UP();
+
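+    // class.fmt (r6 only) writes a bitmask to fd describing the class of fs
+    // (NaN, infinity, zero, subnormal or normal, with sign).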
+ COMPARE(class_s(f3, f4), "460020db class.s f3, f4");
+ COMPARE(class_d(f2, f3), "4620189b class.d f2, f3");
+
+ VERIFY_RUN();
+ }
+}
+
+
+TEST(C_FMT_DISASM) {
+ if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kMips32r2)) {
+ SET_UP();
+
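+    // c.cond.fmt compares fs and ft and sets FPU condition code cc;
+    // it exists only before r6.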
+ COMPARE(c_s(F, f8, f10, 0), "460a4030 c.f.s f8, f10, cc(0)");
+ COMPARE(c_d(F, f8, f10, 0), "462a4030 c.f.d f8, f10, cc(0)");
+
+ COMPARE(c_s(UN, f8, f10, 2), "460a4231 c.un.s f8, f10, cc(2)");
+ COMPARE(c_d(UN, f8, f10, 2), "462a4231 c.un.d f8, f10, cc(2)");
+
+ COMPARE(c_s(EQ, f8, f10, 4), "460a4432 c.eq.s f8, f10, cc(4)");
+ COMPARE(c_d(EQ, f8, f10, 4), "462a4432 c.eq.d f8, f10, cc(4)");
+
+ COMPARE(c_s(UEQ, f8, f10, 6), "460a4633 c.ueq.s f8, f10, cc(6)");
+ COMPARE(c_d(UEQ, f8, f10, 6), "462a4633 c.ueq.d f8, f10, cc(6)");
+
+ COMPARE(c_s(OLT, f8, f10, 0), "460a4034 c.olt.s f8, f10, cc(0)");
+ COMPARE(c_d(OLT, f8, f10, 0), "462a4034 c.olt.d f8, f10, cc(0)");
+
+ COMPARE(c_s(ULT, f8, f10, 2), "460a4235 c.ult.s f8, f10, cc(2)");
+ COMPARE(c_d(ULT, f8, f10, 2), "462a4235 c.ult.d f8, f10, cc(2)");
+
+ COMPARE(c_s(OLE, f8, f10, 4), "460a4436 c.ole.s f8, f10, cc(4)");
+ COMPARE(c_d(OLE, f8, f10, 4), "462a4436 c.ole.d f8, f10, cc(4)");
+
+ COMPARE(c_s(ULE, f8, f10, 6), "460a4637 c.ule.s f8, f10, cc(6)");
+ COMPARE(c_d(ULE, f8, f10, 6), "462a4637 c.ule.d f8, f10, cc(6)");
+
+ VERIFY_RUN();
+ }
+}
+
+
+TEST(COND_FMT_DISASM) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ SET_UP();
+
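+    // On r6, cmp.cond.fmt replaces c.cond.fmt and writes an all-ones or
+    // all-zeros mask to fd instead of setting a condition code.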
+ COMPARE(cmp_s(F, f6, f8, f10), "468a4180 cmp.af.s f6, f8, f10");
+ COMPARE(cmp_d(F, f6, f8, f10), "46aa4180 cmp.af.d f6, f8, f10");
+
+ COMPARE(cmp_s(UN, f6, f8, f10), "468a4181 cmp.un.s f6, f8, f10");
+ COMPARE(cmp_d(UN, f6, f8, f10), "46aa4181 cmp.un.d f6, f8, f10");
+
+ COMPARE(cmp_s(EQ, f6, f8, f10), "468a4182 cmp.eq.s f6, f8, f10");
+ COMPARE(cmp_d(EQ, f6, f8, f10), "46aa4182 cmp.eq.d f6, f8, f10");
+
+ COMPARE(cmp_s(UEQ, f6, f8, f10), "468a4183 cmp.ueq.s f6, f8, f10");
+ COMPARE(cmp_d(UEQ, f6, f8, f10), "46aa4183 cmp.ueq.d f6, f8, f10");
+
+ COMPARE(cmp_s(LT, f6, f8, f10), "468a4184 cmp.lt.s f6, f8, f10");
+ COMPARE(cmp_d(LT, f6, f8, f10), "46aa4184 cmp.lt.d f6, f8, f10");
+
+ COMPARE(cmp_s(ULT, f6, f8, f10), "468a4185 cmp.ult.s f6, f8, f10");
+ COMPARE(cmp_d(ULT, f6, f8, f10), "46aa4185 cmp.ult.d f6, f8, f10");
+
+ COMPARE(cmp_s(LE, f6, f8, f10), "468a4186 cmp.le.s f6, f8, f10");
+ COMPARE(cmp_d(LE, f6, f8, f10), "46aa4186 cmp.le.d f6, f8, f10");
+
+ COMPARE(cmp_s(ULE, f6, f8, f10), "468a4187 cmp.ule.s f6, f8, f10");
+ COMPARE(cmp_d(ULE, f6, f8, f10), "46aa4187 cmp.ule.d f6, f8, f10");
+
+ COMPARE(cmp_s(ORD, f6, f8, f10), "468a4191 cmp.or.s f6, f8, f10");
+ COMPARE(cmp_d(ORD, f6, f8, f10), "46aa4191 cmp.or.d f6, f8, f10");
+
+ COMPARE(cmp_s(UNE, f6, f8, f10), "468a4192 cmp.une.s f6, f8, f10");
+ COMPARE(cmp_d(UNE, f6, f8, f10), "46aa4192 cmp.une.d f6, f8, f10");
+
+ COMPARE(cmp_s(NE, f6, f8, f10), "468a4193 cmp.ne.s f6, f8, f10");
+ COMPARE(cmp_d(NE, f6, f8, f10), "46aa4193 cmp.ne.d f6, f8, f10");
+
+ VERIFY_RUN();
+ }
+}
+
+
+TEST(CVT_DISASM) {
+ SET_UP();
+ COMPARE(cvt_d_s(f22, f24), "4600c5a1 cvt.d.s f22, f24");
+ COMPARE(cvt_d_w(f22, f24), "4680c5a1 cvt.d.w f22, f24");
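+  // On MIPS32, the 64-bit (L-format) conversions need r2 or r6.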
+  if (IsMipsArchVariant(kMips32r6) || IsMipsArchVariant(kMips32r2)) {
+    COMPARE(cvt_d_l(f22, f24), "46a0c5a1 cvt.d.l f22, f24");
+    COMPARE(cvt_l_s(f22, f24), "4600c5a5 cvt.l.s f22, f24");
+    COMPARE(cvt_l_d(f22, f24), "4620c5a5 cvt.l.d f22, f24");
+  }
+
+ COMPARE(cvt_s_d(f22, f24), "4620c5a0 cvt.s.d f22, f24");
+ COMPARE(cvt_s_w(f22, f24), "4680c5a0 cvt.s.w f22, f24");
+ if (IsMipsArchVariant(kMips32r6) || IsMipsArchVariant(kMips32r2)) {
+ COMPARE(cvt_s_l(f22, f24), "46a0c5a0 cvt.s.l f22, f24");
+ }
+
+ VERIFY_RUN();
+}
COMPARE(ext_(v0, v1, 0, 32),
"7c62f800 ext v0, v1, 0, 32");
+ COMPARE(add_s(f4, f6, f8), "46083100 add.s f4, f6, f8");
+ COMPARE(add_d(f12, f14, f16), "46307300 add.d f12, f14, f16");
+
+ if (kArchVariant == kMips64r6) {
+ COMPARE(bitswap(a0, a1), "7c052020 bitswap a0, a1");
+ COMPARE(bitswap(t8, s0), "7c10c020 bitswap t8, s0");
+ COMPARE(dbitswap(a0, a1), "7c052024 dbitswap a0, a1");
+ COMPARE(dbitswap(t8, s0), "7c10c024 dbitswap t8, s0");
+ }
+
+ COMPARE(abs_s(f6, f8), "46004185 abs.s f6, f8");
+ COMPARE(abs_d(f10, f12), "46206285 abs.d f10, f12");
+
+ COMPARE(div_s(f2, f4, f6), "46062083 div.s f2, f4, f6");
+ COMPARE(div_d(f2, f4, f6), "46262083 div.d f2, f4, f6");
+
VERIFY_RUN();
}
}
VERIFY_RUN();
}
+
+
+TEST(Type2) {
+ if (kArchVariant == kMips64r6) {
+ SET_UP();
+
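+    // class.fmt classifies fs into a bitmask written to fd; r6 only.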
+ COMPARE(class_s(f3, f4), "460020db class.s f3, f4");
+ COMPARE(class_d(f2, f3), "4620189b class.d f2, f3");
+
+ VERIFY_RUN();
+ }
+}
+
+
+TEST(C_FMT_DISASM) {
+ if (kArchVariant == kMips64r2) {
+ SET_UP();
+
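+    // Pre-r6 compares set a condition code; check the cc field decoding.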
+ COMPARE(c_s(F, f8, f10, 0), "460a4030 c.f.s f8, f10, cc(0)");
+ COMPARE(c_d(F, f8, f10, 0), "462a4030 c.f.d f8, f10, cc(0)");
+
+ COMPARE(c_s(UN, f8, f10, 2), "460a4231 c.un.s f8, f10, cc(2)");
+ COMPARE(c_d(UN, f8, f10, 2), "462a4231 c.un.d f8, f10, cc(2)");
+
+ COMPARE(c_s(EQ, f8, f10, 4), "460a4432 c.eq.s f8, f10, cc(4)");
+ COMPARE(c_d(EQ, f8, f10, 4), "462a4432 c.eq.d f8, f10, cc(4)");
+
+ COMPARE(c_s(UEQ, f8, f10, 6), "460a4633 c.ueq.s f8, f10, cc(6)");
+ COMPARE(c_d(UEQ, f8, f10, 6), "462a4633 c.ueq.d f8, f10, cc(6)");
+
+ COMPARE(c_s(OLT, f8, f10, 0), "460a4034 c.olt.s f8, f10, cc(0)");
+ COMPARE(c_d(OLT, f8, f10, 0), "462a4034 c.olt.d f8, f10, cc(0)");
+
+ COMPARE(c_s(ULT, f8, f10, 2), "460a4235 c.ult.s f8, f10, cc(2)");
+ COMPARE(c_d(ULT, f8, f10, 2), "462a4235 c.ult.d f8, f10, cc(2)");
+
+ COMPARE(c_s(OLE, f8, f10, 4), "460a4436 c.ole.s f8, f10, cc(4)");
+ COMPARE(c_d(OLE, f8, f10, 4), "462a4436 c.ole.d f8, f10, cc(4)");
+
+ COMPARE(c_s(ULE, f8, f10, 6), "460a4637 c.ule.s f8, f10, cc(6)");
+ COMPARE(c_d(ULE, f8, f10, 6), "462a4637 c.ule.d f8, f10, cc(6)");
+
+ VERIFY_RUN();
+ }
+}
+
+
+TEST(COND_FMT_DISASM) {
+ if (kArchVariant == kMips64r6) {
+ SET_UP();
+
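+    // r6 compares write their result mask to the destination FPR.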
+ COMPARE(cmp_s(F, f6, f8, f10), "468a4180 cmp.af.s f6, f8, f10");
+ COMPARE(cmp_d(F, f6, f8, f10), "46aa4180 cmp.af.d f6, f8, f10");
+
+ COMPARE(cmp_s(UN, f6, f8, f10), "468a4181 cmp.un.s f6, f8, f10");
+ COMPARE(cmp_d(UN, f6, f8, f10), "46aa4181 cmp.un.d f6, f8, f10");
+
+ COMPARE(cmp_s(EQ, f6, f8, f10), "468a4182 cmp.eq.s f6, f8, f10");
+ COMPARE(cmp_d(EQ, f6, f8, f10), "46aa4182 cmp.eq.d f6, f8, f10");
+
+ COMPARE(cmp_s(UEQ, f6, f8, f10), "468a4183 cmp.ueq.s f6, f8, f10");
+ COMPARE(cmp_d(UEQ, f6, f8, f10), "46aa4183 cmp.ueq.d f6, f8, f10");
+
+ COMPARE(cmp_s(LT, f6, f8, f10), "468a4184 cmp.lt.s f6, f8, f10");
+ COMPARE(cmp_d(LT, f6, f8, f10), "46aa4184 cmp.lt.d f6, f8, f10");
+
+ COMPARE(cmp_s(ULT, f6, f8, f10), "468a4185 cmp.ult.s f6, f8, f10");
+ COMPARE(cmp_d(ULT, f6, f8, f10), "46aa4185 cmp.ult.d f6, f8, f10");
+
+ COMPARE(cmp_s(LE, f6, f8, f10), "468a4186 cmp.le.s f6, f8, f10");
+ COMPARE(cmp_d(LE, f6, f8, f10), "46aa4186 cmp.le.d f6, f8, f10");
+
+ COMPARE(cmp_s(ULE, f6, f8, f10), "468a4187 cmp.ule.s f6, f8, f10");
+ COMPARE(cmp_d(ULE, f6, f8, f10), "46aa4187 cmp.ule.d f6, f8, f10");
+
+ COMPARE(cmp_s(ORD, f6, f8, f10), "468a4191 cmp.or.s f6, f8, f10");
+ COMPARE(cmp_d(ORD, f6, f8, f10), "46aa4191 cmp.or.d f6, f8, f10");
+
+ COMPARE(cmp_s(UNE, f6, f8, f10), "468a4192 cmp.une.s f6, f8, f10");
+ COMPARE(cmp_d(UNE, f6, f8, f10), "46aa4192 cmp.une.d f6, f8, f10");
+
+ COMPARE(cmp_s(NE, f6, f8, f10), "468a4193 cmp.ne.s f6, f8, f10");
+ COMPARE(cmp_d(NE, f6, f8, f10), "46aa4193 cmp.ne.d f6, f8, f10");
+
+ VERIFY_RUN();
+ }
+}
+
+
+TEST(CVT_DISASM) {
+ SET_UP();
+ COMPARE(cvt_d_s(f22, f24), "4600c5a1 cvt.d.s f22, f24");
+ COMPARE(cvt_d_w(f22, f24), "4680c5a1 cvt.d.w f22, f24");
+  if (kArchVariant == kMips64r6 || kArchVariant == kMips64r2) {
+    COMPARE(cvt_d_l(f22, f24), "46a0c5a1 cvt.d.l f22, f24");
+    COMPARE(cvt_l_s(f22, f24), "4600c5a5 cvt.l.s f22, f24");
+    COMPARE(cvt_l_d(f22, f24), "4620c5a5 cvt.l.d f22, f24");
+  }
+
+ COMPARE(cvt_s_d(f22, f24), "4620c5a0 cvt.s.d f22, f24");
+ COMPARE(cvt_s_w(f22, f24), "4680c5a0 cvt.s.w f22, f24");
+ if (kArchVariant == kMips64r6 || kArchVariant == kMips64r2) {
+ COMPARE(cvt_s_l(f22, f24), "46a0c5a0 cvt.s.l f22, f24");
+ }
+
+ VERIFY_RUN();
+}