}
+// rint.s: round fs to an integral value in single-precision floating-point
+// format, according to the current FCSR rounding mode (MIPS32r6 only).
+void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }
+
+
+// Emits RINT in the requested format (S or D): COP1 | fmt | f0 | fs | fd with
+// function field RINT. Guarded to r6 — the encoding is new in MIPS32r6.
+void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
+}
+
+
+// rint.d: double-precision variant of rint.
+void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
+
+
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
void floor_w_d(FPURegister fd, FPURegister fs);
void ceil_w_s(FPURegister fd, FPURegister fs);
void ceil_w_d(FPURegister fd, FPURegister fs);
+ void rint_s(FPURegister fd, FPURegister fs);
+ void rint_d(FPURegister fd, FPURegister fs);
+ void rint(SecondaryField fmt, FPURegister fd, FPURegister fs);
void cvt_l_s(FPURegister fd, FPURegister fs);
void cvt_l_d(FPURegister fd, FPURegister fs);
enum SecondaryField {
// SPECIAL Encoding of Function Field.
- SLL = ((0 << 3) + 0),
- MOVCI = ((0 << 3) + 1),
- SRL = ((0 << 3) + 2),
- SRA = ((0 << 3) + 3),
- SLLV = ((0 << 3) + 4),
- SRLV = ((0 << 3) + 6),
- SRAV = ((0 << 3) + 7),
-
- JR = ((1 << 3) + 0),
- JALR = ((1 << 3) + 1),
- MOVZ = ((1 << 3) + 2),
- MOVN = ((1 << 3) + 3),
- BREAK = ((1 << 3) + 5),
-
- MFHI = ((2 << 3) + 0),
- CLZ_R6 = ((2 << 3) + 0),
- CLO_R6 = ((2 << 3) + 1),
- MFLO = ((2 << 3) + 2),
-
- MULT = ((3 << 3) + 0),
- MULTU = ((3 << 3) + 1),
- DIV = ((3 << 3) + 2),
- DIVU = ((3 << 3) + 3),
-
- ADD = ((4 << 3) + 0),
- ADDU = ((4 << 3) + 1),
- SUB = ((4 << 3) + 2),
- SUBU = ((4 << 3) + 3),
- AND = ((4 << 3) + 4),
- OR = ((4 << 3) + 5),
- XOR = ((4 << 3) + 6),
- NOR = ((4 << 3) + 7),
-
- SLT = ((5 << 3) + 2),
- SLTU = ((5 << 3) + 3),
-
- TGE = ((6 << 3) + 0),
- TGEU = ((6 << 3) + 1),
- TLT = ((6 << 3) + 2),
- TLTU = ((6 << 3) + 3),
- TEQ = ((6 << 3) + 4),
- SELEQZ_S = ((6 << 3) + 5),
- TNE = ((6 << 3) + 6),
- SELNEZ_S = ((6 << 3) + 7),
+ SLL = ((0 << 3) + 0),
+ MOVCI = ((0 << 3) + 1),
+ SRL = ((0 << 3) + 2),
+ SRA = ((0 << 3) + 3),
+ SLLV = ((0 << 3) + 4),
+ SRLV = ((0 << 3) + 6),
+ SRAV = ((0 << 3) + 7),
+
+ JR = ((1 << 3) + 0),
+ JALR = ((1 << 3) + 1),
+ MOVZ = ((1 << 3) + 2),
+ MOVN = ((1 << 3) + 3),
+ BREAK = ((1 << 3) + 5),
+
+ MFHI = ((2 << 3) + 0),
+ CLZ_R6 = ((2 << 3) + 0),
+ CLO_R6 = ((2 << 3) + 1),
+ MFLO = ((2 << 3) + 2),
+
+ MULT = ((3 << 3) + 0),
+ MULTU = ((3 << 3) + 1),
+ DIV = ((3 << 3) + 2),
+ DIVU = ((3 << 3) + 3),
+
+ ADD = ((4 << 3) + 0),
+ ADDU = ((4 << 3) + 1),
+ SUB = ((4 << 3) + 2),
+ SUBU = ((4 << 3) + 3),
+ AND = ((4 << 3) + 4),
+ OR = ((4 << 3) + 5),
+ XOR = ((4 << 3) + 6),
+ NOR = ((4 << 3) + 7),
+
+ SLT = ((5 << 3) + 2),
+ SLTU = ((5 << 3) + 3),
+
+ TGE = ((6 << 3) + 0),
+ TGEU = ((6 << 3) + 1),
+ TLT = ((6 << 3) + 2),
+ TLTU = ((6 << 3) + 3),
+ TEQ = ((6 << 3) + 4),
+ SELEQZ_S = ((6 << 3) + 5),
+ TNE = ((6 << 3) + 6),
+ SELNEZ_S = ((6 << 3) + 7),
// Multiply integers in r6.
- MUL_MUH = ((3 << 3) + 0), // MUL, MUH.
- MUL_MUH_U = ((3 << 3) + 1), // MUL_U, MUH_U.
+ MUL_MUH = ((3 << 3) + 0), // MUL, MUH.
+ MUL_MUH_U = ((3 << 3) + 1), // MUL_U, MUH_U.
+ RINT = ((3 << 3) + 2),
- MUL_OP = ((0 << 3) + 2),
- MUH_OP = ((0 << 3) + 3),
- DIV_OP = ((0 << 3) + 2),
- MOD_OP = ((0 << 3) + 3),
+ MUL_OP = ((0 << 3) + 2),
+ MUH_OP = ((0 << 3) + 3),
+ DIV_OP = ((0 << 3) + 2),
+ MOD_OP = ((0 << 3) + 3),
- DIV_MOD = ((3 << 3) + 2),
- DIV_MOD_U = ((3 << 3) + 3),
+ DIV_MOD = ((3 << 3) + 2),
+ DIV_MOD_U = ((3 << 3) + 3),
// SPECIAL2 Encoding of Function Field.
- MUL = ((0 << 3) + 2),
- CLZ = ((4 << 3) + 0),
- CLO = ((4 << 3) + 1),
+ MUL = ((0 << 3) + 2),
+ CLZ = ((4 << 3) + 0),
+ CLO = ((4 << 3) + 1),
// SPECIAL3 Encoding of Function Field.
- EXT = ((0 << 3) + 0),
- INS = ((0 << 3) + 4),
+ EXT = ((0 << 3) + 0),
+ INS = ((0 << 3) + 4),
// REGIMM encoding of rt Field.
- BLTZ = ((0 << 3) + 0) << 16,
- BGEZ = ((0 << 3) + 1) << 16,
- BLTZAL = ((2 << 3) + 0) << 16,
- BGEZAL = ((2 << 3) + 1) << 16,
- BGEZALL = ((2 << 3) + 3) << 16,
+ BLTZ = ((0 << 3) + 0) << 16,
+ BGEZ = ((0 << 3) + 1) << 16,
+ BLTZAL = ((2 << 3) + 0) << 16,
+ BGEZAL = ((2 << 3) + 1) << 16,
+ BGEZALL = ((2 << 3) + 3) << 16,
// COP1 Encoding of rs Field.
- MFC1 = ((0 << 3) + 0) << 21,
- CFC1 = ((0 << 3) + 2) << 21,
- MFHC1 = ((0 << 3) + 3) << 21,
- MTC1 = ((0 << 3) + 4) << 21,
- CTC1 = ((0 << 3) + 6) << 21,
- MTHC1 = ((0 << 3) + 7) << 21,
- BC1 = ((1 << 3) + 0) << 21,
- S = ((2 << 3) + 0) << 21,
- D = ((2 << 3) + 1) << 21,
- W = ((2 << 3) + 4) << 21,
- L = ((2 << 3) + 5) << 21,
- PS = ((2 << 3) + 6) << 21,
+ MFC1 = ((0 << 3) + 0) << 21,
+ CFC1 = ((0 << 3) + 2) << 21,
+ MFHC1 = ((0 << 3) + 3) << 21,
+ MTC1 = ((0 << 3) + 4) << 21,
+ CTC1 = ((0 << 3) + 6) << 21,
+ MTHC1 = ((0 << 3) + 7) << 21,
+ BC1 = ((1 << 3) + 0) << 21,
+ S = ((2 << 3) + 0) << 21,
+ D = ((2 << 3) + 1) << 21,
+ W = ((2 << 3) + 4) << 21,
+ L = ((2 << 3) + 5) << 21,
+ PS = ((2 << 3) + 6) << 21,
// COP1 Encoding of Function Field When rs=S.
- ROUND_L_S = ((1 << 3) + 0),
- TRUNC_L_S = ((1 << 3) + 1),
- CEIL_L_S = ((1 << 3) + 2),
- FLOOR_L_S = ((1 << 3) + 3),
- ROUND_W_S = ((1 << 3) + 4),
- TRUNC_W_S = ((1 << 3) + 5),
- CEIL_W_S = ((1 << 3) + 6),
- FLOOR_W_S = ((1 << 3) + 7),
- CVT_D_S = ((4 << 3) + 1),
- CVT_W_S = ((4 << 3) + 4),
- CVT_L_S = ((4 << 3) + 5),
- CVT_PS_S = ((4 << 3) + 6),
+ ROUND_L_S = ((1 << 3) + 0),
+ TRUNC_L_S = ((1 << 3) + 1),
+ CEIL_L_S = ((1 << 3) + 2),
+ FLOOR_L_S = ((1 << 3) + 3),
+ ROUND_W_S = ((1 << 3) + 4),
+ TRUNC_W_S = ((1 << 3) + 5),
+ CEIL_W_S = ((1 << 3) + 6),
+ FLOOR_W_S = ((1 << 3) + 7),
+ CVT_D_S = ((4 << 3) + 1),
+ CVT_W_S = ((4 << 3) + 4),
+ CVT_L_S = ((4 << 3) + 5),
+ CVT_PS_S = ((4 << 3) + 6),
// COP1 Encoding of Function Field When rs=D.
- ADD_D = ((0 << 3) + 0),
- SUB_D = ((0 << 3) + 1),
- MUL_D = ((0 << 3) + 2),
- DIV_D = ((0 << 3) + 3),
- SQRT_D = ((0 << 3) + 4),
- ABS_D = ((0 << 3) + 5),
- MOV_D = ((0 << 3) + 6),
- NEG_D = ((0 << 3) + 7),
- ROUND_L_D = ((1 << 3) + 0),
- TRUNC_L_D = ((1 << 3) + 1),
- CEIL_L_D = ((1 << 3) + 2),
- FLOOR_L_D = ((1 << 3) + 3),
- ROUND_W_D = ((1 << 3) + 4),
- TRUNC_W_D = ((1 << 3) + 5),
- CEIL_W_D = ((1 << 3) + 6),
- FLOOR_W_D = ((1 << 3) + 7),
- MIN = ((3 << 3) + 4),
- MINA = ((3 << 3) + 5),
- MAX = ((3 << 3) + 6),
- MAXA = ((3 << 3) + 7),
- CVT_S_D = ((4 << 3) + 0),
- CVT_W_D = ((4 << 3) + 4),
- CVT_L_D = ((4 << 3) + 5),
- C_F_D = ((6 << 3) + 0),
- C_UN_D = ((6 << 3) + 1),
- C_EQ_D = ((6 << 3) + 2),
- C_UEQ_D = ((6 << 3) + 3),
- C_OLT_D = ((6 << 3) + 4),
- C_ULT_D = ((6 << 3) + 5),
- C_OLE_D = ((6 << 3) + 6),
- C_ULE_D = ((6 << 3) + 7),
+ ADD_D = ((0 << 3) + 0),
+ SUB_D = ((0 << 3) + 1),
+ MUL_D = ((0 << 3) + 2),
+ DIV_D = ((0 << 3) + 3),
+ SQRT_D = ((0 << 3) + 4),
+ ABS_D = ((0 << 3) + 5),
+ MOV_D = ((0 << 3) + 6),
+ NEG_D = ((0 << 3) + 7),
+ ROUND_L_D = ((1 << 3) + 0),
+ TRUNC_L_D = ((1 << 3) + 1),
+ CEIL_L_D = ((1 << 3) + 2),
+ FLOOR_L_D = ((1 << 3) + 3),
+ ROUND_W_D = ((1 << 3) + 4),
+ TRUNC_W_D = ((1 << 3) + 5),
+ CEIL_W_D = ((1 << 3) + 6),
+ FLOOR_W_D = ((1 << 3) + 7),
+ MIN = ((3 << 3) + 4),
+ MINA = ((3 << 3) + 5),
+ MAX = ((3 << 3) + 6),
+ MAXA = ((3 << 3) + 7),
+ CVT_S_D = ((4 << 3) + 0),
+ CVT_W_D = ((4 << 3) + 4),
+ CVT_L_D = ((4 << 3) + 5),
+ C_F_D = ((6 << 3) + 0),
+ C_UN_D = ((6 << 3) + 1),
+ C_EQ_D = ((6 << 3) + 2),
+ C_UEQ_D = ((6 << 3) + 3),
+ C_OLT_D = ((6 << 3) + 4),
+ C_ULT_D = ((6 << 3) + 5),
+ C_OLE_D = ((6 << 3) + 6),
+ C_ULE_D = ((6 << 3) + 7),
// COP1 Encoding of Function Field When rs=W or L.
- CVT_S_W = ((4 << 3) + 0),
- CVT_D_W = ((4 << 3) + 1),
- CVT_S_L = ((4 << 3) + 0),
- CVT_D_L = ((4 << 3) + 1),
- BC1EQZ = ((2 << 2) + 1) << 21,
- BC1NEZ = ((3 << 2) + 1) << 21,
+ CVT_S_W = ((4 << 3) + 0),
+ CVT_D_W = ((4 << 3) + 1),
+ CVT_S_L = ((4 << 3) + 0),
+ CVT_D_L = ((4 << 3) + 1),
+ BC1EQZ = ((2 << 2) + 1) << 21,
+ BC1NEZ = ((3 << 2) + 1) << 21,
// COP1 CMP positive predicates Bit 5..4 = 00.
- CMP_AF = ((0 << 3) + 0),
- CMP_UN = ((0 << 3) + 1),
- CMP_EQ = ((0 << 3) + 2),
- CMP_UEQ = ((0 << 3) + 3),
- CMP_LT = ((0 << 3) + 4),
- CMP_ULT = ((0 << 3) + 5),
- CMP_LE = ((0 << 3) + 6),
- CMP_ULE = ((0 << 3) + 7),
- CMP_SAF = ((1 << 3) + 0),
- CMP_SUN = ((1 << 3) + 1),
- CMP_SEQ = ((1 << 3) + 2),
- CMP_SUEQ = ((1 << 3) + 3),
- CMP_SSLT = ((1 << 3) + 4),
- CMP_SSULT = ((1 << 3) + 5),
- CMP_SLE = ((1 << 3) + 6),
- CMP_SULE = ((1 << 3) + 7),
+ CMP_AF = ((0 << 3) + 0),
+ CMP_UN = ((0 << 3) + 1),
+ CMP_EQ = ((0 << 3) + 2),
+ CMP_UEQ = ((0 << 3) + 3),
+ CMP_LT = ((0 << 3) + 4),
+ CMP_ULT = ((0 << 3) + 5),
+ CMP_LE = ((0 << 3) + 6),
+ CMP_ULE = ((0 << 3) + 7),
+ CMP_SAF = ((1 << 3) + 0),
+ CMP_SUN = ((1 << 3) + 1),
+ CMP_SEQ = ((1 << 3) + 2),
+ CMP_SUEQ = ((1 << 3) + 3),
+ CMP_SSLT = ((1 << 3) + 4),
+ CMP_SSULT = ((1 << 3) + 5),
+ CMP_SLE = ((1 << 3) + 6),
+ CMP_SULE = ((1 << 3) + 7),
// COP1 CMP negative predicates Bit 5..4 = 01.
- CMP_AT = ((2 << 3) + 0), // Reserved, not implemented.
- CMP_OR = ((2 << 3) + 1),
- CMP_UNE = ((2 << 3) + 2),
- CMP_NE = ((2 << 3) + 3),
- CMP_UGE = ((2 << 3) + 4), // Reserved, not implemented.
- CMP_OGE = ((2 << 3) + 5), // Reserved, not implemented.
- CMP_UGT = ((2 << 3) + 6), // Reserved, not implemented.
- CMP_OGT = ((2 << 3) + 7), // Reserved, not implemented.
- CMP_SAT = ((3 << 3) + 0), // Reserved, not implemented.
- CMP_SOR = ((3 << 3) + 1),
- CMP_SUNE = ((3 << 3) + 2),
- CMP_SNE = ((3 << 3) + 3),
- CMP_SUGE = ((3 << 3) + 4), // Reserved, not implemented.
- CMP_SOGE = ((3 << 3) + 5), // Reserved, not implemented.
- CMP_SUGT = ((3 << 3) + 6), // Reserved, not implemented.
- CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented.
-
- SEL = ((2 << 3) + 0),
- SELEQZ_C = ((2 << 3) + 4), // COP1 on FPR registers.
- SELNEZ_C = ((2 << 3) + 7), // COP1 on FPR registers.
+ CMP_AT = ((2 << 3) + 0), // Reserved, not implemented.
+ CMP_OR = ((2 << 3) + 1),
+ CMP_UNE = ((2 << 3) + 2),
+ CMP_NE = ((2 << 3) + 3),
+ CMP_UGE = ((2 << 3) + 4), // Reserved, not implemented.
+ CMP_OGE = ((2 << 3) + 5), // Reserved, not implemented.
+ CMP_UGT = ((2 << 3) + 6), // Reserved, not implemented.
+ CMP_OGT = ((2 << 3) + 7), // Reserved, not implemented.
+ CMP_SAT = ((3 << 3) + 0), // Reserved, not implemented.
+ CMP_SOR = ((3 << 3) + 1),
+ CMP_SUNE = ((3 << 3) + 2),
+ CMP_SNE = ((3 << 3) + 3),
+ CMP_SUGE = ((3 << 3) + 4), // Reserved, not implemented.
+ CMP_SOGE = ((3 << 3) + 5), // Reserved, not implemented.
+ CMP_SUGT = ((3 << 3) + 6), // Reserved, not implemented.
+ CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented.
+
+ SEL = ((2 << 3) + 0),
+ SELEQZ_C = ((2 << 3) + 4), // COP1 on FPR registers.
+ SELNEZ_C = ((2 << 3) + 7), // COP1 on FPR registers.
// COP1 Encoding of Function Field When rs=PS.
// COP1X Encoding of Function Field.
- MADD_D = ((4 << 3) + 1),
+ MADD_D = ((4 << 3) + 1),
- NULLSF = 0
+ NULLSF = 0
};
bool Decoder::DecodeTypeRegisterRsType(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
+ case RINT:
+ Format(instr, "rint.'t 'fd, 'fs");
+ break;
case MIN:
Format(instr, "min.'t 'fd, 'fs, 'ft");
break;
}
+// Installs `mode` in FCSR's rounding-mode (RM) field.
+void Simulator::set_fcsr_rounding_mode(FPURoundingMode mode) {
+ // Clear the previous mode bits before OR-ing in the new ones. A plain
+ // `FCSR_ |= mode` can only set bits, so it could never switch back to
+ // kRoundToNearest (encoded as 0), nor change e.g. kRoundToMinusInf (3)
+ // into kRoundToZero (1).
+ FCSR_ = (FCSR_ & ~kFPURoundingModeMask) | (mode & kFPURoundingModeMask);
+}
+
+
+// Returns the current FCSR rounding mode (the RM field, low two bits).
+unsigned int Simulator::get_fcsr_rounding_mode() {
+ return FCSR_ & kFPURoundingModeMask;
+}
+
+
// Sets the rounding error codes in FCSR based on the result of the rounding.
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round_error(double original, double rounded) {
}
+// Rounds fs per the current FCSR rounding mode, returning both the rounded
+// double and its int32 conversion through the reference parameters.
+// NOTE(review): `toRound` is unused — callers pass the same value as `fs`;
+// consider dropping one of the two parameters.
+void Simulator::round_according_to_fcsr(double toRound, double& rounded,
+ int32_t& rounded_int, double fs) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, the tie is broken toward the even
+ // integer (see the kRoundToNearest case below). Behave like round_w_d.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result. Behave like trunc_w_d.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up. Behave like ceil_w_d.
+
+ // 3 RD (round down, or toward −infinity): Round a result to
+ // the next representable value down. Behave like floor_w_d.
+ switch (get_fcsr_rounding_mode()) {
+ case kRoundToNearest:
+ rounded = std::floor(fs + 0.5);
+ rounded_int = static_cast<int32_t>(rounded);
+ if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ // NOTE(review): only rounded_int is corrected here; `rounded`
+ // keeps floor(fs + 0.5) — confirm callers of
+ // set_fcsr_round_error tolerate that at exact ties.
+ rounded_int--;
+ }
+ break;
+ case kRoundToZero:
+ rounded = trunc(fs);
+ rounded_int = static_cast<int32_t>(rounded);
+ break;
+ case kRoundToPlusInf:
+ rounded = std::ceil(fs);
+ rounded_int = static_cast<int32_t>(rounded);
+ break;
+ case kRoundToMinusInf:
+ rounded = std::floor(fs);
+ rounded_int = static_cast<int32_t>(rounded);
+ break;
+ }
+}
+
+
// Raw access to the PC register.
void Simulator::set_pc(int32_t value) {
pc_modified_ = true;
cc = instr->FCccValue();
fcsr_cc = get_fcsr_condition_bit(cc);
switch (instr->FunctionFieldRaw()) {
+ // RINT.fmt (r6): round fs to an integral value in FP format according
+ // to the FCSR rounding mode; sets the inexact flag if the value changed.
+ case RINT: {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ double result, temp, temp_result;
+ double upper = std::ceil(fs);
+ double lower = std::floor(fs);
+ switch (get_fcsr_rounding_mode()) {
+ case kRoundToNearest:
+ if (upper - fs < fs - lower) {
+ result = upper;
+ } else if (upper - fs > fs - lower) {
+ result = lower;
+ } else {
+ // Exact tie: pick the even neighbour. upper is even iff
+ // upper / 2 has no fractional part.
+ temp_result = upper / 2;
+ double reminder = modf(temp_result, &temp);
+ if (reminder == 0) {
+ result = upper;
+ } else {
+ result = lower;
+ }
+ }
+ break;
+ case kRoundToZero:
+ // Truncate toward zero: floor for positive values, ceil otherwise.
+ result = (fs > 0 ? lower : upper);
+ break;
+ case kRoundToPlusInf:
+ result = upper;
+ break;
+ case kRoundToMinusInf:
+ result = lower;
+ break;
+ }
+ set_fpu_register_double(fd_reg, result);
+ if (result != fs) {
+ set_fcsr_bit(kFCSRInexactFlagBit, true);
+ }
+ break;
+ }
case SEL:
DCHECK(IsMipsArchVariant(kMips32r6));
set_fpu_register_double(fd_reg, (fd_int & 0x1) == 0 ? fs : ft);
case C_ULE_D:
set_fcsr_bit(fcsr_cc, (fs <= ft) || (std::isnan(fs) || std::isnan(ft)));
break;
- case CVT_W_D: // Convert double to word.
- // Rounding modes are not yet supported.
- DCHECK((FCSR_ & 3) == 0),
- // In rounding mode 0 it should behave like ROUND.
+ case CVT_W_D: { // Convert double to word.
+ // Now honors the FCSR rounding mode instead of assuming
+ // round-to-nearest (the old code DCHECKed mode == 0).
+ double rounded;
+ int32_t result;
+ round_according_to_fcsr(fs, rounded, result, fs);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ // Invalid/overflowing conversion: write the canonical 32-bit
+ // invalid-result pattern.
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ } break;
case ROUND_W_D: // Round double to word (round half to even).
{
double rounded = std::floor(fs + 0.5);
double get_fpu_register_double(int fpureg) const;
void set_fcsr_bit(uint32_t cc, bool value);
bool test_fcsr_bit(uint32_t cc);
+ void set_fcsr_rounding_mode(FPURoundingMode mode);
+ unsigned int get_fcsr_rounding_mode();
bool set_fcsr_round_error(double original, double rounded);
-
+ void round_according_to_fcsr(double toRound, double& rounded,
+ int32_t& rounded_int, double fs);
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
int32_t get_pc() const;
}
+// rint.s: round fs to an integral value in single-precision floating-point
+// format, according to the current FCSR rounding mode (MIPS64r6 only).
+void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }
+
+
+// rint.d: double-precision variant of rint.
+void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
+
+
+// Emits RINT in the requested format: COP1 | fmt | f0 | fs | fd with
+// function field RINT. Guarded to r6 — the encoding is new in MIPS64r6.
+void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
+ DCHECK(kArchVariant == kMips64r6);
+ // Bug fix: encode the requested format instead of hard-coding D;
+ // otherwise rint_s would assemble rint.d (cf. the mips32 version).
+ GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
+}
+
+
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
DCHECK(kArchVariant == kMips64r2);
GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
void floor_w_d(FPURegister fd, FPURegister fs);
void ceil_w_s(FPURegister fd, FPURegister fs);
void ceil_w_d(FPURegister fd, FPURegister fs);
+ void rint_s(FPURegister fd, FPURegister fs);
+ void rint_d(FPURegister fd, FPURegister fs);
+ void rint(SecondaryField fmt, FPURegister fd, FPURegister fs);
+
void cvt_l_s(FPURegister fd, FPURegister fs);
void cvt_l_d(FPURegister fd, FPURegister fs);
enum SecondaryField {
// SPECIAL Encoding of Function Field.
- SLL = ((0 << 3) + 0),
- MOVCI = ((0 << 3) + 1),
- SRL = ((0 << 3) + 2),
- SRA = ((0 << 3) + 3),
- SLLV = ((0 << 3) + 4),
- SRLV = ((0 << 3) + 6),
- SRAV = ((0 << 3) + 7),
-
- JR = ((1 << 3) + 0),
- JALR = ((1 << 3) + 1),
- MOVZ = ((1 << 3) + 2),
- MOVN = ((1 << 3) + 3),
- BREAK = ((1 << 3) + 5),
-
- MFHI = ((2 << 3) + 0),
- CLZ_R6 = ((2 << 3) + 0),
- CLO_R6 = ((2 << 3) + 1),
- MFLO = ((2 << 3) + 2),
- DSLLV = ((2 << 3) + 4),
- DSRLV = ((2 << 3) + 6),
- DSRAV = ((2 << 3) + 7),
-
- MULT = ((3 << 3) + 0),
- MULTU = ((3 << 3) + 1),
- DIV = ((3 << 3) + 2),
- DIVU = ((3 << 3) + 3),
- DMULT = ((3 << 3) + 4),
- DMULTU = ((3 << 3) + 5),
- DDIV = ((3 << 3) + 6),
- DDIVU = ((3 << 3) + 7),
-
- ADD = ((4 << 3) + 0),
- ADDU = ((4 << 3) + 1),
- SUB = ((4 << 3) + 2),
- SUBU = ((4 << 3) + 3),
- AND = ((4 << 3) + 4),
- OR = ((4 << 3) + 5),
- XOR = ((4 << 3) + 6),
- NOR = ((4 << 3) + 7),
-
- SLT = ((5 << 3) + 2),
- SLTU = ((5 << 3) + 3),
- DADD = ((5 << 3) + 4),
- DADDU = ((5 << 3) + 5),
- DSUB = ((5 << 3) + 6),
- DSUBU = ((5 << 3) + 7),
-
- TGE = ((6 << 3) + 0),
- TGEU = ((6 << 3) + 1),
- TLT = ((6 << 3) + 2),
- TLTU = ((6 << 3) + 3),
- TEQ = ((6 << 3) + 4),
- SELEQZ_S = ((6 << 3) + 5),
- TNE = ((6 << 3) + 6),
- SELNEZ_S = ((6 << 3) + 7),
-
- DSLL = ((7 << 3) + 0),
- DSRL = ((7 << 3) + 2),
- DSRA = ((7 << 3) + 3),
- DSLL32 = ((7 << 3) + 4),
- DSRL32 = ((7 << 3) + 6),
- DSRA32 = ((7 << 3) + 7),
+ SLL = ((0 << 3) + 0),
+ MOVCI = ((0 << 3) + 1),
+ SRL = ((0 << 3) + 2),
+ SRA = ((0 << 3) + 3),
+ SLLV = ((0 << 3) + 4),
+ SRLV = ((0 << 3) + 6),
+ SRAV = ((0 << 3) + 7),
+
+ JR = ((1 << 3) + 0),
+ JALR = ((1 << 3) + 1),
+ MOVZ = ((1 << 3) + 2),
+ MOVN = ((1 << 3) + 3),
+ BREAK = ((1 << 3) + 5),
+
+ MFHI = ((2 << 3) + 0),
+ CLZ_R6 = ((2 << 3) + 0),
+ CLO_R6 = ((2 << 3) + 1),
+ MFLO = ((2 << 3) + 2),
+ DSLLV = ((2 << 3) + 4),
+ DSRLV = ((2 << 3) + 6),
+ DSRAV = ((2 << 3) + 7),
+
+ MULT = ((3 << 3) + 0),
+ MULTU = ((3 << 3) + 1),
+ DIV = ((3 << 3) + 2),
+ DIVU = ((3 << 3) + 3),
+ DMULT = ((3 << 3) + 4),
+ DMULTU = ((3 << 3) + 5),
+ DDIV = ((3 << 3) + 6),
+ DDIVU = ((3 << 3) + 7),
+
+ ADD = ((4 << 3) + 0),
+ ADDU = ((4 << 3) + 1),
+ SUB = ((4 << 3) + 2),
+ SUBU = ((4 << 3) + 3),
+ AND = ((4 << 3) + 4),
+ OR = ((4 << 3) + 5),
+ XOR = ((4 << 3) + 6),
+ NOR = ((4 << 3) + 7),
+
+ SLT = ((5 << 3) + 2),
+ SLTU = ((5 << 3) + 3),
+ DADD = ((5 << 3) + 4),
+ DADDU = ((5 << 3) + 5),
+ DSUB = ((5 << 3) + 6),
+ DSUBU = ((5 << 3) + 7),
+
+ TGE = ((6 << 3) + 0),
+ TGEU = ((6 << 3) + 1),
+ TLT = ((6 << 3) + 2),
+ TLTU = ((6 << 3) + 3),
+ TEQ = ((6 << 3) + 4),
+ SELEQZ_S = ((6 << 3) + 5),
+ TNE = ((6 << 3) + 6),
+ SELNEZ_S = ((6 << 3) + 7),
+
+ DSLL = ((7 << 3) + 0),
+ DSRL = ((7 << 3) + 2),
+ DSRA = ((7 << 3) + 3),
+ DSLL32 = ((7 << 3) + 4),
+ DSRL32 = ((7 << 3) + 6),
+ DSRA32 = ((7 << 3) + 7),
// Multiply integers in r6.
- MUL_MUH = ((3 << 3) + 0), // MUL, MUH.
- MUL_MUH_U = ((3 << 3) + 1), // MUL_U, MUH_U.
- D_MUL_MUH = ((7 << 2) + 0), // DMUL, DMUH.
+ MUL_MUH = ((3 << 3) + 0), // MUL, MUH.
+ MUL_MUH_U = ((3 << 3) + 1), // MUL_U, MUH_U.
+ D_MUL_MUH = ((7 << 2) + 0), // DMUL, DMUH.
D_MUL_MUH_U = ((7 << 2) + 1), // DMUL_U, DMUH_U.
+ RINT = ((3 << 3) + 2),
- MUL_OP = ((0 << 3) + 2),
- MUH_OP = ((0 << 3) + 3),
- DIV_OP = ((0 << 3) + 2),
- MOD_OP = ((0 << 3) + 3),
+ MUL_OP = ((0 << 3) + 2),
+ MUH_OP = ((0 << 3) + 3),
+ DIV_OP = ((0 << 3) + 2),
+ MOD_OP = ((0 << 3) + 3),
- DIV_MOD = ((3 << 3) + 2),
- DIV_MOD_U = ((3 << 3) + 3),
- D_DIV_MOD = ((3 << 3) + 6),
+ DIV_MOD = ((3 << 3) + 2),
+ DIV_MOD_U = ((3 << 3) + 3),
+ D_DIV_MOD = ((3 << 3) + 6),
D_DIV_MOD_U = ((3 << 3) + 7),
// drotr in special4?
// SPECIAL2 Encoding of Function Field.
- MUL = ((0 << 3) + 2),
- CLZ = ((4 << 3) + 0),
- CLO = ((4 << 3) + 1),
+ MUL = ((0 << 3) + 2),
+ CLZ = ((4 << 3) + 0),
+ CLO = ((4 << 3) + 1),
// SPECIAL3 Encoding of Function Field.
- EXT = ((0 << 3) + 0),
- DEXTM = ((0 << 3) + 1),
- DEXTU = ((0 << 3) + 2),
- DEXT = ((0 << 3) + 3),
- INS = ((0 << 3) + 4),
- DINSM = ((0 << 3) + 5),
- DINSU = ((0 << 3) + 6),
- DINS = ((0 << 3) + 7),
+ EXT = ((0 << 3) + 0),
+ DEXTM = ((0 << 3) + 1),
+ DEXTU = ((0 << 3) + 2),
+ DEXT = ((0 << 3) + 3),
+ INS = ((0 << 3) + 4),
+ DINSM = ((0 << 3) + 5),
+ DINSU = ((0 << 3) + 6),
+ DINS = ((0 << 3) + 7),
- DSBH = ((4 << 3) + 4),
+ DSBH = ((4 << 3) + 4),
// REGIMM encoding of rt Field.
- BLTZ = ((0 << 3) + 0) << 16,
- BGEZ = ((0 << 3) + 1) << 16,
- BLTZAL = ((2 << 3) + 0) << 16,
- BGEZAL = ((2 << 3) + 1) << 16,
- BGEZALL = ((2 << 3) + 3) << 16,
- DAHI = ((0 << 3) + 6) << 16,
- DATI = ((3 << 3) + 6) << 16,
+ BLTZ = ((0 << 3) + 0) << 16,
+ BGEZ = ((0 << 3) + 1) << 16,
+ BLTZAL = ((2 << 3) + 0) << 16,
+ BGEZAL = ((2 << 3) + 1) << 16,
+ BGEZALL = ((2 << 3) + 3) << 16,
+ DAHI = ((0 << 3) + 6) << 16,
+ DATI = ((3 << 3) + 6) << 16,
// COP1 Encoding of rs Field.
- MFC1 = ((0 << 3) + 0) << 21,
- DMFC1 = ((0 << 3) + 1) << 21,
- CFC1 = ((0 << 3) + 2) << 21,
- MFHC1 = ((0 << 3) + 3) << 21,
- MTC1 = ((0 << 3) + 4) << 21,
- DMTC1 = ((0 << 3) + 5) << 21,
- CTC1 = ((0 << 3) + 6) << 21,
- MTHC1 = ((0 << 3) + 7) << 21,
- BC1 = ((1 << 3) + 0) << 21,
- S = ((2 << 3) + 0) << 21,
- D = ((2 << 3) + 1) << 21,
- W = ((2 << 3) + 4) << 21,
- L = ((2 << 3) + 5) << 21,
- PS = ((2 << 3) + 6) << 21,
+ MFC1 = ((0 << 3) + 0) << 21,
+ DMFC1 = ((0 << 3) + 1) << 21,
+ CFC1 = ((0 << 3) + 2) << 21,
+ MFHC1 = ((0 << 3) + 3) << 21,
+ MTC1 = ((0 << 3) + 4) << 21,
+ DMTC1 = ((0 << 3) + 5) << 21,
+ CTC1 = ((0 << 3) + 6) << 21,
+ MTHC1 = ((0 << 3) + 7) << 21,
+ BC1 = ((1 << 3) + 0) << 21,
+ S = ((2 << 3) + 0) << 21,
+ D = ((2 << 3) + 1) << 21,
+ W = ((2 << 3) + 4) << 21,
+ L = ((2 << 3) + 5) << 21,
+ PS = ((2 << 3) + 6) << 21,
// COP1 Encoding of Function Field When rs=S.
- ROUND_L_S = ((1 << 3) + 0),
- TRUNC_L_S = ((1 << 3) + 1),
- CEIL_L_S = ((1 << 3) + 2),
- FLOOR_L_S = ((1 << 3) + 3),
- ROUND_W_S = ((1 << 3) + 4),
- TRUNC_W_S = ((1 << 3) + 5),
- CEIL_W_S = ((1 << 3) + 6),
- FLOOR_W_S = ((1 << 3) + 7),
- CVT_D_S = ((4 << 3) + 1),
- CVT_W_S = ((4 << 3) + 4),
- CVT_L_S = ((4 << 3) + 5),
- CVT_PS_S = ((4 << 3) + 6),
+ ROUND_L_S = ((1 << 3) + 0),
+ TRUNC_L_S = ((1 << 3) + 1),
+ CEIL_L_S = ((1 << 3) + 2),
+ FLOOR_L_S = ((1 << 3) + 3),
+ ROUND_W_S = ((1 << 3) + 4),
+ TRUNC_W_S = ((1 << 3) + 5),
+ CEIL_W_S = ((1 << 3) + 6),
+ FLOOR_W_S = ((1 << 3) + 7),
+ CVT_D_S = ((4 << 3) + 1),
+ CVT_W_S = ((4 << 3) + 4),
+ CVT_L_S = ((4 << 3) + 5),
+ CVT_PS_S = ((4 << 3) + 6),
// COP1 Encoding of Function Field When rs=D.
- ADD_D = ((0 << 3) + 0),
- SUB_D = ((0 << 3) + 1),
- MUL_D = ((0 << 3) + 2),
- DIV_D = ((0 << 3) + 3),
- SQRT_D = ((0 << 3) + 4),
- ABS_D = ((0 << 3) + 5),
- MOV_D = ((0 << 3) + 6),
- NEG_D = ((0 << 3) + 7),
- ROUND_L_D = ((1 << 3) + 0),
- TRUNC_L_D = ((1 << 3) + 1),
- CEIL_L_D = ((1 << 3) + 2),
- FLOOR_L_D = ((1 << 3) + 3),
- ROUND_W_D = ((1 << 3) + 4),
- TRUNC_W_D = ((1 << 3) + 5),
- CEIL_W_D = ((1 << 3) + 6),
- FLOOR_W_D = ((1 << 3) + 7),
- MIN = ((3 << 3) + 4),
- MINA = ((3 << 3) + 5),
- MAX = ((3 << 3) + 6),
- MAXA = ((3 << 3) + 7),
- CVT_S_D = ((4 << 3) + 0),
- CVT_W_D = ((4 << 3) + 4),
- CVT_L_D = ((4 << 3) + 5),
- C_F_D = ((6 << 3) + 0),
- C_UN_D = ((6 << 3) + 1),
- C_EQ_D = ((6 << 3) + 2),
- C_UEQ_D = ((6 << 3) + 3),
- C_OLT_D = ((6 << 3) + 4),
- C_ULT_D = ((6 << 3) + 5),
- C_OLE_D = ((6 << 3) + 6),
- C_ULE_D = ((6 << 3) + 7),
+ ADD_D = ((0 << 3) + 0),
+ SUB_D = ((0 << 3) + 1),
+ MUL_D = ((0 << 3) + 2),
+ DIV_D = ((0 << 3) + 3),
+ SQRT_D = ((0 << 3) + 4),
+ ABS_D = ((0 << 3) + 5),
+ MOV_D = ((0 << 3) + 6),
+ NEG_D = ((0 << 3) + 7),
+ ROUND_L_D = ((1 << 3) + 0),
+ TRUNC_L_D = ((1 << 3) + 1),
+ CEIL_L_D = ((1 << 3) + 2),
+ FLOOR_L_D = ((1 << 3) + 3),
+ ROUND_W_D = ((1 << 3) + 4),
+ TRUNC_W_D = ((1 << 3) + 5),
+ CEIL_W_D = ((1 << 3) + 6),
+ FLOOR_W_D = ((1 << 3) + 7),
+ MIN = ((3 << 3) + 4),
+ MINA = ((3 << 3) + 5),
+ MAX = ((3 << 3) + 6),
+ MAXA = ((3 << 3) + 7),
+ CVT_S_D = ((4 << 3) + 0),
+ CVT_W_D = ((4 << 3) + 4),
+ CVT_L_D = ((4 << 3) + 5),
+ C_F_D = ((6 << 3) + 0),
+ C_UN_D = ((6 << 3) + 1),
+ C_EQ_D = ((6 << 3) + 2),
+ C_UEQ_D = ((6 << 3) + 3),
+ C_OLT_D = ((6 << 3) + 4),
+ C_ULT_D = ((6 << 3) + 5),
+ C_OLE_D = ((6 << 3) + 6),
+ C_ULE_D = ((6 << 3) + 7),
// COP1 Encoding of Function Field When rs=W or L.
- CVT_S_W = ((4 << 3) + 0),
- CVT_D_W = ((4 << 3) + 1),
- CVT_S_L = ((4 << 3) + 0),
- CVT_D_L = ((4 << 3) + 1),
- BC1EQZ = ((2 << 2) + 1) << 21,
- BC1NEZ = ((3 << 2) + 1) << 21,
+ CVT_S_W = ((4 << 3) + 0),
+ CVT_D_W = ((4 << 3) + 1),
+ CVT_S_L = ((4 << 3) + 0),
+ CVT_D_L = ((4 << 3) + 1),
+ BC1EQZ = ((2 << 2) + 1) << 21,
+ BC1NEZ = ((3 << 2) + 1) << 21,
// COP1 CMP positive predicates Bit 5..4 = 00.
- CMP_AF = ((0 << 3) + 0),
- CMP_UN = ((0 << 3) + 1),
- CMP_EQ = ((0 << 3) + 2),
- CMP_UEQ = ((0 << 3) + 3),
- CMP_LT = ((0 << 3) + 4),
- CMP_ULT = ((0 << 3) + 5),
- CMP_LE = ((0 << 3) + 6),
- CMP_ULE = ((0 << 3) + 7),
- CMP_SAF = ((1 << 3) + 0),
- CMP_SUN = ((1 << 3) + 1),
- CMP_SEQ = ((1 << 3) + 2),
- CMP_SUEQ = ((1 << 3) + 3),
- CMP_SSLT = ((1 << 3) + 4),
- CMP_SSULT = ((1 << 3) + 5),
- CMP_SLE = ((1 << 3) + 6),
- CMP_SULE = ((1 << 3) + 7),
+ CMP_AF = ((0 << 3) + 0),
+ CMP_UN = ((0 << 3) + 1),
+ CMP_EQ = ((0 << 3) + 2),
+ CMP_UEQ = ((0 << 3) + 3),
+ CMP_LT = ((0 << 3) + 4),
+ CMP_ULT = ((0 << 3) + 5),
+ CMP_LE = ((0 << 3) + 6),
+ CMP_ULE = ((0 << 3) + 7),
+ CMP_SAF = ((1 << 3) + 0),
+ CMP_SUN = ((1 << 3) + 1),
+ CMP_SEQ = ((1 << 3) + 2),
+ CMP_SUEQ = ((1 << 3) + 3),
+ CMP_SSLT = ((1 << 3) + 4),
+ CMP_SSULT = ((1 << 3) + 5),
+ CMP_SLE = ((1 << 3) + 6),
+ CMP_SULE = ((1 << 3) + 7),
// COP1 CMP negative predicates Bit 5..4 = 01.
- CMP_AT = ((2 << 3) + 0), // Reserved, not implemented.
- CMP_OR = ((2 << 3) + 1),
- CMP_UNE = ((2 << 3) + 2),
- CMP_NE = ((2 << 3) + 3),
- CMP_UGE = ((2 << 3) + 4), // Reserved, not implemented.
- CMP_OGE = ((2 << 3) + 5), // Reserved, not implemented.
- CMP_UGT = ((2 << 3) + 6), // Reserved, not implemented.
- CMP_OGT = ((2 << 3) + 7), // Reserved, not implemented.
- CMP_SAT = ((3 << 3) + 0), // Reserved, not implemented.
- CMP_SOR = ((3 << 3) + 1),
- CMP_SUNE = ((3 << 3) + 2),
- CMP_SNE = ((3 << 3) + 3),
- CMP_SUGE = ((3 << 3) + 4), // Reserved, not implemented.
- CMP_SOGE = ((3 << 3) + 5), // Reserved, not implemented.
- CMP_SUGT = ((3 << 3) + 6), // Reserved, not implemented.
- CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented.
-
- SEL = ((2 << 3) + 0),
- SELEQZ_C = ((2 << 3) + 4), // COP1 on FPR registers.
- SELNEZ_C = ((2 << 3) + 7), // COP1 on FPR registers.
+ CMP_AT = ((2 << 3) + 0), // Reserved, not implemented.
+ CMP_OR = ((2 << 3) + 1),
+ CMP_UNE = ((2 << 3) + 2),
+ CMP_NE = ((2 << 3) + 3),
+ CMP_UGE = ((2 << 3) + 4), // Reserved, not implemented.
+ CMP_OGE = ((2 << 3) + 5), // Reserved, not implemented.
+ CMP_UGT = ((2 << 3) + 6), // Reserved, not implemented.
+ CMP_OGT = ((2 << 3) + 7), // Reserved, not implemented.
+ CMP_SAT = ((3 << 3) + 0), // Reserved, not implemented.
+ CMP_SOR = ((3 << 3) + 1),
+ CMP_SUNE = ((3 << 3) + 2),
+ CMP_SNE = ((3 << 3) + 3),
+ CMP_SUGE = ((3 << 3) + 4), // Reserved, not implemented.
+ CMP_SOGE = ((3 << 3) + 5), // Reserved, not implemented.
+ CMP_SUGT = ((3 << 3) + 6), // Reserved, not implemented.
+ CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented.
+
+ SEL = ((2 << 3) + 0),
+ SELEQZ_C = ((2 << 3) + 4), // COP1 on FPR registers.
+ SELNEZ_C = ((2 << 3) + 7), // COP1 on FPR registers.
// COP1 Encoding of Function Field When rs=PS.
// COP1X Encoding of Function Field.
- MADD_D = ((4 << 3) + 1),
+ MADD_D = ((4 << 3) + 1),
- NULLSF = 0
+ NULLSF = 0
};
bool Decoder::DecodeTypeRegisterRsType(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
+ case RINT:
+ Format(instr, "rint.'t 'fd, 'fs");
+ break;
case SELEQZ_C:
Format(instr, "seleqz.'t 'fd, 'fs, 'ft");
break;
}
+// for cvt instructions only
+// Rounds fs per the current FCSR rounding mode, returning both the rounded
+// double and its int32 conversion through the reference parameters.
+// NOTE(review): `toRound` is unused — callers pass the same value as `fs`.
+void Simulator::round_according_to_fcsr(double toRound, double& rounded,
+ int32_t& rounded_int, double fs) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, the tie is broken toward the even
+ // integer (see the kRoundToNearest case below). Behave like round_w_d.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result. Behave like trunc_w_d.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up. Behave like ceil_w_d.
+
+ // 3 RD (round down, or toward −infinity): Round a result to
+ // the next representable value down. Behave like floor_w_d.
+ // NOTE(review): uses the literal mask 3 rather than
+ // kFPURoundingModeMask — keep in sync with the mips32 simulator.
+ switch (FCSR_ & 3) {
+ case kRoundToNearest:
+ rounded = std::floor(fs + 0.5);
+ rounded_int = static_cast<int32_t>(rounded);
+ if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ rounded_int--;
+ }
+ break;
+ case kRoundToZero:
+ rounded = trunc(fs);
+ rounded_int = static_cast<int32_t>(rounded);
+ break;
+ case kRoundToPlusInf:
+ rounded = std::ceil(fs);
+ rounded_int = static_cast<int32_t>(rounded);
+ break;
+ case kRoundToMinusInf:
+ rounded = std::floor(fs);
+ rounded_int = static_cast<int32_t>(rounded);
+ break;
+ }
+}
+
+
+// 64-bit variant of round_according_to_fcsr: rounds fs per the current FCSR
+// rounding mode, returning the rounded double and its int64 conversion.
+// NOTE(review): `toRound` is unused — callers pass the same value as `fs`.
+void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
+ int64_t& rounded_int, double fs) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, the tie is broken toward the even
+ // integer (see the kRoundToNearest case below). Behave like round_l_d.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or.
+ // equal to the infinitely accurate result. Behave like trunc_l_d.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up. Behave like ceil_l_d.
+
+ // 3 RD (round down, or toward −infinity): Round a result to
+ // the next representable value down. Behave like floor_l_d.
+ switch (FCSR_ & 3) {
+ case kRoundToNearest:
+ rounded = std::floor(fs + 0.5);
+ rounded_int = static_cast<int64_t>(rounded);
+ if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ rounded_int--;
+ }
+ break;
+ case kRoundToZero:
+ rounded = trunc(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ case kRoundToPlusInf:
+ rounded = std::ceil(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ case kRoundToMinusInf:
+ rounded = std::floor(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ }
+}
+
+
// Raw access to the PC register.
void Simulator::set_pc(int64_t value) {
pc_modified_ = true;
int64_t ft_int = bit_cast<int64_t>(ft);
int64_t fd_int = bit_cast<int64_t>(fd);
switch (instr->FunctionFieldRaw()) {
+ // RINT.fmt (r6): round fs to an integral value in FP format according
+ // to the FCSR rounding mode; sets the inexact flag if the value changed.
+ case RINT: {
+ DCHECK(kArchVariant == kMips64r6);
+ double result, temp, temp_result;
+ double upper = std::ceil(fs);
+ double lower = std::floor(fs);
+ switch (FCSR_ & 0x3) {
+ case kRoundToNearest:
+ if (upper - fs < fs - lower) {
+ result = upper;
+ } else if (upper - fs > fs - lower) {
+ result = lower;
+ } else {
+ // Exact tie: pick the even neighbour. upper is even iff
+ // upper / 2 has no fractional part.
+ temp_result = upper / 2;
+ double reminder = modf(temp_result, &temp);
+ if (reminder == 0) {
+ result = upper;
+ } else {
+ result = lower;
+ }
+ }
+ break;
+ case kRoundToZero:
+ // Truncate toward zero: floor for positive values, ceil otherwise.
+ result = (fs > 0 ? lower : upper);
+ break;
+ case kRoundToPlusInf:
+ result = upper;
+ break;
+ case kRoundToMinusInf:
+ result = lower;
+ break;
+ }
+ set_fpu_register_double(fd_reg, result);
+ if (result != fs) {
+ set_fcsr_bit(kFCSRInexactFlagBit, true);
+ }
+ break;
+ }
case SEL:
DCHECK(kArchVariant == kMips64r6);
set_fpu_register_double(fd_reg, (fd_int & 0x1) == 0 ? fs : ft);
case C_ULE_D:
set_fcsr_bit(fcsr_cc, (fs <= ft) || (std::isnan(fs) || std::isnan(ft)));
break;
- case CVT_W_D: // Convert double to word.
- // Rounding modes are not yet supported.
- DCHECK((FCSR_ & 3) == 0);
- // In rounding mode 0 it should behave like ROUND.
- // No break.
+ case CVT_W_D: { // Convert double to word.
+ double rounded;
+ int32_t result;
+ round_according_to_fcsr(fs, rounded, result, fs);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
case ROUND_W_D: // Round double to word (round half to even).
{
double rounded = std::floor(fs + 0.5);
case CVT_S_D: // Convert double to float (single).
set_fpu_register_float(fd_reg, static_cast<float>(fs));
break;
- case CVT_L_D: // Mips64r2: Truncate double to 64-bit long-word.
- // Rounding modes are not yet supported.
- DCHECK((FCSR_ & 3) == 0);
- // In rounding mode 0 it should behave like ROUND.
- // No break.
+ case CVT_L_D: { // Mips64r2: Truncate double to 64-bit long-word.
+ // Now honors the FCSR rounding mode via the 64-bit rounding helper.
+ double rounded;
+ int64_t result;
+ round64_according_to_fcsr(fs, rounded, result, fs);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ // NOTE(review): this is the 64-bit conversion path but it writes the
+ // 32-bit kFPUInvalidResult sentinel — confirm whether
+ // kFPU64InvalidResult should be used here instead.
+ set_fpu_register(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
case ROUND_L_D: { // Mips64r2 instruction.
// check error cases
double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
#undef UNSUPPORTED
-
} } // namespace v8::internal
#endif // USE_SIMULATOR
bool test_fcsr_bit(uint32_t cc);
bool set_fcsr_round_error(double original, double rounded);
bool set_fcsr_round64_error(double original, double rounded);
+ // Round 'toRound' according to the rounding mode held in FCSR bits [1:0]
+ // (RN/RZ/RP/RM), producing both the rounded double and its integer
+ // conversion; 'fs' is the original operand used for error reporting.
+ void round_according_to_fcsr(double toRound, double& rounded,
+ int32_t& rounded_int, double fs);
+ void round64_according_to_fcsr(double toRound, double& rounded,
+ int64_t& rounded_int, double fs);
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int64_t value);
TEST(MIPS8) {
// Test ROTR and ROTRV instructions.
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope scope(isolate);
+ if (IsMipsArchVariant(kMips32r2)) {
+ // NOTE(review): the guard limits this test to the r2 variant; confirm
+ // whether kMips32r6 (which also provides ROTR/ROTRV) should be covered.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
- typedef struct {
- int32_t input;
- int32_t result_rotr_4;
- int32_t result_rotr_8;
- int32_t result_rotr_12;
- int32_t result_rotr_16;
- int32_t result_rotr_20;
- int32_t result_rotr_24;
- int32_t result_rotr_28;
- int32_t result_rotrv_4;
- int32_t result_rotrv_8;
- int32_t result_rotrv_12;
- int32_t result_rotrv_16;
- int32_t result_rotrv_20;
- int32_t result_rotrv_24;
- int32_t result_rotrv_28;
- } T;
- T t;
+ typedef struct {
+ int32_t input;
+ int32_t result_rotr_4;
+ int32_t result_rotr_8;
+ int32_t result_rotr_12;
+ int32_t result_rotr_16;
+ int32_t result_rotr_20;
+ int32_t result_rotr_24;
+ int32_t result_rotr_28;
+ int32_t result_rotrv_4;
+ int32_t result_rotrv_8;
+ int32_t result_rotrv_12;
+ int32_t result_rotrv_16;
+ int32_t result_rotrv_20;
+ int32_t result_rotrv_24;
+ int32_t result_rotrv_28;
+ } T;
+ T t;
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0);
- // Basic word load.
- __ lw(t0, MemOperand(a0, OFFSET_OF(T, input)) );
-
- // ROTR instruction (called through the Ror macro).
- __ Ror(t1, t0, 0x0004);
- __ Ror(t2, t0, 0x0008);
- __ Ror(t3, t0, 0x000c);
- __ Ror(t4, t0, 0x0010);
- __ Ror(t5, t0, 0x0014);
- __ Ror(t6, t0, 0x0018);
- __ Ror(t7, t0, 0x001c);
-
- // Basic word store.
- __ sw(t1, MemOperand(a0, OFFSET_OF(T, result_rotr_4)) );
- __ sw(t2, MemOperand(a0, OFFSET_OF(T, result_rotr_8)) );
- __ sw(t3, MemOperand(a0, OFFSET_OF(T, result_rotr_12)) );
- __ sw(t4, MemOperand(a0, OFFSET_OF(T, result_rotr_16)) );
- __ sw(t5, MemOperand(a0, OFFSET_OF(T, result_rotr_20)) );
- __ sw(t6, MemOperand(a0, OFFSET_OF(T, result_rotr_24)) );
- __ sw(t7, MemOperand(a0, OFFSET_OF(T, result_rotr_28)) );
-
- // ROTRV instruction (called through the Ror macro).
- __ li(t7, 0x0004);
- __ Ror(t1, t0, t7);
- __ li(t7, 0x0008);
- __ Ror(t2, t0, t7);
- __ li(t7, 0x000C);
- __ Ror(t3, t0, t7);
- __ li(t7, 0x0010);
- __ Ror(t4, t0, t7);
- __ li(t7, 0x0014);
- __ Ror(t5, t0, t7);
- __ li(t7, 0x0018);
- __ Ror(t6, t0, t7);
- __ li(t7, 0x001C);
- __ Ror(t7, t0, t7);
-
- // Basic word store.
- __ sw(t1, MemOperand(a0, OFFSET_OF(T, result_rotrv_4)) );
- __ sw(t2, MemOperand(a0, OFFSET_OF(T, result_rotrv_8)) );
- __ sw(t3, MemOperand(a0, OFFSET_OF(T, result_rotrv_12)) );
- __ sw(t4, MemOperand(a0, OFFSET_OF(T, result_rotrv_16)) );
- __ sw(t5, MemOperand(a0, OFFSET_OF(T, result_rotrv_20)) );
- __ sw(t6, MemOperand(a0, OFFSET_OF(T, result_rotrv_24)) );
- __ sw(t7, MemOperand(a0, OFFSET_OF(T, result_rotrv_28)) );
+ // Basic word load.
+ __ lw(t0, MemOperand(a0, OFFSET_OF(T, input)) );
+
+ // ROTR instruction (called through the Ror macro).
+ __ Ror(t1, t0, 0x0004);
+ __ Ror(t2, t0, 0x0008);
+ __ Ror(t3, t0, 0x000c);
+ __ Ror(t4, t0, 0x0010);
+ __ Ror(t5, t0, 0x0014);
+ __ Ror(t6, t0, 0x0018);
+ __ Ror(t7, t0, 0x001c);
+
+ // Basic word store.
+ __ sw(t1, MemOperand(a0, OFFSET_OF(T, result_rotr_4)) );
+ __ sw(t2, MemOperand(a0, OFFSET_OF(T, result_rotr_8)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(T, result_rotr_12)) );
+ __ sw(t4, MemOperand(a0, OFFSET_OF(T, result_rotr_16)) );
+ __ sw(t5, MemOperand(a0, OFFSET_OF(T, result_rotr_20)) );
+ __ sw(t6, MemOperand(a0, OFFSET_OF(T, result_rotr_24)) );
+ __ sw(t7, MemOperand(a0, OFFSET_OF(T, result_rotr_28)) );
+
+ // ROTRV instruction (called through the Ror macro).
+ __ li(t7, 0x0004);
+ __ Ror(t1, t0, t7);
+ __ li(t7, 0x0008);
+ __ Ror(t2, t0, t7);
+ __ li(t7, 0x000C);
+ __ Ror(t3, t0, t7);
+ __ li(t7, 0x0010);
+ __ Ror(t4, t0, t7);
+ __ li(t7, 0x0014);
+ __ Ror(t5, t0, t7);
+ __ li(t7, 0x0018);
+ __ Ror(t6, t0, t7);
+ __ li(t7, 0x001C);
+ __ Ror(t7, t0, t7);
+
+ // Basic word store.
+ __ sw(t1, MemOperand(a0, OFFSET_OF(T, result_rotrv_4)) );
+ __ sw(t2, MemOperand(a0, OFFSET_OF(T, result_rotrv_8)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(T, result_rotrv_12)) );
+ __ sw(t4, MemOperand(a0, OFFSET_OF(T, result_rotrv_16)) );
+ __ sw(t5, MemOperand(a0, OFFSET_OF(T, result_rotrv_20)) );
+ __ sw(t6, MemOperand(a0, OFFSET_OF(T, result_rotrv_24)) );
+ __ sw(t7, MemOperand(a0, OFFSET_OF(T, result_rotrv_28)) );
- __ jr(ra);
- __ nop();
+ __ jr(ra);
+ __ nop();
- CodeDesc desc;
- assm.GetCode(&desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
- t.input = 0x12345678;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0x0, 0, 0, 0);
- USE(dummy);
- CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotr_4);
- CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotr_8);
- CHECK_EQ(static_cast<int32_t>(0x67812345), t.result_rotr_12);
- CHECK_EQ(static_cast<int32_t>(0x56781234), t.result_rotr_16);
- CHECK_EQ(static_cast<int32_t>(0x45678123), t.result_rotr_20);
- CHECK_EQ(static_cast<int32_t>(0x34567812), t.result_rotr_24);
- CHECK_EQ(static_cast<int32_t>(0x23456781), t.result_rotr_28);
-
- CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotrv_4);
- CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotrv_8);
- CHECK_EQ(static_cast<int32_t>(0x67812345), t.result_rotrv_12);
- CHECK_EQ(static_cast<int32_t>(0x56781234), t.result_rotrv_16);
- CHECK_EQ(static_cast<int32_t>(0x45678123), t.result_rotrv_20);
- CHECK_EQ(static_cast<int32_t>(0x34567812), t.result_rotrv_24);
- CHECK_EQ(static_cast<int32_t>(0x23456781), t.result_rotrv_28);
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ t.input = 0x12345678;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0x0, 0, 0, 0);
+ USE(dummy);
+ CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotr_4);
+ CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotr_8);
+ CHECK_EQ(static_cast<int32_t>(0x67812345), t.result_rotr_12);
+ CHECK_EQ(static_cast<int32_t>(0x56781234), t.result_rotr_16);
+ CHECK_EQ(static_cast<int32_t>(0x45678123), t.result_rotr_20);
+ CHECK_EQ(static_cast<int32_t>(0x34567812), t.result_rotr_24);
+ CHECK_EQ(static_cast<int32_t>(0x23456781), t.result_rotr_28);
+
+ CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotrv_4);
+ CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotrv_8);
+ CHECK_EQ(static_cast<int32_t>(0x67812345), t.result_rotrv_12);
+ CHECK_EQ(static_cast<int32_t>(0x56781234), t.result_rotrv_16);
+ CHECK_EQ(static_cast<int32_t>(0x45678123), t.result_rotrv_20);
+ CHECK_EQ(static_cast<int32_t>(0x34567812), t.result_rotrv_24);
+ CHECK_EQ(static_cast<int32_t>(0x23456781), t.result_rotrv_28);
+ }
}
18446744073709551621.0, -18446744073709551621.0};
double tests[test_size*2] = {2.8, 2.9, -2.8, -2.9,
18446744073709551616.0, 18446744073709555712.0};
- for (int j=0;j < test_size;j+=2) {
- for (int i=0;i < input_size;i++) {
+ for (int j=0; j < test_size; j+=2) {
+ for (int i=0; i < input_size; i++) {
test.e = inputs[i];
test.f = tests[j];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
__ ldc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, a)));
__ ldc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, b)));
- __ min(D, f10, f8, f4);
- __ max(D, f12, f8, f4);
+ __ min(D, f10, f4, f8);
+ __ max(D, f12, f4, f8);
__ sdc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, c)));
__ sdc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, d)));
__ jr(ra);
}
+TEST(MIPS18) {
+ // Tests rint.d (r6 only): rounds a double to an integral value using the
+ // rounding mode loaded into FCSR from test.fcsr. The original FCSR is
+ // saved in t1 and restored afterwards so the test leaves no FPU state.
+ if (IsMipsArchVariant(kMips32r6)) {
+ const int tableLength = 30;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test_float {
+ double a;
+ double b;
+ int fcsr;
+ } TestFloat;
+
+ TestFloat test;
+ double inputs[tableLength] = {18446744073709551617.0,
+ 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
+ 1.7976931348623157E+308, 6.27463370218383111104242366943E-307,
+ 309485009821345068724781056.89,
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0};
+ double outputs_RN[tableLength] = {18446744073709551617.0,
+ 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
+ 1.7976931348623157E308, 0,
+ 309485009821345068724781057.0,
+ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
+ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0};
+ double outputs_RZ[tableLength] = {18446744073709551617.0,
+ 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
+ 1.7976931348623157E308, 0,
+ 309485009821345068724781057.0,
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0};
+ double outputs_RP[tableLength] = {18446744073709551617.0,
+ 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
+ 1.7976931348623157E308, 1,
+ 309485009821345068724781057.0,
+ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0};
+ double outputs_RM[tableLength] = {18446744073709551617.0,
+ 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
+ 1.7976931348623157E308, 0,
+ 309485009821345068724781057.0,
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0};
+ int fcsr_inputs[4] =
+ {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf};
+ double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM};
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, a)) );
+ __ lw(t0, MemOperand(a0, OFFSET_OF(TestFloat, fcsr)) );
+ __ cfc1(t1, FCSR);
+ __ ctc1(t0, FCSR);
+ __ rint_d(f8, f4);
+ __ sdc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, b)) );
+ __ ctc1(t1, FCSR);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ for (int j = 0; j < 4; j++) {
+ test.fcsr = fcsr_inputs[j];
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputs[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.b, outputs[j][i]);
+ }
+ }
+ }
+}
+
+
+TEST(MIPS19) {
+ // Tests cvt.w.d under all four FCSR rounding modes; inputs outside the
+ // int32 range must produce kFPUInvalidResult. The caller's FCSR is saved
+ // in t1 and restored after the conversion.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test_float {
+ double a;
+ int32_t b;
+ int32_t fcsr;
+ } Test;
+ const int tableLength = 24;
+ double inputs[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483637.0, 2147483638.0, 2147483639.0,
+ 2147483640.0, 2147483641.0, 2147483642.0,
+ 2147483643.0, 2147483644.0, 2147483645.0,
+ 2147483646.0, 2147483647.0, 2147483653.0
+ };
+ double outputs_RN[tableLength] = {
+ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
+ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
+ 2147483637.0, 2147483638.0, 2147483639.0,
+ 2147483640.0, 2147483641.0, 2147483642.0,
+ 2147483643.0, 2147483644.0, 2147483645.0,
+ 2147483646.0, 2147483647.0, kFPUInvalidResult};
+ double outputs_RZ[tableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 2147483637.0, 2147483638.0, 2147483639.0,
+ 2147483640.0, 2147483641.0, 2147483642.0,
+ 2147483643.0, 2147483644.0, 2147483645.0,
+ 2147483646.0, 2147483647.0, kFPUInvalidResult};
+ double outputs_RP[tableLength] = {
+ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 2147483637.0, 2147483638.0, 2147483639.0,
+ 2147483640.0, 2147483641.0, 2147483642.0,
+ 2147483643.0, 2147483644.0, 2147483645.0,
+ 2147483646.0, 2147483647.0, kFPUInvalidResult};
+ double outputs_RM[tableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
+ 2147483637.0, 2147483638.0, 2147483639.0,
+ 2147483640.0, 2147483641.0, 2147483642.0,
+ 2147483643.0, 2147483644.0, 2147483645.0,
+ 2147483646.0, 2147483647.0, kFPUInvalidResult};
+ int fcsr_inputs[4] =
+ {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf};
+ double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM};
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
+ __ lw(t0, MemOperand(a0, OFFSET_OF(Test, fcsr)) );
+ __ cfc1(t1, FCSR);
+ __ ctc1(t0, FCSR);
+ __ cvt_w_d(f8, f4);
+ __ swc1(f8, MemOperand(a0, OFFSET_OF(Test, b)) );
+ __ ctc1(t1, FCSR);
+ __ jr(ra);
+ __ nop();
+ Test test;
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int j = 0; j < 4; j++) {
+ test.fcsr = fcsr_inputs[j];
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputs[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.b, outputs[j][i]);
+ }
+ }
+}
+
+
TEST(jump_tables1) {
// Test jump tables with forward jumps.
CcTest::InitializeVM();
TEST(MIPS8) {
- // Test ROTR and ROTRV instructions.
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope scope(isolate);
+ if (kArchVariant == kMips64r2) {
+ // NOTE(review): the guard limits this test to r2; confirm whether
+ // kMips64r6 (which also provides ROTR/ROTRV) should be covered too.
+ // Test ROTR and ROTRV instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
- typedef struct {
- int32_t input;
- int32_t result_rotr_4;
- int32_t result_rotr_8;
- int32_t result_rotr_12;
- int32_t result_rotr_16;
- int32_t result_rotr_20;
- int32_t result_rotr_24;
- int32_t result_rotr_28;
- int32_t result_rotrv_4;
- int32_t result_rotrv_8;
- int32_t result_rotrv_12;
- int32_t result_rotrv_16;
- int32_t result_rotrv_20;
- int32_t result_rotrv_24;
- int32_t result_rotrv_28;
- } T;
- T t;
+ typedef struct {
+ int32_t input;
+ int32_t result_rotr_4;
+ int32_t result_rotr_8;
+ int32_t result_rotr_12;
+ int32_t result_rotr_16;
+ int32_t result_rotr_20;
+ int32_t result_rotr_24;
+ int32_t result_rotr_28;
+ int32_t result_rotrv_4;
+ int32_t result_rotrv_8;
+ int32_t result_rotrv_12;
+ int32_t result_rotrv_16;
+ int32_t result_rotrv_20;
+ int32_t result_rotrv_24;
+ int32_t result_rotrv_28;
+ } T;
+ T t;
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0);
- // Basic word load.
- __ lw(a4, MemOperand(a0, OFFSET_OF(T, input)) );
-
- // ROTR instruction (called through the Ror macro).
- __ Ror(a5, a4, 0x0004);
- __ Ror(a6, a4, 0x0008);
- __ Ror(a7, a4, 0x000c);
- __ Ror(t0, a4, 0x0010);
- __ Ror(t1, a4, 0x0014);
- __ Ror(t2, a4, 0x0018);
- __ Ror(t3, a4, 0x001c);
-
- // Basic word store.
- __ sw(a5, MemOperand(a0, OFFSET_OF(T, result_rotr_4)) );
- __ sw(a6, MemOperand(a0, OFFSET_OF(T, result_rotr_8)) );
- __ sw(a7, MemOperand(a0, OFFSET_OF(T, result_rotr_12)) );
- __ sw(t0, MemOperand(a0, OFFSET_OF(T, result_rotr_16)) );
- __ sw(t1, MemOperand(a0, OFFSET_OF(T, result_rotr_20)) );
- __ sw(t2, MemOperand(a0, OFFSET_OF(T, result_rotr_24)) );
- __ sw(t3, MemOperand(a0, OFFSET_OF(T, result_rotr_28)) );
-
- // ROTRV instruction (called through the Ror macro).
- __ li(t3, 0x0004);
- __ Ror(a5, a4, t3);
- __ li(t3, 0x0008);
- __ Ror(a6, a4, t3);
- __ li(t3, 0x000C);
- __ Ror(a7, a4, t3);
- __ li(t3, 0x0010);
- __ Ror(t0, a4, t3);
- __ li(t3, 0x0014);
- __ Ror(t1, a4, t3);
- __ li(t3, 0x0018);
- __ Ror(t2, a4, t3);
- __ li(t3, 0x001C);
- __ Ror(t3, a4, t3);
-
- // Basic word store.
- __ sw(a5, MemOperand(a0, OFFSET_OF(T, result_rotrv_4)) );
- __ sw(a6, MemOperand(a0, OFFSET_OF(T, result_rotrv_8)) );
- __ sw(a7, MemOperand(a0, OFFSET_OF(T, result_rotrv_12)) );
- __ sw(t0, MemOperand(a0, OFFSET_OF(T, result_rotrv_16)) );
- __ sw(t1, MemOperand(a0, OFFSET_OF(T, result_rotrv_20)) );
- __ sw(t2, MemOperand(a0, OFFSET_OF(T, result_rotrv_24)) );
- __ sw(t3, MemOperand(a0, OFFSET_OF(T, result_rotrv_28)) );
+ // Basic word load.
+ __ lw(a4, MemOperand(a0, OFFSET_OF(T, input)) );
+
+ // ROTR instruction (called through the Ror macro).
+ __ Ror(a5, a4, 0x0004);
+ __ Ror(a6, a4, 0x0008);
+ __ Ror(a7, a4, 0x000c);
+ __ Ror(t0, a4, 0x0010);
+ __ Ror(t1, a4, 0x0014);
+ __ Ror(t2, a4, 0x0018);
+ __ Ror(t3, a4, 0x001c);
+
+ // Basic word store.
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, result_rotr_4)) );
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, result_rotr_8)) );
+ __ sw(a7, MemOperand(a0, OFFSET_OF(T, result_rotr_12)) );
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, result_rotr_16)) );
+ __ sw(t1, MemOperand(a0, OFFSET_OF(T, result_rotr_20)) );
+ __ sw(t2, MemOperand(a0, OFFSET_OF(T, result_rotr_24)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(T, result_rotr_28)) );
+
+ // ROTRV instruction (called through the Ror macro).
+ __ li(t3, 0x0004);
+ __ Ror(a5, a4, t3);
+ __ li(t3, 0x0008);
+ __ Ror(a6, a4, t3);
+ __ li(t3, 0x000C);
+ __ Ror(a7, a4, t3);
+ __ li(t3, 0x0010);
+ __ Ror(t0, a4, t3);
+ __ li(t3, 0x0014);
+ __ Ror(t1, a4, t3);
+ __ li(t3, 0x0018);
+ __ Ror(t2, a4, t3);
+ __ li(t3, 0x001C);
+ __ Ror(t3, a4, t3);
+
+ // Basic word store.
+ __ sw(a5, MemOperand(a0, OFFSET_OF(T, result_rotrv_4)) );
+ __ sw(a6, MemOperand(a0, OFFSET_OF(T, result_rotrv_8)) );
+ __ sw(a7, MemOperand(a0, OFFSET_OF(T, result_rotrv_12)) );
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, result_rotrv_16)) );
+ __ sw(t1, MemOperand(a0, OFFSET_OF(T, result_rotrv_20)) );
+ __ sw(t2, MemOperand(a0, OFFSET_OF(T, result_rotrv_24)) );
+ __ sw(t3, MemOperand(a0, OFFSET_OF(T, result_rotrv_28)) );
- __ jr(ra);
- __ nop();
+ __ jr(ra);
+ __ nop();
- CodeDesc desc;
- assm.GetCode(&desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
- t.input = 0x12345678;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0x0, 0, 0, 0);
- USE(dummy);
- CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotr_4);
- CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotr_8);
- CHECK_EQ(static_cast<int32_t>(0x67812345), t.result_rotr_12);
- CHECK_EQ(static_cast<int32_t>(0x56781234), t.result_rotr_16);
- CHECK_EQ(static_cast<int32_t>(0x45678123), t.result_rotr_20);
- CHECK_EQ(static_cast<int32_t>(0x34567812), t.result_rotr_24);
- CHECK_EQ(static_cast<int32_t>(0x23456781), t.result_rotr_28);
-
- CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotrv_4);
- CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotrv_8);
- CHECK_EQ(static_cast<int32_t>(0x67812345), t.result_rotrv_12);
- CHECK_EQ(static_cast<int32_t>(0x56781234), t.result_rotrv_16);
- CHECK_EQ(static_cast<int32_t>(0x45678123), t.result_rotrv_20);
- CHECK_EQ(static_cast<int32_t>(0x34567812), t.result_rotrv_24);
- CHECK_EQ(static_cast<int32_t>(0x23456781), t.result_rotrv_28);
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ t.input = 0x12345678;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0x0, 0, 0, 0);
+ USE(dummy);
+ CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotr_4);
+ CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotr_8);
+ CHECK_EQ(static_cast<int32_t>(0x67812345), t.result_rotr_12);
+ CHECK_EQ(static_cast<int32_t>(0x56781234), t.result_rotr_16);
+ CHECK_EQ(static_cast<int32_t>(0x45678123), t.result_rotr_20);
+ CHECK_EQ(static_cast<int32_t>(0x34567812), t.result_rotr_24);
+ CHECK_EQ(static_cast<int32_t>(0x23456781), t.result_rotr_28);
+
+ CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotrv_4);
+ CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotrv_8);
+ CHECK_EQ(static_cast<int32_t>(0x67812345), t.result_rotrv_12);
+ CHECK_EQ(static_cast<int32_t>(0x56781234), t.result_rotrv_16);
+ CHECK_EQ(static_cast<int32_t>(0x45678123), t.result_rotrv_20);
+ CHECK_EQ(static_cast<int32_t>(0x34567812), t.result_rotrv_24);
+ CHECK_EQ(static_cast<int32_t>(0x23456781), t.result_rotrv_28);
+ }
}
__ ldc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, a)));
__ ldc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, b)));
- __ min(D, f10, f8, f4);
- __ max(D, f12, f8, f4);
+ __ min(D, f10, f4, f8);
+ __ max(D, f12, f4, f8);
__ sdc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, c)));
__ sdc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, d)));
__ jr(ra);
}
+TEST(MIPS19) {
+ if (kArchVariant == kMips64r6) {
+ // Tests rint.d (r6 only) under all four FCSR rounding modes. The
+ // original FCSR is saved in t1 and restored after the rint so the test
+ // does not leak rounding-mode state into later tests (consistent with
+ // the other rounding tests in this file).
+ const int tableLength = 30;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test_float {
+ double a;
+ double b;
+ int fcsr;
+ } TestFloat;
+
+ TestFloat test;
+ double inputs[tableLength] = {18446744073709551617.0,
+ 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
+ 1.7976931348623157E+308, 6.27463370218383111104242366943E-307,
+ 309485009821345068724781056.89,
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0};
+ double outputs_RN[tableLength] = {18446744073709551617.0,
+ 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
+ 1.7976931348623157E308, 0,
+ 309485009821345068724781057.0,
+ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
+ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0};
+ double outputs_RZ[tableLength] = {18446744073709551617.0,
+ 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
+ 1.7976931348623157E308, 0,
+ 309485009821345068724781057.0,
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0};
+ double outputs_RP[tableLength] = {18446744073709551617.0,
+ 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
+ 1.7976931348623157E308, 1,
+ 309485009821345068724781057.0,
+ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0};
+ double outputs_RM[tableLength] = {18446744073709551617.0,
+ 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
+ 1.7976931348623157E308, 0,
+ 309485009821345068724781057.0,
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0};
+ int fcsr_inputs[4] =
+ {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf};
+ double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM};
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, a)) );
+ __ lw(t0, MemOperand(a0, OFFSET_OF(TestFloat, fcsr)) );
+ __ cfc1(t1, FCSR);
+ __ ctc1(t0, FCSR);
+ __ rint_d(f8, f4);
+ __ sdc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, b)) );
+ __ ctc1(t1, FCSR);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ for (int j = 0; j < 4; j++) {
+ test.fcsr = fcsr_inputs[j];
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputs[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.b, outputs[j][i]);
+ }
+ }
+ }
+}
+
+
+TEST(MIPS20) {
+ // Tests cvt.w.d under all four FCSR rounding modes; inputs outside the
+ // int32 range must produce kFPUInvalidResult. The caller's FCSR is saved
+ // in t1 and restored after the conversion.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test_float {
+ double a;
+ int32_t b;
+ int fcsr;
+ } Test;
+ const int tableLength = 24;
+ double inputs[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483637.0, 2147483638.0, 2147483639.0,
+ 2147483640.0, 2147483641.0, 2147483642.0,
+ 2147483643.0, 2147483644.0, 2147483645.0,
+ 2147483646.0, 2147483647.0, 2147483653.0
+ };
+ double outputs_RN[tableLength] = {
+ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
+ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
+ 2147483637.0, 2147483638.0, 2147483639.0,
+ 2147483640.0, 2147483641.0, 2147483642.0,
+ 2147483643.0, 2147483644.0, 2147483645.0,
+ 2147483646.0, 2147483647.0, kFPUInvalidResult};
+ double outputs_RZ[tableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 2147483637.0, 2147483638.0, 2147483639.0,
+ 2147483640.0, 2147483641.0, 2147483642.0,
+ 2147483643.0, 2147483644.0, 2147483645.0,
+ 2147483646.0, 2147483647.0, kFPUInvalidResult};
+ double outputs_RP[tableLength] = {
+ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 2147483637.0, 2147483638.0, 2147483639.0,
+ 2147483640.0, 2147483641.0, 2147483642.0,
+ 2147483643.0, 2147483644.0, 2147483645.0,
+ 2147483646.0, 2147483647.0, kFPUInvalidResult};
+ double outputs_RM[tableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
+ 2147483637.0, 2147483638.0, 2147483639.0,
+ 2147483640.0, 2147483641.0, 2147483642.0,
+ 2147483643.0, 2147483644.0, 2147483645.0,
+ 2147483646.0, 2147483647.0, kFPUInvalidResult};
+ int fcsr_inputs[4] =
+ {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf};
+ double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM};
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
+ __ lw(t0, MemOperand(a0, OFFSET_OF(Test, fcsr)) );
+ __ cfc1(t1, FCSR);
+ __ ctc1(t0, FCSR);
+ __ cvt_w_d(f8, f4);
+ __ swc1(f8, MemOperand(a0, OFFSET_OF(Test, b)) );
+ __ ctc1(t1, FCSR);
+ __ jr(ra);
+ __ nop();
+ Test test;
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int j = 0; j < 4; j++) {
+ test.fcsr = fcsr_inputs[j];
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputs[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.b, outputs[j][i]);
+ }
+ }
+}
+
+
+TEST(MIPS21) {
+ if (kArchVariant == kMips64r6) {
+ // Tests rint.d (r6 only) under all four FCSR rounding modes; the
+ // caller's FCSR is saved in t1 and restored after the rint.
+ const int tableLength = 30;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test_float {
+ double a;
+ double b;
+ int fcsr;
+ } TestFloat;
+
+ TestFloat test;
+ double inputs[tableLength] = {18446744073709551617.0,
+ 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
+ 1.7976931348623157E308, 6.27463370218383111104242366943E-307,
+ 309485009821345068724781056.89,
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0};
+ double outputs_RN[tableLength] = {18446744073709551617.0,
+ 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
+ 1.7976931348623157E308, 0,
+ 309485009821345068724781057.0,
+ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
+ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0};
+ double outputs_RZ[tableLength] = {18446744073709551617.0,
+ 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
+ 1.7976931348623157E308, 0,
+ 309485009821345068724781057.0,
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0};
+ double outputs_RP[tableLength] = {18446744073709551617.0,
+ 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
+ 1.7976931348623157E308, 1,
+ 309485009821345068724781057.0,
+ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0};
+ double outputs_RM[tableLength] = {18446744073709551617.0,
+ 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
+ 1.7976931348623157E308, 0,
+ 309485009821345068724781057.0,
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0};
+ int fcsr_inputs[4] =
+ {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf};
+ double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM};
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, a)) );
+ __ lw(t0, MemOperand(a0, OFFSET_OF(TestFloat, fcsr)) );
+ __ cfc1(t1, FCSR);
+ __ ctc1(t0, FCSR);
+ __ rint_d(f8, f4);
+ __ sdc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, b)) );
+ __ ctc1(t1, FCSR);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int j = 0; j < 4; j++) {
+ test.fcsr = fcsr_inputs[j];
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputs[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.b, outputs[j][i]);
+ }
+ }
+ }
+}
+
+
+
+
TEST(jump_tables1) {
// Test jump tables with forward jumps.
CcTest::InitializeVM();
COMPARE(min(D, f3, f4, f5), "462520dc min.d f3, f4, f5");
COMPARE(max(D, f3, f4, f5), "462520de max.d f3, f4, f5");
+ COMPARE(rint_d(f8, f6), "4620321a rint.d f8, f6");
+
VERIFY_RUN();
}
}
COMPARE(min(D, f3, f4, f5), "462520dc min.d f3, f4, f5");
COMPARE(max(D, f3, f4, f5), "462520de max.d f3, f4, f5");
+ COMPARE(rint_d(f8, f6), "4620321a rint.d f8, f6");
VERIFY_RUN();
}
}