}
-void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
- FPURegister ft) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK((fmt == D) || (fmt == S));
-
- Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
- fs.code() << kFsShift | fd.code() << kFdShift | SEL;
- emit(instr);
-}
-
-
void Assembler::seleqz(Register rd, Register rs, Register rt) {
DCHECK(IsMipsArchVariant(kMips32r6));
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
}
-void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
- FPURegister ft) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK((fmt == D) || (fmt == S));
- GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
-}
-
-
-void Assembler::selnez(Register rd, Register rs, Register rt) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
-}
-
-
-void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
- FPURegister ft) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK((fmt == D) || (fmt == S));
- GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
-}
-
-
// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
if (!IsMipsArchVariant(kMips32r6)) {
}
+// MOVN.S: fd <- fs when GPR rt != 0 (MIPS32r2 FP conditional move).
+void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
+  DCHECK(IsMipsArchVariant(kMips32r2));
+  GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
+}
+
+
+// MOVN.D: fd <- fs when GPR rt != 0 (MIPS32r2 FP conditional move).
+void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
+  DCHECK(IsMipsArchVariant(kMips32r2));
+  GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
+}
+
+
+// SEL.fmt (MIPS32r6): fd <- (fd.bit0 == 0) ? fs : ft.
+// fmt selects single (S) or double (D) precision.
+void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
+                    FPURegister ft) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK((fmt == D) || (fmt == S));
+
+  GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
+}
+
+
+// Precision-specific convenience wrappers for SEL.fmt.
+void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
+  sel(S, fd, fs, ft);
+}
+
+
+void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+  sel(D, fd, fs, ft);
+}
+
+
+// SELEQZ.fmt (MIPS32r6): fd <- (ft.bit0 == 0) ? fs : 0.
+void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
+                       FPURegister ft) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK((fmt == D) || (fmt == S));
+  GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
+}
+
+
+// GPR selnez (MIPS32r6): rd <- (rt != 0) ? rs : 0.
+void Assembler::selnez(Register rd, Register rs, Register rt) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
+}
+
+
+// SELNEZ.fmt (MIPS32r6): fd <- (ft.bit0 != 0) ? fs : 0.
+void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
+                       FPURegister ft) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK((fmt == D) || (fmt == S));
+  GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
+}
+
+
+// Precision-specific convenience wrappers for the FP select-on-zero ops.
+void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+  seleqz(D, fd, fs, ft);
+}
+
+
+void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
+  seleqz(S, fd, fs, ft);
+}
+
+
+void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+  selnez(D, fd, fs, ft);
+}
+
+
+void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
+  selnez(S, fd, fs, ft);
+}
+
+
+// MOVZ.S: fd <- fs when GPR rt == 0 (MIPS32r2 FP conditional move).
+void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
+  DCHECK(IsMipsArchVariant(kMips32r2));
+  GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
+}
+
+
+// MOVZ.D: fd <- fs when GPR rt == 0 (MIPS32r2 FP conditional move).
+void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
+  DCHECK(IsMipsArchVariant(kMips32r2));
+  GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
+}
+
+
+// MOVT.S: fd <- fs when FP condition bit 'cc' is set. The cc number and the
+// tf (true/false) selector bit are packed into the ft register field:
+// ft = cc << 2 | tf, with tf == 1 for MOVT.
+void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
+  DCHECK(IsMipsArchVariant(kMips32r2));
+  FPURegister ft;
+  ft.code_ = (cc & 0x0007) << 2 | 1;
+  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
+}
+
+
+// MOVT.D: as movt_s, double precision.
+void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
+  DCHECK(IsMipsArchVariant(kMips32r2));
+  FPURegister ft;
+  ft.code_ = (cc & 0x0007) << 2 | 1;
+  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
+}
+
+
+// MOVF.S: fd <- fs when FP condition bit 'cc' is clear (tf == 0).
+void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
+  DCHECK(IsMipsArchVariant(kMips32r2));
+  FPURegister ft;
+  ft.code_ = (cc & 0x0007) << 2 | 0;
+  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
+}
+
+
+// MOVF.D: as movf_s, double precision.
+void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
+  DCHECK(IsMipsArchVariant(kMips32r2));
+  FPURegister ft;
+  ft.code_ = (cc & 0x0007) << 2 | 0;
+  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
+}
+
+
// Arithmetic.
void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
-  GenInstrRegister(COP1, S, ft, fs, fd, ADD_D);
+  // Single-precision arithmetic now uses the dedicated *_S function-field
+  // constants instead of reusing the *_D names.
+  GenInstrRegister(COP1, S, ft, fs, fd, ADD_S);
}
void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
-  GenInstrRegister(COP1, S, ft, fs, fd, SUB_D);
+  GenInstrRegister(COP1, S, ft, fs, fd, SUB_S);
}
void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
-  GenInstrRegister(COP1, S, ft, fs, fd, MUL_D);
+  GenInstrRegister(COP1, S, ft, fs, fd, MUL_S);
}
void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
-  GenInstrRegister(COP1, S, ft, fs, fd, DIV_D);
+  GenInstrRegister(COP1, S, ft, fs, fd, DIV_S);
}
void Assembler::abs_s(FPURegister fd, FPURegister fs) {
-  GenInstrRegister(COP1, S, f0, fs, fd, ABS_D);
+  GenInstrRegister(COP1, S, f0, fs, fd, ABS_S);
}
void Assembler::mov_d(FPURegister fd, FPURegister fs) {
-  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
+  // MOV.D: fd <- fs (double-precision register move). Keep the D-suffixed
+  // constant here; MOV_S and MOV_D encode the same function field (6), but
+  // the patch had the two names crossed between mov_d and mov_s.
+  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
+}
+
+
+// MOV.S: fd <- fs (single-precision register move).
+void Assembler::mov_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
}
void Assembler::neg_s(FPURegister fd, FPURegister fs) {
-  GenInstrRegister(COP1, S, f0, fs, fd, NEG_D);
+  // Switched to the dedicated single-precision function-field constants.
+  GenInstrRegister(COP1, S, f0, fs, fd, NEG_S);
}
void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
-  GenInstrRegister(COP1, S, f0, fs, fd, SQRT_D);
+  GenInstrRegister(COP1, S, f0, fs, fd, SQRT_S);
}
}
+// RSQRT.S: fd <- 1 / sqrt(fs), single precision.
+// NOTE(review): unlike movn_*/movt_* above, these emitters carry no
+// arch-variant DCHECK — confirm recip/rsqrt are valid on every supported
+// MIPS32 variant, or add the appropriate check.
+void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
+}
+
+
+// RSQRT.D: fd <- 1 / sqrt(fs), double precision.
+void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
+}
+
+
+// RECIP.D: fd <- 1 / fs, double precision.
+void Assembler::recip_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
+}
+
+
+// RECIP.S: fd <- 1 / fs, single precision.
+void Assembler::recip_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
+}
+
+
// Conversions.
void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
}
void movf(Register rd, Register rs, uint16_t cc = 0);
void sel(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
+ void sel_s(FPURegister fd, FPURegister fs, FPURegister ft);
+ void sel_d(FPURegister fd, FPURegister fs, FPURegister ft);
void seleqz(Register rd, Register rs, Register rt);
void seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft);
void selnez(Register rd, Register rs, Register rt);
void selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft);
-
+ void seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft);
+ void selnez_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void selnez_s(FPURegister fd, FPURegister fs, FPURegister ft);
+
+ void movz_s(FPURegister fd, FPURegister fs, Register rt);
+ void movz_d(FPURegister fd, FPURegister fs, Register rt);
+ void movt_s(FPURegister fd, FPURegister fs, uint16_t cc);
+ void movt_d(FPURegister fd, FPURegister fs, uint16_t cc);
+ void movf_s(FPURegister fd, FPURegister fs, uint16_t cc);
+ void movf_d(FPURegister fd, FPURegister fs, uint16_t cc);
+ void movn_s(FPURegister fd, FPURegister fs, Register rt);
+ void movn_d(FPURegister fd, FPURegister fs, Register rt);
// Bit twiddling.
void clz(Register rd, Register rs);
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
void abs_s(FPURegister fd, FPURegister fs);
void abs_d(FPURegister fd, FPURegister fs);
void mov_d(FPURegister fd, FPURegister fs);
+ void mov_s(FPURegister fd, FPURegister fs);
void neg_s(FPURegister fd, FPURegister fs);
void neg_d(FPURegister fd, FPURegister fs);
void sqrt_s(FPURegister fd, FPURegister fs);
void sqrt_d(FPURegister fd, FPURegister fs);
+ void rsqrt_s(FPURegister fd, FPURegister fs);
+ void rsqrt_d(FPURegister fd, FPURegister fs);
+ void recip_d(FPURegister fd, FPURegister fs);
+ void recip_s(FPURegister fd, FPURegister fs);
// Conversion.
void cvt_w_s(FPURegister fd, FPURegister fs);
L = ((2 << 3) + 5) << 21,
PS = ((2 << 3) + 6) << 21,
// COP1 Encoding of Function Field When rs=S.
+ ADD_S = ((0 << 3) + 0),
+ SUB_S = ((0 << 3) + 1),
+ MUL_S = ((0 << 3) + 2),
+ DIV_S = ((0 << 3) + 3),
+ ABS_S = ((0 << 3) + 5),
+ SQRT_S = ((0 << 3) + 4),
+ MOV_S = ((0 << 3) + 6),
+ NEG_S = ((0 << 3) + 7),
ROUND_L_S = ((1 << 3) + 0),
TRUNC_L_S = ((1 << 3) + 1),
CEIL_L_S = ((1 << 3) + 2),
TRUNC_W_S = ((1 << 3) + 5),
CEIL_W_S = ((1 << 3) + 6),
FLOOR_W_S = ((1 << 3) + 7),
+ RECIP_S = ((2 << 3) + 5),
+ RSQRT_S = ((2 << 3) + 6),
CVT_D_S = ((4 << 3) + 1),
CVT_W_S = ((4 << 3) + 4),
CVT_L_S = ((4 << 3) + 5),
TRUNC_W_D = ((1 << 3) + 5),
CEIL_W_D = ((1 << 3) + 6),
FLOOR_W_D = ((1 << 3) + 7),
- MIN = ((3 << 3) + 4),
- MINA = ((3 << 3) + 5),
- MAX = ((3 << 3) + 6),
- MAXA = ((3 << 3) + 7),
+ RECIP_D = ((2 << 3) + 5),
+ RSQRT_D = ((2 << 3) + 6),
CVT_S_D = ((4 << 3) + 0),
CVT_W_D = ((4 << 3) + 4),
CVT_L_D = ((4 << 3) + 5),
CMP_SUGT = ((3 << 3) + 6), // Reserved, not implemented.
CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented.
+ MIN = ((3 << 3) + 4),
+ MINA = ((3 << 3) + 5),
+ MAX = ((3 << 3) + 6),
+ MAXA = ((3 << 3) + 7),
SEL = ((2 << 3) + 0),
+ MOVZ_C = ((2 << 3) + 2),
+ MOVN_C = ((2 << 3) + 3),
SELEQZ_C = ((2 << 3) + 4), // COP1 on FPR registers.
+ MOVF = ((2 << 3) + 1), // Function field for MOVT.fmt and MOVF.fmt
SELNEZ_C = ((2 << 3) + 7), // COP1 on FPR registers.
// COP1 Encoding of Function Field When rs=PS.
// COP1X Encoding of Function Field.
case SELNEZ_C:
Format(instr, "selnez.'t 'fd, 'fs, 'ft");
break;
+ case MOVZ_C:
+ Format(instr, "movz.'t 'fd, 'fs, 'rt");
+ break;
+ case MOVN_C:
+ Format(instr, "movn.'t 'fd, 'fs, 'rt");
+ break;
+ case MOVF:
+ if (instr->Bit(16)) {
+ Format(instr, "movt.'t 'fd, 'fs, 'Cc");
+ } else {
+ Format(instr, "movf.'t 'fd, 'fs, 'Cc");
+ }
+ break;
case ADD_D:
Format(instr, "add.'t 'fd, 'fs, 'ft");
break;
case SQRT_D:
Format(instr, "sqrt.'t 'fd, 'fs");
break;
+ case RECIP_D:
+ Format(instr, "recip.'t 'fd, 'fs");
+ break;
+ case RSQRT_D:
+ Format(instr, "rsqrt.'t 'fd, 'fs");
+ break;
case CVT_W_D:
Format(instr, "cvt.w.'t 'fd, 'fs");
break;
case ROUND_W_D:
Format(instr, "round.w.'t 'fd, 'fs");
break;
+ case ROUND_L_D:
+ Format(instr, "round.l.'t 'fd, 'fs");
+ break;
case FLOOR_W_D:
Format(instr, "floor.w.'t 'fd, 'fs");
break;
+ case FLOOR_L_D:
+ Format(instr, "floor.l.'t 'fd, 'fs");
+ break;
case CEIL_W_D:
Format(instr, "ceil.w.'t 'fd, 'fs");
break;
+ case CEIL_L_D:
+ Format(instr, "ceil.l.'t 'fd, 'fs");
+ break;
case CVT_S_D:
Format(instr, "cvt.s.'t 'fd, 'fs");
break;
}
-// Sets the rounding error codes in FCSR based on the result of the rounding.
-// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round_error(double original, double rounded) {
bool ret = false;
double max_int32 = std::numeric_limits<int32_t>::max();
ret = true;
}
- if (rounded > max_int32 || rounded < min_int32) {
+ if (rounded >= max_int32 || rounded <= min_int32) {
+ set_fcsr_bit(kFCSROverflowFlagBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round64_error(double original, double rounded) {
+  bool ret = false;
+  // NOTE(review): max_int64 is not exactly representable as a double (it
+  // converts to 2^63), so the >=/<= range checks below are slightly
+  // conservative at the extreme — confirm this matches hardware behavior.
+  double max_int64 = std::numeric_limits<int64_t>::max();
+  double min_int64 = std::numeric_limits<int64_t>::min();
+
+  // NaN or infinity in input or result: invalid operation.
+  if (!std::isfinite(original) || !std::isfinite(rounded)) {
+    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+    ret = true;
+  }
+
+  // Any change due to rounding raises the inexact flag (not an error).
+  if (original != rounded) {
+    set_fcsr_bit(kFCSRInexactFlagBit, true);
+  }
+
+  // Nonzero result with magnitude below the smallest normal double.
+  if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+    set_fcsr_bit(kFCSRUnderflowFlagBit, true);
+    ret = true;
+  }
+
+  // Result outside the int64 range: overflow (and invalid, see below).
+  if (rounded >= max_int64 || rounded <= min_int64) {
+    set_fcsr_bit(kFCSROverflowFlagBit, true);
+    // The reference is not really clear but it seems this is required:
+    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+    ret = true;
+  }
+
+  return ret;
+}
+
+
+// Float overload: sets the rounding error codes in FCSR after rounding a
+// single-precision value toward int32. Returns true if the operation was
+// invalid (mirrors the double overload above in structure).
+bool Simulator::set_fcsr_round_error(float original, float rounded) {
+  bool ret = false;
+  // int32 bounds held as doubles so the float arguments promote cleanly.
+  double max_int32 = std::numeric_limits<int32_t>::max();
+  double min_int32 = std::numeric_limits<int32_t>::min();
+
+  // NaN or infinity in input or result: invalid operation.
+  if (!std::isfinite(original) || !std::isfinite(rounded)) {
+    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+    ret = true;
+  }
+
+  // Any change due to rounding raises the inexact flag (not an error).
+  if (original != rounded) {
+    set_fcsr_bit(kFCSRInexactFlagBit, true);
+  }
+
+  // Nonzero result with magnitude below the smallest normal float.
+  if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) {
+    set_fcsr_bit(kFCSRUnderflowFlagBit, true);
+    ret = true;
+  }
+
+  // Result outside the int32 range: overflow (and invalid, see below).
+  if (rounded >= max_int32 || rounded <= min_int32) {
+    set_fcsr_bit(kFCSROverflowFlagBit, true);
+    // The reference is not really clear but it seems this is required:
+    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+    ret = true;
+  }
+
+  return ret;
+}
+
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round64_error(float original, float rounded) {
+ bool ret = false;
+ double max_int64 = std::numeric_limits<int64_t>::max();
+ double min_int64 = std::numeric_limits<int64_t>::min();
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactFlagBit, true);
+ }
+
+ if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowFlagBit, true);
+ ret = true;
+ }
+
+ if (rounded >= max_int64 || rounded <= min_int64) {
set_fcsr_bit(kFCSROverflowFlagBit, true);
// The reference is not really clear but it seems this is required:
set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
uint32_t cc, fcsr_cc;
int64_t i64;
fs = get_fpu_register_double(fs_reg);
- ft = get_fpu_register_double(ft_reg);
+ if (instr->FunctionFieldRaw() != MOVF) {
+ ft = get_fpu_register_double(ft_reg);
+ }
+ fd = get_fpu_register_double(fd_reg);
int64_t ft_int = bit_cast<int64_t>(ft);
int64_t fd_int = bit_cast<int64_t>(fd);
cc = instr->FCccValue();
DCHECK(IsMipsArchVariant(kMips32r6));
set_fpu_register_double(fd_reg, (ft_int & 0x1) != 0 ? fs : 0.0);
break;
+ case MOVZ_C: {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ int32_t rt_reg = instr->RtValue();
+ int32_t rt = get_register(rt_reg);
+ if (rt == 0) {
+ set_fpu_register_double(fd_reg, fs);
+ }
+ break;
+ }
+ case MOVN_C: {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ int32_t rt_reg = instr->RtValue();
+ int32_t rt = get_register(rt_reg);
+ if (rt != 0) {
+ set_fpu_register_double(fd_reg, fs);
+ }
+ break;
+ }
+ case MOVF: {
+ // Same function field for MOVT.D and MOVF.D
+ uint32_t ft_cc = (ft_reg >> 2) & 0x7;
+ ft_cc = get_fcsr_condition_bit(ft_cc);
+ if (instr->Bit(16)) { // Read Tf bit.
+ // MOVT.D
+ if (test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg, fs);
+ } else {
+ // MOVF.D
+ if (!test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg, fs);
+ }
+ break;
+ }
case MIN:
DCHECK(IsMipsArchVariant(kMips32r6));
fs = get_fpu_register_double(fs_reg);
set_fpu_register_double(fd_reg, (fs >= ft) ? ft : fs);
}
break;
+ case MINA:
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ fs = get_fpu_register_double(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, fs);
+ } else {
+ double result;
+ if (fabs(fs) > fabs(ft)) {
+ result = ft;
+ } else if (fabs(fs) < fabs(ft)) {
+ result = fs;
+ } else {
+ result = (fs > ft ? fs : ft);
+ }
+ set_fpu_register_double(fd_reg, result);
+ }
+ break;
+ case MAXA:
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ fs = get_fpu_register_double(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, fs);
+ } else {
+ double result;
+ if (fabs(fs) < fabs(ft)) {
+ result = ft;
+ } else if (fabs(fs) > fabs(ft)) {
+ result = fs;
+ } else {
+ result = (fs > ft ? fs : ft);
+ }
+ set_fpu_register_double(fd_reg, result);
+ }
+ break;
case MAX:
DCHECK(IsMipsArchVariant(kMips32r6));
fs = get_fpu_register_double(fs_reg);
case SQRT_D:
set_fpu_register_double(fd_reg, fast_sqrt(fs));
break;
+ case RSQRT_D: {
+ double result = 1.0 / fast_sqrt(fs);
+ set_fpu_register_double(fd_reg, result);
+ break;
+ }
+ case RECIP_D: {
+ double result = 1.0 / fs;
+ set_fpu_register_double(fd_reg, result);
+ break;
+ }
case C_UN_D:
set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
break;
if (IsFp64Mode()) {
set_fpu_register(fd_reg, i64);
} else {
- set_fpu_register_word(fd_reg, i64 & 0xffffffff);
- set_fpu_register_word(fd_reg + 1, i64 >> 32);
+ UNSUPPORTED();
}
break;
}
case TRUNC_L_D: { // Mips32r2 instruction.
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
double rounded = trunc(fs);
i64 = static_cast<int64_t>(rounded);
if (IsFp64Mode()) {
set_fpu_register(fd_reg, i64);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
} else {
- set_fpu_register_word(fd_reg, i64 & 0xffffffff);
- set_fpu_register_word(fd_reg + 1, i64 >> 32);
+ UNSUPPORTED();
}
break;
}
case ROUND_L_D: { // Mips32r2 instruction.
- double rounded = fs > 0 ? std::floor(fs + 0.5) : std::ceil(fs - 0.5);
- i64 = static_cast<int64_t>(rounded);
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ double rounded = std::floor(fs + 0.5);
+ int64_t result = static_cast<int64_t>(rounded);
+ if ((result & 1) != 0 && result - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ int64_t i64 = static_cast<int64_t>(result);
if (IsFp64Mode()) {
set_fpu_register(fd_reg, i64);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
} else {
- set_fpu_register_word(fd_reg, i64 & 0xffffffff);
- set_fpu_register_word(fd_reg + 1, i64 >> 32);
+ UNSUPPORTED();
}
break;
}
- case FLOOR_L_D: // Mips32r2 instruction.
- i64 = static_cast<int64_t>(std::floor(fs));
+ case FLOOR_L_D: { // Mips32r2 instruction.
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ double rounded = std::floor(fs);
+ int64_t i64 = static_cast<int64_t>(rounded);
if (IsFp64Mode()) {
set_fpu_register(fd_reg, i64);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
} else {
- set_fpu_register_word(fd_reg, i64 & 0xffffffff);
- set_fpu_register_word(fd_reg + 1, i64 >> 32);
+ UNSUPPORTED();
}
break;
- case CEIL_L_D: // Mips32r2 instruction.
- i64 = static_cast<int64_t>(std::ceil(fs));
+ }
+ case CEIL_L_D: { // Mips32r2 instruction.
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ double rounded = std::ceil(fs);
+ int64_t i64 = static_cast<int64_t>(rounded);
if (IsFp64Mode()) {
set_fpu_register(fd_reg, i64);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
} else {
- set_fpu_register_word(fd_reg, i64 & 0xffffffff);
- set_fpu_register_word(fd_reg + 1, i64 >> 32);
+ UNSUPPORTED();
}
break;
+ }
case C_F_D:
UNIMPLEMENTED_MIPS();
break;
const int32_t& ft_reg,
const int32_t& fs_reg,
const int32_t& fd_reg) {
- float fs, ft;
+ float fs, ft, fd;
fs = get_fpu_register_float(fs_reg);
ft = get_fpu_register_float(ft_reg);
- int64_t ft_int = static_cast<int64_t>(get_fpu_register_double(ft_reg));
+ fd = get_fpu_register_float(fd_reg);
+ int32_t ft_int = bit_cast<int32_t>(ft);
+ int32_t fd_int = bit_cast<int32_t>(fd);
uint32_t cc, fcsr_cc;
cc = instr->FCccValue();
fcsr_cc = get_fcsr_condition_bit(cc);
switch (instr->FunctionFieldRaw()) {
- case ADD_D:
+    case RINT: {
+      DCHECK(IsMipsArchVariant(kMips32r6));
+      // Round fs to an integral value in floating-point format, honouring
+      // the current FCSR rounding mode; raises only the inexact flag.
+      // NOTE(review): the mode switch has no default case, so 'result' would
+      // be read uninitialized on an unexpected mode value — confirm the enum
+      // is exhaustive here.
+      float result, temp_result;
+      double temp;
+      float upper = std::ceil(fs);
+      float lower = std::floor(fs);
+      switch (get_fcsr_rounding_mode()) {
+        case kRoundToNearest:
+          // Ties (fs exactly halfway between neighbours) round to the even
+          // one: pick 'upper' only when upper/2 has no fractional part.
+          if (upper - fs < fs - lower) {
+            result = upper;
+          } else if (upper - fs > fs - lower) {
+            result = lower;
+          } else {
+            temp_result = upper / 2;
+            // NOTE(review): 'reminder' should read 'remainder'; also this
+            // resolves to the double modf() overload ('temp' is a double),
+            // so the value round-trips float->double->float — confirm that
+            // is intended.
+            float reminder = modf(temp_result, &temp);
+            if (reminder == 0) {
+              result = upper;
+            } else {
+              result = lower;
+            }
+          }
+          break;
+        case kRoundToZero:
+          result = (fs > 0 ? lower : upper);
+          break;
+        case kRoundToPlusInf:
+          result = upper;
+          break;
+        case kRoundToMinusInf:
+          result = lower;
+          break;
+      }
+      set_fpu_register_float(fd_reg, result);
+      if (result != fs) {
+        set_fcsr_bit(kFCSRInexactFlagBit, true);
+      }
+      break;
+    }
+ case ADD_S:
set_fpu_register_float(fd_reg, fs + ft);
break;
- case SUB_D:
+ case SUB_S:
set_fpu_register_float(fd_reg, fs - ft);
break;
- case MUL_D:
+ case MUL_S:
set_fpu_register_float(fd_reg, fs * ft);
break;
- case DIV_D:
+ case DIV_S:
set_fpu_register_float(fd_reg, fs / ft);
break;
- case ABS_D:
+ case ABS_S:
set_fpu_register_float(fd_reg, fabs(fs));
break;
- case MOV_D:
+ case MOV_S:
set_fpu_register_float(fd_reg, fs);
break;
- case NEG_D:
+ case NEG_S:
set_fpu_register_float(fd_reg, -fs);
break;
- case SQRT_D:
+ case SQRT_S:
set_fpu_register_float(fd_reg, fast_sqrt(fs));
break;
+    case RSQRT_S: {
+      // Reciprocal square root, single precision.
+      // NOTE(review): the 1.0 literal makes this a double computation that
+      // is then narrowed to float — confirm 1.0f is not required to match
+      // the hardware's single-precision result.
+      float result = 1.0 / fast_sqrt(fs);
+      set_fpu_register_float(fd_reg, result);
+      break;
+    }
+    case RECIP_S: {
+      // Reciprocal, single precision; same double-rounding concern as
+      // RSQRT_S directly above.
+      float result = 1.0 / fs;
+      set_fpu_register_float(fd_reg, result);
+      break;
+    }
case C_UN_D:
set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
break;
case CVT_D_S:
set_fpu_register_double(fd_reg, static_cast<double>(fs));
break;
+ case SEL:
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ set_fpu_register_float(fd_reg, (fd_int & 0x1) == 0 ? fs : ft);
+ break;
case SELEQZ_C:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_double(
- fd_reg, (ft_int & 0x1) == 0 ? get_fpu_register_double(fs_reg) : 0.0);
+ set_fpu_register_float(
+ fd_reg, (ft_int & 0x1) == 0 ? get_fpu_register_float(fs_reg) : 0.0);
break;
case SELNEZ_C:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_double(
- fd_reg, (ft_int & 0x1) != 0 ? get_fpu_register_double(fs_reg) : 0.0);
+ set_fpu_register_float(
+ fd_reg, (ft_int & 0x1) != 0 ? get_fpu_register_float(fs_reg) : 0.0);
+ break;
+ case MOVZ_C: {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ int32_t rt_reg = instr->RtValue();
+ int32_t rt = get_register(rt_reg);
+ if (rt == 0) {
+ set_fpu_register_float(fd_reg, fs);
+ }
+ break;
+ }
+ case MOVN_C: {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ int32_t rt_reg = instr->RtValue();
+ int32_t rt = get_register(rt_reg);
+ if (rt != 0) {
+ set_fpu_register_float(fd_reg, fs);
+ }
+ break;
+ }
+ case MOVF: {
+ // Same function field for MOVT.D and MOVF.D
+ uint32_t ft_cc = (ft_reg >> 2) & 0x7;
+ ft_cc = get_fcsr_condition_bit(ft_cc);
+
+ if (instr->Bit(16)) { // Read Tf bit.
+ // MOVT.D
+ if (test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg, fs);
+ } else {
+ // MOVF.D
+ if (!test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg, fs);
+ }
+ break;
+ }
+ case TRUNC_W_S: { // Truncate single to word (round towards 0).
+ float rounded = trunc(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ } break;
+ case TRUNC_L_S: { // Mips32r2 instruction.
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ float rounded = trunc(fs);
+ int64_t i64 = static_cast<int64_t>(rounded);
+ if (IsFp64Mode()) {
+ set_fpu_register(fd_reg, i64);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ case FLOOR_W_S: // Round double to word towards negative infinity.
+ {
+ float rounded = std::floor(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ } break;
+ case FLOOR_L_S: { // Mips32r2 instruction.
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ float rounded = std::floor(fs);
+ int64_t i64 = static_cast<int64_t>(rounded);
+ if (IsFp64Mode()) {
+ set_fpu_register(fd_reg, i64);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ case ROUND_W_S: {
+ float rounded = std::floor(fs + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ROUND_L_S: { // Mips32r2 instruction.
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ float rounded = std::floor(fs + 0.5);
+ int64_t result = static_cast<int64_t>(rounded);
+ if ((result & 1) != 0 && result - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ int64_t i64 = static_cast<int64_t>(result);
+ if (IsFp64Mode()) {
+ set_fpu_register(fd_reg, i64);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ case CEIL_W_S: // Round double to word towards positive infinity.
+ {
+ float rounded = std::ceil(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ } break;
+ case CEIL_L_S: { // Mips32r2 instruction.
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ float rounded = std::ceil(fs);
+ int64_t i64 = static_cast<int64_t>(rounded);
+ if (IsFp64Mode()) {
+ set_fpu_register(fd_reg, i64);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ case MIN:
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ fs = get_fpu_register_float(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else {
+ set_fpu_register_float(fd_reg, (fs >= ft) ? ft : fs);
+ }
+ break;
+ case MAX:
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ fs = get_fpu_register_float(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else {
+ set_fpu_register_float(fd_reg, (fs <= ft) ? ft : fs);
+ }
+ break;
+ case MINA:
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ fs = get_fpu_register_float(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else {
+ float result;
+ if (fabs(fs) > fabs(ft)) {
+ result = ft;
+ } else if (fabs(fs) < fabs(ft)) {
+ result = fs;
+ } else {
+ result = (fs > ft ? fs : ft);
+ }
+ set_fpu_register_float(fd_reg, result);
+ }
+ break;
+ case MAXA:
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ fs = get_fpu_register_float(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else {
+ float result;
+ if (fabs(fs) < fabs(ft)) {
+ result = ft;
+ } else if (fabs(fs) > fabs(ft)) {
+ result = fs;
+ } else {
+ result = (fs > ft ? fs : ft);
+ }
+ set_fpu_register_float(fd_reg, result);
+ }
break;
default:
- // CVT_W_S CVT_L_S TRUNC_W_S ROUND_W_S ROUND_L_S FLOOR_W_S FLOOR_L_S
+ // CVT_W_S CVT_L_S ROUND_W_S ROUND_L_S FLOOR_W_S FLOOR_L_S
// CEIL_W_S CEIL_L_S CVT_PS_S are unimplemented.
UNREACHABLE();
}
void set_fcsr_rounding_mode(FPURoundingMode mode);
unsigned int get_fcsr_rounding_mode();
bool set_fcsr_round_error(double original, double rounded);
+ bool set_fcsr_round_error(float original, float rounded);
+ bool set_fcsr_round64_error(double original, double rounded);
+ bool set_fcsr_round64_error(float original, float rounded);
void round_according_to_fcsr(double toRound, double& rounded,
int32_t& rounded_int, double fs);
// Special case of set_register and get_register to access the raw PC value.
}
-void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
- FPURegister ft) {
- DCHECK(kArchVariant == kMips64r6);
- DCHECK((fmt == D) || (fmt == S));
-
- Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
- fs.code() << kFsShift | fd.code() << kFdShift | SEL;
- emit(instr);
-}
-
-
-void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
- FPURegister ft) {
- DCHECK(kArchVariant == kMips64r6);
- DCHECK((fmt == D) || (fmt == S));
- GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
-}
-
-
-void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
- FPURegister ft) {
- DCHECK(kArchVariant == kMips64r6);
- DCHECK((fmt == D) || (fmt == S));
- GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
-}
-
-
// GPR.
void Assembler::seleqz(Register rd, Register rs, Register rt) {
DCHECK(kArchVariant == kMips64r6);
}
-// FPR.
-void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
- FPURegister ft) {
- DCHECK((fmt == D) || (fmt == S));
- GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
-}
-
-
// GPR.
void Assembler::selnez(Register rd, Register rs, Register rt) {
DCHECK(kArchVariant == kMips64r6);
}
-// FPR.
-void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
- FPURegister ft) {
- DCHECK(kArchVariant == kMips64r6);
- DCHECK((fmt == D) || (fmt == S));
- GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
-}
-
-
// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
if (kArchVariant != kMips64r6) {
}
+// SEL.fmt fd, fs, ft (MIPS64r6 only): selects between fs and ft based on
+// bit 0 of fd (fd.bit0 == 0 picks fs, otherwise ft — see simulator SEL case).
+void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK((fmt == D) || (fmt == S));
+
+ GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
+}
+
+
+// Single-precision convenience wrapper for sel.
+void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
+ sel(S, fd, fs, ft);
+}
+
+
+// Double-precision convenience wrapper for sel.
+void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ sel(D, fd, fs, ft);
+}
+
+
+// MAX.fmt fd, fs, ft (MIPS64r6 only): fd <- larger of fs, ft. With exactly
+// one NaN operand the non-NaN operand is returned (see simulator MAX case).
+void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
+}
+
+
+// MIN.fmt fd, fs, ft (MIPS64r6 only): fd <- smaller of fs, ft. NaN handling
+// mirrors MAX: a single NaN input yields the non-NaN operand.
+void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
+}
+
+
+// FPR.
+// SELEQZ.fmt fd, fs, ft (MIPS64r6 only): fd <- fs when bit 0 of ft is zero,
+// else fd <- 0 (see simulator SELEQZ_C case).
+void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
+ // Arch guard was missing here; keep it consistent with selnez and the GPR
+ // seleqz — SELEQZ.fmt is an r6-only encoding.
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
+}
+
+
+// Double-precision convenience wrapper for seleqz.
+void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ seleqz(D, fd, fs, ft);
+}
+
+
+// Single-precision convenience wrapper for seleqz.
+void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
+ seleqz(S, fd, fs, ft);
+}
+
+
+// Double-precision convenience wrapper for selnez.
+void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ selnez(D, fd, fs, ft);
+}
+
+
+// Single-precision convenience wrapper for selnez.
+void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
+ selnez(S, fd, fs, ft);
+}
+
+
+// MOVZ.S fd, fs, rt (r2 conditional FP move): fd <- fs if GPR rt == 0
+// (see simulator MOVZ_C case).
+void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
+ DCHECK(kArchVariant == kMips64r2);
+ GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
+}
+
+
+// MOVZ.D fd, fs, rt: double-precision variant of movz_s.
+void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
+ DCHECK(kArchVariant == kMips64r2);
+ GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
+}
+
+
+// MOVT.S fd, fs, cc (r2): fd <- fs if FP condition bit cc is set.
+// MOVT and MOVF share one function field (MOVF); the ft field carries the
+// condition code in its upper bits and the true/false selector in bit 0
+// (1 = movt) — the disassembler/simulator read this via instruction bit 16.
+void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
+ DCHECK(kArchVariant == kMips64r2);
+ FPURegister ft;
+ ft.code_ = (cc & 0x0007) << 2 | 1;
+ GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
+}
+
+
+// MOVT.D fd, fs, cc: double-precision variant of movt_s.
+void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
+ DCHECK(kArchVariant == kMips64r2);
+ FPURegister ft;
+ ft.code_ = (cc & 0x0007) << 2 | 1;
+ GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
+}
+
+
+// MOVF.S fd, fs, cc (r2): fd <- fs if FP condition bit cc is clear.
+// Same MOVF function field as movt_s; bit 0 of the ft field is 0 (= movf).
+void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
+ DCHECK(kArchVariant == kMips64r2);
+ FPURegister ft;
+ ft.code_ = (cc & 0x0007) << 2 | 0;
+ GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
+}
+
+
+// MOVF.D fd, fs, cc: double-precision variant of movf_s.
+void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
+ DCHECK(kArchVariant == kMips64r2);
+ FPURegister ft;
+ ft.code_ = (cc & 0x0007) << 2 | 0;
+ GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
+}
+
+
+// MOVN.S fd, fs, rt (r2 conditional FP move): fd <- fs if GPR rt != 0
+// (see simulator MOVN_C case).
+void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
+ DCHECK(kArchVariant == kMips64r2);
+ GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
+}
+
+
+// MOVN.D fd, fs, rt: double-precision variant of movn_s.
+void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
+ DCHECK(kArchVariant == kMips64r2);
+ GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
+}
+
+
+// FPR.
+// SELNEZ.fmt fd, fs, ft (MIPS64r6 only): fd <- fs when bit 0 of ft is
+// non-zero, else fd <- 0 (see simulator SELNEZ_C case).
+void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
+}
+
+
// Arithmetic.
void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
}
+// MOV.S fd, fs: copies a single-precision FPU register.
+void Assembler::mov_s(FPURegister fd, FPURegister fs) {
+ // Use the S-format function-field constant; MOV_S and MOV_D encode the
+ // same value ((0 << 3) + 6), but MOV_D in an S-format instruction is
+ // misleading.
+ GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
+}
+
+
void Assembler::neg_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, NEG_D);
}
}
-// Conversions.
+// RSQRT.S fd, fs: fd <- 1 / sqrt(fs) (see simulator RSQRT_S case).
+void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
+}
+
+// RSQRT.D fd, fs: double-precision reciprocal square root.
+void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
+}
+
+
+// RECIP.D fd, fs: fd <- 1 / fs (see simulator RECIP_D case).
+void Assembler::recip_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
+}
+
+
+// RECIP.S fd, fs: single-precision reciprocal.
+void Assembler::recip_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
+}
+
+
+// Conversions.
void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}
void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
DCHECK(kArchVariant == kMips64r6);
- GenInstrRegister(COP1, D, f0, fs, fd, RINT);
+ GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
}
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
}
-void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft,
- FPURegister fs) {
+void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
DCHECK(kArchVariant == kMips64r6);
DCHECK((fmt == D) || (fmt == S));
GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
}
-void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft,
- FPURegister fs) {
+void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
DCHECK(kArchVariant == kMips64r6);
DCHECK((fmt == D) || (fmt == S));
GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
FPURegister fs, FPURegister ft, uint16_t cc) {
DCHECK(kArchVariant != kMips64r6);
DCHECK(is_uint3(cc));
+ DCHECK(fmt == S || fmt == D);
DCHECK((fmt & ~(31 << kRsShift)) == 0);
Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift
| cc << 8 | 3 << 4 | cond;
void movf(Register rd, Register rs, uint16_t cc = 0);
void sel(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
+ void sel_s(FPURegister fd, FPURegister fs, FPURegister ft);
+ void sel_d(FPURegister fd, FPURegister fs, FPURegister ft);
void seleqz(Register rd, Register rs, Register rt);
void seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft);
void selnez(Register rs, Register rt, Register rd);
void selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft);
+ void seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft);
+ void selnez_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void selnez_s(FPURegister fd, FPURegister fs, FPURegister ft);
+
+ void movz_s(FPURegister fd, FPURegister fs, Register rt);
+ void movz_d(FPURegister fd, FPURegister fs, Register rt);
+ void movt_s(FPURegister fd, FPURegister fs, uint16_t cc);
+ void movt_d(FPURegister fd, FPURegister fs, uint16_t cc);
+ void movf_s(FPURegister fd, FPURegister fs, uint16_t cc);
+ void movf_d(FPURegister fd, FPURegister fs, uint16_t cc);
+ void movn_s(FPURegister fd, FPURegister fs, Register rt);
+ void movn_d(FPURegister fd, FPURegister fs, Register rt);
// Bit twiddling.
void clz(Register rd, Register rs);
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
void abs_s(FPURegister fd, FPURegister fs);
void abs_d(FPURegister fd, FPURegister fs);
void mov_d(FPURegister fd, FPURegister fs);
+ void mov_s(FPURegister fd, FPURegister fs);
void neg_s(FPURegister fd, FPURegister fs);
void neg_d(FPURegister fd, FPURegister fs);
void sqrt_s(FPURegister fd, FPURegister fs);
void sqrt_d(FPURegister fd, FPURegister fs);
+ void rsqrt_s(FPURegister fd, FPURegister fs);
+ void rsqrt_d(FPURegister fd, FPURegister fs);
+ void recip_d(FPURegister fd, FPURegister fs);
+ void recip_s(FPURegister fd, FPURegister fs);
// Conversion.
void cvt_w_s(FPURegister fd, FPURegister fs);
L = ((2 << 3) + 5) << 21,
PS = ((2 << 3) + 6) << 21,
// COP1 Encoding of Function Field When rs=S.
+ ADD_S = ((0 << 3) + 0),
+ SUB_S = ((0 << 3) + 1),
+ MUL_S = ((0 << 3) + 2),
+ DIV_S = ((0 << 3) + 3),
+ ABS_S = ((0 << 3) + 5),
+ SQRT_S = ((0 << 3) + 4),
+ MOV_S = ((0 << 3) + 6),
+ NEG_S = ((0 << 3) + 7),
ROUND_L_S = ((1 << 3) + 0),
TRUNC_L_S = ((1 << 3) + 1),
CEIL_L_S = ((1 << 3) + 2),
TRUNC_W_S = ((1 << 3) + 5),
CEIL_W_S = ((1 << 3) + 6),
FLOOR_W_S = ((1 << 3) + 7),
+ RECIP_S = ((2 << 3) + 5),
+ RSQRT_S = ((2 << 3) + 6),
CVT_D_S = ((4 << 3) + 1),
CVT_W_S = ((4 << 3) + 4),
CVT_L_S = ((4 << 3) + 5),
TRUNC_W_D = ((1 << 3) + 5),
CEIL_W_D = ((1 << 3) + 6),
FLOOR_W_D = ((1 << 3) + 7),
- MIN = ((3 << 3) + 4),
- MINA = ((3 << 3) + 5),
- MAX = ((3 << 3) + 6),
- MAXA = ((3 << 3) + 7),
+ RECIP_D = ((2 << 3) + 5),
+ RSQRT_D = ((2 << 3) + 6),
CVT_S_D = ((4 << 3) + 0),
CVT_W_D = ((4 << 3) + 4),
CVT_L_D = ((4 << 3) + 5),
CMP_SUGT = ((3 << 3) + 6), // Reserved, not implemented.
CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented.
+ MIN = ((3 << 3) + 4),
+ MINA = ((3 << 3) + 5),
+ MAX = ((3 << 3) + 6),
+ MAXA = ((3 << 3) + 7),
SEL = ((2 << 3) + 0),
+ MOVF = ((2 << 3) + 1), // Function field for MOVT.fmt and MOVF.fmt
+ MOVZ_C = ((2 << 3) + 2), // COP1 on FPR registers.
+ MOVN_C = ((2 << 3) + 3), // COP1 on FPR registers.
SELEQZ_C = ((2 << 3) + 4), // COP1 on FPR registers.
SELNEZ_C = ((2 << 3) + 7), // COP1 on FPR registers.
case RINT:
Format(instr, "rint.'t 'fd, 'fs");
break;
+ case SEL:
+ Format(instr, "sel.'t 'fd, 'fs, 'ft");
+ break;
case SELEQZ_C:
Format(instr, "seleqz.'t 'fd, 'fs, 'ft");
break;
case SELNEZ_C:
Format(instr, "selnez.'t 'fd, 'fs, 'ft");
break;
+ case MOVZ_C:
+ Format(instr, "movz.'t 'fd, 'fs, 'rt");
+ break;
+ case MOVN_C:
+ Format(instr, "movn.'t 'fd, 'fs, 'rt");
+ break;
+ case MOVF:
+ if (instr->Bit(16)) {
+ Format(instr, "movt.'t 'fd, 'fs, 'Cc");
+ } else {
+ Format(instr, "movf.'t 'fd, 'fs, 'Cc");
+ }
+ break;
case MIN:
Format(instr, "min.'t 'fd, 'fs, 'ft");
break;
case SQRT_D:
Format(instr, "sqrt.'t 'fd, 'fs");
break;
+ case RECIP_D:
+ Format(instr, "recip.'t 'fd, 'fs");
+ break;
+ case RSQRT_D:
+ Format(instr, "rsqrt.'t 'fd, 'fs");
+ break;
case CVT_W_D:
Format(instr, "cvt.w.'t 'fd, 'fs");
break;
}
+// Sets the FCSR rounding-mode field to |mode| (masked with
+// kFPURoundingModeMask).
+void Simulator::set_fcsr_rounding_mode(FPURoundingMode mode) {
+ // Clear the old mode bits before setting the new ones. A plain |= can
+ // only set bits: once a non-zero mode (e.g. kRoundToZero) is written, the
+ // simulator could never return to kRoundToNearest (0), and switching
+ // between modes would OR their bit patterns together.
+ FCSR_ = (FCSR_ & ~kFPURoundingModeMask) | (mode & kFPURoundingModeMask);
+}
+
+
+// Returns the current FCSR rounding-mode field (FCSR_ masked with
+// kFPURoundingModeMask).
+unsigned int Simulator::get_fcsr_rounding_mode() {
+ return FCSR_ & kFPURoundingModeMask;
+}
+
+
// Sets the rounding error codes in FCSR based on the result of the rounding.
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round_error(double original, double rounded) {
ret = true;
}
- if (rounded > max_int32 || rounded < min_int32) {
+ if (rounded >= max_int32 || rounded <= min_int32) {
set_fcsr_bit(kFCSROverflowFlagBit, true);
// The reference is not really clear but it seems this is required:
set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
ret = true;
}
- if (rounded > max_int64 || rounded < min_int64) {
+ if (rounded >= max_int64 || rounded <= min_int64) {
+ set_fcsr_bit(kFCSROverflowFlagBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+
+// Sets the FCSR error flags for a float -> int32 conversion and returns
+// true when the conversion is invalid (non-finite input, or result outside
+// the int32 range). Float counterpart of the double overload above.
+bool Simulator::set_fcsr_round_error(float original, float rounded) {
+ bool ret = false;
+ // Bounds are held as double: float cannot represent INT32_MAX exactly, so
+ // the comparisons below are done in double precision. Presumably the
+ // >= / <= forms deliberately treat boundary values as overflow, since the
+ // nearest representable float rounds past the bound — matches the double
+ // overload.
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+
+ // NaN or infinity can never convert to an integer.
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ // Any change during rounding marks the result inexact.
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactFlagBit, true);
+ }
+
+ // Non-zero result with magnitude below the smallest normal float.
+ if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowFlagBit, true);
+ ret = true;
+ }
+
+ if (rounded >= max_int32 || rounded <= min_int32) {
+ set_fcsr_bit(kFCSROverflowFlagBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round64_error(float original, float rounded) {
+ bool ret = false;
+ double max_int64 = std::numeric_limits<int64_t>::max();
+ double min_int64 = std::numeric_limits<int64_t>::min();
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactFlagBit, true);
+ }
+
+ if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowFlagBit, true);
+ ret = true;
+ }
+
+ if (rounded >= max_int64 || rounded <= min_int64) {
set_fcsr_bit(kFCSROverflowFlagBit, true);
// The reference is not really clear but it seems this is required:
set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
const int32_t& fs_reg,
const int32_t& ft_reg,
const int32_t& fd_reg) {
- float fs, ft;
+ float fs, ft, fd;
fs = get_fpu_register_float(fs_reg);
ft = get_fpu_register_float(ft_reg);
+ fd = get_fpu_register_float(fd_reg);
+ int32_t ft_int = bit_cast<int32_t>(ft);
+ int32_t fd_int = bit_cast<int32_t>(fd);
uint32_t cc, fcsr_cc;
cc = instr->FCccValue();
fcsr_cc = get_fcsr_condition_bit(cc);
switch (instr->FunctionFieldRaw()) {
- case ADD_D:
+ case RINT: {
+ DCHECK(kArchVariant == kMips64r6);
+ float result, temp_result;
+ double temp;
+ float upper = std::ceil(fs);
+ float lower = std::floor(fs);
+ switch (get_fcsr_rounding_mode()) {
+ case kRoundToNearest:
+ if (upper - fs < fs - lower) {
+ result = upper;
+ } else if (upper - fs > fs - lower) {
+ result = lower;
+ } else {
+ temp_result = upper / 2;
+ float reminder = modf(temp_result, &temp);
+ if (reminder == 0) {
+ result = upper;
+ } else {
+ result = lower;
+ }
+ }
+ break;
+ case kRoundToZero:
+ result = (fs > 0 ? lower : upper);
+ break;
+ case kRoundToPlusInf:
+ result = upper;
+ break;
+ case kRoundToMinusInf:
+ result = lower;
+ break;
+ }
+ set_fpu_register_float(fd_reg, result);
+ if (result != fs) {
+ set_fcsr_bit(kFCSRInexactFlagBit, true);
+ }
+ break;
+ }
+ case ADD_S:
set_fpu_register_float(fd_reg, fs + ft);
break;
- case SUB_D:
+ case SUB_S:
set_fpu_register_float(fd_reg, fs - ft);
break;
- case MUL_D:
+ case MUL_S:
set_fpu_register_float(fd_reg, fs * ft);
break;
- case DIV_D:
+ case DIV_S:
set_fpu_register_float(fd_reg, fs / ft);
break;
- case ABS_D:
+ case ABS_S:
set_fpu_register_float(fd_reg, fabs(fs));
break;
- case MOV_D:
+ case MOV_S:
set_fpu_register_float(fd_reg, fs);
break;
- case NEG_D:
+ case NEG_S:
set_fpu_register_float(fd_reg, -fs);
break;
- case SQRT_D:
+ case SQRT_S:
set_fpu_register_float(fd_reg, fast_sqrt(fs));
break;
+ case RSQRT_S: {
+ float result = 1.0 / fast_sqrt(fs);
+ set_fpu_register_float(fd_reg, result);
+ break;
+ }
+ case RECIP_S: {
+ float result = 1.0 / fs;
+ set_fpu_register_float(fd_reg, result);
+ break;
+ }
case C_UN_D:
set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
break;
case CVT_D_S:
set_fpu_register_double(fd_reg, static_cast<double>(fs));
break;
+ case TRUNC_W_S: { // Truncate single to word (round towards 0).
+ float rounded = trunc(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ } break;
+ case TRUNC_L_S: { // Mips64r2 instruction.
+ float rounded = trunc(fs);
+ int64_t result = static_cast<int64_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ break;
+ }
+ case ROUND_W_S: {
+ float rounded = std::floor(fs + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ROUND_L_S: { // Mips64r2 instruction.
+ float rounded = std::floor(fs + 0.5);
+ int64_t result = static_cast<int64_t>(rounded);
+ if ((result & 1) != 0 && result - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ int64_t i64 = static_cast<int64_t>(result);
+ set_fpu_register(fd_reg, i64);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ break;
+ }
+ case FLOOR_L_S: { // Mips64r2 instruction.
+ float rounded = floor(fs);
+ int64_t result = static_cast<int64_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ break;
+ }
+ case FLOOR_W_S: // Round double to word towards negative infinity.
+ {
+ float rounded = std::floor(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ } break;
+ case CEIL_W_S: // Round double to word towards positive infinity.
+ {
+ float rounded = std::ceil(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPUInvalidResult);
+ }
+ } break;
+ case CEIL_L_S: { // Mips64r2 instruction.
+ float rounded = ceil(fs);
+ int64_t result = static_cast<int64_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ break;
+ }
+ case MINA:
+ DCHECK(kArchVariant == kMips64r6);
+ fs = get_fpu_register_float(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else {
+ float result;
+ if (fabs(fs) > fabs(ft)) {
+ result = ft;
+ } else if (fabs(fs) < fabs(ft)) {
+ result = fs;
+ } else {
+ result = (fs > ft ? fs : ft);
+ }
+ set_fpu_register_float(fd_reg, result);
+ }
+ break;
+ case MAXA:
+ DCHECK(kArchVariant == kMips64r6);
+ fs = get_fpu_register_float(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else {
+ float result;
+ if (fabs(fs) < fabs(ft)) {
+ result = ft;
+ } else if (fabs(fs) > fabs(ft)) {
+ result = fs;
+ } else {
+ result = (fs > ft ? fs : ft);
+ }
+ set_fpu_register_float(fd_reg, result);
+ }
+ break;
+ case MIN:
+ DCHECK(kArchVariant == kMips64r6);
+ fs = get_fpu_register_float(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else {
+ set_fpu_register_float(fd_reg, (fs >= ft) ? ft : fs);
+ }
+ break;
+ case MAX:
+ DCHECK(kArchVariant == kMips64r6);
+ fs = get_fpu_register_float(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else {
+ set_fpu_register_float(fd_reg, (fs <= ft) ? ft : fs);
+ }
+ break;
+ case SEL:
+ DCHECK(kArchVariant == kMips64r6);
+ set_fpu_register_float(fd_reg, (fd_int & 0x1) == 0 ? fs : ft);
+ break;
+ case SELEQZ_C:
+ DCHECK(kArchVariant == kMips64r6);
+ set_fpu_register_float(
+ fd_reg, (ft_int & 0x1) == 0 ? get_fpu_register_float(fs_reg) : 0.0);
+ break;
+ case SELNEZ_C:
+ DCHECK(kArchVariant == kMips64r6);
+ set_fpu_register_float(
+ fd_reg, (ft_int & 0x1) != 0 ? get_fpu_register_float(fs_reg) : 0.0);
+ break;
+ case MOVZ_C: {
+ DCHECK(kArchVariant == kMips64r2);
+ int32_t rt_reg = instr->RtValue();
+ int64_t rt = get_register(rt_reg);
+ if (rt == 0) {
+ set_fpu_register_float(fd_reg, fs);
+ }
+ break;
+ }
+ case MOVN_C: {
+ DCHECK(kArchVariant == kMips64r2);
+ int32_t rt_reg = instr->RtValue();
+ int64_t rt = get_register(rt_reg);
+ if (rt != 0) {
+ set_fpu_register_float(fd_reg, fs);
+ }
+ break;
+ }
+ case MOVF: {
+ // Same function field for MOVT.D and MOVF.D
+ uint32_t ft_cc = (ft_reg >> 2) & 0x7;
+ ft_cc = get_fcsr_condition_bit(ft_cc);
+
+ if (instr->Bit(16)) { // Read Tf bit.
+ // MOVT.D
+ if (test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg, fs);
+ } else {
+ // MOVF.D
+ if (!test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg, fs);
+ }
+ break;
+ }
default:
// CVT_W_S CVT_L_S TRUNC_W_S ROUND_W_S ROUND_L_S FLOOR_W_S FLOOR_L_S
// CEIL_W_S CEIL_L_S CVT_PS_S are unimplemented.
double ft, fs, fd;
uint32_t cc, fcsr_cc;
fs = get_fpu_register_double(fs_reg);
- ft = get_fpu_register_double(ft_reg);
+ if (instr->FunctionFieldRaw() != MOVF) {
+ ft = get_fpu_register_double(ft_reg);
+ }
fd = get_fpu_register_double(fd_reg);
cc = instr->FCccValue();
fcsr_cc = get_fcsr_condition_bit(cc);
double result, temp, temp_result;
double upper = std::ceil(fs);
double lower = std::floor(fs);
- switch (FCSR_ & 0x3) {
+ switch (get_fcsr_rounding_mode()) {
case kRoundToNearest:
if (upper - fs < fs - lower) {
result = upper;
DCHECK(kArchVariant == kMips64r6);
set_fpu_register_double(fd_reg, (ft_int & 0x1) != 0 ? fs : 0.0);
break;
+ case MOVZ_C: {
+ DCHECK(kArchVariant == kMips64r2);
+ int32_t rt_reg = instr->RtValue();
+ int64_t rt = get_register(rt_reg);
+ if (rt == 0) {
+ set_fpu_register_double(fd_reg, fs);
+ }
+ break;
+ }
+ case MOVN_C: {
+ DCHECK(kArchVariant == kMips64r2);
+ int32_t rt_reg = instr->RtValue();
+ int64_t rt = get_register(rt_reg);
+ if (rt != 0) {
+ set_fpu_register_double(fd_reg, fs);
+ }
+ break;
+ }
+ case MOVF: {
+ // Same function field for MOVT.D and MOVF.D
+ uint32_t ft_cc = (ft_reg >> 2) & 0x7;
+ ft_cc = get_fcsr_condition_bit(ft_cc);
+ if (instr->Bit(16)) { // Read Tf bit.
+ // MOVT.D
+ if (test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg, fs);
+ } else {
+ // MOVF.D
+ if (!test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg, fs);
+ }
+ break;
+ }
+ case MINA:
+ DCHECK(kArchVariant == kMips64r6);
+ fs = get_fpu_register_double(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, fs);
+ } else {
+ double result;
+ if (fabs(fs) > fabs(ft)) {
+ result = ft;
+ } else if (fabs(fs) < fabs(ft)) {
+ result = fs;
+ } else {
+ result = (fs > ft ? fs : ft);
+ }
+ set_fpu_register_double(fd_reg, result);
+ }
+ break;
+ case MAXA:
+ DCHECK(kArchVariant == kMips64r6);
+ fs = get_fpu_register_double(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, fs);
+ } else {
+ double result;
+ if (fabs(fs) < fabs(ft)) {
+ result = ft;
+ } else if (fabs(fs) > fabs(ft)) {
+ result = fs;
+ } else {
+ result = (fs > ft ? fs : ft);
+ }
+ set_fpu_register_double(fd_reg, result);
+ }
+ break;
case MIN:
DCHECK(kArchVariant == kMips64r6);
fs = get_fpu_register_double(fs_reg);
case SQRT_D:
set_fpu_register_double(fd_reg, fast_sqrt(fs));
break;
+ case RSQRT_D: {
+ double result = 1.0 / fast_sqrt(fs);
+ set_fpu_register_double(fd_reg, result);
+ break;
+ }
+ case RECIP_D: {
+ double result = 1.0 / fs;
+ set_fpu_register_double(fd_reg, result);
+ break;
+ }
case C_UN_D:
set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
break;
break;
}
case ROUND_L_D: { // Mips64r2 instruction.
- // check error cases
- double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
+ double rounded = std::floor(fs + 0.5);
int64_t result = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg, result);
+ if ((result & 1) != 0 && result - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ int64_t i64 = static_cast<int64_t>(result);
+ set_fpu_register(fd_reg, i64);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register(fd_reg, kFPU64InvalidResult);
}
bool test_fcsr_bit(uint32_t cc);
bool set_fcsr_round_error(double original, double rounded);
bool set_fcsr_round64_error(double original, double rounded);
+ bool set_fcsr_round_error(float original, float rounded);
+ bool set_fcsr_round64_error(float original, float rounded);
void round_according_to_fcsr(double toRound, double& rounded,
int32_t& rounded_int, double fs);
void round64_according_to_fcsr(double toRound, double& rounded,
int64_t& rounded_int, double fs);
-
+ void set_fcsr_rounding_mode(FPURoundingMode mode);
+ unsigned int get_fcsr_rounding_mode();
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int64_t value);
int64_t get_pc() const;
TEST(MIPS10) {
- // Test conversions between doubles and long integers.
- // Test hos the long ints map to FP regs pairs.
+ // Test conversions between doubles and words.
+ // Tests how a double maps to a pair of FP registers in fp32 mode
+ // and to a single FP register in fp64 mode.
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
if (!IsMipsArchVariant(kMips32r2)) return;
// Load all structure elements to registers.
+ // (f0, f1) = a (fp32), f0 = a (fp64)
__ ldc1(f0, MemOperand(a0, OFFSET_OF(T, a)));
- // Save the raw bits of the double.
- __ mfc1(t0, f0);
- __ mfc1(t1, f1);
- __ sw(t0, MemOperand(a0, OFFSET_OF(T, dbl_mant)));
- __ sw(t1, MemOperand(a0, OFFSET_OF(T, dbl_exp)));
+ if (IsFp64Mode()) {
+ __ mfc1(t0, f0); // t0 = f0(31..0)
+ __ mfhc1(t1, f0); // t1 = sign_extend(f0(63..32))
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, dbl_mant))); // dbl_mant = t0
+ __ sw(t1, MemOperand(a0, OFFSET_OF(T, dbl_exp))); // dbl_exp = t1
+ } else {
+ // Save the raw bits of the double.
+ __ mfc1(t0, f0); // t0 = a1
+ __ mfc1(t1, f1); // t1 = a2
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, dbl_mant))); // dbl_mant = t0
+ __ sw(t1, MemOperand(a0, OFFSET_OF(T, dbl_exp))); // dbl_exp = t1
+ }
- // Convert double in f0 to long, save hi/lo parts.
- __ cvt_w_d(f0, f0);
- __ mfc1(t0, f0); // f0 has a 32-bits word.
- __ sw(t0, MemOperand(a0, OFFSET_OF(T, word)));
+ // Convert double in f0 to word, save hi/lo parts.
+ __ cvt_w_d(f0, f0); // a_word = (word)a
+ __ mfc1(t0, f0); // f0 has a 32-bits word. t0 = a_word
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, word))); // word = a_word
- // Convert the b long integers to double b.
+ // Convert the b word to double b.
__ lw(t0, MemOperand(a0, OFFSET_OF(T, b_word)));
__ mtc1(t0, f8); // f8 has a 32-bits word.
__ cvt_d_w(f10, f8);
t.b_word = 0x0ff00ff0; // 0x0FF00FF0 -> 0x as double.
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
-
CHECK_EQ(static_cast<int32_t>(0x41DFFFFF), t.dbl_exp);
CHECK_EQ(static_cast<int32_t>(0xFF800000), t.dbl_mant);
CHECK_EQ(static_cast<int32_t>(0x7FFFFFFE), t.word);
}
-TEST(MIPS16) {
+// ----------------------mips32r6 specific tests----------------------
+TEST(seleqz_selnez) {
if (IsMipsArchVariant(kMips32r6)) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
double f;
double g;
double h;
+ float i;
+ float j;
+ float k;
+ float l;
} Test;
Test test;
// Floating point part of test.
__ ldc1(f0, MemOperand(a0, OFFSET_OF(Test, e)) ); // src
__ ldc1(f2, MemOperand(a0, OFFSET_OF(Test, f)) ); // test
- __ seleqz(D, f4, f0, f2);
- __ selnez(D, f6, f0, f2);
+ __ lwc1(f8, MemOperand(a0, OFFSET_OF(Test, i)) ); // src
+ __ lwc1(f10, MemOperand(a0, OFFSET_OF(Test, j)) ); // test
+ __ seleqz_d(f4, f0, f2);
+ __ selnez_d(f6, f0, f2);
+ __ seleqz_s(f12, f8, f10);
+ __ selnez_s(f14, f8, f10);
__ sdc1(f4, MemOperand(a0, OFFSET_OF(Test, g)) ); // src
__ sdc1(f6, MemOperand(a0, OFFSET_OF(Test, h)) ); // src
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(Test, k)) ); // src
+ __ swc1(f14, MemOperand(a0, OFFSET_OF(Test, l)) ); // src
__ jr(ra);
__ nop();
CodeDesc desc;
const int test_size = 3;
const int input_size = 5;
- double inputs[input_size] = {0.0, 65.2, -70.32,
+ double inputs_D[input_size] = {0.0, 65.2, -70.32,
18446744073709551621.0, -18446744073709551621.0};
- double outputs[input_size] = {0.0, 65.2, -70.32,
+ double outputs_D[input_size] = {0.0, 65.2, -70.32,
18446744073709551621.0, -18446744073709551621.0};
- double tests[test_size*2] = {2.8, 2.9, -2.8, -2.9,
+ double tests_D[test_size*2] = {2.8, 2.9, -2.8, -2.9,
18446744073709551616.0, 18446744073709555712.0};
+ float inputs_S[input_size] = {0.0, 65.2, -70.32,
+ 18446744073709551621.0, -18446744073709551621.0};
+ float outputs_S[input_size] = {0.0, 65.2, -70.32,
+ 18446744073709551621.0, -18446744073709551621.0};
+ float tests_S[test_size*2] = {2.9, 2.8, -2.9, -2.8,
+ 18446744073709551616.0, 18446746272732807168.0};
for (int j=0; j < test_size; j+=2) {
for (int i=0; i < input_size; i++) {
- test.e = inputs[i];
- test.f = tests[j];
+ test.e = inputs_D[i];
+ test.f = tests_D[j];
+ test.i = inputs_S[i];
+ test.j = tests_S[j];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.g, outputs[i]);
+ CHECK_EQ(test.g, outputs_D[i]);
CHECK_EQ(test.h, 0);
+ CHECK_EQ(test.k, outputs_S[i]);
+ CHECK_EQ(test.l, 0);
- test.f = tests[j+1];
+ test.f = tests_D[j+1];
+ test.j = tests_S[j+1];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
CHECK_EQ(test.g, 0);
- CHECK_EQ(test.h, outputs[i]);
+ CHECK_EQ(test.h, outputs_D[i]);
+ CHECK_EQ(test.k, 0);
+ CHECK_EQ(test.l, outputs_S[i]);
}
}
}
}
-TEST(MIPS17) {
+TEST(min_max) {
if (IsMipsArchVariant(kMips32r6)) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
double b;
double c;
double d;
+ float e;
+ float f;
+ float g;
+ float h;
} TestFloat;
TestFloat test;
+ const double dblNaN = std::numeric_limits<double>::quiet_NaN();
+ const float fltNaN = std::numeric_limits<float>::quiet_NaN();
+ const int tableLength = 5;
+ double inputsa[tableLength] = {2.0, 3.0, dblNaN, 3.0, dblNaN};
+ double inputsb[tableLength] = {3.0, 2.0, 3.0, dblNaN, dblNaN};
+ double outputsdmin[tableLength] = {2.0, 2.0, 3.0, 3.0, dblNaN};
+ double outputsdmax[tableLength] = {3.0, 3.0, 3.0, 3.0, dblNaN};
+
+ float inputse[tableLength] = {2.0, 3.0, fltNaN, 3.0, fltNaN};
+ float inputsf[tableLength] = {3.0, 2.0, 3.0, fltNaN, fltNaN};
+ float outputsfmin[tableLength] = {2.0, 2.0, 3.0, 3.0, fltNaN};
+ float outputsfmax[tableLength] = {3.0, 3.0, 3.0, 3.0, fltNaN};
__ ldc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, a)));
__ ldc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, b)));
+ __ lwc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, e)));
+ __ lwc1(f6, MemOperand(a0, OFFSET_OF(TestFloat, f)));
__ min_d(f10, f4, f8);
__ max_d(f12, f4, f8);
+ __ min_s(f14, f2, f6);
+ __ max_s(f16, f2, f6);
__ sdc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, c)));
__ sdc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, d)));
+ __ swc1(f14, MemOperand(a0, OFFSET_OF(TestFloat, g)));
+ __ swc1(f16, MemOperand(a0, OFFSET_OF(TestFloat, h)));
__ jr(ra);
__ nop();
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- test.a = 2.0; // a goes to fs
- test.b = 3.0; // b goes to ft
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, 2.0);
- CHECK_EQ(test.d, 3.0);
-
- test.a = 3.0; // a goes to fs
- test.b = 2.0; // b goes to ft
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, 2.0);
- CHECK_EQ(test.d, 3.0);
-
- test.a = std::numeric_limits<double>::quiet_NaN();
- test.b = 3.0; // b goes to ft
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, 3.0);
- CHECK_EQ(test.d, 3.0);
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputsa[i];
+ test.b = inputsb[i];
+ test.e = inputse[i];
+ test.f = inputsf[i];
- test.b = std::numeric_limits<double>::quiet_NaN();
- test.a = 3.0; // b goes to ft
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, 3.0);
- CHECK_EQ(test.d, 3.0);
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- test.a = std::numeric_limits<double>::quiet_NaN();
- test.b = std::numeric_limits<double>::quiet_NaN();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- DCHECK(std::isnan(test.c));
- DCHECK(std::isnan(test.d));
+ if (i < tableLength - 1) {
+ CHECK_EQ(test.c, outputsdmin[i]);
+ CHECK_EQ(test.d, outputsdmax[i]);
+ CHECK_EQ(test.g, outputsfmin[i]);
+ CHECK_EQ(test.h, outputsfmax[i]);
+ } else {
+ DCHECK(std::isnan(test.c));
+ DCHECK(std::isnan(test.d));
+ DCHECK(std::isnan(test.g));
+ DCHECK(std::isnan(test.h));
+ }
+ }
}
}
-TEST(MIPS18) {
+TEST(rint_d) {
if (IsMipsArchVariant(kMips32r6)) {
const int tableLength = 30;
CcTest::InitializeVM();
test.fcsr = fcsr_inputs[j];
for (int i = 0; i < tableLength; i++) {
test.a = inputs[i];
- std::cout << j << " " << i << "\n";
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs[j][i]);
}
}
-TEST(MIPS19) {
+// Tests the MIPS32r6 SEL.fmt instructions (sel_d / sel_s): the destination
+// register doubles as the condition ("test") operand; bit 0 of fd selects
+// between fs (src1, when bit 0 == 0) and ft (src2, when bit 0 == 1).
+TEST(sel) {
+  if (IsMipsArchVariant(kMips32r6)) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  MacroAssembler assm(isolate, NULL, 0);
+
+  typedef struct test {
+    double dd;
+    double ds;
+    double dt;
+    float fd;
+    float fs;
+    float ft;
+  } Test;
+
+  Test test;
+  // Load condition and both sources, select, and store the result back
+  // over the condition slot so the driver loop can inspect it.
+  __ ldc1(f0, MemOperand(a0, OFFSET_OF(Test, dd)) );  // test
+  __ ldc1(f2, MemOperand(a0, OFFSET_OF(Test, ds)) );  // src1
+  __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, dt)) );  // src2
+  __ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, fd)) );  // test
+  __ lwc1(f8, MemOperand(a0, OFFSET_OF(Test, fs)) );  // src1
+  __ lwc1(f10, MemOperand(a0, OFFSET_OF(Test, ft)) );  // src2
+  __ sel_d(f0, f2, f4);
+  __ sel_s(f6, f8, f10);
+  __ sdc1(f0, MemOperand(a0, OFFSET_OF(Test, dd)) );
+  __ swc1(f6, MemOperand(a0, OFFSET_OF(Test, fd)) );
+  __ jr(ra);
+  __ nop();
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+
+  const int test_size = 3;
+  const int input_size = 5;
+
+  // tests_D/tests_S come in (even, odd) pairs: the even entry has bit 0 of
+  // its significand clear (selects src1), the odd entry has it set
+  // (selects src2).
+  double inputs_dt[input_size] = {0.0, 65.2, -70.32,
+    18446744073709551621.0, -18446744073709551621.0};
+  double inputs_ds[input_size] = {0.1, 69.88, -91.325,
+    18446744073709551625.0, -18446744073709551625.0};
+  float inputs_ft[input_size] = {0.0, 65.2, -70.32,
+    18446744073709551621.0, -18446744073709551621.0};
+  float inputs_fs[input_size] = {0.1, 69.88, -91.325,
+    18446744073709551625.0, -18446744073709551625.0};
+  double tests_D[test_size*2] = {2.8, 2.9, -2.8, -2.9,
+    18446744073709551616.0, 18446744073709555712.0};
+  float tests_S[test_size*2] = {2.9, 2.8, -2.9, -2.8,
+    18446744073709551616.0, 18446746272732807168.0};
+  // NOTE(review): with `j < test_size` and `j += 2`, j takes only 0 and 2,
+  // so the last pair (indices 4 and 5) of tests_D/tests_S is never
+  // exercised; bound was presumably meant to be test_size * 2 — confirm.
+  for (int j=0; j < test_size; j+=2) {
+    for (int i=0; i < input_size; i++) {
+      test.dt = inputs_dt[i];
+      test.dd = tests_D[j];
+      test.ds = inputs_ds[i];
+      test.ft = inputs_ft[i];
+      test.fd = tests_S[j];
+      test.fs = inputs_fs[i];
+      (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+      CHECK_EQ(test.dd, inputs_ds[i]);
+      CHECK_EQ(test.fd, inputs_fs[i]);
+
+      // Odd pair entry: bit 0 set, so src2 must be selected.
+      test.dd = tests_D[j+1];
+      test.fd = tests_S[j+1];
+      (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+      CHECK_EQ(test.dd, inputs_dt[i]);
+      CHECK_EQ(test.fd, inputs_ft[i]);
+    }
+  }
+  }
+}
+
+
+// Tests the MIPS32r6 RINT.S instruction: rounds a single-precision value to
+// an integral value in the same format, honoring the current FCSR rounding
+// mode (RN/RZ/RP/RM are each driven through the fcsr field).
+TEST(rint_s) {
+  if (IsMipsArchVariant(kMips32r6)) {
+  const int tableLength = 30;
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  MacroAssembler assm(isolate, NULL, 0);
+
+  typedef struct test_float {
+    float a;
+    float b;
+    int fcsr;
+  }TestFloat;
+
+  TestFloat test;
+  // Inputs cover already-integral magnitudes, extreme exponents, ties
+  // (x.5), negatives, and values too large for float precision to matter.
+  float inputs[tableLength] = {18446744073709551617.0,
+    4503599627370496.0, -4503599627370496.0,
+    1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
+    1.7976931348623157E+38, 6.27463370218383111104242366943E-37,
+    309485009821345068724781056.89,
+    2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+    -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+    37778931862957161709568.0, 37778931862957161709569.0,
+    37778931862957161709580.0, 37778931862957161709581.0,
+    37778931862957161709582.0, 37778931862957161709583.0,
+    37778931862957161709584.0, 37778931862957161709585.0,
+    37778931862957161709586.0, 37778931862957161709587.0};
+  // Expected results, one table per rounding mode.
+  float outputs_RN[tableLength] = {18446744073709551617.0,
+    4503599627370496.0, -4503599627370496.0,
+    1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
+    1.7976931348623157E38, 0,
+    309485009821345068724781057.0,
+    2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
+    -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
+    37778931862957161709568.0, 37778931862957161709569.0,
+    37778931862957161709580.0, 37778931862957161709581.0,
+    37778931862957161709582.0, 37778931862957161709583.0,
+    37778931862957161709584.0, 37778931862957161709585.0,
+    37778931862957161709586.0, 37778931862957161709587.0};
+  float outputs_RZ[tableLength] = {18446744073709551617.0,
+    4503599627370496.0, -4503599627370496.0,
+    1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
+    1.7976931348623157E38, 0,
+    309485009821345068724781057.0,
+    2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+    -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+    37778931862957161709568.0, 37778931862957161709569.0,
+    37778931862957161709580.0, 37778931862957161709581.0,
+    37778931862957161709582.0, 37778931862957161709583.0,
+    37778931862957161709584.0, 37778931862957161709585.0,
+    37778931862957161709586.0, 37778931862957161709587.0};
+  float outputs_RP[tableLength] = {18446744073709551617.0,
+    4503599627370496.0, -4503599627370496.0,
+    1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
+    1.7976931348623157E38, 1,
+    309485009821345068724781057.0,
+    3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
+    -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+    37778931862957161709568.0, 37778931862957161709569.0,
+    37778931862957161709580.0, 37778931862957161709581.0,
+    37778931862957161709582.0, 37778931862957161709583.0,
+    37778931862957161709584.0, 37778931862957161709585.0,
+    37778931862957161709586.0, 37778931862957161709587.0};
+  float outputs_RM[tableLength] = {18446744073709551617.0,
+    4503599627370496.0, -4503599627370496.0,
+    1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
+    1.7976931348623157E38, 0,
+    309485009821345068724781057.0,
+    2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+    -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
+    37778931862957161709568.0, 37778931862957161709569.0,
+    37778931862957161709580.0, 37778931862957161709581.0,
+    37778931862957161709582.0, 37778931862957161709583.0,
+    37778931862957161709584.0, 37778931862957161709585.0,
+    37778931862957161709586.0, 37778931862957161709587.0};
+  int fcsr_inputs[4] =
+    {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf};
+  float* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM};
+  __ lwc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, a)) );
+  __ lw(t0, MemOperand(a0, OFFSET_OF(TestFloat, fcsr)) );
+  // Save the caller's FCSR, install the requested rounding mode, round,
+  // then restore FCSR before returning.
+  __ cfc1(t1, FCSR);
+  __ ctc1(t0, FCSR);
+  __ rint_s(f8, f4);
+  __ swc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, b)) );
+  __ ctc1(t1, FCSR);
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+
+  for (int j = 0; j < 4; j++) {
+    test.fcsr = fcsr_inputs[j];
+    for (int i = 0; i < tableLength; i++) {
+      test.a = inputs[i];
+      (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+      CHECK_EQ(test.b, outputs[j][i]);
+    }
+  }
+  }
+}
+
+
+// Tests the MIPS32r6 MINA.fmt / MAXA.fmt instructions: return the operand
+// with the smaller (mina) or larger (maxa) absolute value, for both double
+// and single precision.
+TEST(mina_maxa) {
+  if (IsMipsArchVariant(kMips32r6)) {
+    const int tableLength = 12;
+    CcTest::InitializeVM();
+    Isolate* isolate = CcTest::i_isolate();
+    HandleScope scope(isolate);
+    MacroAssembler assm(isolate, NULL, 0);
+
+    typedef struct test_float {
+      double a;
+      double b;
+      double resd;   // mina_d result
+      double resd1;  // maxa_d result
+      float c;
+      float d;
+      float resf;    // mina_s result
+      float resf1;   // maxa_s result
+    }TestFloat;
+
+    TestFloat test;
+    // Operand pairs mix signs and include equal-magnitude cases so both
+    // the |a| < |b| and |a| == |b| paths are covered.
+    double inputsa[tableLength] = {
+      5.3, 4.8, 6.1,
+      9.8, 9.8, 9.8,
+      -10.0, -8.9, -9.8,
+      -10.0, -8.9, -9.8
+    };
+    double inputsb[tableLength] = {
+      4.8, 5.3, 6.1,
+      -10.0, -8.9, -9.8,
+      9.8, 9.8, 9.8,
+      -9.8, -11.2, -9.8
+    };
+    double resd[tableLength] = {
+      4.8, 4.8, 6.1,
+      9.8, -8.9, 9.8,
+      9.8, -8.9, 9.8,
+      -9.8, -8.9, -9.8
+    };
+    double resd1[tableLength] = {
+      5.3, 5.3, 6.1,
+      -10.0, 9.8, 9.8,
+      -10.0, 9.8, 9.8,
+      -10.0, -11.2, -9.8
+    };
+    float inputsc[tableLength] = {
+      5.3, 4.8, 6.1,
+      9.8, 9.8, 9.8,
+      -10.0, -8.9, -9.8,
+      -10.0, -8.9, -9.8
+    };
+    float inputsd[tableLength] = {
+      4.8, 5.3, 6.1,
+      -10.0, -8.9, -9.8,
+      9.8, 9.8, 9.8,
+      -9.8, -11.2, -9.8
+    };
+    float resf[tableLength] = {
+      4.8, 4.8, 6.1,
+      9.8, -8.9, 9.8,
+      9.8, -8.9, 9.8,
+      -9.8, -8.9, -9.8
+    };
+    float resf1[tableLength] = {
+      5.3, 5.3, 6.1,
+      -10.0, 9.8, 9.8,
+      -10.0, 9.8, 9.8,
+      -10.0, -11.2, -9.8
+    };
+
+    __ ldc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, a)) );
+    __ ldc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, b)) );
+    __ lwc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, c)) );
+    __ lwc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, d)) );
+    __ mina_d(f6, f2, f4);
+    __ mina_s(f12, f8, f10);
+    __ maxa_d(f14, f2, f4);
+    __ maxa_s(f16, f8, f10);
+    __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, resf)) );
+    __ sdc1(f6, MemOperand(a0, OFFSET_OF(TestFloat, resd)) );
+    __ swc1(f16, MemOperand(a0, OFFSET_OF(TestFloat, resf1)) );
+    __ sdc1(f14, MemOperand(a0, OFFSET_OF(TestFloat, resd1)) );
+    __ jr(ra);
+    __ nop();
+
+    CodeDesc desc;
+    assm.GetCode(&desc);
+    Handle<Code> code = isolate->factory()->NewCode(
+        desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+    F3 f = FUNCTION_CAST<F3>(code->entry());
+    for (int i = 0; i < tableLength; i++) {
+      test.a = inputsa[i];
+      test.b = inputsb[i];
+      test.c = inputsc[i];
+      test.d = inputsd[i];
+      (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+      CHECK_EQ(test.resd, resd[i]);
+      CHECK_EQ(test.resf, resf[i]);
+      CHECK_EQ(test.resd1, resd1[i]);
+      CHECK_EQ(test.resf1, resf1[i]);
+    }
+  }
+}
+
+
+// ----------------------mips32r2 specific tests----------------------
+// Tests TRUNC.L.D / TRUNC.L.S (round toward zero to a 64-bit integer).
+// Requires FP64 mode since the 64-bit result occupies a full FPU register.
+// Out-of-range, NaN, and infinity inputs must produce kFPU64InvalidResult.
+TEST(trunc_l) {
+  if (IsMipsArchVariant(kMips32r2) && IsFp64Mode()) {
+    CcTest::InitializeVM();
+    Isolate* isolate = CcTest::i_isolate();
+    HandleScope scope(isolate);
+    MacroAssembler assm(isolate, NULL, 0);
+    const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
+    typedef struct test_float {
+      double a;
+      float b;
+      int64_t c;  // a trunc result
+      int64_t d;  // b trunc result
+    }Test;
+    const int tableLength = 16;
+    double inputs_D[tableLength] = {
+        2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+        -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+        2147483648.0,
+        std::numeric_limits<double>::quiet_NaN(),
+        std::numeric_limits<double>::infinity(),
+        9223372036854775808.0
+        };
+    float inputs_S[tableLength] = {
+        2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+        -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+        2147483648.0,
+        std::numeric_limits<float>::quiet_NaN(),
+        std::numeric_limits<float>::infinity(),
+        9223372036854775808.0
+        };
+    double outputs[tableLength] = {
+        2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+        -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+        2147483648.0, dFPU64InvalidResult,
+        dFPU64InvalidResult, dFPU64InvalidResult};
+
+    __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
+    __ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, b)) );
+    __ trunc_l_d(f8, f4);
+    __ trunc_l_s(f10, f6);
+    // The 64-bit integer results are stored with sdc1 into int64_t slots.
+    __ sdc1(f8, MemOperand(a0, OFFSET_OF(Test, c)) );
+    __ sdc1(f10, MemOperand(a0, OFFSET_OF(Test, d)) );
+    __ jr(ra);
+    __ nop();
+    Test test;
+    CodeDesc desc;
+    assm.GetCode(&desc);
+    Handle<Code> code = isolate->factory()->NewCode(
+        desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+    F3 f = FUNCTION_CAST<F3>(code->entry());
+    for (int i = 0; i < tableLength; i++) {
+      test.a = inputs_D[i];
+      test.b = inputs_S[i];
+      (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+      // int64_t is compared against a double table entry; the expected
+      // values here are all exactly representable in both types.
+      CHECK_EQ(test.c, outputs[i]);
+      CHECK_EQ(test.d, test.c);
+    }
+  }
+}
+
+
+// Tests the FP conditional moves MOVZ.fmt / MOVN.fmt: the destination is
+// overwritten from the source only when the GPR condition (rt) is zero
+// (movz) or non-zero (movn); otherwise the zeroed destination is kept.
+TEST(movz_movn) {
+  if (IsMipsArchVariant(kMips32r2)) {
+    const int tableLength = 4;
+    CcTest::InitializeVM();
+    Isolate* isolate = CcTest::i_isolate();
+    HandleScope scope(isolate);
+    MacroAssembler assm(isolate, NULL, 0);
+
+    typedef struct test_float {
+      int64_t rt;
+      double a;
+      double b;
+      double bold;
+      double b1;
+      double bold1;
+      float c;
+      float d;
+      float dold;
+      float d1;
+      float dold1;
+    }TestFloat;
+
+    TestFloat test;
+    double inputs_D[tableLength] = {
+      5.3, -5.3, 5.3, -2.9
+    };
+    double inputs_S[tableLength] = {
+      4.8, 4.8, -4.8, -0.29
+    };
+
+    float outputs_S[tableLength] = {
+      4.8, 4.8, -4.8, -0.29
+    };
+    double outputs_D[tableLength] = {
+      5.3, -5.3, 5.3, -2.9
+    };
+
+    __ ldc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, a)) );
+    __ lwc1(f6, MemOperand(a0, OFFSET_OF(TestFloat, c)) );
+    // NOTE(review): rt is declared int64_t but read with a 32-bit lw, so
+    // only one word of it reaches t0 — which word depends on endianness;
+    // the test only ever stores 0 or 1, so this works on little-endian.
+    __ lw(t0, MemOperand(a0, OFFSET_OF(TestFloat, rt)) );
+    // Zero the four destination registers and record those "old" values so
+    // the not-moved case can be asserted against them.
+    __ li(t1, 0x0);
+    __ mtc1(t1, f12);
+    __ mtc1(t1, f10);
+    __ mtc1(t1, f16);
+    __ mtc1(t1, f14);
+    __ sdc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, bold)) );
+    __ swc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, dold)) );
+    __ sdc1(f16, MemOperand(a0, OFFSET_OF(TestFloat, bold1)) );
+    __ swc1(f14, MemOperand(a0, OFFSET_OF(TestFloat, dold1)) );
+    __ movz_s(f10, f6, t0);
+    __ movz_d(f12, f2, t0);
+    __ movn_s(f14, f6, t0);
+    __ movn_d(f16, f2, t0);
+    __ swc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, d)) );
+    __ sdc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, b)) );
+    __ swc1(f14, MemOperand(a0, OFFSET_OF(TestFloat, d1)) );
+    __ sdc1(f16, MemOperand(a0, OFFSET_OF(TestFloat, b1)) );
+    __ jr(ra);
+    __ nop();
+
+    CodeDesc desc;
+    assm.GetCode(&desc);
+    Handle<Code> code = isolate->factory()->NewCode(
+        desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+    F3 f = FUNCTION_CAST<F3>(code->entry());
+    for (int i = 0; i < tableLength; i++) {
+      test.a = inputs_D[i];
+      test.c = inputs_S[i];
+
+      // rt != 0: movn moves, movz leaves the zeroed destination intact.
+      test.rt = 1;
+      (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+      CHECK_EQ(test.b, test.bold);
+      CHECK_EQ(test.d, test.dold);
+      CHECK_EQ(test.b1, outputs_D[i]);
+      CHECK_EQ(test.d1, outputs_S[i]);
+
+      // rt == 0: movz moves, movn leaves the zeroed destination intact.
+      test.rt = 0;
+      (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+      CHECK_EQ(test.b, outputs_D[i]);
+      CHECK_EQ(test.d, outputs_S[i]);
+      CHECK_EQ(test.b1, test.bold1);
+      CHECK_EQ(test.d1, test.dold1);
+    }
+  }
+}
+
+
+// Tests MOVT.fmt / MOVF.fmt: FP conditional moves keyed on one of the 8
+// FCSR condition-code bits. The cc number is encoded into the instruction
+// itself, so a fresh MacroAssembler is built for every (input, cc) pair.
+TEST(movt_movd) {
+  if (IsMipsArchVariant(kMips32r2)) {
+    const int tableLength = 4;
+    CcTest::InitializeVM();
+    Isolate* isolate = CcTest::i_isolate();
+
+    typedef struct test_float {
+      double srcd;
+      double dstd;
+      double dstdold;
+      double dstd1;
+      double dstdold1;
+      float srcf;
+      float dstf;
+      float dstfold;
+      float dstf1;
+      float dstfold1;
+      int32_t cc;
+      int32_t fcsr;
+    }TestFloat;
+
+    TestFloat test;
+    double inputs_D[tableLength] = {
+      5.3, -5.3, 20.8, -2.9
+    };
+    double inputs_S[tableLength] = {
+      4.88, 4.8, -4.8, -0.29
+    };
+
+    float outputs_S[tableLength] = {
+      4.88, 4.8, -4.8, -0.29
+    };
+    double outputs_D[tableLength] = {
+      5.3, -5.3, 20.8, -2.9
+    };
+    int condition_flags[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+    for (int i = 0; i < tableLength; i++) {
+      test.srcd = inputs_D[i];
+      test.srcf = inputs_S[i];
+
+      for (int j = 0; j< 8; j++) {
+        test.cc = condition_flags[j];
+        // FCSR condition bits: cc0 is bit 23, cc1..cc7 are bits 25..31.
+        if (test.cc == 0) {
+          test.fcsr = 1 << 23;
+        } else {
+          test.fcsr = 1 << (24+condition_flags[j]);
+        }
+        HandleScope scope(isolate);
+        MacroAssembler assm(isolate, NULL, 0);
+        __ ldc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, srcd)) );
+        __ lwc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, srcf)) );
+        __ lw(t1, MemOperand(a0, OFFSET_OF(TestFloat, fcsr)) );
+        // Save caller FCSR, install the test pattern, restore on exit.
+        __ cfc1(t0, FCSR);
+        __ ctc1(t1, FCSR);
+        // Zero destinations and record them for the not-moved assertions.
+        __ li(t2, 0x0);
+        __ mtc1(t2, f12);
+        __ mtc1(t2, f10);
+        __ sdc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, dstdold)) );
+        __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, dstfold)) );
+        __ movt_s(f12, f4, test.cc);
+        __ movt_d(f10, f2, test.cc);
+        __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, dstf)) );
+        __ sdc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, dstd)) );
+        // Record the (possibly moved) values as the baseline for movf.
+        __ sdc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, dstdold1)) );
+        __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, dstfold1)) );
+        __ movf_s(f12, f4, test.cc);
+        __ movf_d(f10, f2, test.cc);
+        __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, dstf1)) );
+        __ sdc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, dstd1)) );
+        __ ctc1(t0, FCSR);
+        __ jr(ra);
+        __ nop();
+
+        CodeDesc desc;
+        assm.GetCode(&desc);
+        Handle<Code> code = isolate->factory()->NewCode(
+            desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+        F3 f = FUNCTION_CAST<F3>(code->entry());
+
+        // Condition bit set: movt moves, movf does not.
+        (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+        CHECK_EQ(test.dstf, outputs_S[i]);
+        CHECK_EQ(test.dstd, outputs_D[i]);
+        CHECK_EQ(test.dstf1, test.dstfold1);
+        CHECK_EQ(test.dstd1, test.dstdold1);
+        // Condition bit clear: movf moves, movt does not.
+        test.fcsr = 0;
+        (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+        CHECK_EQ(test.dstf, test.dstfold);
+        CHECK_EQ(test.dstd, test.dstdold);
+        CHECK_EQ(test.dstf1, outputs_S[i]);
+        CHECK_EQ(test.dstd1, outputs_D[i]);
+      }
+    }
+  }
+}
+
+
+// ----------------------tests for all archs--------------------------
+TEST(cvt_w_d) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
test.fcsr = fcsr_inputs[j];
for (int i = 0; i < tableLength; i++) {
test.a = inputs[i];
- std::cout << i << " " << j << "\n";
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs[j][i]);
}
}
+// Tests TRUNC.W.D / TRUNC.W.S (round toward zero to a 32-bit integer).
+// Out-of-range (2^31), NaN, and infinity inputs must yield
+// kFPUInvalidResult.
+TEST(trunc_w) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  MacroAssembler assm(isolate, NULL, 0);
+
+  typedef struct test_float {
+    double a;
+    float b;
+    int32_t c;  // a trunc result
+    int32_t d;  // b trunc result
+  }Test;
+  const int tableLength = 15;
+  double inputs_D[tableLength] = {
+      2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+      -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+      2147483648.0,
+      std::numeric_limits<double>::quiet_NaN(),
+      std::numeric_limits<double>::infinity()
+      };
+  float inputs_S[tableLength] = {
+      2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+      -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+      2147483648.0,
+      std::numeric_limits<float>::quiet_NaN(),
+      std::numeric_limits<float>::infinity()
+      };
+  double outputs[tableLength] = {
+      2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+      -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+      kFPUInvalidResult, kFPUInvalidResult,
+      kFPUInvalidResult};
+
+  __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
+  __ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, b)) );
+  __ trunc_w_d(f8, f4);
+  __ trunc_w_s(f10, f6);
+  __ swc1(f8, MemOperand(a0, OFFSET_OF(Test, c)) );
+  __ swc1(f10, MemOperand(a0, OFFSET_OF(Test, d)) );
+  __ jr(ra);
+  __ nop();
+  Test test;
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (int i = 0; i < tableLength; i++) {
+    test.a = inputs_D[i];
+    test.b = inputs_S[i];
+    (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+    CHECK_EQ(test.c, outputs[i]);
+    // Single- and double-precision truncation must agree on these inputs.
+    CHECK_EQ(test.d, test.c);
+  }
+}
+
+
+// Tests ROUND.W.D / ROUND.W.S (round-to-nearest, ties-to-even, to a 32-bit
+// integer). Note the tie cases: 2.5 -> 2 but 3.5 -> 4.
+TEST(round_w) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  MacroAssembler assm(isolate, NULL, 0);
+
+  typedef struct test_float {
+    double a;
+    float b;
+    int32_t c;  // a trunc result
+    int32_t d;  // b trunc result
+  }Test;
+  const int tableLength = 15;
+  double inputs_D[tableLength] = {
+      2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+      -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+      2147483648.0,
+      std::numeric_limits<double>::quiet_NaN(),
+      std::numeric_limits<double>::infinity()
+      };
+  float inputs_S[tableLength] = {
+      2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+      -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+      2147483648.0,
+      std::numeric_limits<float>::quiet_NaN(),
+      std::numeric_limits<float>::infinity()
+      };
+  double outputs[tableLength] = {
+      2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
+      -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
+      kFPUInvalidResult, kFPUInvalidResult,
+      kFPUInvalidResult};
+
+  __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
+  __ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, b)) );
+  __ round_w_d(f8, f4);
+  __ round_w_s(f10, f6);
+  __ swc1(f8, MemOperand(a0, OFFSET_OF(Test, c)) );
+  __ swc1(f10, MemOperand(a0, OFFSET_OF(Test, d)) );
+  __ jr(ra);
+  __ nop();
+  Test test;
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (int i = 0; i < tableLength; i++) {
+    test.a = inputs_D[i];
+    test.b = inputs_S[i];
+    (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+    CHECK_EQ(test.c, outputs[i]);
+    CHECK_EQ(test.d, test.c);
+  }
+}
+
+
+// Tests ROUND.L.D / ROUND.L.S (round-to-nearest, ties-to-even, to a 64-bit
+// integer). Requires FP64 mode; NaN/infinity/out-of-range give
+// kFPU64InvalidResult.
+TEST(round_l) {
+  if (IsFp64Mode()) {
+    CcTest::InitializeVM();
+    Isolate* isolate = CcTest::i_isolate();
+    HandleScope scope(isolate);
+    MacroAssembler assm(isolate, NULL, 0);
+    const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
+    typedef struct test_float {
+      double a;
+      float b;
+      int64_t c;
+      int64_t d;
+    }Test;
+    const int tableLength = 16;
+    double inputs_D[tableLength] = {
+        2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+        -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+        2147483648.0,
+        std::numeric_limits<double>::quiet_NaN(),
+        std::numeric_limits<double>::infinity(),
+        9223372036854775808.0
+        };
+    float inputs_S[tableLength] = {
+        2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+        -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+        2147483648.0,
+        std::numeric_limits<float>::quiet_NaN(),
+        std::numeric_limits<float>::infinity(),
+        9223372036854775808.0
+        };
+    double outputs[tableLength] = {
+        2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
+        -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
+        2147483648.0, dFPU64InvalidResult,
+        dFPU64InvalidResult, dFPU64InvalidResult};
+
+    __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
+    __ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, b)) );
+    __ round_l_d(f8, f4);
+    __ round_l_s(f10, f6);
+    __ sdc1(f8, MemOperand(a0, OFFSET_OF(Test, c)) );
+    __ sdc1(f10, MemOperand(a0, OFFSET_OF(Test, d)) );
+    __ jr(ra);
+    __ nop();
+    Test test;
+    CodeDesc desc;
+    assm.GetCode(&desc);
+    Handle<Code> code = isolate->factory()->NewCode(
+        desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+    F3 f = FUNCTION_CAST<F3>(code->entry());
+    for (int i = 0; i < tableLength; i++) {
+      test.a = inputs_D[i];
+      test.b = inputs_S[i];
+      (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+      CHECK_EQ(test.c, outputs[i]);
+      CHECK_EQ(test.d, test.c);
+    }
+  }
+}
+
+
+// Tests SUB.S / SUB.D against precomputed differences for all sign
+// combinations. Expected values are exact because the compiler performs
+// the same IEEE 754 subtraction as the FPU for these operands.
+TEST(sub) {
+  const int tableLength = 12;
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  MacroAssembler assm(isolate, NULL, 0);
+
+  typedef struct test_float {
+    float a;
+    float b;
+    float resultS;
+    double c;
+    double d;
+    double resultD;
+  }TestFloat;
+
+  TestFloat test;
+  double inputfs_D[tableLength] = {
+    5.3, 4.8, 2.9, -5.3, -4.8, -2.9,
+    5.3, 4.8, 2.9, -5.3, -4.8, -2.9
+  };
+  double inputft_D[tableLength] = {
+    4.8, 5.3, 2.9, 4.8, 5.3, 2.9,
+    -4.8, -5.3, -2.9, -4.8, -5.3, -2.9
+  };
+  double outputs_D[tableLength] = {
+    0.5, -0.5, 0.0, -10.1, -10.1, -5.8,
+    10.1, 10.1, 5.8, -0.5, 0.5, 0.0
+  };
+  float inputfs_S[tableLength] = {
+    5.3, 4.8, 2.9, -5.3, -4.8, -2.9,
+    5.3, 4.8, 2.9, -5.3, -4.8, -2.9
+  };
+  float inputft_S[tableLength] = {
+    4.8, 5.3, 2.9, 4.8, 5.3, 2.9,
+    -4.8, -5.3, -2.9, -4.8, -5.3, -2.9
+  };
+  float outputs_S[tableLength] = {
+    0.5, -0.5, 0.0, -10.1, -10.1, -5.8,
+    10.1, 10.1, 5.8, -0.5, 0.5, 0.0
+  };
+  __ lwc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, a)) );
+  __ lwc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, b)) );
+  __ ldc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, c)) );
+  __ ldc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, d)) );
+  __ sub_s(f6, f2, f4);
+  __ sub_d(f12, f8, f10);
+  __ swc1(f6, MemOperand(a0, OFFSET_OF(TestFloat, resultS)) );
+  __ sdc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, resultD)) );
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (int i = 0; i < tableLength; i++) {
+    test.a = inputfs_S[i];
+    test.b = inputft_S[i];
+    test.c = inputfs_D[i];
+    test.d = inputft_D[i];
+    (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+    CHECK_EQ(test.resultS, outputs_S[i]);
+    CHECK_EQ(test.resultD, outputs_D[i]);
+  }
+}
+
+
+// Tests SQRT, RSQRT (reciprocal square root) and RECIP (reciprocal) in
+// both precisions. sqrt is checked exactly; rsqrt/recip are
+// implementation-approximate, so they are checked against 1/x within a
+// small per-precision delta (except the x == 0 case, where the result is
+// an exact infinity).
+TEST(sqrt_rsqrt_recip) {
+  const int tableLength = 4;
+  const double deltaDouble = 2E-15;
+  const float deltaFloat = 2E-7;
+  const float sqrt2_s = sqrt(2);
+  const double sqrt2_d = sqrt(2);
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  MacroAssembler assm(isolate, NULL, 0);
+
+  typedef struct test_float {
+    float a;
+    float resultS;   // sqrt_s
+    float resultS1;  // rsqrt_s
+    float resultS2;  // recip_s
+    double c;
+    double resultD;   // sqrt_d
+    double resultD1;  // rsqrt_d
+    double resultD2;  // recip_d
+  }TestFloat;
+  TestFloat test;
+
+  double inputs_D[tableLength] = {
+    0.0L, 4.0L, 2.0L, 4e-28L
+  };
+
+  double outputs_D[tableLength] = {
+    0.0L, 2.0L, sqrt2_d, 2e-14L
+  };
+  float inputs_S[tableLength] = {
+    0.0, 4.0, 2.0, 4e-28
+  };
+
+  float outputs_S[tableLength] = {
+    0.0, 2.0, sqrt2_s, 2e-14
+  };
+
+
+  __ lwc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, a)) );
+  __ ldc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, c)) );
+  __ sqrt_s(f6, f2);
+  __ sqrt_d(f12, f8);
+  __ rsqrt_d(f14, f8);
+  __ rsqrt_s(f16, f2);
+  __ recip_d(f18, f8);
+  __ recip_s(f20, f2);
+  __ swc1(f6, MemOperand(a0, OFFSET_OF(TestFloat, resultS)) );
+  __ sdc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, resultD)) );
+  __ swc1(f16, MemOperand(a0, OFFSET_OF(TestFloat, resultS1)) );
+  __ sdc1(f14, MemOperand(a0, OFFSET_OF(TestFloat, resultD1)) );
+  __ swc1(f20, MemOperand(a0, OFFSET_OF(TestFloat, resultS2)) );
+  __ sdc1(f18, MemOperand(a0, OFFSET_OF(TestFloat, resultD2)) );
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+
+  for (int i = 0; i < tableLength; i++) {
+    float f1;
+    double d1;
+    test.a = inputs_S[i];
+    test.c = inputs_D[i];
+
+    (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+
+    CHECK_EQ(test.resultS, outputs_S[i]);
+    CHECK_EQ(test.resultD, outputs_D[i]);
+
+    if (i != 0) {
+      // Approximate instructions: compare |result - reference| <= delta.
+      f1 = test.resultS1 - 1.0F/outputs_S[i];
+      f1 = (f1 < 0) ? f1 : -f1;
+      CHECK(f1 <= deltaFloat);
+      d1 = test.resultD1 - 1.0L/outputs_D[i];
+      d1 = (d1 < 0) ? d1 : -d1;
+      CHECK(d1 <= deltaDouble);
+      f1 = test.resultS2 - 1.0F/inputs_S[i];
+      f1 = (f1 < 0) ? f1 : -f1;
+      CHECK(f1 <= deltaFloat);
+      d1 = test.resultD2 - 1.0L/inputs_D[i];
+      d1 = (d1 < 0) ? d1 : -d1;
+      CHECK(d1 <= deltaDouble);
+    } else {
+      // 1/0 and 1/sqrt(0) are exact +infinity, so compare exactly.
+      CHECK_EQ(test.resultS1, 1.0F/outputs_S[i]);
+      CHECK_EQ(test.resultD1, 1.0L/outputs_D[i]);
+      CHECK_EQ(test.resultS2, 1.0F/inputs_S[i]);
+      CHECK_EQ(test.resultD2, 1.0L/inputs_D[i]);
+    }
+  }
+}
+
+
+// Tests NEG.S / NEG.D (sign negation) on zero, positive, and negative
+// inputs.
+TEST(neg) {
+  const int tableLength = 3;
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  MacroAssembler assm(isolate, NULL, 0);
+
+  typedef struct test_float {
+    float a;
+    float resultS;
+    double c;
+    double resultD;
+  }TestFloat;
+
+  TestFloat test;
+  double inputs_D[tableLength] = {
+    0.0, 4.0, -2.0
+  };
+
+  double outputs_D[tableLength] = {
+    0.0, -4.0, 2.0
+  };
+  float inputs_S[tableLength] = {
+    0.0, 4.0, -2.0
+  };
+
+  float outputs_S[tableLength] = {
+    0.0, -4.0, 2.0
+  };
+  __ lwc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, a)) );
+  __ ldc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, c)) );
+  __ neg_s(f6, f2);
+  __ neg_d(f12, f8);
+  __ swc1(f6, MemOperand(a0, OFFSET_OF(TestFloat, resultS)) );
+  __ sdc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, resultD)) );
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (int i = 0; i < tableLength; i++) {
+    test.a = inputs_S[i];
+    test.c = inputs_D[i];
+    (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+    CHECK_EQ(test.resultS, outputs_S[i]);
+    CHECK_EQ(test.resultD, outputs_D[i]);
+  }
+}
+
+
+// Tests MUL.S / MUL.D; expected values are computed with the host's own
+// `*` operator, which performs the same IEEE 754 multiplication.
+TEST(mul) {
+  const int tableLength = 4;
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  MacroAssembler assm(isolate, NULL, 0);
+
+  typedef struct test_float {
+    float a;
+    float b;
+    float resultS;
+    double c;
+    double d;
+    double resultD;
+  }TestFloat;
+
+  TestFloat test;
+  double inputfs_D[tableLength] = {
+    5.3, -5.3, 5.3, -2.9
+  };
+  double inputft_D[tableLength] = {
+    4.8, 4.8, -4.8, -0.29
+  };
+
+  float inputfs_S[tableLength] = {
+    5.3, -5.3, 5.3, -2.9
+  };
+  float inputft_S[tableLength] = {
+    4.8, 4.8, -4.8, -0.29
+  };
+
+  __ lwc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, a)) );
+  __ lwc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, b)) );
+  __ ldc1(f6, MemOperand(a0, OFFSET_OF(TestFloat, c)) );
+  __ ldc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, d)) );
+  __ mul_s(f10, f2, f4);
+  __ mul_d(f12, f6, f8);
+  __ swc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, resultS)) );
+  __ sdc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, resultD)) );
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (int i = 0; i < tableLength; i++) {
+    test.a = inputfs_S[i];
+    test.b = inputft_S[i];
+    test.c = inputfs_D[i];
+    test.d = inputft_D[i];
+    (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+    CHECK_EQ(test.resultS, inputfs_S[i]*inputft_S[i]);
+    CHECK_EQ(test.resultD, inputfs_D[i]*inputft_D[i]);
+  }
+}
+
+
+// Tests MOV.S / MOV.D (unconditional FP register-to-register move): the
+// value read from one struct field must round-trip unchanged into another.
+TEST(mov) {
+  const int tableLength = 4;
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  MacroAssembler assm(isolate, NULL, 0);
+
+  typedef struct test_float {
+    double a;
+    double b;
+    float c;
+    float d;
+  }TestFloat;
+
+  TestFloat test;
+  double inputs_D[tableLength] = {
+    5.3, -5.3, 5.3, -2.9
+  };
+  double inputs_S[tableLength] = {
+    4.8, 4.8, -4.8, -0.29
+  };
+
+  float outputs_S[tableLength] = {
+    4.8, 4.8, -4.8, -0.29
+  };
+  double outputs_D[tableLength] = {
+    5.3, -5.3, 5.3, -2.9
+  };
+
+  __ ldc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, a)) );
+  __ lwc1(f6, MemOperand(a0, OFFSET_OF(TestFloat, c)) );
+  __ mov_s(f18, f6);
+  __ mov_d(f20, f2);
+  __ swc1(f18, MemOperand(a0, OFFSET_OF(TestFloat, d)) );
+  __ sdc1(f20, MemOperand(a0, OFFSET_OF(TestFloat, b)) );
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (int i = 0; i < tableLength; i++) {
+    test.a = inputs_D[i];
+    test.c = inputs_S[i];
+
+    (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+    CHECK_EQ(test.b, outputs_D[i]);
+    CHECK_EQ(test.d, outputs_S[i]);
+  }
+}
+
+
+// Tests FLOOR.W.D / FLOOR.W.S (round toward minus infinity to a 32-bit
+// integer). Out-of-range (2^31), NaN, and infinity inputs must yield
+// kFPUInvalidResult.
+TEST(floor_w) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  MacroAssembler assm(isolate, NULL, 0);
+
+  typedef struct test_float {
+    double a;
+    float b;
+    int32_t c;  // a floor result
+    int32_t d;  // b floor result
+  }Test;
+  const int tableLength = 15;
+  double inputs_D[tableLength] = {
+      2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+      -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+      2147483648.0,
+      std::numeric_limits<double>::quiet_NaN(),
+      std::numeric_limits<double>::infinity()
+      };
+  float inputs_S[tableLength] = {
+      2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+      -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+      2147483648.0,
+      std::numeric_limits<float>::quiet_NaN(),
+      std::numeric_limits<float>::infinity()
+      };
+  double outputs[tableLength] = {
+      2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+      -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
+      kFPUInvalidResult, kFPUInvalidResult,
+      kFPUInvalidResult};
+
+  __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
+  __ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, b)) );
+  __ floor_w_d(f8, f4);
+  __ floor_w_s(f10, f6);
+  __ swc1(f8, MemOperand(a0, OFFSET_OF(Test, c)) );
+  __ swc1(f10, MemOperand(a0, OFFSET_OF(Test, d)) );
+  __ jr(ra);
+  __ nop();
+  Test test;
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (int i = 0; i < tableLength; i++) {
+    test.a = inputs_D[i];
+    test.b = inputs_S[i];
+    (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+    CHECK_EQ(test.c, outputs[i]);
+    CHECK_EQ(test.d, test.c);
+  }
+}
+
+
+// Tests FLOOR.L.D / FLOOR.L.S (round toward minus infinity to a 64-bit
+// integer). Requires FP64 mode; NaN/infinity/out-of-range give
+// kFPU64InvalidResult.
+TEST(floor_l) {
+  if (IsFp64Mode()) {
+    CcTest::InitializeVM();
+    Isolate* isolate = CcTest::i_isolate();
+    HandleScope scope(isolate);
+    MacroAssembler assm(isolate, NULL, 0);
+    const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
+    typedef struct test_float {
+      double a;
+      float b;
+      int64_t c;
+      int64_t d;
+    }Test;
+    const int tableLength = 16;
+    double inputs_D[tableLength] = {
+        2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+        -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+        2147483648.0,
+        std::numeric_limits<double>::quiet_NaN(),
+        std::numeric_limits<double>::infinity(),
+        9223372036854775808.0
+        };
+    float inputs_S[tableLength] = {
+        2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+        -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+        2147483648.0,
+        std::numeric_limits<float>::quiet_NaN(),
+        std::numeric_limits<float>::infinity(),
+        9223372036854775808.0
+        };
+    double outputs[tableLength] = {
+        2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+        -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
+        2147483648.0, dFPU64InvalidResult,
+        dFPU64InvalidResult, dFPU64InvalidResult};
+
+    __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
+    __ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, b)) );
+    __ floor_l_d(f8, f4);
+    __ floor_l_s(f10, f6);
+    __ sdc1(f8, MemOperand(a0, OFFSET_OF(Test, c)) );
+    __ sdc1(f10, MemOperand(a0, OFFSET_OF(Test, d)) );
+    __ jr(ra);
+    __ nop();
+    Test test;
+    CodeDesc desc;
+    assm.GetCode(&desc);
+    Handle<Code> code = isolate->factory()->NewCode(
+        desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+    F3 f = FUNCTION_CAST<F3>(code->entry());
+    for (int i = 0; i < tableLength; i++) {
+      test.a = inputs_D[i];
+      test.b = inputs_S[i];
+      (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+      CHECK_EQ(test.c, outputs[i]);
+      CHECK_EQ(test.d, test.c);
+    }
+  }
+}
+
+
+// Test CEIL.W.D / CEIL.W.S: round toward +infinity to a 32-bit integer
+// (e.g. 2.1 -> 3, -2.1 -> -2).  NaN, infinity and 2^31 (outside the
+// int32 range) must produce kFPUInvalidResult.
+TEST(ceil_w) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test_float {
+ double a;
+ float b;
+ int32_t c; // a ceil result
+ int32_t d; // b ceil result
+ }Test;
+ const int tableLength = 15;
+ double inputs_D[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::infinity()
+ };
+ float inputs_S[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<float>::quiet_NaN(),
+ std::numeric_limits<float>::infinity()
+ };
+ double outputs[tableLength] = {
+ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ kFPUInvalidResult, kFPUInvalidResult,
+ kFPUInvalidResult};
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
+ __ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, b)) );
+ __ ceil_w_d(f8, f4);
+ __ ceil_w_s(f10, f6);
+ __ swc1(f8, MemOperand(a0, OFFSET_OF(Test, c)) );
+ __ swc1(f10, MemOperand(a0, OFFSET_OF(Test, d)) );
+ __ jr(ra);
+ __ nop();
+ Test test;
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputs_D[i];
+ test.b = inputs_S[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.c, outputs[i]);
+ // The single-precision result must agree with the double result.
+ CHECK_EQ(test.d, test.c);
+ }
+}
+
+
+// Test CEIL.L.D / CEIL.L.S: round toward +infinity to a 64-bit integer.
+// NaN, infinity and 2^63 (outside the int64 range) must produce
+// kFPU64InvalidResult.  Only runs in FP64 mode.
+TEST(ceil_l) {
+ if (IsFp64Mode()) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+ const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
+ typedef struct test_float {
+ double a;
+ float b;
+ int64_t c;
+ int64_t d;
+ }Test;
+ const int tableLength = 16;
+ double inputs_D[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::infinity(),
+ 9223372036854775808.0
+ };
+ float inputs_S[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<float>::quiet_NaN(),
+ std::numeric_limits<float>::infinity(),
+ 9223372036854775808.0
+ };
+ // Last three entries (NaN, Inf, 2^63) are the invalid-operation cases.
+ double outputs[tableLength] = {
+ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 2147483648.0, dFPU64InvalidResult,
+ dFPU64InvalidResult, dFPU64InvalidResult};
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
+ __ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, b)) );
+ __ ceil_l_d(f8, f4);
+ __ ceil_l_s(f10, f6);
+ __ sdc1(f8, MemOperand(a0, OFFSET_OF(Test, c)) );
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(Test, d)) );
+ __ jr(ra);
+ __ nop();
+ Test test;
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputs_D[i];
+ test.b = inputs_S[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.c, outputs[i]);
+ // The single-precision result must agree with the double result.
+ CHECK_EQ(test.d, test.c);
+ }
+ }
+}
+
+
TEST(jump_tables1) {
// Test jump tables with forward jumps.
CcTest::InitializeVM();
}
-TEST(MIPS17) {
+// ----------------------mips64r6 specific tests----------------------
+TEST(seleqz_selnez) {
if (kArchVariant == kMips64r6) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
double f;
double g;
double h;
+ float i;
+ float j;
+ float k;
+ float l;
} Test;
Test test;
// Floating point part of test.
__ ldc1(f0, MemOperand(a0, OFFSET_OF(Test, e)) ); // src
__ ldc1(f2, MemOperand(a0, OFFSET_OF(Test, f)) ); // test
- __ seleqz(D, f4, f0, f2);
- __ selnez(D, f6, f0, f2);
+ __ lwc1(f8, MemOperand(a0, OFFSET_OF(Test, i)) ); // src
+ __ lwc1(f10, MemOperand(a0, OFFSET_OF(Test, j)) ); // test
+ __ seleqz_d(f4, f0, f2);
+ __ selnez_d(f6, f0, f2);
+ __ seleqz_s(f12, f8, f10);
+ __ selnez_s(f14, f8, f10);
__ sdc1(f4, MemOperand(a0, OFFSET_OF(Test, g)) ); // src
__ sdc1(f6, MemOperand(a0, OFFSET_OF(Test, h)) ); // src
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(Test, k)) ); // src
+ __ swc1(f14, MemOperand(a0, OFFSET_OF(Test, l)) ); // src
__ jr(ra);
__ nop();
CodeDesc desc;
const int test_size = 3;
const int input_size = 5;
- double inputs[input_size] = {0.0, 65.2, -70.32,
+ double inputs_D[input_size] = {0.0, 65.2, -70.32,
18446744073709551621.0, -18446744073709551621.0};
- double outputs[input_size] = {0.0, 65.2, -70.32,
+ double outputs_D[input_size] = {0.0, 65.2, -70.32,
18446744073709551621.0, -18446744073709551621.0};
- double tests[test_size*2] = {2.8, 2.9, -2.8, -2.9,
+ double tests_D[test_size*2] = {2.8, 2.9, -2.8, -2.9,
18446744073709551616.0, 18446744073709555712.0};
- for (int j=0;j < test_size;j+=2) {
- for (int i=0;i < input_size;i++) {
- test.e = inputs[i];
- test.f = tests[j];
+ float inputs_S[input_size] = {0.0, 65.2, -70.32,
+ 18446744073709551621.0, -18446744073709551621.0};
+ float outputs_S[input_size] = {0.0, 65.2, -70.32,
+ 18446744073709551621.0, -18446744073709551621.0};
+ float tests_S[test_size*2] = {2.9, 2.8, -2.9, -2.8,
+ 18446744073709551616.0, 18446746272732807168.0};
+ for (int j=0; j < test_size; j+=2) {
+ for (int i=0; i < input_size; i++) {
+ test.e = inputs_D[i];
+ test.f = tests_D[j];
+ test.i = inputs_S[i];
+ test.j = tests_S[j];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.g, outputs[i]);
+ CHECK_EQ(test.g, outputs_D[i]);
CHECK_EQ(test.h, 0);
+ CHECK_EQ(test.k, outputs_S[i]);
+ CHECK_EQ(test.l, 0);
- test.f = tests[j+1];
+ test.f = tests_D[j+1];
+ test.j = tests_S[j+1];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
CHECK_EQ(test.g, 0);
- CHECK_EQ(test.h, outputs[i]);
+ CHECK_EQ(test.h, outputs_D[i]);
+ CHECK_EQ(test.k, 0);
+ CHECK_EQ(test.l, outputs_S[i]);
}
}
}
}
-TEST(MIPS18) {
+
+TEST(min_max) {
if (kArchVariant == kMips64r6) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
double b;
double c;
double d;
+ float e;
+ float f;
+ float g;
+ float h;
} TestFloat;
TestFloat test;
+ const double dblNaN = std::numeric_limits<double>::quiet_NaN();
+ const float fltNaN = std::numeric_limits<float>::quiet_NaN();
+ const int tableLength = 5;
+ double inputsa[tableLength] = {2.0, 3.0, dblNaN, 3.0, dblNaN};
+ double inputsb[tableLength] = {3.0, 2.0, 3.0, dblNaN, dblNaN};
+ double outputsdmin[tableLength] = {2.0, 2.0, 3.0, 3.0, dblNaN};
+ double outputsdmax[tableLength] = {3.0, 3.0, 3.0, 3.0, dblNaN};
+
+ float inputse[tableLength] = {2.0, 3.0, fltNaN, 3.0, fltNaN};
+ float inputsf[tableLength] = {3.0, 2.0, 3.0, fltNaN, fltNaN};
+ float outputsfmin[tableLength] = {2.0, 2.0, 3.0, 3.0, fltNaN};
+ float outputsfmax[tableLength] = {3.0, 3.0, 3.0, 3.0, fltNaN};
__ ldc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, a)));
__ ldc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, b)));
+ __ lwc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, e)));
+ __ lwc1(f6, MemOperand(a0, OFFSET_OF(TestFloat, f)));
__ min_d(f10, f4, f8);
__ max_d(f12, f4, f8);
+ __ min_s(f14, f2, f6);
+ __ max_s(f16, f2, f6);
__ sdc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, c)));
__ sdc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, d)));
+ __ swc1(f14, MemOperand(a0, OFFSET_OF(TestFloat, g)));
+ __ swc1(f16, MemOperand(a0, OFFSET_OF(TestFloat, h)));
__ jr(ra);
__ nop();
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- test.a = 2.0; // a goes to fs
- test.b = 3.0; // b goes to ft
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, 2.0);
- CHECK_EQ(test.d, 3.0);
-
- test.a = 3.0; // a goes to fs
- test.b = 2.0; // b goes to ft
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, 2.0);
- CHECK_EQ(test.d, 3.0);
-
- test.a = std::numeric_limits<double>::quiet_NaN();
- test.b = 3.0; // b goes to ft
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, 3.0);
- CHECK_EQ(test.d, 3.0);
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputsa[i];
+ test.b = inputsb[i];
+ test.e = inputse[i];
+ test.f = inputsf[i];
- test.b = std::numeric_limits<double>::quiet_NaN();
- test.a = 3.0; // b goes to ft
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, 3.0);
- CHECK_EQ(test.d, 3.0);
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- test.a = std::numeric_limits<double>::quiet_NaN();
- test.b = std::numeric_limits<double>::quiet_NaN();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- DCHECK(std::isnan(test.c));
- DCHECK(std::isnan(test.d));
+ if (i < tableLength - 1) {
+ CHECK_EQ(test.c, outputsdmin[i]);
+ CHECK_EQ(test.d, outputsdmax[i]);
+ CHECK_EQ(test.g, outputsfmin[i]);
+ CHECK_EQ(test.h, outputsfmax[i]);
+ } else {
+ DCHECK(std::isnan(test.c));
+ DCHECK(std::isnan(test.d));
+ DCHECK(std::isnan(test.g));
+ DCHECK(std::isnan(test.h));
+ }
+ }
}
}
-TEST(MIPS19) {
+TEST(rint_d) {
if (kArchVariant == kMips64r6) {
const int tableLength = 30;
CcTest::InitializeVM();
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int j = 0;j < 4;j++) {
+ for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
- for (int i = 0;i < tableLength;i++) {
+ for (int i = 0; i < tableLength; i++) {
test.a = inputs[i];
- std::cout << j << " " << i << "\n";
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs[j][i]);
}
}
-TEST(MIPS20) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+TEST(sel) {
+ if (kArchVariant == kMips64r6) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
- typedef struct test_float {
- double a;
- int32_t b;
- int fcsr;
- }Test;
- const int tableLength = 24;
- double inputs[tableLength] = {
- 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
- -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
- 2147483637.0, 2147483638.0, 2147483639.0,
- 2147483640.0, 2147483641.0, 2147483642.0,
- 2147483643.0, 2147483644.0, 2147483645.0,
- 2147483646.0, 2147483647.0, 2147483653.0
- };
- double outputs_RN[tableLength] = {
- 2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
- -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
- 2147483637.0, 2147483638.0, 2147483639.0,
- 2147483640.0, 2147483641.0, 2147483642.0,
- 2147483643.0, 2147483644.0, 2147483645.0,
- 2147483646.0, 2147483647.0, kFPUInvalidResult};
- double outputs_RZ[tableLength] = {
- 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
- -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
- 2147483637.0, 2147483638.0, 2147483639.0,
- 2147483640.0, 2147483641.0, 2147483642.0,
- 2147483643.0, 2147483644.0, 2147483645.0,
- 2147483646.0, 2147483647.0, kFPUInvalidResult};
- double outputs_RP[tableLength] = {
- 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
- -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
- 2147483637.0, 2147483638.0, 2147483639.0,
- 2147483640.0, 2147483641.0, 2147483642.0,
- 2147483643.0, 2147483644.0, 2147483645.0,
- 2147483646.0, 2147483647.0, kFPUInvalidResult};
- double outputs_RM[tableLength] = {
- 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
- -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
- 2147483637.0, 2147483638.0, 2147483639.0,
- 2147483640.0, 2147483641.0, 2147483642.0,
- 2147483643.0, 2147483644.0, 2147483645.0,
- 2147483646.0, 2147483647.0, kFPUInvalidResult};
- int fcsr_inputs[4] =
- {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf};
- double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM};
- __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
- __ lw(t0, MemOperand(a0, OFFSET_OF(Test, fcsr)) );
- __ cfc1(t1, FCSR);
- __ ctc1(t0, FCSR);
- __ cvt_w_d(f8, f4);
- __ swc1(f8, MemOperand(a0, OFFSET_OF(Test, b)) );
- __ ctc1(t1, FCSR);
- __ jr(ra);
- __ nop();
- Test test;
- CodeDesc desc;
- assm.GetCode(&desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
- F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int j = 0;j < 4;j++) {
- test.fcsr = fcsr_inputs[j];
- for (int i = 0;i < tableLength;i++) {
- test.a = inputs[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.b, outputs[j][i]);
+ typedef struct test {
+ double dd;
+ double ds;
+ double dt;
+ float fd;
+ float fs;
+ float ft;
+ } Test;
+
+ Test test;
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(Test, dd)) ); // test
+ __ ldc1(f2, MemOperand(a0, OFFSET_OF(Test, ds)) ); // src1
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, dt)) ); // src2
+ __ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, fd)) ); // test
+ __ lwc1(f8, MemOperand(a0, OFFSET_OF(Test, fs)) ); // src1
+ __ lwc1(f10, MemOperand(a0, OFFSET_OF(Test, ft)) ); // src2
+ __ sel_d(f0, f2, f4);
+ __ sel_s(f6, f8, f10);
+ __ sdc1(f0, MemOperand(a0, OFFSET_OF(Test, dd)) );
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(Test, fd)) );
+ __ jr(ra);
+ __ nop();
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ const int test_size = 3;
+ const int input_size = 5;
+
+ double inputs_dt[input_size] = {0.0, 65.2, -70.32,
+ 18446744073709551621.0, -18446744073709551621.0};
+ double inputs_ds[input_size] = {0.1, 69.88, -91.325,
+ 18446744073709551625.0, -18446744073709551625.0};
+ float inputs_ft[input_size] = {0.0, 65.2, -70.32,
+ 18446744073709551621.0, -18446744073709551621.0};
+ float inputs_fs[input_size] = {0.1, 69.88, -91.325,
+ 18446744073709551625.0, -18446744073709551625.0};
+ double tests_D[test_size*2] = {2.8, 2.9, -2.8, -2.9,
+ 18446744073709551616.0, 18446744073709555712.0};
+ float tests_S[test_size*2] = {2.9, 2.8, -2.9, -2.8,
+ 18446744073709551616.0, 18446746272732807168.0};
+ for (int j=0; j < test_size; j+=2) {
+ for (int i=0; i < input_size; i++) {
+ test.dt = inputs_dt[i];
+ test.dd = tests_D[j];
+ test.ds = inputs_ds[i];
+ test.ft = inputs_ft[i];
+ test.fd = tests_S[j];
+ test.fs = inputs_fs[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.dd, inputs_ds[i]);
+ CHECK_EQ(test.fd, inputs_fs[i]);
+
+ test.dd = tests_D[j+1];
+ test.fd = tests_S[j+1];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.dd, inputs_dt[i]);
+ CHECK_EQ(test.fd, inputs_ft[i]);
+ }
}
}
}
-TEST(MIPS21) {
+TEST(rint_s) {
if (kArchVariant == kMips64r6) {
const int tableLength = 30;
CcTest::InitializeVM();
MacroAssembler assm(isolate, NULL, 0);
typedef struct test_float {
- double a;
- double b;
+ float a;
+ float b;
int fcsr;
}TestFloat;
TestFloat test;
- double inputs[tableLength] = {18446744073709551617.0,
+ float inputs[tableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
- 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
- 1.7976931348623157E308, 6.27463370218383111104242366943E-307,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
+ 1.7976931348623157E+38, 6.27463370218383111104242366943E-37,
309485009821345068724781056.89,
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
37778931862957161709582.0, 37778931862957161709583.0,
37778931862957161709584.0, 37778931862957161709585.0,
37778931862957161709586.0, 37778931862957161709587.0};
- double outputs_RN[tableLength] = {18446744073709551617.0,
+ float outputs_RN[tableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
- 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
- 1.7976931348623157E308, 0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
+ 1.7976931348623157E38, 0,
309485009821345068724781057.0,
2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
-2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
37778931862957161709582.0, 37778931862957161709583.0,
37778931862957161709584.0, 37778931862957161709585.0,
37778931862957161709586.0, 37778931862957161709587.0};
- double outputs_RZ[tableLength] = {18446744073709551617.0,
+ float outputs_RZ[tableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
- 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
- 1.7976931348623157E308, 0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
+ 1.7976931348623157E38, 0,
309485009821345068724781057.0,
2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
37778931862957161709582.0, 37778931862957161709583.0,
37778931862957161709584.0, 37778931862957161709585.0,
37778931862957161709586.0, 37778931862957161709587.0};
- double outputs_RP[tableLength] = {18446744073709551617.0,
+ float outputs_RP[tableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
- 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
- 1.7976931348623157E308, 1,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
+ 1.7976931348623157E38, 1,
309485009821345068724781057.0,
3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
37778931862957161709582.0, 37778931862957161709583.0,
37778931862957161709584.0, 37778931862957161709585.0,
37778931862957161709586.0, 37778931862957161709587.0};
- double outputs_RM[tableLength] = {18446744073709551617.0,
+ float outputs_RM[tableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
- 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
- 1.7976931348623157E308, 0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
+ 1.7976931348623157E38, 0,
309485009821345068724781057.0,
2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
37778931862957161709586.0, 37778931862957161709587.0};
int fcsr_inputs[4] =
{kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf};
- double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM};
- __ ldc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, a)) );
+ float* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM};
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, a)) );
__ lw(t0, MemOperand(a0, OFFSET_OF(TestFloat, fcsr)) );
__ cfc1(t1, FCSR);
__ ctc1(t0, FCSR);
- __ rint_d(f8, f4);
- __ sdc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, b)) );
+ __ rint_s(f8, f4);
+ __ swc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, b)) );
__ ctc1(t1, FCSR);
__ jr(ra);
__ nop();
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int j = 0;j < 4;j++) {
+
+ for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
- for (int i = 0;i < tableLength;i++) {
+ for (int i = 0; i < tableLength; i++) {
test.a = inputs[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs[j][i]);
}
+// Test MINA.fmt / MAXA.fmt (r6): select the operand with the smaller
+// (mina) resp. larger (maxa) absolute value, in both double and single
+// precision (e.g. mina(5.3, 4.8) == 4.8, maxa(-10.0, -8.9) == -10.0).
+TEST(mina_maxa) {
+ if (kArchVariant == kMips64r6) {
+ const int tableLength = 12;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test_float {
+ double a;
+ double b;
+ double resd;
+ double resd1;
+ float c;
+ float d;
+ float resf;
+ float resf1;
+ }TestFloat;
+
+ TestFloat test;
+ double inputsa[tableLength] = {
+ 5.3, 4.8, 6.1,
+ 9.8, 9.8, 9.8,
+ -10.0, -8.9, -9.8,
+ -10.0, -8.9, -9.8
+ };
+ double inputsb[tableLength] = {
+ 4.8, 5.3, 6.1,
+ -10.0, -8.9, -9.8,
+ 9.8, 9.8, 9.8,
+ -9.8, -11.2, -9.8
+ };
+ // Expected mina_d results (smallest magnitude operand).
+ double resd[tableLength] = {
+ 4.8, 4.8, 6.1,
+ 9.8, -8.9, 9.8,
+ 9.8, -8.9, 9.8,
+ -9.8, -8.9, -9.8
+ };
+ // Expected maxa_d results (largest magnitude operand).
+ double resd1[tableLength] = {
+ 5.3, 5.3, 6.1,
+ -10.0, 9.8, 9.8,
+ -10.0, 9.8, 9.8,
+ -10.0, -11.2, -9.8
+ };
+ float inputsc[tableLength] = {
+ 5.3, 4.8, 6.1,
+ 9.8, 9.8, 9.8,
+ -10.0, -8.9, -9.8,
+ -10.0, -8.9, -9.8
+ };
+ float inputsd[tableLength] = {
+ 4.8, 5.3, 6.1,
+ -10.0, -8.9, -9.8,
+ 9.8, 9.8, 9.8,
+ -9.8, -11.2, -9.8
+ };
+ float resf[tableLength] = {
+ 4.8, 4.8, 6.1,
+ 9.8, -8.9, 9.8,
+ 9.8, -8.9, 9.8,
+ -9.8, -8.9, -9.8
+ };
+ float resf1[tableLength] = {
+ 5.3, 5.3, 6.1,
+ -10.0, 9.8, 9.8,
+ -10.0, 9.8, 9.8,
+ -10.0, -11.2, -9.8
+ };
+
+ __ ldc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, a)) );
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, b)) );
+ __ lwc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, c)) );
+ __ lwc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, d)) );
+ __ mina_d(f6, f2, f4);
+ __ mina_s(f12, f8, f10);
+ __ maxa_d(f14, f2, f4);
+ __ maxa_s(f16, f8, f10);
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, resf)) );
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(TestFloat, resd)) );
+ __ swc1(f16, MemOperand(a0, OFFSET_OF(TestFloat, resf1)) );
+ __ sdc1(f14, MemOperand(a0, OFFSET_OF(TestFloat, resd1)) );
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputsa[i];
+ test.b = inputsb[i];
+ test.c = inputsc[i];
+ test.d = inputsd[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+
+ CHECK_EQ(test.resd, resd[i]);
+ CHECK_EQ(test.resf, resf[i]);
+ CHECK_EQ(test.resd1, resd1[i]);
+ CHECK_EQ(test.resf1, resf1[i]);
+ }
+ }
+}
+
+
+
+// ----------------------mips64r2 specific tests----------------------
+// Test TRUNC.L.D / TRUNC.L.S: round toward zero to a 64-bit integer
+// (e.g. -2.1 -> -2).  NaN, infinity and 2^63 (outside the int64 range)
+// must produce kFPU64InvalidResult.  Guarded to the r2 variant.
+TEST(trunc_l) {
+ if (kArchVariant == kMips64r2) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+ const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
+ typedef struct test_float {
+ double a;
+ float b;
+ int64_t c; // a trunc result
+ int64_t d; // b trunc result
+ }Test;
+ const int tableLength = 16;
+ double inputs_D[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::infinity(),
+ 9223372036854775808.0
+ };
+ float inputs_S[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<float>::quiet_NaN(),
+ std::numeric_limits<float>::infinity(),
+ 9223372036854775808.0
+ };
+ // Last three entries (NaN, Inf, 2^63) are the invalid-operation cases.
+ double outputs[tableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 2147483648.0, dFPU64InvalidResult,
+ dFPU64InvalidResult, dFPU64InvalidResult};
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
+ __ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, b)) );
+ __ trunc_l_d(f8, f4);
+ __ trunc_l_s(f10, f6);
+ __ sdc1(f8, MemOperand(a0, OFFSET_OF(Test, c)) );
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(Test, d)) );
+ __ jr(ra);
+ __ nop();
+ Test test;
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputs_D[i];
+ test.b = inputs_S[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.c, outputs[i]);
+ // The single-precision result must agree with the double result.
+ CHECK_EQ(test.d, test.c);
+ }
+ }
+}
+
+
+// Test MOVZ.fmt / MOVN.fmt: conditionally copy an FPU register depending
+// on whether GPR rt is zero (movz) or non-zero (movn).  The destination
+// registers are zeroed first and their initial values saved in the
+// *old/*old1 fields, so the not-taken case can be checked against them.
+TEST(movz_movn) {
+ if (kArchVariant == kMips64r2) {
+ const int tableLength = 4;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test_float {
+ int64_t rt;
+ double a;
+ double b;
+ double bold;
+ double b1;
+ double bold1;
+ float c;
+ float d;
+ float dold;
+ float d1;
+ float dold1;
+ }TestFloat;
+
+ TestFloat test;
+ double inputs_D[tableLength] = {
+ 5.3, -5.3, 5.3, -2.9
+ };
+ double inputs_S[tableLength] = {
+ 4.8, 4.8, -4.8, -0.29
+ };
+
+ float outputs_S[tableLength] = {
+ 4.8, 4.8, -4.8, -0.29
+ };
+ double outputs_D[tableLength] = {
+ 5.3, -5.3, 5.3, -2.9
+ };
+
+ __ ldc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, a)) );
+ __ lwc1(f6, MemOperand(a0, OFFSET_OF(TestFloat, c)) );
+ // NOTE(review): rt is declared int64_t but read with lw (32-bit load);
+ // this picks up the low word on little-endian targets and the test only
+ // stores 0/1 — confirm this is intended rather than an ld.
+ __ lw(t0, MemOperand(a0, OFFSET_OF(TestFloat, rt)) );
+ __ li(t1, 0x0);
+ __ mtc1(t1, f12);
+ __ mtc1(t1, f10);
+ __ mtc1(t1, f16);
+ __ mtc1(t1, f14);
+ __ sdc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, bold)) );
+ __ swc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, dold)) );
+ __ sdc1(f16, MemOperand(a0, OFFSET_OF(TestFloat, bold1)) );
+ __ swc1(f14, MemOperand(a0, OFFSET_OF(TestFloat, dold1)) );
+ __ movz_s(f10, f6, t0);
+ __ movz_d(f12, f2, t0);
+ __ movn_s(f14, f6, t0);
+ __ movn_d(f16, f2, t0);
+ __ swc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, d)) );
+ __ sdc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, b)) );
+ __ swc1(f14, MemOperand(a0, OFFSET_OF(TestFloat, d1)) );
+ __ sdc1(f16, MemOperand(a0, OFFSET_OF(TestFloat, b1)) );
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputs_D[i];
+ test.c = inputs_S[i];
+
+ // rt != 0: movn moves, movz leaves the zeroed destination untouched.
+ test.rt = 1;
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.b, test.bold);
+ CHECK_EQ(test.d, test.dold);
+ CHECK_EQ(test.b1, outputs_D[i]);
+ CHECK_EQ(test.d1, outputs_S[i]);
+
+ // rt == 0: movz moves, movn leaves the zeroed destination untouched.
+ test.rt = 0;
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.b, outputs_D[i]);
+ CHECK_EQ(test.d, outputs_S[i]);
+ CHECK_EQ(test.b1, test.bold1);
+ CHECK_EQ(test.d1, test.dold1);
+ }
+ }
+}
+
+
+// Test MOVT.fmt / MOVF.fmt: conditionally copy an FPU register when the
+// FP condition flag `cc` is true (movt) or false (movf), for every cc in
+// 0..7.  The test writes the flag directly into FCSR: bit 23 for cc 0,
+// bit 24+cc otherwise, exactly as the code below sets test.fcsr.
+// NOTE(review): the test name says movt_movd but it also exercises
+// movf_s/movf_d — presumably "movt_movf" was intended; confirm.
+TEST(movt_movd) {
+ if (kArchVariant == kMips64r2) {
+ const int tableLength = 4;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ typedef struct test_float {
+ double srcd;
+ double dstd;
+ double dstdold;
+ double dstd1;
+ double dstdold1;
+ float srcf;
+ float dstf;
+ float dstfold;
+ float dstf1;
+ float dstfold1;
+ int32_t cc;
+ int32_t fcsr;
+ }TestFloat;
+
+ TestFloat test;
+ double inputs_D[tableLength] = {
+ 5.3, -5.3, 20.8, -2.9
+ };
+ double inputs_S[tableLength] = {
+ 4.88, 4.8, -4.8, -0.29
+ };
+
+ float outputs_S[tableLength] = {
+ 4.88, 4.8, -4.8, -0.29
+ };
+ double outputs_D[tableLength] = {
+ 5.3, -5.3, 20.8, -2.9
+ };
+ int condition_flags[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+ for (int i = 0; i < tableLength; i++) {
+ test.srcd = inputs_D[i];
+ test.srcf = inputs_S[i];
+
+ // The cc operand is baked into the generated code, so the code object
+ // is rebuilt for each condition flag.
+ for (int j = 0; j< 8; j++) {
+ test.cc = condition_flags[j];
+ if (test.cc == 0) {
+ test.fcsr = 1 << 23;
+ } else {
+ test.fcsr = 1 << (24+condition_flags[j]);
+ }
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+ __ ldc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, srcd)) );
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, srcf)) );
+ __ lw(t1, MemOperand(a0, OFFSET_OF(TestFloat, fcsr)) );
+ // Save the caller's FCSR in t0 and install the test value.
+ __ cfc1(t0, FCSR);
+ __ ctc1(t1, FCSR);
+ __ li(t2, 0x0);
+ __ mtc1(t2, f12);
+ __ mtc1(t2, f10);
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, dstdold)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, dstfold)) );
+ __ movt_s(f12, f4, test.cc);
+ __ movt_d(f10, f2, test.cc);
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, dstf)) );
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, dstd)) );
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, dstdold1)) );
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, dstfold1)) );
+ __ movf_s(f12, f4, test.cc);
+ __ movf_d(f10, f2, test.cc);
+ __ swc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, dstf1)) );
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, dstd1)) );
+ // Restore the caller's FCSR.
+ __ ctc1(t0, FCSR);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ // Flag set: movt moves, movf leaves its destination unchanged.
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.dstf, outputs_S[i]);
+ CHECK_EQ(test.dstd, outputs_D[i]);
+ CHECK_EQ(test.dstf1, test.dstfold1);
+ CHECK_EQ(test.dstd1, test.dstdold1);
+ // Flag clear: movf moves, movt leaves its destination unchanged.
+ test.fcsr = 0;
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.dstf, test.dstfold);
+ CHECK_EQ(test.dstd, test.dstdold);
+ CHECK_EQ(test.dstf1, outputs_S[i]);
+ CHECK_EQ(test.dstd1, outputs_D[i]);
+ }
+ }
+ }
+}
+
+
+
+// ----------------------tests for all archs--------------------------
+// Test CVT.W.D under all four FCSR rounding modes (round-to-nearest,
+// toward zero, toward +inf, toward -inf).  The generated code saves and
+// restores the caller's FCSR around the conversion.  2147483653.0 is
+// outside the int32 range and must yield kFPUInvalidResult in every mode.
+TEST(cvt_w_d) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test_float {
+ double a;
+ int32_t b;
+ int fcsr;
+ }Test;
+ const int tableLength = 24;
+ double inputs[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483637.0, 2147483638.0, 2147483639.0,
+ 2147483640.0, 2147483641.0, 2147483642.0,
+ 2147483643.0, 2147483644.0, 2147483645.0,
+ 2147483646.0, 2147483647.0, 2147483653.0
+ };
+ double outputs_RN[tableLength] = {
+ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
+ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
+ 2147483637.0, 2147483638.0, 2147483639.0,
+ 2147483640.0, 2147483641.0, 2147483642.0,
+ 2147483643.0, 2147483644.0, 2147483645.0,
+ 2147483646.0, 2147483647.0, kFPUInvalidResult};
+ double outputs_RZ[tableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 2147483637.0, 2147483638.0, 2147483639.0,
+ 2147483640.0, 2147483641.0, 2147483642.0,
+ 2147483643.0, 2147483644.0, 2147483645.0,
+ 2147483646.0, 2147483647.0, kFPUInvalidResult};
+ double outputs_RP[tableLength] = {
+ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 2147483637.0, 2147483638.0, 2147483639.0,
+ 2147483640.0, 2147483641.0, 2147483642.0,
+ 2147483643.0, 2147483644.0, 2147483645.0,
+ 2147483646.0, 2147483647.0, kFPUInvalidResult};
+ double outputs_RM[tableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
+ 2147483637.0, 2147483638.0, 2147483639.0,
+ 2147483640.0, 2147483641.0, 2147483642.0,
+ 2147483643.0, 2147483644.0, 2147483645.0,
+ 2147483646.0, 2147483647.0, kFPUInvalidResult};
+ int fcsr_inputs[4] =
+ {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf};
+ double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM};
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
+ __ lw(t0, MemOperand(a0, OFFSET_OF(Test, fcsr)) );
+ // Save the caller's FCSR in t1, install the requested rounding mode.
+ __ cfc1(t1, FCSR);
+ __ ctc1(t0, FCSR);
+ __ cvt_w_d(f8, f4);
+ __ swc1(f8, MemOperand(a0, OFFSET_OF(Test, b)) );
+ __ ctc1(t1, FCSR);
+ __ jr(ra);
+ __ nop();
+ Test test;
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int j = 0; j < 4; j++) {
+ test.fcsr = fcsr_inputs[j];
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputs[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.b, outputs[j][i]);
+ }
+ }
+}
+
+
+// Test TRUNC.W.D / TRUNC.W.S: round toward zero to a 32-bit integer
+// (e.g. -2.6 -> -2).  NaN, infinity and 2^31 (outside the int32 range)
+// must produce kFPUInvalidResult.
+TEST(trunc_w) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ typedef struct test_float {
+ double a;
+ float b;
+ int32_t c; // a trunc result
+ int32_t d; // b trunc result
+ }Test;
+ const int tableLength = 15;
+ double inputs_D[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::infinity()
+ };
+ float inputs_S[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<float>::quiet_NaN(),
+ std::numeric_limits<float>::infinity()
+ };
+ double outputs[tableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ kFPUInvalidResult, kFPUInvalidResult,
+ kFPUInvalidResult};
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
+ __ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, b)) );
+ __ trunc_w_d(f8, f4);
+ __ trunc_w_s(f10, f6);
+ __ swc1(f8, MemOperand(a0, OFFSET_OF(Test, c)) );
+ __ swc1(f10, MemOperand(a0, OFFSET_OF(Test, d)) );
+ __ jr(ra);
+ __ nop();
+ Test test;
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputs_D[i];
+ test.b = inputs_S[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.c, outputs[i]);
+ // The single-precision result must agree with the double result.
+ CHECK_EQ(test.d, test.c);
+ }
+}
+
+
+TEST(round_w) {
+ // Tests round.w.d / round.w.s: round to nearest (ties to even) to a
+ // 32-bit integer.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ // Per-iteration record: generated code reads a/b via a0, writes c/d.
+ typedef struct test_float {
+ double a;
+ float b;
+ int32_t c; // a round result
+ int32_t d; // b round result
+ }Test;
+ const int tableLength = 15;
+ double inputs_D[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::infinity()
+ };
+ float inputs_S[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<float>::quiet_NaN(),
+ std::numeric_limits<float>::infinity()
+ };
+ // Note the ties-to-even behavior: 2.5 -> 2.0 but 3.5 -> 4.0.
+ double outputs[tableLength] = {
+ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
+ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
+ kFPUInvalidResult, kFPUInvalidResult,
+ kFPUInvalidResult};
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
+ __ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, b)) );
+ __ round_w_d(f8, f4);
+ __ round_w_s(f10, f6);
+ __ swc1(f8, MemOperand(a0, OFFSET_OF(Test, c)) );
+ __ swc1(f10, MemOperand(a0, OFFSET_OF(Test, d)) );
+ __ jr(ra);
+ __ nop();
+ Test test;
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputs_D[i];
+ test.b = inputs_S[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.c, outputs[i]);
+ CHECK_EQ(test.d, test.c);
+ }
+}
+
+
+TEST(round_l) {
+ // Tests round.l.d / round.l.s: round to nearest (ties to even) to a
+ // 64-bit integer.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+ const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
+ // Per-iteration record: generated code reads a/b via a0, writes c/d.
+ typedef struct test_float {
+ double a;
+ float b;
+ int64_t c; // a round result
+ int64_t d; // b round result
+ }Test;
+ const int tableLength = 16;
+ double inputs_D[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::infinity(),
+ 9223372036854775808.0
+ };
+ float inputs_S[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<float>::quiet_NaN(),
+ std::numeric_limits<float>::infinity(),
+ 9223372036854775808.0
+ };
+ // 2^31 fits in an int64_t; NaN, infinity and 2^63 (out of int64_t range)
+ // must yield the 64-bit FPU invalid-operation result.
+ double outputs[tableLength] = {
+ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
+ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
+ 2147483648.0, dFPU64InvalidResult,
+ dFPU64InvalidResult, dFPU64InvalidResult};
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
+ __ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, b)) );
+ __ round_l_d(f8, f4);
+ __ round_l_s(f10, f6);
+ __ sdc1(f8, MemOperand(a0, OFFSET_OF(Test, c)) );
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(Test, d)) );
+ __ jr(ra);
+ __ nop();
+ Test test;
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputs_D[i];
+ test.b = inputs_S[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.c, outputs[i]);
+ CHECK_EQ(test.d, test.c);
+ }
+}
+
+
+TEST(sub) {
+ // Tests sub.s / sub.d over all sign combinations of the operands.
+ const int tableLength = 12;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ // Per-iteration record: generated code reads a/b (single) and c/d (double)
+ // via a0 and writes resultS / resultD.
+ typedef struct test_float {
+ float a;
+ float b;
+ float resultS;
+ double c;
+ double d;
+ double resultD;
+ }TestFloat;
+
+ TestFloat test;
+ double inputfs_D[tableLength] = {
+ 5.3, 4.8, 2.9, -5.3, -4.8, -2.9,
+ 5.3, 4.8, 2.9, -5.3, -4.8, -2.9
+ };
+ double inputft_D[tableLength] = {
+ 4.8, 5.3, 2.9, 4.8, 5.3, 2.9,
+ -4.8, -5.3, -2.9, -4.8, -5.3, -2.9
+ };
+ // NOTE(review): exact-equality checks below assume these differences are
+ // bit-exact under IEEE 754 for the chosen constants — verify if flaky.
+ double outputs_D[tableLength] = {
+ 0.5, -0.5, 0.0, -10.1, -10.1, -5.8,
+ 10.1, 10.1, 5.8, -0.5, 0.5, 0.0
+ };
+ float inputfs_S[tableLength] = {
+ 5.3, 4.8, 2.9, -5.3, -4.8, -2.9,
+ 5.3, 4.8, 2.9, -5.3, -4.8, -2.9
+ };
+ float inputft_S[tableLength] = {
+ 4.8, 5.3, 2.9, 4.8, 5.3, 2.9,
+ -4.8, -5.3, -2.9, -4.8, -5.3, -2.9
+ };
+ float outputs_S[tableLength] = {
+ 0.5, -0.5, 0.0, -10.1, -10.1, -5.8,
+ 10.1, 10.1, 5.8, -0.5, 0.5, 0.0
+ };
+ __ lwc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, a)) );
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, b)) );
+ __ ldc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, c)) );
+ __ ldc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, d)) );
+ __ sub_s(f6, f2, f4);
+ __ sub_d(f12, f8, f10);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(TestFloat, resultS)) );
+ __ sdc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, resultD)) );
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputfs_S[i];
+ test.b = inputft_S[i];
+ test.c = inputfs_D[i];
+ test.d = inputft_D[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.resultS, outputs_S[i]);
+ CHECK_EQ(test.resultD, outputs_D[i]);
+ }
+}
+
+
+TEST(sqrt_rsqrt_recip) {
+ // Tests sqrt, rsqrt (1/sqrt) and recip (1/x) in single and double
+ // precision. rsqrt/recip are approximate instructions, so results are
+ // checked against a tolerance rather than for exact equality.
+ const int tableLength = 4;
+ const double deltaDouble = 2E-15;
+ const float deltaFloat = 2E-7;
+ const float sqrt2_s = sqrt(2);
+ const double sqrt2_d = sqrt(2);
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ // Per-iteration record: a/c are the inputs (via a0); resultS*/resultD*
+ // hold sqrt, rsqrt and recip outputs respectively.
+ typedef struct test_float {
+ float a;
+ float resultS;
+ float resultS1;
+ float resultS2;
+ double c;
+ double resultD;
+ double resultD1;
+ double resultD2;
+ }TestFloat;
+ TestFloat test;
+
+ double inputs_D[tableLength] = {
+ 0.0L, 4.0L, 2.0L, 4e-28L
+ };
+
+ double outputs_D[tableLength] = {
+ 0.0L, 2.0L, sqrt2_d, 2e-14L
+ };
+ float inputs_S[tableLength] = {
+ 0.0, 4.0, 2.0, 4e-28
+ };
+
+ float outputs_S[tableLength] = {
+ 0.0, 2.0, sqrt2_s, 2e-14
+ };
+
+
+ __ lwc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, a)) );
+ __ ldc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, c)) );
+ __ sqrt_s(f6, f2);
+ __ sqrt_d(f12, f8);
+ __ rsqrt_d(f14, f8);
+ __ rsqrt_s(f16, f2);
+ __ recip_d(f18, f8);
+ __ recip_s(f20, f2);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(TestFloat, resultS)) );
+ __ sdc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, resultD)) );
+ __ swc1(f16, MemOperand(a0, OFFSET_OF(TestFloat, resultS1)) );
+ __ sdc1(f14, MemOperand(a0, OFFSET_OF(TestFloat, resultD1)) );
+ __ swc1(f20, MemOperand(a0, OFFSET_OF(TestFloat, resultS2)) );
+ __ sdc1(f18, MemOperand(a0, OFFSET_OF(TestFloat, resultD2)) );
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ for (int i = 0; i < tableLength; i++) {
+ float f1;
+ double d1;
+ test.a = inputs_S[i];
+ test.c = inputs_D[i];
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+
+ CHECK_EQ(test.resultS, outputs_S[i]);
+ CHECK_EQ(test.resultD, outputs_D[i]);
+
+ if (i != 0) {
+ // Approximate instructions: compare |result - reference| against the
+ // per-precision tolerance.
+ f1 = test.resultS1 - 1.0F/outputs_S[i];
+ f1 = (f1 < 0) ? f1 : -f1;
+ CHECK(f1 <= deltaFloat);
+ d1 = test.resultD1 - 1.0L/outputs_D[i];
+ d1 = (d1 < 0) ? d1 : -d1;
+ CHECK(d1 <= deltaDouble);
+ f1 = test.resultS2 - 1.0F/inputs_S[i];
+ f1 = (f1 < 0) ? f1 : -f1;
+ CHECK(f1 <= deltaFloat);
+ d1 = test.resultD2 - 1.0L/inputs_D[i];
+ d1 = (d1 < 0) ? d1 : -d1;
+ CHECK(d1 <= deltaDouble);
+ } else {
+ // Input 0.0: rsqrt/recip reference values (1/0) are infinities, which
+ // must match exactly.
+ CHECK_EQ(test.resultS1, 1.0F/outputs_S[i]);
+ CHECK_EQ(test.resultD1, 1.0L/outputs_D[i]);
+ CHECK_EQ(test.resultS2, 1.0F/inputs_S[i]);
+ CHECK_EQ(test.resultD2, 1.0L/inputs_D[i]);
+ }
+ }
+}
+
+
+TEST(neg) {
+ // Tests neg.s / neg.d (floating-point sign negation).
+ const int tableLength = 2;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ // Per-iteration record: generated code reads a/c via a0 and writes
+ // resultS / resultD.
+ typedef struct test_float {
+ float a;
+ float resultS;
+ double c;
+ double resultD;
+ }TestFloat;
+
+ TestFloat test;
+ double inputs_D[tableLength] = {
+ 4.0, -2.0
+ };
+
+ double outputs_D[tableLength] = {
+ -4.0, 2.0
+ };
+ float inputs_S[tableLength] = {
+ 4.0, -2.0
+ };
+
+ float outputs_S[tableLength] = {
+ -4.0, 2.0
+ };
+ __ lwc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, a)) );
+ __ ldc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, c)) );
+ __ neg_s(f6, f2);
+ __ neg_d(f12, f8);
+ __ swc1(f6, MemOperand(a0, OFFSET_OF(TestFloat, resultS)) );
+ __ sdc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, resultD)) );
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputs_S[i];
+ test.c = inputs_D[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.resultS, outputs_S[i]);
+ CHECK_EQ(test.resultD, outputs_D[i]);
+ }
+}
+
+
+
+TEST(mul) {
+ // Tests mul.s / mul.d; expected values are computed in the host loop as
+ // the product of the same inputs, so no output table is needed.
+ const int tableLength = 4;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ // Per-iteration record: generated code reads a/b (single) and c/d
+ // (double) via a0 and writes resultS / resultD.
+ typedef struct test_float {
+ float a;
+ float b;
+ float resultS;
+ double c;
+ double d;
+ double resultD;
+ }TestFloat;
+
+ TestFloat test;
+ double inputfs_D[tableLength] = {
+ 5.3, -5.3, 5.3, -2.9
+ };
+ double inputft_D[tableLength] = {
+ 4.8, 4.8, -4.8, -0.29
+ };
+
+ float inputfs_S[tableLength] = {
+ 5.3, -5.3, 5.3, -2.9
+ };
+ float inputft_S[tableLength] = {
+ 4.8, 4.8, -4.8, -0.29
+ };
+
+ __ lwc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, a)) );
+ __ lwc1(f4, MemOperand(a0, OFFSET_OF(TestFloat, b)) );
+ __ ldc1(f6, MemOperand(a0, OFFSET_OF(TestFloat, c)) );
+ __ ldc1(f8, MemOperand(a0, OFFSET_OF(TestFloat, d)) );
+ __ mul_s(f10, f2, f4);
+ __ mul_d(f12, f6, f8);
+ __ swc1(f10, MemOperand(a0, OFFSET_OF(TestFloat, resultS)) );
+ __ sdc1(f12, MemOperand(a0, OFFSET_OF(TestFloat, resultD)) );
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputfs_S[i];
+ test.b = inputft_S[i];
+ test.c = inputfs_D[i];
+ test.d = inputft_D[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ // Compare against the host-computed product of the same operands.
+ CHECK_EQ(test.resultS, inputfs_S[i]*inputft_S[i]);
+ CHECK_EQ(test.resultD, inputfs_D[i]*inputft_D[i]);
+ }
+}
+
+
+TEST(mov) {
+ // Tests mov.s / mov.d (FPU register-to-register moves): the value written
+ // back must be identical to the value loaded.
+ const int tableLength = 4;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ // Per-iteration record: generated code reads a/c via a0 and writes the
+ // moved values into b/d.
+ typedef struct test_float {
+ double a;
+ double b;
+ float c;
+ float d;
+ }TestFloat;
+
+ TestFloat test;
+ double inputs_D[tableLength] = {
+ 5.3, -5.3, 5.3, -2.9
+ };
+ double inputs_S[tableLength] = {
+ 4.8, 4.8, -4.8, -0.29
+ };
+
+ float outputs_S[tableLength] = {
+ 4.8, 4.8, -4.8, -0.29
+ };
+ double outputs_D[tableLength] = {
+ 5.3, -5.3, 5.3, -2.9
+ };
+
+ __ ldc1(f2, MemOperand(a0, OFFSET_OF(TestFloat, a)) );
+ __ lwc1(f6, MemOperand(a0, OFFSET_OF(TestFloat, c)) );
+ __ mov_s(f18, f6);
+ __ mov_d(f20, f2);
+ __ swc1(f18, MemOperand(a0, OFFSET_OF(TestFloat, d)) );
+ __ sdc1(f20, MemOperand(a0, OFFSET_OF(TestFloat, b)) );
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputs_D[i];
+ test.c = inputs_S[i];
+
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.b, outputs_D[i]);
+ CHECK_EQ(test.d, outputs_S[i]);
+ }
+}
+
+
+TEST(floor_w) {
+ // Tests floor.w.d / floor.w.s: round toward minus infinity to a 32-bit
+ // integer.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ // Per-iteration record: generated code reads a/b via a0, writes c/d.
+ typedef struct test_float {
+ double a;
+ float b;
+ int32_t c; // a floor result
+ int32_t d; // b floor result
+ }Test;
+ const int tableLength = 15;
+ double inputs_D[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::infinity()
+ };
+ float inputs_S[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<float>::quiet_NaN(),
+ std::numeric_limits<float>::infinity()
+ };
+ // Last three entries (out-of-range, NaN, infinity) must yield the FPU
+ // invalid-operation result.
+ double outputs[tableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
+ kFPUInvalidResult, kFPUInvalidResult,
+ kFPUInvalidResult};
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
+ __ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, b)) );
+ __ floor_w_d(f8, f4);
+ __ floor_w_s(f10, f6);
+ __ swc1(f8, MemOperand(a0, OFFSET_OF(Test, c)) );
+ __ swc1(f10, MemOperand(a0, OFFSET_OF(Test, d)) );
+ __ jr(ra);
+ __ nop();
+ Test test;
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputs_D[i];
+ test.b = inputs_S[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.c, outputs[i]);
+ CHECK_EQ(test.d, test.c);
+ }
+}
+
+
+TEST(floor_l) {
+ // Tests floor.l.d / floor.l.s: round toward minus infinity to a 64-bit
+ // integer.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+ const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
+ // Per-iteration record: generated code reads a/b via a0, writes c/d.
+ typedef struct test_float {
+ double a;
+ float b;
+ int64_t c; // a floor result
+ int64_t d; // b floor result
+ }Test;
+ const int tableLength = 16;
+ double inputs_D[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::infinity(),
+ 9223372036854775808.0
+ };
+ float inputs_S[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<float>::quiet_NaN(),
+ std::numeric_limits<float>::infinity(),
+ 9223372036854775808.0
+ };
+ // 2^31 fits in int64_t; NaN, infinity and 2^63 (out of int64_t range)
+ // must yield the 64-bit FPU invalid-operation result.
+ double outputs[tableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
+ 2147483648.0, dFPU64InvalidResult,
+ dFPU64InvalidResult, dFPU64InvalidResult};
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
+ __ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, b)) );
+ __ floor_l_d(f8, f4);
+ __ floor_l_s(f10, f6);
+ __ sdc1(f8, MemOperand(a0, OFFSET_OF(Test, c)) );
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(Test, d)) );
+ __ jr(ra);
+ __ nop();
+ Test test;
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputs_D[i];
+ test.b = inputs_S[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.c, outputs[i]);
+ CHECK_EQ(test.d, test.c);
+ }
+}
+
+
+TEST(ceil_w) {
+ // Tests ceil.w.d / ceil.w.s: round toward plus infinity to a 32-bit
+ // integer.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+
+ // Per-iteration record: generated code reads a/b via a0, writes c/d.
+ typedef struct test_float {
+ double a;
+ float b;
+ int32_t c; // a ceil result
+ int32_t d; // b ceil result
+ }Test;
+ const int tableLength = 15;
+ double inputs_D[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::infinity()
+ };
+ float inputs_S[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<float>::quiet_NaN(),
+ std::numeric_limits<float>::infinity()
+ };
+ // Last three entries (out-of-range, NaN, infinity) must yield the FPU
+ // invalid-operation result.
+ double outputs[tableLength] = {
+ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ kFPUInvalidResult, kFPUInvalidResult,
+ kFPUInvalidResult};
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
+ __ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, b)) );
+ __ ceil_w_d(f8, f4);
+ __ ceil_w_s(f10, f6);
+ __ swc1(f8, MemOperand(a0, OFFSET_OF(Test, c)) );
+ __ swc1(f10, MemOperand(a0, OFFSET_OF(Test, d)) );
+ __ jr(ra);
+ __ nop();
+ Test test;
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputs_D[i];
+ test.b = inputs_S[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.c, outputs[i]);
+ CHECK_EQ(test.d, test.c);
+ }
+}
+
+
+TEST(ceil_l) {
+ // Tests ceil.l.d / ceil.l.s: round toward plus infinity to a 64-bit
+ // integer.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0);
+ const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
+ // Per-iteration record: generated code reads a/b via a0, writes c/d.
+ typedef struct test_float {
+ double a;
+ float b;
+ int64_t c; // a ceil result
+ int64_t d; // b ceil result
+ }Test;
+ const int tableLength = 16;
+ double inputs_D[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::infinity(),
+ 9223372036854775808.0
+ };
+ float inputs_S[tableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<float>::quiet_NaN(),
+ std::numeric_limits<float>::infinity(),
+ 9223372036854775808.0
+ };
+ // 2^31 fits in int64_t; NaN, infinity and 2^63 (out of int64_t range)
+ // must yield the 64-bit FPU invalid-operation result.
+ double outputs[tableLength] = {
+ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 2147483648.0, dFPU64InvalidResult,
+ dFPU64InvalidResult, dFPU64InvalidResult};
+
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(Test, a)) );
+ __ lwc1(f6, MemOperand(a0, OFFSET_OF(Test, b)) );
+ __ ceil_l_d(f8, f4);
+ __ ceil_l_s(f10, f6);
+ __ sdc1(f8, MemOperand(a0, OFFSET_OF(Test, c)) );
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(Test, d)) );
+ __ jr(ra);
+ __ nop();
+ Test test;
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int i = 0; i < tableLength; i++) {
+ test.a = inputs_D[i];
+ test.b = inputs_S[i];
+ (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ CHECK_EQ(test.c, outputs[i]);
+ CHECK_EQ(test.d, test.c);
+ }
+}
TEST(jump_tables1) {
}
-// Tests only seleqz, selnez, seleqz.fmt and selnez.fmt
+// Tests FPU instruction disassembly: r6-only selects/min/max/rint,
+// conversions and arithmetic common to all variants, and r2-only
+// conditional moves. Expected strings are "<hex encoding> <mnemonic>".
TEST(Type1) {
+ SET_UP();
if (IsMipsArchVariant(kMips32r6)) {
- SET_UP();
COMPARE(seleqz(a0, a1, a2), "00a62035 seleqz a0, a1, a2");
COMPARE(selnez(a0, a1, a2), "00a62037 selnez a0, a1, a2");
- COMPARE(seleqz(D, f3, f4, f5), "462520d4 seleqz.d f3, f4, f5");
- COMPARE(selnez(D, f3, f4, f5), "462520d7 selnez.d f3, f4, f5");
+ COMPARE(seleqz_d(f3, f4, f5), "462520d4 seleqz.d f3, f4, f5");
+ COMPARE(selnez_d(f3, f4, f5), "462520d7 selnez.d f3, f4, f5");
+ COMPARE(seleqz_s(f3, f4, f5), "460520d4 seleqz.s f3, f4, f5");
+ COMPARE(selnez_s(f3, f4, f5), "460520d7 selnez.s f3, f4, f5");
COMPARE(min_d(f3, f4, f5), "462520dc min.d f3, f4, f5");
COMPARE(max_d(f3, f4, f5), "462520de max.d f3, f4, f5");
+
+ COMPARE(sel_s(f3, f4, f5), "460520d0 sel.s f3, f4, f5");
+ COMPARE(sel_d(f3, f4, f5), "462520d0 sel.d f3, f4, f5");
+
COMPARE(rint_d(f8, f6), "4620321a rint.d f8, f6");
+ COMPARE(rint_s(f8, f6), "4600321a rint.s f8, f6");
+
+ COMPARE(min_s(f3, f4, f5), "460520dc min.s f3, f4, f5");
+ COMPARE(max_s(f3, f4, f5), "460520de max.s f3, f4, f5");
- VERIFY_RUN();
+ COMPARE(mina_d(f3, f4, f5), "462520dd mina.d f3, f4, f5");
+ COMPARE(mina_s(f3, f4, f5), "460520dd mina.s f3, f4, f5");
+
+ COMPARE(maxa_d(f3, f4, f5), "462520df maxa.d f3, f4, f5");
+ COMPARE(maxa_s(f3, f4, f5), "460520df maxa.s f3, f4, f5");
}
+
+ // Conversions and FP arithmetic available on all supported variants.
+ COMPARE(trunc_w_d(f8, f6), "4620320d trunc.w.d f8, f6");
+ COMPARE(trunc_w_s(f8, f6), "4600320d trunc.w.s f8, f6");
+
+ COMPARE(round_w_s(f8, f6), "4600320c round.w.s f8, f6");
+ COMPARE(round_w_d(f8, f6), "4620320c round.w.d f8, f6");
+
+ COMPARE(round_l_s(f8, f6), "46003208 round.l.s f8, f6");
+ COMPARE(round_l_d(f8, f6), "46203208 round.l.d f8, f6");
+
+ COMPARE(floor_w_s(f8, f6), "4600320f floor.w.s f8, f6");
+ COMPARE(floor_w_d(f8, f6), "4620320f floor.w.d f8, f6");
+
+ COMPARE(floor_l_s(f8, f6), "4600320b floor.l.s f8, f6");
+ COMPARE(floor_l_d(f8, f6), "4620320b floor.l.d f8, f6");
+
+ COMPARE(ceil_w_s(f8, f6), "4600320e ceil.w.s f8, f6");
+ COMPARE(ceil_w_d(f8, f6), "4620320e ceil.w.d f8, f6");
+
+ COMPARE(ceil_l_s(f8, f6), "4600320a ceil.l.s f8, f6");
+ COMPARE(ceil_l_d(f8, f6), "4620320a ceil.l.d f8, f6");
+
+ COMPARE(sub_s(f10, f8, f6), "46064281 sub.s f10, f8, f6");
+ COMPARE(sub_d(f10, f8, f6), "46264281 sub.d f10, f8, f6");
+
+ COMPARE(sqrt_s(f8, f6), "46003204 sqrt.s f8, f6");
+ COMPARE(sqrt_d(f8, f6), "46203204 sqrt.d f8, f6");
+
+ COMPARE(neg_s(f8, f6), "46003207 neg.s f8, f6");
+ COMPARE(neg_d(f8, f6), "46203207 neg.d f8, f6");
+
+ COMPARE(mul_s(f8, f6, f4), "46043202 mul.s f8, f6, f4");
+ COMPARE(mul_d(f8, f6, f4), "46243202 mul.d f8, f6, f4");
+
+ COMPARE(rsqrt_s(f8, f6), "46003216 rsqrt.s f8, f6");
+ COMPARE(rsqrt_d(f8, f6), "46203216 rsqrt.d f8, f6");
+
+ COMPARE(recip_s(f8, f6), "46003215 recip.s f8, f6");
+ COMPARE(recip_d(f8, f6), "46203215 recip.d f8, f6");
+
+ COMPARE(mov_s(f6, f4), "46002186 mov.s f6, f4");
+ COMPARE(mov_d(f6, f4), "46202186 mov.d f6, f4");
+
+ // r2-only: 64-bit truncation and FP conditional moves.
+ if (IsMipsArchVariant(kMips32r2)) {
+ COMPARE(trunc_l_d(f8, f6), "46203209 trunc.l.d f8, f6");
+ COMPARE(trunc_l_s(f8, f6), "46003209 trunc.l.s f8, f6");
+
+ COMPARE(movz_s(f6, f4, t0), "46082192 movz.s f6, f4, t0");
+ COMPARE(movz_d(f6, f4, t0), "46282192 movz.d f6, f4, t0");
+
+ COMPARE(movt_s(f6, f4, 4), "46112191 movt.s f6, f4, cc(1)");
+ COMPARE(movt_d(f6, f4, 4), "46312191 movt.d f6, f4, cc(1)");
+
+ COMPARE(movf_s(f6, f4, 4), "46102191 movf.s f6, f4, cc(1)");
+ COMPARE(movf_d(f6, f4, 4), "46302191 movf.d f6, f4, cc(1)");
+
+ COMPARE(movn_s(f6, f4, t0), "46082193 movn.s f6, f4, t0");
+ COMPARE(movn_d(f6, f4, t0), "46282193 movn.d f6, f4, t0");
+ }
+ VERIFY_RUN();
}
+// Tests FPU instruction disassembly on mips64: r6-only selects/min/max/rint,
+// conversions and arithmetic common to all variants, and r2-only
+// conditional moves. Expected strings are "<hex encoding> <mnemonic>".
TEST(Type1) {
+ SET_UP();
if (kArchVariant == kMips64r6) {
- SET_UP();
COMPARE(seleqz(a0, a1, a2), "00a62035 seleqz a0, a1, a2");
COMPARE(selnez(a0, a1, a2), "00a62037 selnez a0, a1, a2");
COMPARE(seleqz(D, f3, f4, f5), "462520d4 seleqz.d f3, f4, f5");
COMPARE(selnez(D, f3, f4, f5), "462520d7 selnez.d f3, f4, f5");
+ COMPARE(seleqz(S, f3, f4, f5), "460520d4 seleqz.s f3, f4, f5");
+ COMPARE(selnez(S, f3, f4, f5), "460520d7 selnez.s f3, f4, f5");
COMPARE(min_d(f3, f4, f5), "462520dc min.d f3, f4, f5");
COMPARE(max_d(f3, f4, f5), "462520de max.d f3, f4, f5");
+
+ COMPARE(sel(S, f3, f4, f5), "460520d0 sel.s f3, f4, f5");
+ COMPARE(sel(D, f3, f4, f5), "462520d0 sel.d f3, f4, f5");
+
COMPARE(rint_d(f8, f6), "4620321a rint.d f8, f6");
- VERIFY_RUN();
+
+ COMPARE(min_s(f3, f4, f5), "460520dc min.s f3, f4, f5");
+ COMPARE(max_s(f3, f4, f5), "460520de max.s f3, f4, f5");
+
+ COMPARE(rint(S, f8, f6), "4600321a rint.s f8, f6");
+
+ COMPARE(mina_d(f3, f4, f5), "462520dd mina.d f3, f4, f5");
+ COMPARE(mina_s(f3, f4, f5), "460520dd mina.s f3, f4, f5");
+
+ COMPARE(maxa_d(f3, f4, f5), "462520df maxa.d f3, f4, f5");
+ COMPARE(maxa_s(f3, f4, f5), "460520df maxa.s f3, f4, f5");
}
+ // Conversions and FP arithmetic available on all supported variants.
+ COMPARE(trunc_w_d(f8, f6), "4620320d trunc.w.d f8, f6");
+ COMPARE(trunc_w_s(f8, f6), "4600320d trunc.w.s f8, f6");
+
+ COMPARE(round_w_s(f8, f6), "4600320c round.w.s f8, f6");
+ COMPARE(round_w_d(f8, f6), "4620320c round.w.d f8, f6");
+
+ COMPARE(round_l_s(f8, f6), "46003208 round.l.s f8, f6");
+ COMPARE(round_l_d(f8, f6), "46203208 round.l.d f8, f6");
+
+ COMPARE(floor_w_s(f8, f6), "4600320f floor.w.s f8, f6");
+ COMPARE(floor_w_d(f8, f6), "4620320f floor.w.d f8, f6");
+
+ COMPARE(floor_l_s(f8, f6), "4600320b floor.l.s f8, f6");
+ COMPARE(floor_l_d(f8, f6), "4620320b floor.l.d f8, f6");
+
+ COMPARE(ceil_w_s(f8, f6), "4600320e ceil.w.s f8, f6");
+ COMPARE(ceil_w_d(f8, f6), "4620320e ceil.w.d f8, f6");
+
+ COMPARE(ceil_l_s(f8, f6), "4600320a ceil.l.s f8, f6");
+ COMPARE(ceil_l_d(f8, f6), "4620320a ceil.l.d f8, f6");
+
+ COMPARE(sub_s(f10, f8, f6), "46064281 sub.s f10, f8, f6");
+ COMPARE(sub_d(f10, f8, f6), "46264281 sub.d f10, f8, f6");
+
+ COMPARE(sqrt_s(f8, f6), "46003204 sqrt.s f8, f6");
+ COMPARE(sqrt_d(f8, f6), "46203204 sqrt.d f8, f6");
+
+ COMPARE(neg_s(f8, f6), "46003207 neg.s f8, f6");
+ COMPARE(neg_d(f8, f6), "46203207 neg.d f8, f6");
+
+ COMPARE(mul_s(f8, f6, f4), "46043202 mul.s f8, f6, f4");
+ COMPARE(mul_d(f8, f6, f4), "46243202 mul.d f8, f6, f4");
+
+ COMPARE(rsqrt_s(f8, f6), "46003216 rsqrt.s f8, f6");
+ COMPARE(rsqrt_d(f8, f6), "46203216 rsqrt.d f8, f6");
+
+ COMPARE(recip_s(f8, f6), "46003215 recip.s f8, f6");
+ COMPARE(recip_d(f8, f6), "46203215 recip.d f8, f6");
+
+ COMPARE(mov_s(f6, f4), "46002186 mov.s f6, f4");
+ COMPARE(mov_d(f6, f4), "46202186 mov.d f6, f4");
+ // r2-only: 64-bit truncation and FP conditional moves. Note t0 encodes
+ // differently here than on mips32 (460c/462c vs 4608/4628).
+ if (kArchVariant == kMips64r2) {
+ COMPARE(trunc_l_d(f8, f6), "46203209 trunc.l.d f8, f6");
+ COMPARE(trunc_l_s(f8, f6), "46003209 trunc.l.s f8, f6");
+
+ COMPARE(movz_s(f6, f4, t0), "460c2192 movz.s f6, f4, t0");
+ COMPARE(movz_d(f6, f4, t0), "462c2192 movz.d f6, f4, t0");
+
+ COMPARE(movt_s(f6, f4, 4), "46112191 movt.s f6, f4, cc(1)");
+ COMPARE(movt_d(f6, f4, 4), "46312191 movt.d f6, f4, cc(1)");
+
+ COMPARE(movf_s(f6, f4, 4), "46102191 movf.s f6, f4, cc(1)");
+ COMPARE(movf_d(f6, f4, 4), "46302191 movf.d f6, f4, cc(1)");
+
+ COMPARE(movn_s(f6, f4, t0), "460c2193 movn.s f6, f4, t0");
+ COMPARE(movn_d(f6, f4, t0), "462c2193 movn.d f6, f4, t0");
+ }
+ VERIFY_RUN();
}