}
+// Returns the default NaN bit pattern for double (FP64) results. Used when
+// FPCR.DN is set, or when an operation (e.g. inf + -inf) generates a NaN.
+template<> double Simulator::FPDefaultNaN<double>() const {
+  return kFP64DefaultNaN;
+}
+
+
+// Returns the default NaN bit pattern for float (FP32) results. Used when
+// FPCR.DN is set, or when an operation generates a NaN.
+template<> float Simulator::FPDefaultNaN<float>() const {
+  return kFP32DefaultNaN;
+}
+
+
void Simulator::FPCompare(double val0, double val1) {
AssertSupportedFPCR();
void Simulator::VisitFPCompare(Instruction* instr) {
AssertSupportedFPCR();
- unsigned reg_size = instr->FPType() == FP32 ? kSRegSizeInBits
- : kDRegSizeInBits;
+ unsigned reg_size = (instr->Mask(FP64) == FP64) ? kDRegSizeInBits
+ : kSRegSizeInBits;
double fn_val = fpreg(reg_size, instr->Rn());
switch (instr->Mask(FPCompareMask)) {
if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
// If the condition passes, set the status flags to the result of
// comparing the operands.
- unsigned reg_size = instr->FPType() == FP32 ? kSRegSizeInBits
- : kDRegSizeInBits;
+ unsigned reg_size = (instr->Mask(FP64) == FP64) ? kDRegSizeInBits
+ : kSRegSizeInBits;
FPCompare(fpreg(reg_size, instr->Rn()), fpreg(reg_size, instr->Rm()));
} else {
// If the condition fails, set the status flags to the nzcv immediate.
case FABS_d: set_dreg(fd, std::fabs(dreg(fn))); break;
case FNEG_s: set_sreg(fd, -sreg(fn)); break;
case FNEG_d: set_dreg(fd, -dreg(fn)); break;
- case FSQRT_s: set_sreg(fd, std::sqrt(sreg(fn))); break;
- case FSQRT_d: set_dreg(fd, std::sqrt(dreg(fn))); break;
+ case FSQRT_s: set_sreg(fd, FPSqrt(sreg(fn))); break;
+ case FSQRT_d: set_dreg(fd, FPSqrt(dreg(fn))); break;
case FRINTA_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieAway)); break;
case FRINTA_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieAway)); break;
case FRINTN_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieEven)); break;
double Simulator::FPRoundInt(double value, FPRounding round_mode) {
if ((value == 0.0) || (value == kFP64PositiveInfinity) ||
- (value == kFP64NegativeInfinity) || std::isnan(value)) {
+ (value == kFP64NegativeInfinity)) {
return value;
+ } else if (std::isnan(value)) {
+ return FPProcessNaN(value);
}
double int_result = floor(value);
double Simulator::FPToDouble(float value) {
switch (std::fpclassify(value)) {
case FP_NAN: {
- // Convert NaNs as the processor would, assuming that FPCR.DN (default
- // NaN) is not set:
+ if (DN()) return kFP64DefaultNaN;
+
+ // Convert NaNs as the processor would:
// - The sign is propagated.
// - The payload (mantissa) is transferred entirely, except that the top
// bit is forced to '1', making the result a quiet NaN. The unused
switch (std::fpclassify(value)) {
case FP_NAN: {
- // Convert NaNs as the processor would, assuming that FPCR.DN (default
- // NaN) is not set:
+ if (DN()) return kFP32DefaultNaN;
+
+ // Convert NaNs as the processor would:
// - The sign is propagated.
// - The payload (mantissa) is transferred as much as possible, except
// that the top bit is forced to '1', making the result a quiet NaN.
unsigned fn = instr->Rn();
unsigned fm = instr->Rm();
+ // Fmaxnm and Fminnm have special NaN handling.
+ switch (instr->Mask(FPDataProcessing2SourceMask)) {
+ case FMAXNM_s: set_sreg(fd, FPMaxNM(sreg(fn), sreg(fm))); return;
+ case FMAXNM_d: set_dreg(fd, FPMaxNM(dreg(fn), dreg(fm))); return;
+ case FMINNM_s: set_sreg(fd, FPMinNM(sreg(fn), sreg(fm))); return;
+ case FMINNM_d: set_dreg(fd, FPMinNM(dreg(fn), dreg(fm))); return;
+ default:
+ break; // Fall through.
+ }
+
+ if (FPProcessNaNs(instr)) return;
+
switch (instr->Mask(FPDataProcessing2SourceMask)) {
- case FADD_s: set_sreg(fd, sreg(fn) + sreg(fm)); break;
- case FADD_d: set_dreg(fd, dreg(fn) + dreg(fm)); break;
- case FSUB_s: set_sreg(fd, sreg(fn) - sreg(fm)); break;
- case FSUB_d: set_dreg(fd, dreg(fn) - dreg(fm)); break;
- case FMUL_s: set_sreg(fd, sreg(fn) * sreg(fm)); break;
- case FMUL_d: set_dreg(fd, dreg(fn) * dreg(fm)); break;
- case FDIV_s: set_sreg(fd, sreg(fn) / sreg(fm)); break;
- case FDIV_d: set_dreg(fd, dreg(fn) / dreg(fm)); break;
+ case FADD_s: set_sreg(fd, FPAdd(sreg(fn), sreg(fm))); break;
+ case FADD_d: set_dreg(fd, FPAdd(dreg(fn), dreg(fm))); break;
+ case FSUB_s: set_sreg(fd, FPSub(sreg(fn), sreg(fm))); break;
+ case FSUB_d: set_dreg(fd, FPSub(dreg(fn), dreg(fm))); break;
+ case FMUL_s: set_sreg(fd, FPMul(sreg(fn), sreg(fm))); break;
+ case FMUL_d: set_dreg(fd, FPMul(dreg(fn), dreg(fm))); break;
+ case FDIV_s: set_sreg(fd, FPDiv(sreg(fn), sreg(fm))); break;
+ case FDIV_d: set_dreg(fd, FPDiv(dreg(fn), dreg(fm))); break;
case FMAX_s: set_sreg(fd, FPMax(sreg(fn), sreg(fm))); break;
case FMAX_d: set_dreg(fd, FPMax(dreg(fn), dreg(fm))); break;
case FMIN_s: set_sreg(fd, FPMin(sreg(fn), sreg(fm))); break;
case FMIN_d: set_dreg(fd, FPMin(dreg(fn), dreg(fm))); break;
- case FMAXNM_s: set_sreg(fd, FPMaxNM(sreg(fn), sreg(fm))); break;
- case FMAXNM_d: set_dreg(fd, FPMaxNM(dreg(fn), dreg(fm))); break;
- case FMINNM_s: set_sreg(fd, FPMinNM(sreg(fn), sreg(fm))); break;
- case FMINNM_d: set_dreg(fd, FPMinNM(dreg(fn), dreg(fm))); break;
+ case FMAXNM_s:
+ case FMAXNM_d:
+ case FMINNM_s:
+ case FMINNM_d:
+ // These were handled before the standard FPProcessNaNs() stage.
+ UNREACHABLE();
default: UNIMPLEMENTED();
}
}
// The C99 (and C++11) fma function performs a fused multiply-accumulate.
switch (instr->Mask(FPDataProcessing3SourceMask)) {
// fd = fa +/- (fn * fm)
- case FMADD_s: set_sreg(fd, fmaf(sreg(fn), sreg(fm), sreg(fa))); break;
- case FMSUB_s: set_sreg(fd, fmaf(-sreg(fn), sreg(fm), sreg(fa))); break;
- case FMADD_d: set_dreg(fd, fma(dreg(fn), dreg(fm), dreg(fa))); break;
- case FMSUB_d: set_dreg(fd, fma(-dreg(fn), dreg(fm), dreg(fa))); break;
- // Variants of the above where the result is negated.
- case FNMADD_s: set_sreg(fd, -fmaf(sreg(fn), sreg(fm), sreg(fa))); break;
- case FNMSUB_s: set_sreg(fd, -fmaf(-sreg(fn), sreg(fm), sreg(fa))); break;
- case FNMADD_d: set_dreg(fd, -fma(dreg(fn), dreg(fm), dreg(fa))); break;
- case FNMSUB_d: set_dreg(fd, -fma(-dreg(fn), dreg(fm), dreg(fa))); break;
+ case FMADD_s: set_sreg(fd, FPMulAdd(sreg(fa), sreg(fn), sreg(fm))); break;
+ case FMSUB_s: set_sreg(fd, FPMulAdd(sreg(fa), -sreg(fn), sreg(fm))); break;
+ case FMADD_d: set_dreg(fd, FPMulAdd(dreg(fa), dreg(fn), dreg(fm))); break;
+ case FMSUB_d: set_dreg(fd, FPMulAdd(dreg(fa), -dreg(fn), dreg(fm))); break;
+ // Negated variants of the above.
+ case FNMADD_s:
+ set_sreg(fd, FPMulAdd(-sreg(fa), -sreg(fn), sreg(fm)));
+ break;
+ case FNMSUB_s:
+ set_sreg(fd, FPMulAdd(-sreg(fa), sreg(fn), sreg(fm)));
+ break;
+ case FNMADD_d:
+ set_dreg(fd, FPMulAdd(-dreg(fa), -dreg(fn), dreg(fm)));
+ break;
+ case FNMSUB_d:
+ set_dreg(fd, FPMulAdd(-dreg(fa), dreg(fn), dreg(fm)));
+ break;
default: UNIMPLEMENTED();
}
}
template <typename T>
-T Simulator::FPMax(T a, T b) {
- if (IsSignallingNaN(a)) {
- return a;
- } else if (IsSignallingNaN(b)) {
- return b;
- } else if (std::isnan(a)) {
- ASSERT(IsQuietNaN(a));
- return a;
- } else if (std::isnan(b)) {
- ASSERT(IsQuietNaN(b));
- return b;
+T Simulator::FPAdd(T op1, T op2) {
+  // NaNs should be handled elsewhere (see FPProcessNaNs); by the time we get
+  // here both operands must be numeric.
+  ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+  // Use std::isinf: with only <cmath> included, the unqualified name is not
+  // guaranteed to exist, and std:: matches the usage in FPMulAdd.
+  if (std::isinf(op1) && std::isinf(op2) && (op1 != op2)) {
+    // inf + -inf returns the default NaN.
+    return FPDefaultNaN<T>();
+  } else {
+    // Other cases should be handled by standard arithmetic.
+    return op1 + op2;
+  }
+}
+
+
+template <typename T>
+T Simulator::FPDiv(T op1, T op2) {
+  // NaNs should be handled elsewhere (see FPProcessNaNs); by the time we get
+  // here both operands must be numeric.
+  ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+  // Use std::isinf for consistency with FPMulAdd and C++ <cmath>.
+  if ((std::isinf(op1) && std::isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) {
+    // inf / inf and 0.0 / 0.0 return the default NaN.
+    return FPDefaultNaN<T>();
+  } else {
+    // Other cases (including x / 0.0 -> +/-inf) are handled by standard
+    // arithmetic.
+    return op1 / op2;
  }
+}
+
+
+template <typename T>
+T Simulator::FPMax(T a, T b) {
+ // NaNs should be handled elsewhere.
+ ASSERT(!std::isnan(a) && !std::isnan(b));
if ((a == 0.0) && (b == 0.0) &&
(copysign(1.0, a) != copysign(1.0, b))) {
} else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
b = kFP64NegativeInfinity;
}
- return FPMax(a, b);
+
+ T result = FPProcessNaNs(a, b);
+ return std::isnan(result) ? result : FPMax(a, b);
}
template <typename T>
T Simulator::FPMin(T a, T b) {
-  if (IsSignallingNaN(a)) {
-    return a;
-  } else if (IsSignallingNaN(b)) {
-    return b;
-  } else if (std::isnan(a)) {
-    ASSERT(IsQuietNaN(a));
-    return a;
-  } else if (std::isnan(b)) {
-    ASSERT(IsQuietNaN(b));
-    return b;
-  }
+  // NaNs should be handled elsewhere. Use std::isnan (not unqualified isnan)
+  // for consistency with FPMax and C++ <cmath>.
+  ASSERT(!std::isnan(a) && !std::isnan(b));
  if ((a == 0.0) && (b == 0.0) &&
      (copysign(1.0, a) != copysign(1.0, b))) {
  } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
    b = kFP64PositiveInfinity;
  }
-  return FPMin(a, b);
+
+  T result = FPProcessNaNs(a, b);
+  return std::isnan(result) ? result : FPMin(a, b);
+}
+
+
+template <typename T>
+T Simulator::FPMul(T op1, T op2) {
+  // NaNs should be handled elsewhere (see FPProcessNaNs); by the time we get
+  // here both operands must be numeric.
+  ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+  // Use std::isinf for consistency with FPMulAdd and C++ <cmath>.
+  if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) {
+    // inf * 0.0 returns the default NaN.
+    return FPDefaultNaN<T>();
+  } else {
+    // Other cases should be handled by standard arithmetic.
+    return op1 * op2;
+  }
+}
+
+
+// Computes a + (op1 * op2) as a fused operation, with ARM NaN semantics:
+// input NaNs are propagated via FPProcessNaNs3, invalid operations produce
+// the default NaN, and zero-result signs follow the architecture rather than
+// (possibly broken) libc fma() implementations.
+template<typename T>
+T Simulator::FPMulAdd(T a, T op1, T op2) {
+  T result = FPProcessNaNs3(a, op1, op2);
+
+  // Precompute signs and the invalid-operation conditions before any
+  // arithmetic, since inf/0 combinations must not reach FusedMultiplyAdd.
+  T sign_a = copysign(1.0, a);
+  T sign_prod = copysign(1.0, op1) * copysign(1.0, op2);
+  bool isinf_prod = std::isinf(op1) || std::isinf(op2);
+  bool operation_generates_nan =
+      (std::isinf(op1) && (op2 == 0.0)) ||                     // inf * 0.0
+      (std::isinf(op2) && (op1 == 0.0)) ||                     // 0.0 * inf
+      (std::isinf(a) && isinf_prod && (sign_a != sign_prod));  // inf - inf
+
+  if (std::isnan(result)) {
+    // Generated NaNs override quiet NaNs propagated from a.
+    if (operation_generates_nan && IsQuietNaN(a)) {
+      return FPDefaultNaN<T>();
+    } else {
+      return result;
+    }
+  }
+
+  // If the operation would produce a NaN, return the default NaN.
+  if (operation_generates_nan) {
+    return FPDefaultNaN<T>();
+  }
+
+  // Work around broken fma implementations for exact zero results: The sign of
+  // exact 0.0 results is positive unless both a and op1 * op2 are negative.
+  if (((op1 == 0.0) || (op2 == 0.0)) && (a == 0.0)) {
+    return ((sign_a < 0) && (sign_prod < 0)) ? -0.0 : 0.0;
+  }
+
+  result = FusedMultiplyAdd(op1, op2, a);
+  ASSERT(!std::isnan(result));
+
+  // Work around broken fma implementations for rounded zero results: If a is
+  // 0.0, the sign of the result is the sign of op1 * op2 before rounding.
+  if ((a == 0.0) && (result == 0.0)) {
+    return copysign(0.0, sign_prod);
+  }
+
+  return result;
+}
+
+
+// ARM FSQRT semantics: NaN inputs are processed (quietened or replaced by the
+// default NaN, per FPCR.DN), negative inputs produce the default NaN, and all
+// other inputs defer to the library sqrt.
+template <typename T>
+T Simulator::FPSqrt(T op) {
+  if (std::isnan(op)) {
+    return FPProcessNaN(op);
+  } else if (op < 0.0) {
+    return FPDefaultNaN<T>();
+  } else {
+    return std::sqrt(op);
+  }
+}
+
+
+template <typename T>
+T Simulator::FPSub(T op1, T op2) {
+  // NaNs should be handled elsewhere (see FPProcessNaNs); by the time we get
+  // here both operands must be numeric.
+  ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+  // Use std::isinf for consistency with FPMulAdd and C++ <cmath>.
+  if (std::isinf(op1) && std::isinf(op2) && (op1 == op2)) {
+    // inf - inf returns the default NaN.
+    return FPDefaultNaN<T>();
+  } else {
+    // Other cases should be handled by standard arithmetic.
+    return op1 - op2;
+  }
+}
+
+
+// Processes a single NaN operand: if FPCR.DN (default NaN) is set, return the
+// default NaN; otherwise return the input converted to a quiet NaN.
+template <typename T>
+T Simulator::FPProcessNaN(T op) {
+  ASSERT(std::isnan(op));
+  return DN() ? FPDefaultNaN<T>() : ToQuietNaN(op);
+}
+
+
+// Two-operand NaN selection, following the ARM FPProcessNaNs pseudocode:
+// signalling NaNs take precedence over quiet NaNs, and op1 takes precedence
+// over op2. Returns the processed NaN, or 0.0 if neither operand is a NaN
+// (callers test the result with isnan).
+template <typename T>
+T Simulator::FPProcessNaNs(T op1, T op2) {
+  if (IsSignallingNaN(op1)) {
+    return FPProcessNaN(op1);
+  } else if (IsSignallingNaN(op2)) {
+    return FPProcessNaN(op2);
+  } else if (std::isnan(op1)) {
+    ASSERT(IsQuietNaN(op1));
+    return FPProcessNaN(op1);
+  } else if (std::isnan(op2)) {
+    ASSERT(IsQuietNaN(op2));
+    return FPProcessNaN(op2);
+  } else {
+    return 0.0;
+  }
+}
+
+
+// Three-operand NaN selection (used by the fused multiply-add family):
+// signalling NaNs beat quiet NaNs, and earlier operands beat later ones.
+// Returns the processed NaN, or 0.0 if no operand is a NaN.
+template <typename T>
+T Simulator::FPProcessNaNs3(T op1, T op2, T op3) {
+  if (IsSignallingNaN(op1)) {
+    return FPProcessNaN(op1);
+  } else if (IsSignallingNaN(op2)) {
+    return FPProcessNaN(op2);
+  } else if (IsSignallingNaN(op3)) {
+    return FPProcessNaN(op3);
+  } else if (std::isnan(op1)) {
+    ASSERT(IsQuietNaN(op1));
+    return FPProcessNaN(op1);
+  } else if (std::isnan(op2)) {
+    ASSERT(IsQuietNaN(op2));
+    return FPProcessNaN(op2);
+  } else if (std::isnan(op3)) {
+    ASSERT(IsQuietNaN(op3));
+    return FPProcessNaN(op3);
+  } else {
+    return 0.0;
+  }
+}
+
+
+// Standard NaN-propagation stage for two-source FP instructions. If either
+// source register (Rn/Rm) holds a NaN, writes the processed NaN to Rd and
+// returns true (the instruction is complete); otherwise returns false so the
+// caller performs the actual arithmetic. Register size is selected by the
+// instruction's FP64 bit.
+bool Simulator::FPProcessNaNs(Instruction* instr) {
+  unsigned fd = instr->Rd();
+  unsigned fn = instr->Rn();
+  unsigned fm = instr->Rm();
+  bool done = false;
+
+  if (instr->Mask(FP64) == FP64) {
+    double result = FPProcessNaNs(dreg(fn), dreg(fm));
+    if (std::isnan(result)) {
+      set_dreg(fd, result);
+      done = true;
+    }
+  } else {
+    float result = FPProcessNaNs(sreg(fn), sreg(fm));
+    if (std::isnan(result)) {
+      set_sreg(fd, result);
+      done = true;
+    }
+  }
+
+  return done;
}
SETUP();
START();
- __ Fmov(s13, -0.0);
- __ Fmov(s14, kFP32PositiveInfinity);
- __ Fmov(s15, kFP32NegativeInfinity);
- __ Fmov(s16, 3.25);
- __ Fmov(s17, 1.0);
- __ Fmov(s18, 0);
+ __ Fmov(s14, -0.0f);
+ __ Fmov(s15, kFP32PositiveInfinity);
+ __ Fmov(s16, kFP32NegativeInfinity);
+ __ Fmov(s17, 3.25f);
+ __ Fmov(s18, 1.0f);
+ __ Fmov(s19, 0.0f);
__ Fmov(d26, -0.0);
__ Fmov(d27, kFP64PositiveInfinity);
__ Fmov(d28, kFP64NegativeInfinity);
- __ Fmov(d29, 0);
+ __ Fmov(d29, 0.0);
__ Fmov(d30, -2.0);
__ Fmov(d31, 2.25);
- __ Fadd(s0, s16, s17);
- __ Fadd(s1, s17, s18);
- __ Fadd(s2, s13, s17);
- __ Fadd(s3, s14, s17);
- __ Fadd(s4, s15, s17);
+ __ Fadd(s0, s17, s18);
+ __ Fadd(s1, s18, s19);
+ __ Fadd(s2, s14, s18);
+ __ Fadd(s3, s15, s18);
+ __ Fadd(s4, s16, s18);
+ __ Fadd(s5, s15, s16);
+ __ Fadd(s6, s16, s15);
- __ Fadd(d5, d30, d31);
- __ Fadd(d6, d29, d31);
- __ Fadd(d7, d26, d31);
- __ Fadd(d8, d27, d31);
- __ Fadd(d9, d28, d31);
+ __ Fadd(d7, d30, d31);
+ __ Fadd(d8, d29, d31);
+ __ Fadd(d9, d26, d31);
+ __ Fadd(d10, d27, d31);
+ __ Fadd(d11, d28, d31);
+ __ Fadd(d12, d27, d28);
+ __ Fadd(d13, d28, d27);
END();
RUN();
ASSERT_EQUAL_FP32(1.0, s2);
ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
- ASSERT_EQUAL_FP64(0.25, d5);
- ASSERT_EQUAL_FP64(2.25, d6);
- ASSERT_EQUAL_FP64(2.25, d7);
- ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d8);
- ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d9);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
+ ASSERT_EQUAL_FP64(0.25, d7);
+ ASSERT_EQUAL_FP64(2.25, d8);
+ ASSERT_EQUAL_FP64(2.25, d9);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d10);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d11);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
TEARDOWN();
}
SETUP();
START();
- __ Fmov(s13, -0.0);
- __ Fmov(s14, kFP32PositiveInfinity);
- __ Fmov(s15, kFP32NegativeInfinity);
- __ Fmov(s16, 3.25);
- __ Fmov(s17, 1.0);
- __ Fmov(s18, 0);
+ __ Fmov(s14, -0.0f);
+ __ Fmov(s15, kFP32PositiveInfinity);
+ __ Fmov(s16, kFP32NegativeInfinity);
+ __ Fmov(s17, 3.25f);
+ __ Fmov(s18, 1.0f);
+ __ Fmov(s19, 0.0f);
__ Fmov(d26, -0.0);
__ Fmov(d27, kFP64PositiveInfinity);
__ Fmov(d28, kFP64NegativeInfinity);
- __ Fmov(d29, 0);
+ __ Fmov(d29, 0.0);
__ Fmov(d30, -2.0);
__ Fmov(d31, 2.25);
- __ Fsub(s0, s16, s17);
- __ Fsub(s1, s17, s18);
- __ Fsub(s2, s13, s17);
- __ Fsub(s3, s17, s14);
- __ Fsub(s4, s17, s15);
+ __ Fsub(s0, s17, s18);
+ __ Fsub(s1, s18, s19);
+ __ Fsub(s2, s14, s18);
+ __ Fsub(s3, s18, s15);
+ __ Fsub(s4, s18, s16);
+ __ Fsub(s5, s15, s15);
+ __ Fsub(s6, s16, s16);
- __ Fsub(d5, d30, d31);
- __ Fsub(d6, d29, d31);
- __ Fsub(d7, d26, d31);
- __ Fsub(d8, d31, d27);
- __ Fsub(d9, d31, d28);
+ __ Fsub(d7, d30, d31);
+ __ Fsub(d8, d29, d31);
+ __ Fsub(d9, d26, d31);
+ __ Fsub(d10, d31, d27);
+ __ Fsub(d11, d31, d28);
+ __ Fsub(d12, d27, d27);
+ __ Fsub(d13, d28, d28);
END();
RUN();
ASSERT_EQUAL_FP32(-1.0, s2);
ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
- ASSERT_EQUAL_FP64(-4.25, d5);
- ASSERT_EQUAL_FP64(-2.25, d6);
- ASSERT_EQUAL_FP64(-2.25, d7);
- ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
- ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d9);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
+ ASSERT_EQUAL_FP64(-4.25, d7);
+ ASSERT_EQUAL_FP64(-2.25, d8);
+ ASSERT_EQUAL_FP64(-2.25, d9);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
TEARDOWN();
}
SETUP();
START();
- __ Fmov(s13, -0.0);
- __ Fmov(s14, kFP32PositiveInfinity);
- __ Fmov(s15, kFP32NegativeInfinity);
- __ Fmov(s16, 3.25);
- __ Fmov(s17, 2.0);
- __ Fmov(s18, 0);
- __ Fmov(s19, -2.0);
+ __ Fmov(s14, -0.0f);
+ __ Fmov(s15, kFP32PositiveInfinity);
+ __ Fmov(s16, kFP32NegativeInfinity);
+ __ Fmov(s17, 3.25f);
+ __ Fmov(s18, 2.0f);
+ __ Fmov(s19, 0.0f);
+ __ Fmov(s20, -2.0f);
__ Fmov(d26, -0.0);
__ Fmov(d27, kFP64PositiveInfinity);
__ Fmov(d28, kFP64NegativeInfinity);
- __ Fmov(d29, 0);
+ __ Fmov(d29, 0.0);
__ Fmov(d30, -2.0);
__ Fmov(d31, 2.25);
- __ Fmul(s0, s16, s17);
- __ Fmul(s1, s17, s18);
- __ Fmul(s2, s13, s13);
- __ Fmul(s3, s14, s19);
- __ Fmul(s4, s15, s19);
+ __ Fmul(s0, s17, s18);
+ __ Fmul(s1, s18, s19);
+ __ Fmul(s2, s14, s14);
+ __ Fmul(s3, s15, s20);
+ __ Fmul(s4, s16, s20);
+ __ Fmul(s5, s15, s19);
+ __ Fmul(s6, s19, s16);
- __ Fmul(d5, d30, d31);
- __ Fmul(d6, d29, d31);
- __ Fmul(d7, d26, d26);
- __ Fmul(d8, d27, d30);
- __ Fmul(d9, d28, d30);
+ __ Fmul(d7, d30, d31);
+ __ Fmul(d8, d29, d31);
+ __ Fmul(d9, d26, d26);
+ __ Fmul(d10, d27, d30);
+ __ Fmul(d11, d28, d30);
+ __ Fmul(d12, d27, d29);
+ __ Fmul(d13, d29, d28);
END();
RUN();
ASSERT_EQUAL_FP32(0.0, s2);
ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
- ASSERT_EQUAL_FP64(-4.5, d5);
- ASSERT_EQUAL_FP64(0.0, d6);
- ASSERT_EQUAL_FP64(0.0, d7);
- ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
- ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d9);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
+ ASSERT_EQUAL_FP64(-4.5, d7);
+ ASSERT_EQUAL_FP64(0.0, d8);
+ ASSERT_EQUAL_FP64(0.0, d9);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
TEARDOWN();
}
-static void FmaddFmsubDoubleHelper(double n, double m, double a,
- double fmadd, double fmsub) {
+static void FmaddFmsubHelper(double n, double m, double a,
+ double fmadd, double fmsub,
+ double fnmadd, double fnmsub) {
SETUP();
START();
ASSERT_EQUAL_FP64(fmadd, d28);
ASSERT_EQUAL_FP64(fmsub, d29);
- ASSERT_EQUAL_FP64(-fmadd, d30);
- ASSERT_EQUAL_FP64(-fmsub, d31);
+ ASSERT_EQUAL_FP64(fnmadd, d30);
+ ASSERT_EQUAL_FP64(fnmsub, d31);
TEARDOWN();
}
TEST(fmadd_fmsub_double) {
INIT_V8();
- double inputs[] = {
- // Normal numbers, including -0.0.
- DBL_MAX, DBL_MIN, 3.25, 2.0, 0.0,
- -DBL_MAX, -DBL_MIN, -3.25, -2.0, -0.0,
- // Infinities.
- kFP64NegativeInfinity, kFP64PositiveInfinity,
- // Subnormal numbers.
- rawbits_to_double(0x000fffffffffffff),
- rawbits_to_double(0x0000000000000001),
- rawbits_to_double(0x000123456789abcd),
- -rawbits_to_double(0x000fffffffffffff),
- -rawbits_to_double(0x0000000000000001),
- -rawbits_to_double(0x000123456789abcd),
- // NaN.
- kFP64QuietNaN,
- -kFP64QuietNaN,
- };
- const int count = sizeof(inputs) / sizeof(inputs[0]);
-
- for (int in = 0; in < count; in++) {
- double n = inputs[in];
- for (int im = 0; im < count; im++) {
- double m = inputs[im];
- for (int ia = 0; ia < count; ia++) {
- double a = inputs[ia];
- double fmadd = fma(n, m, a);
- double fmsub = fma(-n, m, a);
-
- FmaddFmsubDoubleHelper(n, m, a, fmadd, fmsub);
- }
- }
- }
-}
-
-
-TEST(fmadd_fmsub_double_rounding) {
- INIT_V8();
- // Make sure we run plenty of tests where an intermediate rounding stage would
- // produce an incorrect result.
- const int limit = 1000;
- int count_fmadd = 0;
- int count_fmsub = 0;
-
- uint16_t seed[3] = {42, 43, 44};
- seed48(seed);
- while ((count_fmadd < limit) || (count_fmsub < limit)) {
- double n, m, a;
- uint32_t r[2];
- ASSERT(sizeof(r) == sizeof(n));
+ // It's hard to check the result of fused operations because the only way to
+ // calculate the result is using fma, which is what the simulator uses anyway.
+ // TODO(jbramley): Add tests to check behaviour against a hardware trace.
- r[0] = mrand48();
- r[1] = mrand48();
- memcpy(&n, r, sizeof(r));
- r[0] = mrand48();
- r[1] = mrand48();
- memcpy(&m, r, sizeof(r));
- r[0] = mrand48();
- r[1] = mrand48();
- memcpy(&a, r, sizeof(r));
+ // Basic operation.
+ FmaddFmsubHelper(1.0, 2.0, 3.0, 5.0, 1.0, -5.0, -1.0);
+ FmaddFmsubHelper(-1.0, 2.0, 3.0, 1.0, 5.0, -1.0, -5.0);
- if (!std::isfinite(a) || !std::isfinite(n) || !std::isfinite(m)) {
- continue;
- }
+ // Check the sign of exact zeroes.
+ // n m a fmadd fmsub fnmadd fnmsub
+ FmaddFmsubHelper(-0.0, +0.0, -0.0, -0.0, +0.0, +0.0, +0.0);
+ FmaddFmsubHelper(+0.0, +0.0, -0.0, +0.0, -0.0, +0.0, +0.0);
+ FmaddFmsubHelper(+0.0, +0.0, +0.0, +0.0, +0.0, -0.0, +0.0);
+ FmaddFmsubHelper(-0.0, +0.0, +0.0, +0.0, +0.0, +0.0, -0.0);
+ FmaddFmsubHelper(+0.0, -0.0, -0.0, -0.0, +0.0, +0.0, +0.0);
+ FmaddFmsubHelper(-0.0, -0.0, -0.0, +0.0, -0.0, +0.0, +0.0);
+ FmaddFmsubHelper(-0.0, -0.0, +0.0, +0.0, +0.0, -0.0, +0.0);
+ FmaddFmsubHelper(+0.0, -0.0, +0.0, +0.0, +0.0, +0.0, -0.0);
- // Calculate the expected results.
- double fmadd = fma(n, m, a);
- double fmsub = fma(-n, m, a);
-
- bool test_fmadd = (fmadd != (a + n * m));
- bool test_fmsub = (fmsub != (a - n * m));
-
- // If rounding would produce a different result, increment the test count.
- count_fmadd += test_fmadd;
- count_fmsub += test_fmsub;
-
- if (test_fmadd || test_fmsub) {
- FmaddFmsubDoubleHelper(n, m, a, fmadd, fmsub);
- }
- }
+ // Check NaN generation.
+ FmaddFmsubHelper(kFP64PositiveInfinity, 0.0, 42.0,
+ kFP64DefaultNaN, kFP64DefaultNaN,
+ kFP64DefaultNaN, kFP64DefaultNaN);
+ FmaddFmsubHelper(0.0, kFP64PositiveInfinity, 42.0,
+ kFP64DefaultNaN, kFP64DefaultNaN,
+ kFP64DefaultNaN, kFP64DefaultNaN);
+ FmaddFmsubHelper(kFP64PositiveInfinity, 1.0, kFP64PositiveInfinity,
+ kFP64PositiveInfinity, // inf + ( inf * 1) = inf
+ kFP64DefaultNaN, // inf + (-inf * 1) = NaN
+ kFP64NegativeInfinity, // -inf + (-inf * 1) = -inf
+ kFP64DefaultNaN); // -inf + ( inf * 1) = NaN
+ FmaddFmsubHelper(kFP64NegativeInfinity, 1.0, kFP64PositiveInfinity,
+ kFP64DefaultNaN, // inf + (-inf * 1) = NaN
+ kFP64PositiveInfinity, // inf + ( inf * 1) = inf
+ kFP64DefaultNaN, // -inf + ( inf * 1) = NaN
+ kFP64NegativeInfinity); // -inf + (-inf * 1) = -inf
}
-static void FmaddFmsubFloatHelper(float n, float m, float a,
- float fmadd, float fmsub) {
+static void FmaddFmsubHelper(float n, float m, float a,
+ float fmadd, float fmsub,
+ float fnmadd, float fnmsub) {
SETUP();
START();
__ Fmov(s0, n);
__ Fmov(s1, m);
__ Fmov(s2, a);
- __ Fmadd(s30, s0, s1, s2);
- __ Fmsub(s31, s0, s1, s2);
+ __ Fmadd(s28, s0, s1, s2);
+ __ Fmsub(s29, s0, s1, s2);
+ __ Fnmadd(s30, s0, s1, s2);
+ __ Fnmsub(s31, s0, s1, s2);
END();
RUN();
- ASSERT_EQUAL_FP32(fmadd, s30);
- ASSERT_EQUAL_FP32(fmsub, s31);
+ ASSERT_EQUAL_FP32(fmadd, s28);
+ ASSERT_EQUAL_FP32(fmsub, s29);
+ ASSERT_EQUAL_FP32(fnmadd, s30);
+ ASSERT_EQUAL_FP32(fnmsub, s31);
TEARDOWN();
}
TEST(fmadd_fmsub_float) {
INIT_V8();
- float inputs[] = {
- // Normal numbers, including -0.0f.
- FLT_MAX, FLT_MIN, 3.25f, 2.0f, 0.0f,
- -FLT_MAX, -FLT_MIN, -3.25f, -2.0f, -0.0f,
- // Infinities.
- kFP32NegativeInfinity, kFP32PositiveInfinity,
- // Subnormal numbers.
- rawbits_to_float(0x07ffffff),
- rawbits_to_float(0x00000001),
- rawbits_to_float(0x01234567),
- -rawbits_to_float(0x07ffffff),
- -rawbits_to_float(0x00000001),
- -rawbits_to_float(0x01234567),
- // NaN.
- kFP32QuietNaN,
- -kFP32QuietNaN,
- };
- const int count = sizeof(inputs) / sizeof(inputs[0]);
-
- for (int in = 0; in < count; in++) {
- float n = inputs[in];
- for (int im = 0; im < count; im++) {
- float m = inputs[im];
- for (int ia = 0; ia < count; ia++) {
- float a = inputs[ia];
- float fmadd = fmaf(n, m, a);
- float fmsub = fmaf(-n, m, a);
-
- FmaddFmsubFloatHelper(n, m, a, fmadd, fmsub);
- }
- }
- }
-}
-
-
-TEST(fmadd_fmsub_float_rounding) {
- INIT_V8();
- // Make sure we run plenty of tests where an intermediate rounding stage would
- // produce an incorrect result.
- const int limit = 1000;
- int count_fmadd = 0;
- int count_fmsub = 0;
-
- uint16_t seed[3] = {42, 43, 44};
- seed48(seed);
-
- while ((count_fmadd < limit) || (count_fmsub < limit)) {
- float n, m, a;
- uint32_t r;
- ASSERT(sizeof(r) == sizeof(n));
-
- r = mrand48();
- memcpy(&n, &r, sizeof(r));
- r = mrand48();
- memcpy(&m, &r, sizeof(r));
- r = mrand48();
- memcpy(&a, &r, sizeof(r));
-
- if (!std::isfinite(a) || !std::isfinite(n) || !std::isfinite(m)) {
- continue;
- }
-
- // Calculate the expected results.
- float fmadd = fmaf(n, m, a);
- float fmsub = fmaf(-n, m, a);
-
- bool test_fmadd = (fmadd != (a + n * m));
- bool test_fmsub = (fmsub != (a - n * m));
-
- // If rounding would produce a different result, increment the test count.
- count_fmadd += test_fmadd;
- count_fmsub += test_fmsub;
-
- if (test_fmadd || test_fmsub) {
- FmaddFmsubFloatHelper(n, m, a, fmadd, fmsub);
- }
- }
+ // It's hard to check the result of fused operations because the only way to
+ // calculate the result is using fma, which is what the simulator uses anyway.
+ // TODO(jbramley): Add tests to check behaviour against a hardware trace.
+
+ // Basic operation.
+ FmaddFmsubHelper(1.0f, 2.0f, 3.0f, 5.0f, 1.0f, -5.0f, -1.0f);
+ FmaddFmsubHelper(-1.0f, 2.0f, 3.0f, 1.0f, 5.0f, -1.0f, -5.0f);
+
+ // Check the sign of exact zeroes.
+ // n m a fmadd fmsub fnmadd fnmsub
+ FmaddFmsubHelper(-0.0f, +0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f);
+ FmaddFmsubHelper(+0.0f, +0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f);
+ FmaddFmsubHelper(+0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f);
+ FmaddFmsubHelper(-0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);
+ FmaddFmsubHelper(+0.0f, -0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f);
+ FmaddFmsubHelper(-0.0f, -0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f);
+ FmaddFmsubHelper(-0.0f, -0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f);
+ FmaddFmsubHelper(+0.0f, -0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);
+
+ // Check NaN generation.
+ FmaddFmsubHelper(kFP32PositiveInfinity, 0.0f, 42.0f,
+ kFP32DefaultNaN, kFP32DefaultNaN,
+ kFP32DefaultNaN, kFP32DefaultNaN);
+ FmaddFmsubHelper(0.0f, kFP32PositiveInfinity, 42.0f,
+ kFP32DefaultNaN, kFP32DefaultNaN,
+ kFP32DefaultNaN, kFP32DefaultNaN);
+ FmaddFmsubHelper(kFP32PositiveInfinity, 1.0f, kFP32PositiveInfinity,
+ kFP32PositiveInfinity, // inf + ( inf * 1) = inf
+ kFP32DefaultNaN, // inf + (-inf * 1) = NaN
+ kFP32NegativeInfinity, // -inf + (-inf * 1) = -inf
+ kFP32DefaultNaN); // -inf + ( inf * 1) = NaN
+ FmaddFmsubHelper(kFP32NegativeInfinity, 1.0f, kFP32PositiveInfinity,
+ kFP32DefaultNaN, // inf + (-inf * 1) = NaN
+ kFP32PositiveInfinity, // inf + ( inf * 1) = inf
+ kFP32DefaultNaN, // -inf + ( inf * 1) = NaN
+ kFP32NegativeInfinity); // -inf + (-inf * 1) = -inf
+}
+
+
+TEST(fmadd_fmsub_double_nans) {
+  INIT_V8();
+  // Make sure that NaN propagation works correctly.
+  double s1 = rawbits_to_double(0x7ff5555511111111);
+  double s2 = rawbits_to_double(0x7ff5555522222222);
+  double sa = rawbits_to_double(0x7ff55555aaaaaaaa);
+  double q1 = rawbits_to_double(0x7ffaaaaa11111111);
+  double q2 = rawbits_to_double(0x7ffaaaaa22222222);
+  double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
+  ASSERT(IsSignallingNaN(s1));
+  ASSERT(IsSignallingNaN(s2));
+  ASSERT(IsSignallingNaN(sa));
+  ASSERT(IsQuietNaN(q1));
+  ASSERT(IsQuietNaN(q2));
+  ASSERT(IsQuietNaN(qa));
+
+  // The input NaNs after passing through ProcessNaN.
+  double s1_proc = rawbits_to_double(0x7ffd555511111111);
+  double s2_proc = rawbits_to_double(0x7ffd555522222222);
+  double sa_proc = rawbits_to_double(0x7ffd5555aaaaaaaa);
+  double q1_proc = q1;
+  double q2_proc = q2;
+  double qa_proc = qa;
+  ASSERT(IsQuietNaN(s1_proc));
+  ASSERT(IsQuietNaN(s2_proc));
+  ASSERT(IsQuietNaN(sa_proc));
+  ASSERT(IsQuietNaN(q1_proc));
+  ASSERT(IsQuietNaN(q2_proc));
+  ASSERT(IsQuietNaN(qa_proc));
+
+  // Note: fmsub, fnmadd and fnmsub negate n and/or a before the multiply-add,
+  // so a NaN propagated from a negated operand appears with its sign flipped.
+
+  // Quiet NaNs are propagated.
+  FmaddFmsubHelper(q1, 0, 0, q1_proc, -q1_proc, -q1_proc, q1_proc);
+  FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
+  FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, -qa_proc, -qa_proc);
+  FmaddFmsubHelper(q1, q2, 0, q1_proc, -q1_proc, -q1_proc, q1_proc);
+  FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, -qa_proc, -qa_proc);
+  FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, -qa_proc, -qa_proc);
+  FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, -qa_proc, -qa_proc);
+
+  // Signalling NaNs are propagated, and made quiet.
+  FmaddFmsubHelper(s1, 0, 0, s1_proc, -s1_proc, -s1_proc, s1_proc);
+  FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
+  FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
+  FmaddFmsubHelper(s1, s2, 0, s1_proc, -s1_proc, -s1_proc, s1_proc);
+  FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
+  FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
+  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
+
+  // Signalling NaNs take precedence over quiet NaNs.
+  FmaddFmsubHelper(s1, q2, qa, s1_proc, -s1_proc, -s1_proc, s1_proc);
+  FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
+  FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
+  FmaddFmsubHelper(s1, s2, qa, s1_proc, -s1_proc, -s1_proc, s1_proc);
+  FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
+  FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
+  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
+
+  // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
+  FmaddFmsubHelper(0, kFP64PositiveInfinity, qa,
+                   kFP64DefaultNaN, kFP64DefaultNaN,
+                   kFP64DefaultNaN, kFP64DefaultNaN);
+  FmaddFmsubHelper(kFP64PositiveInfinity, 0, qa,
+                   kFP64DefaultNaN, kFP64DefaultNaN,
+                   kFP64DefaultNaN, kFP64DefaultNaN);
+  FmaddFmsubHelper(0, kFP64NegativeInfinity, qa,
+                   kFP64DefaultNaN, kFP64DefaultNaN,
+                   kFP64DefaultNaN, kFP64DefaultNaN);
+  FmaddFmsubHelper(kFP64NegativeInfinity, 0, qa,
+                   kFP64DefaultNaN, kFP64DefaultNaN,
+                   kFP64DefaultNaN, kFP64DefaultNaN);
+}
+
+
+TEST(fmadd_fmsub_float_nans) {
+  INIT_V8();
+  // Make sure that NaN propagation works correctly.
+  float s1 = rawbits_to_float(0x7f951111);
+  float s2 = rawbits_to_float(0x7f952222);
+  float sa = rawbits_to_float(0x7f95aaaa);
+  float q1 = rawbits_to_float(0x7fea1111);
+  float q2 = rawbits_to_float(0x7fea2222);
+  float qa = rawbits_to_float(0x7feaaaaa);
+  ASSERT(IsSignallingNaN(s1));
+  ASSERT(IsSignallingNaN(s2));
+  ASSERT(IsSignallingNaN(sa));
+  ASSERT(IsQuietNaN(q1));
+  ASSERT(IsQuietNaN(q2));
+  ASSERT(IsQuietNaN(qa));
+
+  // The input NaNs after passing through ProcessNaN.
+  float s1_proc = rawbits_to_float(0x7fd51111);
+  float s2_proc = rawbits_to_float(0x7fd52222);
+  float sa_proc = rawbits_to_float(0x7fd5aaaa);
+  float q1_proc = q1;
+  float q2_proc = q2;
+  float qa_proc = qa;
+  ASSERT(IsQuietNaN(s1_proc));
+  ASSERT(IsQuietNaN(s2_proc));
+  ASSERT(IsQuietNaN(sa_proc));
+  ASSERT(IsQuietNaN(q1_proc));
+  ASSERT(IsQuietNaN(q2_proc));
+  ASSERT(IsQuietNaN(qa_proc));
+
+  // Note: fmsub, fnmadd and fnmsub negate n and/or a before the multiply-add,
+  // so a NaN propagated from a negated operand appears with its sign flipped.
+
+  // Quiet NaNs are propagated.
+  FmaddFmsubHelper(q1, 0, 0, q1_proc, -q1_proc, -q1_proc, q1_proc);
+  FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
+  FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, -qa_proc, -qa_proc);
+  FmaddFmsubHelper(q1, q2, 0, q1_proc, -q1_proc, -q1_proc, q1_proc);
+  FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, -qa_proc, -qa_proc);
+  FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, -qa_proc, -qa_proc);
+  FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, -qa_proc, -qa_proc);
+
+  // Signalling NaNs are propagated, and made quiet.
+  FmaddFmsubHelper(s1, 0, 0, s1_proc, -s1_proc, -s1_proc, s1_proc);
+  FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
+  FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
+  FmaddFmsubHelper(s1, s2, 0, s1_proc, -s1_proc, -s1_proc, s1_proc);
+  FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
+  FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
+  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
+
+  // Signalling NaNs take precedence over quiet NaNs.
+  FmaddFmsubHelper(s1, q2, qa, s1_proc, -s1_proc, -s1_proc, s1_proc);
+  FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
+  FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
+  FmaddFmsubHelper(s1, s2, qa, s1_proc, -s1_proc, -s1_proc, s1_proc);
+  FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
+  FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
+  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
+
+  // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
+  FmaddFmsubHelper(0, kFP32PositiveInfinity, qa,
+                   kFP32DefaultNaN, kFP32DefaultNaN,
+                   kFP32DefaultNaN, kFP32DefaultNaN);
+  FmaddFmsubHelper(kFP32PositiveInfinity, 0, qa,
+                   kFP32DefaultNaN, kFP32DefaultNaN,
+                   kFP32DefaultNaN, kFP32DefaultNaN);
+  FmaddFmsubHelper(0, kFP32NegativeInfinity, qa,
+                   kFP32DefaultNaN, kFP32DefaultNaN,
+                   kFP32DefaultNaN, kFP32DefaultNaN);
+  FmaddFmsubHelper(kFP32NegativeInfinity, 0, qa,
+                   kFP32DefaultNaN, kFP32DefaultNaN,
+                   kFP32DefaultNaN, kFP32DefaultNaN);
}
SETUP();
START();
- __ Fmov(s13, -0.0);
- __ Fmov(s14, kFP32PositiveInfinity);
- __ Fmov(s15, kFP32NegativeInfinity);
- __ Fmov(s16, 3.25);
- __ Fmov(s17, 2.0);
- __ Fmov(s18, 2.0);
- __ Fmov(s19, -2.0);
+ __ Fmov(s14, -0.0f);
+ __ Fmov(s15, kFP32PositiveInfinity);
+ __ Fmov(s16, kFP32NegativeInfinity);
+ __ Fmov(s17, 3.25f);
+ __ Fmov(s18, 2.0f);
+ __ Fmov(s19, 2.0f);
+ __ Fmov(s20, -2.0f);
__ Fmov(d26, -0.0);
__ Fmov(d27, kFP64PositiveInfinity);
__ Fmov(d28, kFP64NegativeInfinity);
- __ Fmov(d29, 0);
+ __ Fmov(d29, 0.0);
__ Fmov(d30, -2.0);
__ Fmov(d31, 2.25);
- __ Fdiv(s0, s16, s17);
- __ Fdiv(s1, s17, s18);
- __ Fdiv(s2, s13, s17);
- __ Fdiv(s3, s17, s14);
- __ Fdiv(s4, s17, s15);
- __ Fdiv(d5, d31, d30);
- __ Fdiv(d6, d29, d31);
- __ Fdiv(d7, d26, d31);
- __ Fdiv(d8, d31, d27);
- __ Fdiv(d9, d31, d28);
- END();
-
- RUN();
-
- ASSERT_EQUAL_FP32(1.625, s0);
- ASSERT_EQUAL_FP32(1.0, s1);
- ASSERT_EQUAL_FP32(-0.0, s2);
- ASSERT_EQUAL_FP32(0.0, s3);
- ASSERT_EQUAL_FP32(-0.0, s4);
- ASSERT_EQUAL_FP64(-1.125, d5);
- ASSERT_EQUAL_FP64(0.0, d6);
- ASSERT_EQUAL_FP64(-0.0, d7);
+ __ Fdiv(s0, s17, s18);
+ __ Fdiv(s1, s18, s19);
+ __ Fdiv(s2, s14, s18);
+ __ Fdiv(s3, s18, s15);
+ __ Fdiv(s4, s18, s16);
+ __ Fdiv(s5, s15, s16);
+ __ Fdiv(s6, s14, s14);
+
+ __ Fdiv(d7, d31, d30);
+ __ Fdiv(d8, d29, d31);
+ __ Fdiv(d9, d26, d31);
+ __ Fdiv(d10, d31, d27);
+ __ Fdiv(d11, d31, d28);
+ __ Fdiv(d12, d28, d27);
+ __ Fdiv(d13, d29, d29);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.625f, s0);
+ ASSERT_EQUAL_FP32(1.0f, s1);
+ ASSERT_EQUAL_FP32(-0.0f, s2);
+ ASSERT_EQUAL_FP32(0.0f, s3);
+ ASSERT_EQUAL_FP32(-0.0f, s4);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
+ ASSERT_EQUAL_FP64(-1.125, d7);
ASSERT_EQUAL_FP64(0.0, d8);
ASSERT_EQUAL_FP64(-0.0, d9);
+ ASSERT_EQUAL_FP64(0.0, d10);
+ ASSERT_EQUAL_FP64(-0.0, d11);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
TEARDOWN();
}
float m,
bool min,
float quiet_nan_substitute = 0.0) {
- const uint64_t kFP32QuietNaNMask = 0x00400000UL;
uint32_t raw_n = float_to_rawbits(n);
uint32_t raw_m = float_to_rawbits(m);
- if (std::isnan(n) && ((raw_n & kFP32QuietNaNMask) == 0)) {
+ if (std::isnan(n) && ((raw_n & kSQuietNanMask) == 0)) {
// n is signalling NaN.
- return n;
- } else if (std::isnan(m) && ((raw_m & kFP32QuietNaNMask) == 0)) {
+ return rawbits_to_float(raw_n | kSQuietNanMask);
+ } else if (std::isnan(m) && ((raw_m & kSQuietNanMask) == 0)) {
// m is signalling NaN.
- return m;
+ return rawbits_to_float(raw_m | kSQuietNanMask);
} else if (quiet_nan_substitute == 0.0) {
if (std::isnan(n)) {
// n is quiet NaN.
double m,
bool min,
double quiet_nan_substitute = 0.0) {
- const uint64_t kFP64QuietNaNMask = 0x0008000000000000UL;
uint64_t raw_n = double_to_rawbits(n);
uint64_t raw_m = double_to_rawbits(m);
- if (std::isnan(n) && ((raw_n & kFP64QuietNaNMask) == 0)) {
+ if (std::isnan(n) && ((raw_n & kDQuietNanMask) == 0)) {
// n is signalling NaN.
- return n;
- } else if (std::isnan(m) && ((raw_m & kFP64QuietNaNMask) == 0)) {
+ return rawbits_to_double(raw_n | kDQuietNanMask);
+ } else if (std::isnan(m) && ((raw_m & kDQuietNanMask) == 0)) {
// m is signalling NaN.
- return m;
+ return rawbits_to_double(raw_m | kDQuietNanMask);
} else if (quiet_nan_substitute == 0.0) {
if (std::isnan(n)) {
// n is quiet NaN.
TEST(fmax_fmin_d) {
INIT_V8();
+ // Use non-standard NaNs to check that the payload bits are preserved.
+ double snan = rawbits_to_double(0x7ff5555512345678);
+ double qnan = rawbits_to_double(0x7ffaaaaa87654321);
+
+ double snan_processed = rawbits_to_double(0x7ffd555512345678);
+ double qnan_processed = qnan;
+
+ ASSERT(IsSignallingNaN(snan));
+ ASSERT(IsQuietNaN(qnan));
+ ASSERT(IsQuietNaN(snan_processed));
+ ASSERT(IsQuietNaN(qnan_processed));
+
// Bootstrap tests.
FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0);
FminFmaxDoubleHelper(0, 1, 0, 1, 0, 1);
FminFmaxDoubleHelper(kFP64PositiveInfinity, kFP64NegativeInfinity,
kFP64NegativeInfinity, kFP64PositiveInfinity,
kFP64NegativeInfinity, kFP64PositiveInfinity);
- FminFmaxDoubleHelper(kFP64SignallingNaN, 0,
- kFP64SignallingNaN, kFP64SignallingNaN,
- kFP64SignallingNaN, kFP64SignallingNaN);
- FminFmaxDoubleHelper(kFP64QuietNaN, 0,
- kFP64QuietNaN, kFP64QuietNaN,
+ FminFmaxDoubleHelper(snan, 0,
+ snan_processed, snan_processed,
+ snan_processed, snan_processed);
+ FminFmaxDoubleHelper(0, snan,
+ snan_processed, snan_processed,
+ snan_processed, snan_processed);
+ FminFmaxDoubleHelper(qnan, 0,
+ qnan_processed, qnan_processed,
0, 0);
- FminFmaxDoubleHelper(kFP64QuietNaN, kFP64SignallingNaN,
- kFP64SignallingNaN, kFP64SignallingNaN,
- kFP64SignallingNaN, kFP64SignallingNaN);
+ FminFmaxDoubleHelper(0, qnan,
+ qnan_processed, qnan_processed,
+ 0, 0);
+ FminFmaxDoubleHelper(qnan, snan,
+ snan_processed, snan_processed,
+ snan_processed, snan_processed);
+ FminFmaxDoubleHelper(snan, qnan,
+ snan_processed, snan_processed,
+ snan_processed, snan_processed);
// Iterate over all combinations of inputs.
double inputs[] = { DBL_MAX, DBL_MIN, 1.0, 0.0,
TEST(fmax_fmin_s) {
INIT_V8();
+ // Use non-standard NaNs to check that the payload bits are preserved.
+ float snan = rawbits_to_float(0x7f951234);
+ float qnan = rawbits_to_float(0x7fea8765);
+
+ float snan_processed = rawbits_to_float(0x7fd51234);
+ float qnan_processed = qnan;
+
+ ASSERT(IsSignallingNaN(snan));
+ ASSERT(IsQuietNaN(qnan));
+ ASSERT(IsQuietNaN(snan_processed));
+ ASSERT(IsQuietNaN(qnan_processed));
+
// Bootstrap tests.
FminFmaxFloatHelper(0, 0, 0, 0, 0, 0);
FminFmaxFloatHelper(0, 1, 0, 1, 0, 1);
FminFmaxFloatHelper(kFP32PositiveInfinity, kFP32NegativeInfinity,
kFP32NegativeInfinity, kFP32PositiveInfinity,
kFP32NegativeInfinity, kFP32PositiveInfinity);
- FminFmaxFloatHelper(kFP32SignallingNaN, 0,
- kFP32SignallingNaN, kFP32SignallingNaN,
- kFP32SignallingNaN, kFP32SignallingNaN);
- FminFmaxFloatHelper(kFP32QuietNaN, 0,
- kFP32QuietNaN, kFP32QuietNaN,
+ FminFmaxFloatHelper(snan, 0,
+ snan_processed, snan_processed,
+ snan_processed, snan_processed);
+ FminFmaxFloatHelper(0, snan,
+ snan_processed, snan_processed,
+ snan_processed, snan_processed);
+ FminFmaxFloatHelper(qnan, 0,
+ qnan_processed, qnan_processed,
0, 0);
- FminFmaxFloatHelper(kFP32QuietNaN, kFP32SignallingNaN,
- kFP32SignallingNaN, kFP32SignallingNaN,
- kFP32SignallingNaN, kFP32SignallingNaN);
+ FminFmaxFloatHelper(0, qnan,
+ qnan_processed, qnan_processed,
+ 0, 0);
+ FminFmaxFloatHelper(qnan, snan,
+ snan_processed, snan_processed,
+ snan_processed, snan_processed);
+ FminFmaxFloatHelper(snan, qnan,
+ snan_processed, snan_processed,
+ snan_processed, snan_processed);
// Iterate over all combinations of inputs.
float inputs[] = { FLT_MAX, FLT_MIN, 1.0, 0.0,
__ Fmov(s19, 65536.0);
__ Fmov(s20, -0.0);
__ Fmov(s21, kFP32PositiveInfinity);
- __ Fmov(d22, 0.0);
- __ Fmov(d23, 1.0);
- __ Fmov(d24, 0.25);
- __ Fmov(d25, 4294967296.0);
- __ Fmov(d26, -0.0);
- __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(s22, -1.0);
+ __ Fmov(d23, 0.0);
+ __ Fmov(d24, 1.0);
+ __ Fmov(d25, 0.25);
+ __ Fmov(d26, 4294967296.0);
+ __ Fmov(d27, -0.0);
+ __ Fmov(d28, kFP64PositiveInfinity);
+ __ Fmov(d29, -1.0);
__ Fsqrt(s0, s16);
__ Fsqrt(s1, s17);
__ Fsqrt(s3, s19);
__ Fsqrt(s4, s20);
__ Fsqrt(s5, s21);
- __ Fsqrt(d6, d22);
+ __ Fsqrt(s6, s22);
__ Fsqrt(d7, d23);
__ Fsqrt(d8, d24);
__ Fsqrt(d9, d25);
__ Fsqrt(d10, d26);
__ Fsqrt(d11, d27);
+ __ Fsqrt(d12, d28);
+ __ Fsqrt(d13, d29);
END();
RUN();
ASSERT_EQUAL_FP32(256.0, s3);
ASSERT_EQUAL_FP32(-0.0, s4);
ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
- ASSERT_EQUAL_FP64(0.0, d6);
- ASSERT_EQUAL_FP64(1.0, d7);
- ASSERT_EQUAL_FP64(0.5, d8);
- ASSERT_EQUAL_FP64(65536.0, d9);
- ASSERT_EQUAL_FP64(-0.0, d10);
- ASSERT_EQUAL_FP64(kFP32PositiveInfinity, d11);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
+ ASSERT_EQUAL_FP64(0.0, d7);
+ ASSERT_EQUAL_FP64(1.0, d8);
+ ASSERT_EQUAL_FP64(0.5, d9);
+ ASSERT_EQUAL_FP64(65536.0, d10);
+ ASSERT_EQUAL_FP64(-0.0, d11);
+ ASSERT_EQUAL_FP64(kFP32PositiveInfinity, d12);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
TEARDOWN();
}
}
+TEST(process_nan_double) {
+ INIT_V8();
+ // Make sure that NaN propagation works correctly.
+ double sn = rawbits_to_double(0x7ff5555511111111);
+ double qn = rawbits_to_double(0x7ffaaaaa11111111);
+ ASSERT(IsSignallingNaN(sn));
+ ASSERT(IsQuietNaN(qn));
+
+ // The input NaNs after passing through ProcessNaN.
+ // ProcessNaN quietens a signalling NaN by setting the top mantissa bit
+ // while preserving the payload; a quiet NaN passes through unchanged.
+ double sn_proc = rawbits_to_double(0x7ffd555511111111);
+ double qn_proc = qn;
+ ASSERT(IsQuietNaN(sn_proc));
+ ASSERT(IsQuietNaN(qn_proc));
+
+ SETUP();
+ START();
+
+ // Execute a number of instructions which all use ProcessNaN, and check that
+ // they all handle the NaN correctly.
+ __ Fmov(d0, sn);
+ __ Fmov(d10, qn);
+
+ // Operations that always propagate NaNs unchanged, even signalling NaNs.
+ // - Signalling NaN
+ __ Fmov(d1, d0);
+ __ Fabs(d2, d0);
+ __ Fneg(d3, d0);
+ // - Quiet NaN
+ __ Fmov(d11, d10);
+ __ Fabs(d12, d10);
+ __ Fneg(d13, d10);
+
+ // Operations that use ProcessNaN.
+ // - Signalling NaN
+ __ Fsqrt(d4, d0);
+ __ Frinta(d5, d0);
+ __ Frintn(d6, d0);
+ __ Frintz(d7, d0);
+ // - Quiet NaN
+ __ Fsqrt(d14, d10);
+ __ Frinta(d15, d10);
+ __ Frintn(d16, d10);
+ __ Frintz(d17, d10);
+
+ // The behaviour of fcvt is checked in TEST(fcvt_sd).
+
+ END();
+ RUN();
+
+ uint64_t qn_raw = double_to_rawbits(qn);
+ uint64_t sn_raw = double_to_rawbits(sn);
+
+ // Fmov, Fabs and Fneg must preserve the NaN payload exactly; Fabs clears
+ // only the sign bit and Fneg inverts only the sign bit.
+ // - Signalling NaN
+ ASSERT_EQUAL_FP64(sn, d1);
+ ASSERT_EQUAL_FP64(rawbits_to_double(sn_raw & ~kDSignMask), d2);
+ ASSERT_EQUAL_FP64(rawbits_to_double(sn_raw ^ kDSignMask), d3);
+ // - Quiet NaN
+ ASSERT_EQUAL_FP64(qn, d11);
+ ASSERT_EQUAL_FP64(rawbits_to_double(qn_raw & ~kDSignMask), d12);
+ ASSERT_EQUAL_FP64(rawbits_to_double(qn_raw ^ kDSignMask), d13);
+
+ // - Signalling NaN
+ ASSERT_EQUAL_FP64(sn_proc, d4);
+ ASSERT_EQUAL_FP64(sn_proc, d5);
+ ASSERT_EQUAL_FP64(sn_proc, d6);
+ ASSERT_EQUAL_FP64(sn_proc, d7);
+ // - Quiet NaN
+ ASSERT_EQUAL_FP64(qn_proc, d14);
+ ASSERT_EQUAL_FP64(qn_proc, d15);
+ ASSERT_EQUAL_FP64(qn_proc, d16);
+ ASSERT_EQUAL_FP64(qn_proc, d17);
+
+ TEARDOWN();
+}
+
+
+TEST(process_nan_float) {
+ INIT_V8();
+ // Make sure that NaN propagation works correctly.
+ float sn = rawbits_to_float(0x7f951111);
+ float qn = rawbits_to_float(0x7fea1111);
+ ASSERT(IsSignallingNaN(sn));
+ ASSERT(IsQuietNaN(qn));
+
+ // The input NaNs after passing through ProcessNaN.
+ // ProcessNaN quietens a signalling NaN by setting the top mantissa bit
+ // while preserving the payload; a quiet NaN passes through unchanged.
+ float sn_proc = rawbits_to_float(0x7fd51111);
+ float qn_proc = qn;
+ ASSERT(IsQuietNaN(sn_proc));
+ ASSERT(IsQuietNaN(qn_proc));
+
+ SETUP();
+ START();
+
+ // Execute a number of instructions which all use ProcessNaN, and check that
+ // they all handle the NaN correctly.
+ __ Fmov(s0, sn);
+ __ Fmov(s10, qn);
+
+ // Operations that always propagate NaNs unchanged, even signalling NaNs.
+ // - Signalling NaN
+ __ Fmov(s1, s0);
+ __ Fabs(s2, s0);
+ __ Fneg(s3, s0);
+ // - Quiet NaN
+ __ Fmov(s11, s10);
+ __ Fabs(s12, s10);
+ __ Fneg(s13, s10);
+
+ // Operations that use ProcessNaN.
+ // - Signalling NaN
+ __ Fsqrt(s4, s0);
+ __ Frinta(s5, s0);
+ __ Frintn(s6, s0);
+ __ Frintz(s7, s0);
+ // - Quiet NaN
+ __ Fsqrt(s14, s10);
+ __ Frinta(s15, s10);
+ __ Frintn(s16, s10);
+ __ Frintz(s17, s10);
+
+ // The behaviour of fcvt is checked in TEST(fcvt_sd).
+
+ END();
+ RUN();
+
+ uint32_t qn_raw = float_to_rawbits(qn);
+ uint32_t sn_raw = float_to_rawbits(sn);
+
+ // Fmov, Fabs and Fneg must preserve the NaN payload exactly; Fabs clears
+ // only the sign bit and Fneg inverts only the sign bit.
+ // - Signalling NaN
+ ASSERT_EQUAL_FP32(sn, s1);
+ ASSERT_EQUAL_FP32(rawbits_to_float(sn_raw & ~kSSignMask), s2);
+ ASSERT_EQUAL_FP32(rawbits_to_float(sn_raw ^ kSSignMask), s3);
+ // - Quiet NaN
+ ASSERT_EQUAL_FP32(qn, s11);
+ ASSERT_EQUAL_FP32(rawbits_to_float(qn_raw & ~kSSignMask), s12);
+ ASSERT_EQUAL_FP32(rawbits_to_float(qn_raw ^ kSSignMask), s13);
+
+ // - Signalling NaN
+ ASSERT_EQUAL_FP32(sn_proc, s4);
+ ASSERT_EQUAL_FP32(sn_proc, s5);
+ ASSERT_EQUAL_FP32(sn_proc, s6);
+ ASSERT_EQUAL_FP32(sn_proc, s7);
+ // - Quiet NaN
+ ASSERT_EQUAL_FP32(qn_proc, s14);
+ ASSERT_EQUAL_FP32(qn_proc, s15);
+ ASSERT_EQUAL_FP32(qn_proc, s16);
+ ASSERT_EQUAL_FP32(qn_proc, s17);
+
+ TEARDOWN();
+}
+
+
+// Run each two-operand FP instruction on (n, m) — at least one of which must
+// be NaN — and check that every result is exactly |expected|, including its
+// payload bits.
+static void ProcessNaNsHelper(double n, double m, double expected) {
+ ASSERT(isnan(n) || isnan(m));
+ ASSERT(isnan(expected));
+
+ SETUP();
+ START();
+
+ // Execute a number of instructions which all use ProcessNaNs, and check that
+ // they all propagate NaNs correctly.
+ __ Fmov(d0, n);
+ __ Fmov(d1, m);
+
+ __ Fadd(d2, d0, d1);
+ __ Fsub(d3, d0, d1);
+ __ Fmul(d4, d0, d1);
+ __ Fdiv(d5, d0, d1);
+ __ Fmax(d6, d0, d1);
+ __ Fmin(d7, d0, d1);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_FP64(expected, d2);
+ ASSERT_EQUAL_FP64(expected, d3);
+ ASSERT_EQUAL_FP64(expected, d4);
+ ASSERT_EQUAL_FP64(expected, d5);
+ ASSERT_EQUAL_FP64(expected, d6);
+ ASSERT_EQUAL_FP64(expected, d7);
+
+ TEARDOWN();
+}
+
+
+TEST(process_nans_double) {
+ INIT_V8();
+ // Make sure that NaN propagation works correctly.
+ double sn = rawbits_to_double(0x7ff5555511111111);
+ double sm = rawbits_to_double(0x7ff5555522222222);
+ double qn = rawbits_to_double(0x7ffaaaaa11111111);
+ double qm = rawbits_to_double(0x7ffaaaaa22222222);
+ ASSERT(IsSignallingNaN(sn));
+ ASSERT(IsSignallingNaN(sm));
+ ASSERT(IsQuietNaN(qn));
+ ASSERT(IsQuietNaN(qm));
+
+ // The input NaNs after passing through ProcessNaN.
+ double sn_proc = rawbits_to_double(0x7ffd555511111111);
+ double sm_proc = rawbits_to_double(0x7ffd555522222222);
+ double qn_proc = qn;
+ double qm_proc = qm;
+ ASSERT(IsQuietNaN(sn_proc));
+ ASSERT(IsQuietNaN(sm_proc));
+ ASSERT(IsQuietNaN(qn_proc));
+ ASSERT(IsQuietNaN(qm_proc));
+
+ // Quiet NaNs are propagated.
+ // (When both operands are quiet NaNs, the first operand is returned.)
+ ProcessNaNsHelper(qn, 0, qn_proc);
+ ProcessNaNsHelper(0, qm, qm_proc);
+ ProcessNaNsHelper(qn, qm, qn_proc);
+
+ // Signalling NaNs are propagated, and made quiet.
+ ProcessNaNsHelper(sn, 0, sn_proc);
+ ProcessNaNsHelper(0, sm, sm_proc);
+ ProcessNaNsHelper(sn, sm, sn_proc);
+
+ // Signalling NaNs take precedence over quiet NaNs.
+ // (When both operands are signalling, the first operand wins, quietened.)
+ ProcessNaNsHelper(sn, qm, sn_proc);
+ ProcessNaNsHelper(qn, sm, sm_proc);
+ ProcessNaNsHelper(sn, sm, sn_proc);
+}
+
+
+// Run each two-operand FP instruction on (n, m) — at least one of which must
+// be NaN — and check that every result is exactly |expected|, including its
+// payload bits.
+static void ProcessNaNsHelper(float n, float m, float expected) {
+ ASSERT(isnan(n) || isnan(m));
+ ASSERT(isnan(expected));
+
+ SETUP();
+ START();
+
+ // Execute a number of instructions which all use ProcessNaNs, and check that
+ // they all propagate NaNs correctly.
+ __ Fmov(s0, n);
+ __ Fmov(s1, m);
+
+ __ Fadd(s2, s0, s1);
+ __ Fsub(s3, s0, s1);
+ __ Fmul(s4, s0, s1);
+ __ Fdiv(s5, s0, s1);
+ __ Fmax(s6, s0, s1);
+ __ Fmin(s7, s0, s1);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_FP32(expected, s2);
+ ASSERT_EQUAL_FP32(expected, s3);
+ ASSERT_EQUAL_FP32(expected, s4);
+ ASSERT_EQUAL_FP32(expected, s5);
+ ASSERT_EQUAL_FP32(expected, s6);
+ ASSERT_EQUAL_FP32(expected, s7);
+
+ TEARDOWN();
+}
+
+
+TEST(process_nans_float) {
+ INIT_V8();
+ // Make sure that NaN propagation works correctly.
+ float sn = rawbits_to_float(0x7f951111);
+ float sm = rawbits_to_float(0x7f952222);
+ float qn = rawbits_to_float(0x7fea1111);
+ float qm = rawbits_to_float(0x7fea2222);
+ ASSERT(IsSignallingNaN(sn));
+ ASSERT(IsSignallingNaN(sm));
+ ASSERT(IsQuietNaN(qn));
+ ASSERT(IsQuietNaN(qm));
+
+ // The input NaNs after passing through ProcessNaN.
+ float sn_proc = rawbits_to_float(0x7fd51111);
+ float sm_proc = rawbits_to_float(0x7fd52222);
+ float qn_proc = qn;
+ float qm_proc = qm;
+ ASSERT(IsQuietNaN(sn_proc));
+ ASSERT(IsQuietNaN(sm_proc));
+ ASSERT(IsQuietNaN(qn_proc));
+ ASSERT(IsQuietNaN(qm_proc));
+
+ // Quiet NaNs are propagated.
+ // (When both operands are quiet NaNs, the first operand is returned.)
+ ProcessNaNsHelper(qn, 0, qn_proc);
+ ProcessNaNsHelper(0, qm, qm_proc);
+ ProcessNaNsHelper(qn, qm, qn_proc);
+
+ // Signalling NaNs are propagated, and made quiet.
+ ProcessNaNsHelper(sn, 0, sn_proc);
+ ProcessNaNsHelper(0, sm, sm_proc);
+ ProcessNaNsHelper(sn, sm, sn_proc);
+
+ // Signalling NaNs take precedence over quiet NaNs.
+ // (When both operands are signalling, the first operand wins, quietened.)
+ ProcessNaNsHelper(sn, qm, sn_proc);
+ ProcessNaNsHelper(qn, sm, sm_proc);
+ ProcessNaNsHelper(sn, sm, sn_proc);
+}
+
+
+// With FPCR.DN set, check that every NaN-producing operation on (n, m, a)
+// yields the default NaN rather than propagating an input NaN.
+static void DefaultNaNHelper(float n, float m, float a) {
+ ASSERT(isnan(n) || isnan(m) || isnan(a));
+
+ // One-operand ops read only n; two-operand ops read n and m; the fused
+ // multiply-add ops below read all three, so they are always tested.
+ bool test_1op = isnan(n);
+ bool test_2op = isnan(n) || isnan(m);
+
+ SETUP();
+ START();
+
+ // Enable Default-NaN mode in the FPCR.
+ __ Mrs(x0, FPCR);
+ __ Orr(x1, x0, DN_mask);
+ __ Msr(FPCR, x1);
+
+ // Execute a number of instructions which all use ProcessNaNs, and check that
+ // they all produce the default NaN.
+ __ Fmov(s0, n);
+ __ Fmov(s1, m);
+ __ Fmov(s2, a);
+
+ if (test_1op) {
+ // Operations that always propagate NaNs unchanged, even signalling NaNs.
+ __ Fmov(s10, s0);
+ __ Fabs(s11, s0);
+ __ Fneg(s12, s0);
+
+ // Operations that use ProcessNaN.
+ __ Fsqrt(s13, s0);
+ __ Frinta(s14, s0);
+ __ Frintn(s15, s0);
+ __ Frintz(s16, s0);
+
+ // Fcvt usually has special NaN handling, but it respects default-NaN mode.
+ __ Fcvt(d17, s0);
+ }
+
+ if (test_2op) {
+ __ Fadd(s18, s0, s1);
+ __ Fsub(s19, s0, s1);
+ __ Fmul(s20, s0, s1);
+ __ Fdiv(s21, s0, s1);
+ __ Fmax(s22, s0, s1);
+ __ Fmin(s23, s0, s1);
+ }
+
+ __ Fmadd(s24, s0, s1, s2);
+ __ Fmsub(s25, s0, s1, s2);
+ __ Fnmadd(s26, s0, s1, s2);
+ __ Fnmsub(s27, s0, s1, s2);
+
+ // Restore FPCR.
+ __ Msr(FPCR, x0);
+
+ END();
+ RUN();
+
+ if (test_1op) {
+ uint32_t n_raw = float_to_rawbits(n);
+ ASSERT_EQUAL_FP32(n, s10);
+ ASSERT_EQUAL_FP32(rawbits_to_float(n_raw & ~kSSignMask), s11);
+ ASSERT_EQUAL_FP32(rawbits_to_float(n_raw ^ kSSignMask), s12);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s13);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s14);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s15);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s16);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d17);
+ }
+
+ if (test_2op) {
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s18);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s19);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s20);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s21);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s22);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s23);
+ }
+
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s24);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s25);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s26);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s27);
+
+ TEARDOWN();
+}
+
+
+TEST(default_nan_float) {
+ INIT_V8();
+ // Distinct payloads in each operand position make it obvious if an input
+ // NaN leaks through instead of the default NaN.
+ float sn = rawbits_to_float(0x7f951111);
+ float sm = rawbits_to_float(0x7f952222);
+ float sa = rawbits_to_float(0x7f95aaaa);
+ float qn = rawbits_to_float(0x7fea1111);
+ float qm = rawbits_to_float(0x7fea2222);
+ float qa = rawbits_to_float(0x7feaaaaa);
+ ASSERT(IsSignallingNaN(sn));
+ ASSERT(IsSignallingNaN(sm));
+ ASSERT(IsSignallingNaN(sa));
+ ASSERT(IsQuietNaN(qn));
+ ASSERT(IsQuietNaN(qm));
+ ASSERT(IsQuietNaN(qa));
+
+ // Cover every combination of NaN kind and operand position.
+ // - Signalling NaNs
+ DefaultNaNHelper(sn, 0.0f, 0.0f);
+ DefaultNaNHelper(0.0f, sm, 0.0f);
+ DefaultNaNHelper(0.0f, 0.0f, sa);
+ DefaultNaNHelper(sn, sm, 0.0f);
+ DefaultNaNHelper(0.0f, sm, sa);
+ DefaultNaNHelper(sn, 0.0f, sa);
+ DefaultNaNHelper(sn, sm, sa);
+ // - Quiet NaNs
+ DefaultNaNHelper(qn, 0.0f, 0.0f);
+ DefaultNaNHelper(0.0f, qm, 0.0f);
+ DefaultNaNHelper(0.0f, 0.0f, qa);
+ DefaultNaNHelper(qn, qm, 0.0f);
+ DefaultNaNHelper(0.0f, qm, qa);
+ DefaultNaNHelper(qn, 0.0f, qa);
+ DefaultNaNHelper(qn, qm, qa);
+ // - Mixed NaNs
+ DefaultNaNHelper(qn, sm, sa);
+ DefaultNaNHelper(sn, qm, sa);
+ DefaultNaNHelper(sn, sm, qa);
+ DefaultNaNHelper(qn, qm, sa);
+ DefaultNaNHelper(sn, qm, qa);
+ DefaultNaNHelper(qn, sm, qa);
+ DefaultNaNHelper(qn, qm, qa);
+}
+
+
+// With FPCR.DN set, check that every NaN-producing operation on (n, m, a)
+// yields the default NaN rather than propagating an input NaN.
+static void DefaultNaNHelper(double n, double m, double a) {
+ ASSERT(isnan(n) || isnan(m) || isnan(a));
+
+ // One-operand ops read only n; two-operand ops read n and m; the fused
+ // multiply-add ops below read all three, so they are always tested.
+ bool test_1op = isnan(n);
+ bool test_2op = isnan(n) || isnan(m);
+
+ SETUP();
+ START();
+
+ // Enable Default-NaN mode in the FPCR.
+ __ Mrs(x0, FPCR);
+ __ Orr(x1, x0, DN_mask);
+ __ Msr(FPCR, x1);
+
+ // Execute a number of instructions which all use ProcessNaNs, and check that
+ // they all produce the default NaN.
+ __ Fmov(d0, n);
+ __ Fmov(d1, m);
+ __ Fmov(d2, a);
+
+ if (test_1op) {
+ // Operations that always propagate NaNs unchanged, even signalling NaNs.
+ __ Fmov(d10, d0);
+ __ Fabs(d11, d0);
+ __ Fneg(d12, d0);
+
+ // Operations that use ProcessNaN.
+ __ Fsqrt(d13, d0);
+ __ Frinta(d14, d0);
+ __ Frintn(d15, d0);
+ __ Frintz(d16, d0);
+
+ // Fcvt usually has special NaN handling, but it respects default-NaN mode.
+ __ Fcvt(s17, d0);
+ }
+
+ if (test_2op) {
+ __ Fadd(d18, d0, d1);
+ __ Fsub(d19, d0, d1);
+ __ Fmul(d20, d0, d1);
+ __ Fdiv(d21, d0, d1);
+ __ Fmax(d22, d0, d1);
+ __ Fmin(d23, d0, d1);
+ }
+
+ __ Fmadd(d24, d0, d1, d2);
+ __ Fmsub(d25, d0, d1, d2);
+ __ Fnmadd(d26, d0, d1, d2);
+ __ Fnmsub(d27, d0, d1, d2);
+
+ // Restore FPCR.
+ __ Msr(FPCR, x0);
+
+ END();
+ RUN();
+
+ if (test_1op) {
+ uint64_t n_raw = double_to_rawbits(n);
+ ASSERT_EQUAL_FP64(n, d10);
+ ASSERT_EQUAL_FP64(rawbits_to_double(n_raw & ~kDSignMask), d11);
+ ASSERT_EQUAL_FP64(rawbits_to_double(n_raw ^ kDSignMask), d12);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d14);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d15);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d16);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s17);
+ }
+
+ if (test_2op) {
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d18);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d19);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d20);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d21);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d22);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d23);
+ }
+
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d24);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d25);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d26);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d27);
+
+ TEARDOWN();
+}
+
+
+TEST(default_nan_double) {
+ INIT_V8();
+ // Distinct payloads in each operand position make it obvious if an input
+ // NaN leaks through instead of the default NaN.
+ double sn = rawbits_to_double(0x7ff5555511111111);
+ double sm = rawbits_to_double(0x7ff5555522222222);
+ double sa = rawbits_to_double(0x7ff55555aaaaaaaa);
+ double qn = rawbits_to_double(0x7ffaaaaa11111111);
+ double qm = rawbits_to_double(0x7ffaaaaa22222222);
+ double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
+ ASSERT(IsSignallingNaN(sn));
+ ASSERT(IsSignallingNaN(sm));
+ ASSERT(IsSignallingNaN(sa));
+ ASSERT(IsQuietNaN(qn));
+ ASSERT(IsQuietNaN(qm));
+ ASSERT(IsQuietNaN(qa));
+
+ // Cover every combination of NaN kind and operand position.
+ // - Signalling NaNs
+ DefaultNaNHelper(sn, 0.0, 0.0);
+ DefaultNaNHelper(0.0, sm, 0.0);
+ DefaultNaNHelper(0.0, 0.0, sa);
+ DefaultNaNHelper(sn, sm, 0.0);
+ DefaultNaNHelper(0.0, sm, sa);
+ DefaultNaNHelper(sn, 0.0, sa);
+ DefaultNaNHelper(sn, sm, sa);
+ // - Quiet NaNs
+ DefaultNaNHelper(qn, 0.0, 0.0);
+ DefaultNaNHelper(0.0, qm, 0.0);
+ DefaultNaNHelper(0.0, 0.0, qa);
+ DefaultNaNHelper(qn, qm, 0.0);
+ DefaultNaNHelper(0.0, qm, qa);
+ DefaultNaNHelper(qn, 0.0, qa);
+ DefaultNaNHelper(qn, qm, qa);
+ // - Mixed NaNs
+ DefaultNaNHelper(qn, sm, sa);
+ DefaultNaNHelper(sn, qm, sa);
+ DefaultNaNHelper(sn, sm, qa);
+ DefaultNaNHelper(qn, qm, sa);
+ DefaultNaNHelper(sn, qm, qa);
+ DefaultNaNHelper(qn, sm, qa);
+ DefaultNaNHelper(qn, qm, qa);
+}
+
+
TEST(call_no_relocation) {
Address call_start;
Address return_address;