const int dst_code,
const VFPType src_type,
const int src_code,
- Assembler::ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(src_type != dst_type);
int D, Vd, M, Vm;
void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
void vmov(const Register dst,
const SwVfpRegister src,
const Condition cond = al);
- enum ConversionMode {
- FPSCRRounding = 0,
- RoundToZero = 1
- };
void vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vabs(const DwVfpRegister dst,
// VFP FPSCR constants.
+enum VFPConversionMode {
+ kFPSCRRounding = 0,      // Round using the mode currently set in the FPSCR.
+ kDefaultRoundToZero = 1  // Use the vcvt default: round towards zero.
+};
+
static const uint32_t kVFPExceptionMask = 0xf;
-static const uint32_t kVFPRoundingModeMask = 3 << 22;
static const uint32_t kVFPFlushToZeroMask = 1 << 24;
-static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
static const uint32_t kVFPInvalidExceptionBit = 1;
static const uint32_t kVFPNConditionFlagBit = 1 << 31;
// VFP rounding modes. See ARM DDI 0406B Page A2-29.
-enum FPSCRRoundingModes {
- RN, // Round to Nearest.
- RP, // Round towards Plus Infinity.
- RM, // Round towards Minus Infinity.
- RZ // Round towards zero.
+enum VFPRoundingMode {
+ RN = 0 << 22, // Round to Nearest.
+ RP = 1 << 22, // Round towards Plus Infinity.
+ RM = 2 << 22, // Round towards Minus Infinity.
+ RZ = 3 << 22, // Round towards zero.
+
+ // Aliases.
+ kRoundToNearest = RN,
+ kRoundToPlusInf = RP,
+ kRoundToMinusInf = RM,
+ kRoundToZero = RZ
};
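+// The FPSCR rounding-mode field occupies bits 23:22; the VFPRoundingMode
+// values above are pre-shifted so they can be OR'ed directly into the FPSCR.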
+static const uint32_t kVFPRoundingModeMask = 3 << 22;
// -----------------------------------------------------------------------------
// Hints.
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- LDoubleToI* res = new LDoubleToI(value);
+ LDoubleToI* res = new LDoubleToI(value, TempRegister());
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
+class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LDoubleToI(LOperand* value) {
+ explicit LDoubleToI(LOperand* value, LOperand* temp1) {
inputs_[0] = value;
+ temps_[0] = temp1;
}
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
}
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Register prev_fpscr = ToRegister(instr->TempAt(0));
- SwVfpRegister single_scratch = double_scratch0().low();
- Register scratch = scratch0();
+// Converts a double to an int32 using the given VFP rounding mode.
+// Clears the z flag (ne condition) if a VFP exception is raised, e.g. on
+// an out-of-range or invalid input.
+void LCodeGen::EmitVFPTruncate(VFPRoundingMode rounding_mode,
+ SwVfpRegister result,
+ DwVfpRegister double_input,
+ Register scratch1,
+ Register scratch2) {
+ Register prev_fpscr = scratch1;
+ Register scratch = scratch2;
// Set custom FPSCR:
- // - Set rounding mode to "Round towards Minus Infinity".
+ // - Set rounding mode.
// - Clear vfp cumulative exception flags.
// - Make sure Flush-to-zero mode control bit is unset.
__ vmrs(prev_fpscr);
- __ bic(scratch, prev_fpscr,
- Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
- __ orr(scratch, scratch, Operand(kVFPRoundToMinusInfinityBits));
+ __ bic(scratch, prev_fpscr, Operand(kVFPExceptionMask |
+ kVFPRoundingModeMask |
+ kVFPFlushToZeroMask));
+ __ orr(scratch, scratch, Operand(rounding_mode));
__ vmsr(scratch);
// Convert the argument to an integer.
- __ vcvt_s32_f64(single_scratch,
- input,
- Assembler::FPSCRRounding,
- al);
+ __ vcvt_s32_f64(result,
+ double_input,
+ kFPSCRRounding);
- // Retrieve FPSCR and check for vfp exceptions.
+ // Retrieve FPSCR.
__ vmrs(scratch);
- // Restore FPSCR
+ // Restore FPSCR.
__ vmsr(prev_fpscr);
+ // Check for vfp exceptions.
__ tst(scratch, Operand(kVFPExceptionMask));
+}
+
+
+void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ SwVfpRegister single_scratch = double_scratch0().low();
+ Register scratch1 = scratch0();
+ Register scratch2 = ToRegister(instr->TempAt(0));
+
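+ // Round towards minus infinity to compute the floor; a VFP exception
+ // (e.g. an out-of-range or NaN input) leaves the ne condition set and
+ // deoptimizes below.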
+ EmitVFPTruncate(kRoundToMinusInf,
+ single_scratch,
+ input,
+ scratch1,
+ scratch2);
DeoptimizeIf(ne, instr->environment());
// Move the result back to general purpose register r0.
Label done;
__ cmp(result, Operand(0));
__ b(ne, &done);
- __ vmov(scratch, input.high());
- __ tst(scratch, Operand(HeapNumber::kSignMask));
+ __ vmov(scratch1, input.high());
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment());
__ bind(&done);
}
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- Abort("DoDoubleToI unimplemented.");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsDoubleRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsRegister());
+
+ DoubleRegister double_input = ToDoubleRegister(input);
+ Register result_reg = ToRegister(result);
+ SwVfpRegister single_scratch = double_scratch0().low();
+ Register scratch1 = scratch0();
+ Register scratch2 = ToRegister(instr->TempAt(0));
+
+ VFPRoundingMode rounding_mode = instr->truncating() ? kRoundToMinusInf
+ : kRoundToNearest;
+
+ EmitVFPTruncate(rounding_mode,
+ single_scratch,
+ double_input,
+ scratch1,
+ scratch2);
+ // Deoptimize if we had a vfp invalid exception.
+ DeoptimizeIf(ne, instr->environment());
+ // Retrieve the result.
+ __ vmov(result_reg, single_scratch);
+
+ if (instr->truncating() &&
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ cmp(result_reg, Operand(0));
+ __ b(ne, &done);
+ // Check for -0.
+ __ vmov(scratch1, double_input.high());
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment());
+
+ __ bind(&done);
+ }
}
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
void DoMathAbs(LUnaryMathOperation* instr);
+ void EmitVFPTruncate(VFPRoundingMode rounding_mode,
+ SwVfpRegister result,
+ DwVfpRegister double_input,
+ Register scratch1,
+ Register scratch2);
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
(overflow_vfp_flag_ << 2) |
(div_zero_vfp_flag_ << 1) |
(inv_op_vfp_flag_ << 0) |
- (FPSCR_rounding_mode_ << 22);
+ (FPSCR_rounding_mode_);  // Mode bits are already shifted into bits 23:22.
set_register(rt, fpscr);
}
} else if ((instr->VLValue() == 0x0) &&
div_zero_vfp_flag_ = (rt_value >> 1) & 1;
inv_op_vfp_flag_ = (rt_value >> 0) & 1;
FPSCR_rounding_mode_ =
- static_cast<FPSCRRoundingModes>((rt_value >> 22) & 3);
+ static_cast<VFPRoundingMode>((rt_value) & kVFPRoundingModeMask);
}
} else {
UNIMPLEMENTED(); // Not used by V8.
}
}
+bool get_inv_op_vfp_flag(VFPRoundingMode mode,
+ double val,
+ bool unsigned_) {
+ ASSERT((mode == RN) || (mode == RM) || (mode == RZ));
+ double max_uint = static_cast<double>(0xffffffffu);
+ double max_int = static_cast<double>(kMaxInt);
+ double min_int = static_cast<double>(kMinInt);
+
+ // Check for NaN.
+ if (val != val) {
+ return true;
+ }
+
+ // Check for overflow. This code works because 32-bit integers can be
+ // exactly represented by IEEE-754 64-bit floating-point values.
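+ // For example, in RN mode a signed conversion overflows once
+ // val >= kMaxInt + 0.5: that value is exactly representable and rounds
+ // (ties-to-even) up to kMaxInt + 1.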
+ switch (mode) {
+ case RN:
+ return unsigned_ ? (val >= (max_uint + 0.5)) ||
+ (val < -0.5)
+ : (val >= (max_int + 0.5)) ||
+ (val < (min_int - 0.5));
+
+ case RM:
+ return unsigned_ ? (val >= (max_uint + 1.0)) ||
+ (val < 0)
+ : (val >= (max_int + 1.0)) ||
+ (val < min_int);
+
+ case RZ:
+ return unsigned_ ? (val >= (max_uint + 1.0)) ||
+ (val <= -1)
+ : (val >= (max_int + 1.0)) ||
+ (val <= (min_int - 1.0));
+ default:
+ UNREACHABLE();
+ return true;
+ }
+}
+
+
+// We call this function only if we had a vfp invalid exception.
+// It returns the correct saturated value.
+int VFPConversionSaturate(double val, bool unsigned_res) {
+ if (val != val) {
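+ // ARM's vcvt converts NaN to zero (and raises the invalid-operation flag).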
+ return 0;
+ } else {
+ if (unsigned_res) {
+ return (val < 0) ? 0 : 0xffffffffu;
+ } else {
+ return (val < 0) ? kMinInt : kMaxInt;
+ }
+ }
+}
+
void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) &&
+ (instr->Bits(27, 23) == 0x1D));
ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
(((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
// Conversion between floating-point and integer.
bool to_integer = (instr->Bit(18) == 1);
- VFPRegPrecision src_precision = kSinglePrecision;
- if (instr->SzValue() == 1) {
- src_precision = kDoublePrecision;
- }
+ VFPRegPrecision src_precision = (instr->SzValue() == 1) ? kDoublePrecision
+ : kSinglePrecision;
if (to_integer) {
- bool unsigned_integer = (instr->Bit(16) == 0);
- FPSCRRoundingModes mode;
- if (instr->Bit(7) != 1) {
- // Use FPSCR defined rounding mode.
- mode = FPSCR_rounding_mode_;
- // Only RZ and RM modes are supported.
- ASSERT((mode == RM) || (mode == RZ));
- } else {
- // VFP uses round towards zero by default.
- mode = RZ;
- }
+ // We are playing with code close to the C++ standard's limits below,
+ // hence the very simple code and heavy checks.
+ //
+ // Note:
+ // C++ defines default type casting from floating point to integer as
+ // (close to) rounding toward zero ("fractional part discarded").
int dst = instr->VFPDRegValue(kSinglePrecision);
int src = instr->VFPMRegValue(src_precision);
- int32_t kMaxInt = v8::internal::kMaxInt;
- int32_t kMinInt = v8::internal::kMinInt;
- switch (mode) {
- case RM:
- if (src_precision == kDoublePrecision) {
- double val = get_double_from_d_register(src);
- inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
+ // Bit 7 in vcvt instructions indicates if we should use the FPSCR rounding
+ // mode or the default Round to Zero mode.
+ VFPRoundingMode mode = (instr->Bit(7) != 1) ? FPSCR_rounding_mode_
+ : RZ;
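+ // These correspond to the kFPSCRRounding and kDefaultRoundToZero modes
+ // used when assembling vcvt.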
+ ASSERT((mode == RM) || (mode == RZ) || (mode == RN));
- int sint = unsigned_integer ? static_cast<uint32_t>(val) :
- static_cast<int32_t>(val);
- sint = sint > val ? sint - 1 : sint;
+ bool unsigned_integer = (instr->Bit(16) == 0);
+ bool double_precision = (src_precision == kDoublePrecision);
- set_s_register_from_sinteger(dst, sint);
- } else {
- float val = get_float_from_s_register(src);
+ double val = double_precision ? get_double_from_d_register(src)
+ : get_float_from_s_register(src);
- inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
+ int temp = unsigned_integer ? static_cast<uint32_t>(val)
+ : static_cast<int32_t>(val);
- int sint = unsigned_integer ? static_cast<uint32_t>(val) :
- static_cast<int32_t>(val);
- sint = sint > val ? sint - 1 : sint;
+ inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
- set_s_register_from_sinteger(dst, sint);
+ if (inv_op_vfp_flag_) {
+ temp = VFPConversionSaturate(val, unsigned_integer);
+ } else {
+ switch (mode) {
+ case RN: {
+ double abs_diff =
+ unsigned_integer ? fabs(val - static_cast<uint32_t>(temp))
+ : fabs(val - temp);
+ int val_sign = (val > 0) ? 1 : -1;
+ if (abs_diff > 0.5) {
+ temp += val_sign;
+ } else if (abs_diff == 0.5) {
+ // Round to even if exactly halfway.
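+ // e.g. both 1.5 and 2.5 round to 2.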
+ temp = ((temp % 2) == 0) ? temp : temp + val_sign;
+ }
+ break;
}
- break;
- case RZ:
- if (src_precision == kDoublePrecision) {
- double val = get_double_from_d_register(src);
-
- inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
-
- int sint = unsigned_integer ? static_cast<uint32_t>(val) :
- static_cast<int32_t>(val);
-
- set_s_register_from_sinteger(dst, sint);
- } else {
- float val = get_float_from_s_register(src);
- inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
-
- int sint = unsigned_integer ? static_cast<uint32_t>(val) :
- static_cast<int32_t>(val);
+ case RM:
+ temp = temp > val ? temp - 1 : temp;
+ break;
- set_s_register_from_sinteger(dst, sint);
- }
- break;
+ case RZ:
+ // Nothing to do.
+ break;
- default:
- UNREACHABLE();
+ default:
+ UNREACHABLE();
+ }
}
+ // Update the destination register.
+ set_s_register_from_sinteger(dst, temp);
+
} else {
bool unsigned_integer = (instr->Bit(7) == 0);
bool v_flag_FPSCR_;
// VFP rounding mode. See ARM DDI 0406B Page A2-29.
- FPSCRRoundingModes FPSCR_rounding_mode_;
+ VFPRoundingMode FPSCR_rounding_mode_;
// VFP FP exception flags architecture state.
bool inv_op_vfp_flag_;
// - Make sure Flush-to-zero mode control bit is unset (bit 22).
__ bic(r9, r3,
Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
- __ orr(r9, r9, Operand(kVFPRoundToMinusInfinityBits));
+ __ orr(r9, r9, Operand(kRoundToMinusInf));
__ vmsr(r9);
// Convert the argument to an integer.
- __ vcvt_s32_f64(s0, d1, Assembler::FPSCRRounding, al);
+ __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
// Use vcvt latency to start checking for special cases.
// Get the argument exponent and clear the sign bit.
// Not infinity or NaN simply convert to int.
if (IsElementTypeSigned(array_type)) {
- __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne);
+ __ vcvt_s32_f64(s0, d0, kDefaultRoundToZero, ne);
} else {
- __ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne);
+ __ vcvt_u32_f64(s0, d0, kDefaultRoundToZero, ne);
}
__ vmov(r5, s0, ne);
}
-static void TestRoundingMode(int32_t mode, double value, int expected) {
+enum VCVTTypes {
+ s32_f64,
+ u32_f64
+};
+
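+// Runs the given vcvt variant on 'value' under the requested FPSCR rounding
+// mode, then checks that the exception behaviour matches 'expected_exception'
+// and that the result equals 'expected'.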
+static void TestRoundingMode(VCVTTypes types,
+ VFPRoundingMode mode,
+ double value,
+ int expected,
+ bool expected_exception = false) {
InitializeVM();
v8::HandleScope scope;
Assembler assm(NULL, 0);
- __ vmrs(r1);
- // Set custom FPSCR.
- __ bic(r2, r1, Operand(((mode ^ 3) << 22) | 0xf));
- __ orr(r2, r2, Operand(mode << 22));
- __ vmsr(r2);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
- // Load value, convert, and move back result to r0.
- __ vmov(d1, value);
- __ vcvt_s32_f64(s0, d1, Assembler::FPSCRRounding, al);
- __ vmov(r0, s0);
+ Label wrong_exception;
+
+ __ vmrs(r1);
+ // Set custom FPSCR.
+ __ bic(r2, r1, Operand(kVFPRoundingModeMask | kVFPExceptionMask));
+ __ orr(r2, r2, Operand(mode));
+ __ vmsr(r2);
+
+ // Load value, convert, and move back result to r0 if everything went well.
+ __ vmov(d1, value);
+ switch (types) {
+ case s32_f64:
+ __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
+ break;
+
+ case u32_f64:
+ __ vcvt_u32_f64(s0, d1, kFPSCRRounding);
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ // Check for vfp exceptions
+ __ vmrs(r2);
+ __ tst(r2, Operand(kVFPExceptionMask));
+ // Check that we behaved as expected.
+ __ b(&wrong_exception,
+ expected_exception ? eq : ne);
+ // There was no exception. Retrieve the result and return.
+ __ vmov(r0, s0);
+ __ mov(pc, Operand(lr));
- __ mov(pc, Operand(lr));
+ // The exception behaviour is not what we expected.
+ // Load a special value and return.
+ __ bind(&wrong_exception);
+ __ mov(r0, Operand(11223344));
+ __ mov(pc, Operand(lr));
- CodeDesc desc;
- assm.GetCode(&desc);
- Object* code = Heap::CreateCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked();
- CHECK(code->IsCode());
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = Heap::CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Object>(Heap::undefined_value()))->ToObjectChecked();
+ CHECK(code->IsCode());
#ifdef DEBUG
- Code::cast(code)->Print();
+ Code::cast(code)->Print();
#endif
- F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
- int res = reinterpret_cast<int>(
- CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
- ::printf("res = %d\n", res);
- CHECK_EQ(expected, res);
+ F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
+ int res = reinterpret_cast<int>(
+ CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ ::printf("res = %d\n", res);
+ CHECK_EQ(expected, res);
+ }
}
TEST(7) {
// Test vfp rounding modes.
- // See ARM DDI 0406B Page A2-29.
- enum FPSCRRoungingMode {
- RN, // Round to Nearest.
- RP, // Round towards Plus Infinity.
- RM, // Round towards Minus Infinity.
- RZ // Round towards zero.
- };
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
-
- TestRoundingMode(RZ, 0.5, 0);
- TestRoundingMode(RZ, -0.5, 0);
- TestRoundingMode(RZ, 123.7, 123);
- TestRoundingMode(RZ, -123.7, -123);
- TestRoundingMode(RZ, 123456.2, 123456);
- TestRoundingMode(RZ, -123456.2, -123456);
-
- TestRoundingMode(RM, 0.5, 0);
- TestRoundingMode(RM, -0.5, -1);
- TestRoundingMode(RM, 123.7, 123);
- TestRoundingMode(RM, -123.7, -124);
- TestRoundingMode(RM, 123456.2, 123456);
- TestRoundingMode(RM, -123456.2, -123457);
- }
+ // s32_f64 (double to signed integer).
+
+ TestRoundingMode(s32_f64, RN, 0, 0);
+ TestRoundingMode(s32_f64, RN, 0.5, 0);
+ TestRoundingMode(s32_f64, RN, -0.5, 0);
+ TestRoundingMode(s32_f64, RN, 1.5, 2);
+ TestRoundingMode(s32_f64, RN, -1.5, -2);
+ TestRoundingMode(s32_f64, RN, 123.7, 124);
+ TestRoundingMode(s32_f64, RN, -123.7, -124);
+ TestRoundingMode(s32_f64, RN, 123456.2, 123456);
+ TestRoundingMode(s32_f64, RN, -123456.2, -123456);
+ TestRoundingMode(s32_f64, RN, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(s32_f64, RN, (kMaxInt + 0.49), kMaxInt);
+ TestRoundingMode(s32_f64, RN, (kMaxInt + 1.0), kMaxInt, true);
+ TestRoundingMode(s32_f64, RN, (kMaxInt + 0.5), kMaxInt, true);
+ TestRoundingMode(s32_f64, RN, static_cast<double>(kMinInt), kMinInt);
+ TestRoundingMode(s32_f64, RN, (kMinInt - 0.5), kMinInt);
+ TestRoundingMode(s32_f64, RN, (kMinInt - 1.0), kMinInt, true);
+ TestRoundingMode(s32_f64, RN, (kMinInt - 0.51), kMinInt, true);
+
+ TestRoundingMode(s32_f64, RM, 0, 0);
+ TestRoundingMode(s32_f64, RM, 0.5, 0);
+ TestRoundingMode(s32_f64, RM, -0.5, -1);
+ TestRoundingMode(s32_f64, RM, 123.7, 123);
+ TestRoundingMode(s32_f64, RM, -123.7, -124);
+ TestRoundingMode(s32_f64, RM, 123456.2, 123456);
+ TestRoundingMode(s32_f64, RM, -123456.2, -123457);
+ TestRoundingMode(s32_f64, RM, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(s32_f64, RM, (kMaxInt + 0.5), kMaxInt);
+ TestRoundingMode(s32_f64, RM, (kMaxInt + 1.0), kMaxInt, true);
+ TestRoundingMode(s32_f64, RM, static_cast<double>(kMinInt), kMinInt);
+ TestRoundingMode(s32_f64, RM, (kMinInt - 0.5), kMinInt, true);
+ TestRoundingMode(s32_f64, RM, (kMinInt + 0.5), kMinInt);
+
+ TestRoundingMode(s32_f64, RZ, 0, 0);
+ TestRoundingMode(s32_f64, RZ, 0.5, 0);
+ TestRoundingMode(s32_f64, RZ, -0.5, 0);
+ TestRoundingMode(s32_f64, RZ, 123.7, 123);
+ TestRoundingMode(s32_f64, RZ, -123.7, -123);
+ TestRoundingMode(s32_f64, RZ, 123456.2, 123456);
+ TestRoundingMode(s32_f64, RZ, -123456.2, -123456);
+ TestRoundingMode(s32_f64, RZ, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(s32_f64, RZ, (kMaxInt + 0.5), kMaxInt);
+ TestRoundingMode(s32_f64, RZ, (kMaxInt + 1.0), kMaxInt, true);
+ TestRoundingMode(s32_f64, RZ, static_cast<double>(kMinInt), kMinInt);
+ TestRoundingMode(s32_f64, RZ, (kMinInt - 0.5), kMinInt);
+ TestRoundingMode(s32_f64, RZ, (kMinInt - 1.0), kMinInt, true);
+
+
+ // u32_f64 (double to unsigned integer).
+
+ // Negative values.
+ TestRoundingMode(u32_f64, RN, -0.5, 0);
+ TestRoundingMode(u32_f64, RN, -123456.7, 0, true);
+ TestRoundingMode(u32_f64, RN, static_cast<double>(kMinInt), 0, true);
+ TestRoundingMode(u32_f64, RN, kMinInt - 1.0, 0, true);
+
+ TestRoundingMode(u32_f64, RM, -0.5, 0, true);
+ TestRoundingMode(u32_f64, RM, -123456.7, 0, true);
+ TestRoundingMode(u32_f64, RM, static_cast<double>(kMinInt), 0, true);
+ TestRoundingMode(u32_f64, RM, kMinInt - 1.0, 0, true);
+
+ TestRoundingMode(u32_f64, RZ, -0.5, 0);
+ TestRoundingMode(u32_f64, RZ, -123456.7, 0, true);
+ TestRoundingMode(u32_f64, RZ, static_cast<double>(kMinInt), 0, true);
+ TestRoundingMode(u32_f64, RZ, kMinInt - 1.0, 0, true);
+
+ // Positive values.
+ // kMaxInt is the maximum *signed* integer: 0x7fffffff.
+ static const uint32_t kMaxUInt = 0xffffffffu;
+ TestRoundingMode(u32_f64, RZ, 0, 0);
+ TestRoundingMode(u32_f64, RZ, 0.5, 0);
+ TestRoundingMode(u32_f64, RZ, 123.7, 123);
+ TestRoundingMode(u32_f64, RZ, 123456.2, 123456);
+ TestRoundingMode(u32_f64, RZ, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(u32_f64, RZ, (kMaxInt + 0.5), kMaxInt);
+ TestRoundingMode(u32_f64, RZ, (kMaxInt + 1.0),
+ static_cast<uint32_t>(kMaxInt) + 1);
+ TestRoundingMode(u32_f64, RZ, (kMaxUInt + 0.5), kMaxUInt);
+ TestRoundingMode(u32_f64, RZ, (kMaxUInt + 1.0), kMaxUInt, true);
+
+ TestRoundingMode(u32_f64, RM, 0, 0);
+ TestRoundingMode(u32_f64, RM, 0.5, 0);
+ TestRoundingMode(u32_f64, RM, 123.7, 123);
+ TestRoundingMode(u32_f64, RM, 123456.2, 123456);
+ TestRoundingMode(u32_f64, RM, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(u32_f64, RM, (kMaxInt + 0.5), kMaxInt);
+ TestRoundingMode(u32_f64, RM, (kMaxInt + 1.0),
+ static_cast<uint32_t>(kMaxInt) + 1);
+ TestRoundingMode(u32_f64, RM, (kMaxUInt + 0.5), kMaxUInt);
+ TestRoundingMode(u32_f64, RM, (kMaxUInt + 1.0), kMaxUInt, true);
+
+ TestRoundingMode(u32_f64, RN, 0, 0);
+ TestRoundingMode(u32_f64, RN, 0.5, 0);
+ TestRoundingMode(u32_f64, RN, 1.5, 2);
+ TestRoundingMode(u32_f64, RN, 123.7, 124);
+ TestRoundingMode(u32_f64, RN, 123456.2, 123456);
+ TestRoundingMode(u32_f64, RN, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(u32_f64, RN, (kMaxInt + 0.49), kMaxInt);
+ TestRoundingMode(u32_f64, RN, (kMaxInt + 0.5),
+ static_cast<uint32_t>(kMaxInt) + 1);
+ TestRoundingMode(u32_f64, RN, (kMaxUInt + 0.49), kMaxUInt);
+ TestRoundingMode(u32_f64, RN, (kMaxUInt + 0.5), kMaxUInt, true);
+ TestRoundingMode(u32_f64, RN, (kMaxUInt + 1.0), kMaxUInt, true);
}
#undef __