MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
MemOperand exponent_operand(MemOperand(input_reg,
- double_offset + kPointerSize));
+ double_offset + kDoubleSize / 2));
Register scratch1;
{
};
-// Get the integer part of a heap number.
-// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
-void IntegerConvert(MacroAssembler* masm,
- Register result,
- Register source) {
- // Result may be rcx. If result and source are the same register, source will
- // be overwritten.
- ASSERT(!result.is(rdi) && !result.is(rbx));
- // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
- // cvttsd2si (32-bit version) directly.
- Register double_exponent = rbx;
- Register double_value = rdi;
- Label done, exponent_63_plus;
- // Get double and extract exponent.
- __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
- // Clear result preemptively, in case we need to return zero.
- __ xorl(result, result);
- __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
- // Double to remove sign bit, shift exponent down to least significant bits.
- // and subtract bias to get the unshifted, unbiased exponent.
- __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
- __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
- __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
- // Check whether the exponent is too big for a 63 bit unsigned integer.
- __ cmpl(double_exponent, Immediate(63));
- __ j(above_equal, &exponent_63_plus, Label::kNear);
- // Handle exponent range 0..62.
- __ cvttsd2siq(result, xmm0);
- __ jmp(&done, Label::kNear);
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Register input_reg = this->source();
+ Register final_result_reg = this->destination();
+ ASSERT(is_truncating());
- __ bind(&exponent_63_plus);
- // Exponent negative or 63+.
- __ cmpl(double_exponent, Immediate(83));
- // If exponent negative or above 83, number contains no significant bits in
- // the range 0..2^31, so result is zero, and rcx already holds zero.
- __ j(above, &done, Label::kNear);
-
- // Exponent in rage 63..83.
- // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
- // the least significant exponent-52 bits.
-
- // Negate low bits of mantissa if value is negative.
- __ addq(double_value, double_value); // Move sign bit to carry.
- __ sbbl(result, result); // And convert carry to -1 in result register.
- // if scratch2 is negative, do (scratch2-1)^-1, otherwise (scratch2-0)^0.
- __ addl(double_value, result);
- // Do xor in opposite directions depending on where we want the result
- // (depending on whether result is rcx or not).
-
- if (result.is(rcx)) {
- __ xorl(double_value, result);
- // Left shift mantissa by (exponent - mantissabits - 1) to save the
- // bits that have positional values below 2^32 (the extra -1 comes from the
- // doubling done above to move the sign bit into the carry flag).
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(double_value);
- __ movl(result, double_value);
- } else {
- // As the then-branch, but move double-value to result before shifting.
- __ xorl(result, double_value);
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(result);
- }
+ Label check_negative, process_64_bits, done;
- __ bind(&done);
+ int double_offset = offset();
+
+ // Account for return address and saved regs if input is rsp.
+ if (input_reg.is(rsp)) double_offset += 3 * kPointerSize;
+
+ MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
+ MemOperand exponent_operand(MemOperand(input_reg,
+ double_offset + kDoubleSize / 2));
+
+ Register scratch1;
+ Register scratch_candidates[3] = { rbx, rdx, rdi };
+ for (int i = 0; i < 3; i++) {
+ scratch1 = scratch_candidates[i];
+ if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break;
+ }
+
+ // Since we must use rcx for shifts below, use some other register (rax)
+ // to calculate the result if rcx is the requested return register.
+ Register result_reg = final_result_reg.is(rcx) ? rax : final_result_reg;
+ // Save rcx if it isn't the return register and therefore volatile, or if it
+ // is the return register, then save the temp register we use in its stead
+ // for the result.
+ Register save_reg = final_result_reg.is(rcx) ? rax : rcx;
+ __ push(scratch1);
+ __ push(save_reg);
+
+ bool stash_exponent_copy = !input_reg.is(rsp);
+ __ movl(scratch1, mantissa_operand);
+ __ movsd(xmm0, mantissa_operand);
+ __ movl(rcx, exponent_operand);
+ if (stash_exponent_copy) __ push(rcx);
+
+ __ andl(rcx, Immediate(HeapNumber::kExponentMask));
+ __ shrl(rcx, Immediate(HeapNumber::kExponentShift));
+ __ leal(result_reg, MemOperand(rcx, -HeapNumber::kExponentBias));
+ __ cmpl(result_reg, Immediate(HeapNumber::kMantissaBits));
+ __ j(below, &process_64_bits);
+
+ // Result is entirely in lower 32-bits of mantissa
+ int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
+ __ subl(rcx, Immediate(delta));
+ __ xorl(result_reg, result_reg);
+ __ cmpl(rcx, Immediate(31));
+ __ j(above, &done);
+ __ shll_cl(scratch1);
+ __ jmp(&check_negative);
+
+ __ bind(&process_64_bits);
+ __ cvttsd2siq(result_reg, xmm0);
+ __ jmp(&done, Label::kNear);
+
+ // If the double was negative, negate the integer result.
+ __ bind(&check_negative);
+ __ movl(result_reg, scratch1);
+ __ negl(result_reg);
+ if (stash_exponent_copy) {
+ __ cmpl(MemOperand(rsp, 0), Immediate(0));
+ } else {
+ __ cmpl(exponent_operand, Immediate(0));
+ }
+ __ cmovl(greater, result_reg, scratch1);
+
+ // Restore registers
+ __ bind(&done);
+ if (stash_exponent_copy) {
+ __ addq(rsp, Immediate(kDoubleSize));
+ }
+ if (!final_result_reg.is(result_reg)) {
+ ASSERT(final_result_reg.is(rcx));
+ __ movl(final_result_reg, result_reg);
+ }
+ __ pop(save_reg);
+ __ pop(scratch1);
+ __ ret(0);
}
__ JumpIfSmi(rax, &rax_is_smi);
__ bind(&rax_is_object);
- IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
+ DoubleToIStub stub1(rax, rcx, HeapNumber::kValueOffset - kHeapObjectTag,
+ true);
+ __ call(stub1.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+
__ jmp(&done);
__ bind(&rdx_is_object);
- IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
+ DoubleToIStub stub2(rdx, rdx, HeapNumber::kValueOffset - kHeapObjectTag,
+ true);
+ __ call(stub2.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
__ JumpIfNotSmi(rax, &rax_is_object);
+
__ bind(&rax_is_smi);
__ SmiToInteger32(rcx, rax);
__ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg1);
// Get the untagged integer version of the rdx heap number in rcx.
- IntegerConvert(masm, r8, rdx);
+ DoubleToIStub stub1(rdx, r8, HeapNumber::kValueOffset - kHeapObjectTag,
+ true);
+ __ call(stub1.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
// Here r8 has the untagged integer, rax has a Smi or a heap number.
__ bind(&load_arg2);
__ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg2);
// Get the untagged integer version of the rax heap number in rcx.
- IntegerConvert(masm, rcx, rax);
+ DoubleToIStub stub2(rax, rcx, HeapNumber::kValueOffset - kHeapObjectTag,
+ true);
+ __ call(stub2.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+
__ bind(&done);
__ movl(rax, r8);
}
Label first_non_object;
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(below, &first_non_object, Label::kNear);
- // Return non-zero (eax (not rax) is not zero)
+ // Return non-zero (rax is not zero)
Label return_not_equal;
STATIC_ASSERT(kHeapObjectTag != 0);
__ bind(&return_not_equal);
masm, &check_for_strings, rdx, kScratchRegister);
// We've already checked for object identity, so if both operands are
- // internalized strings they aren't equal. Register eax (not rax) already
+ // internalized strings they aren't equal. Register rax already
// holds a non-zero value, which indicates not equal, so just return.
__ ret(0);
}
['v8_target_arch=="ia32"', {
'sources': [
'test-assembler-ia32.cc',
+ 'test-code-stubs.cc',
'test-code-stubs-ia32.cc',
'test-disasm-ia32.cc',
'test-log-stack-tracer.cc'
['v8_target_arch=="x64"', {
'sources': [
'test-assembler-x64.cc',
+ 'test-code-stubs.cc',
+ 'test-code-stubs-x64.cc',
'test-macro-assembler-x64.cc',
'test-log-stack-tracer.cc'
],
#include "cctest.h"
#include "code-stubs.h"
+#include "test-code-stubs.h"
#include "factory.h"
#include "macro-assembler.h"
#include "platform.h"
-#if __GNUC__
-#define STDCALL __attribute__((stdcall))
-#else
-#define STDCALL __stdcall
-#endif
-
using namespace v8::internal;
-
-typedef int32_t STDCALL ConvertDToIFuncType(double input);
-typedef ConvertDToIFuncType* ConvertDToIFunc;
-
-
-int STDCALL ConvertDToICVersion(double d) {
- Address double_ptr = reinterpret_cast<Address>(&d);
- uint32_t exponent_bits = Memory::uint32_at(double_ptr + kDoubleSize / 2);
- int32_t shifted_mask = static_cast<int32_t>(Double::kExponentMask >> 32);
- int32_t exponent = (((exponent_bits & shifted_mask) >>
- (Double::kPhysicalSignificandSize - 32)) -
- HeapNumber::kExponentBias);
- uint32_t unsigned_exponent = static_cast<uint32_t>(exponent);
- int result = 0;
- uint32_t max_exponent =
- static_cast<uint32_t>(Double::kPhysicalSignificandSize);
- if (unsigned_exponent >= max_exponent) {
- if ((exponent - Double::kPhysicalSignificandSize) < 32) {
- result = Memory::uint32_at(double_ptr) <<
- (exponent - Double::kPhysicalSignificandSize);
- }
- } else {
- uint64_t big_result =
- (BitCast<uint64_t>(d) & Double::kSignificandMask) | Double::kHiddenBit;
- big_result = big_result >> (Double::kPhysicalSignificandSize - exponent);
- result = static_cast<uint32_t>(big_result);
- }
- if (static_cast<int32_t>(exponent_bits) < 0) {
- return (0 - result);
- } else {
- return result;
- }
-}
-
-
-void RunOneTruncationTestWithTest(ConvertDToIFunc func,
- double from,
- double raw) {
- uint64_t to = static_cast<int64_t>(raw);
- int result = (*func)(from);
- CHECK_EQ(static_cast<int>(to), result);
-}
-
-
-// #define NaN and Infinity so that it's possible to cut-and-paste these tests
-// directly to a .js file and run them.
-#define NaN (OS::nan_value())
-#define Infinity (std::numeric_limits<double>::infinity())
-#define RunOneTruncationTest(p1, p2) RunOneTruncationTestWithTest(func, p1, p2)
-
-void RunAllTruncationTests(ConvertDToIFunc func) {
- RunOneTruncationTest(0, 0);
- RunOneTruncationTest(0.5, 0);
- RunOneTruncationTest(-0.5, 0);
- RunOneTruncationTest(1.5, 1);
- RunOneTruncationTest(-1.5, -1);
- RunOneTruncationTest(5.5, 5);
- RunOneTruncationTest(-5.0, -5);
- RunOneTruncationTest(NaN, 0);
- RunOneTruncationTest(Infinity, 0);
- RunOneTruncationTest(-NaN, 0);
- RunOneTruncationTest(-Infinity, 0);
-
- RunOneTruncationTest(4.5036e+15, 0x1635E000);
- RunOneTruncationTest(-4.5036e+15, -372629504);
-
- RunOneTruncationTest(4503603922337791.0, -1);
- RunOneTruncationTest(-4503603922337791.0, 1);
- RunOneTruncationTest(4503601774854143.0, 2147483647);
- RunOneTruncationTest(-4503601774854143.0, -2147483647);
- RunOneTruncationTest(9007207844675582.0, -2);
- RunOneTruncationTest(-9007207844675582.0, 2);
- RunOneTruncationTest(2.4178527921507624e+24, -536870912);
- RunOneTruncationTest(-2.4178527921507624e+24, 536870912);
- RunOneTruncationTest(2.417853945072267e+24, -536870912);
- RunOneTruncationTest(-2.417853945072267e+24, 536870912);
-
- RunOneTruncationTest(4.8357055843015248e+24, -1073741824);
- RunOneTruncationTest(-4.8357055843015248e+24, 1073741824);
- RunOneTruncationTest(4.8357078901445341e+24, -1073741824);
- RunOneTruncationTest(-4.8357078901445341e+24, 1073741824);
-
- RunOneTruncationTest(9.6714111686030497e+24, -2147483648.0);
- RunOneTruncationTest(-9.6714111686030497e+24, -2147483648.0);
- RunOneTruncationTest(9.6714157802890681e+24, -2147483648.0);
- RunOneTruncationTest(-9.6714157802890681e+24, -2147483648.0);
-}
-
-#undef NaN
-#undef Infinity
-#undef RunOneTruncationTest
-
#define __ assm.
ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "code-stubs.h"
+#include "test-code-stubs.h"
+#include "factory.h"
+#include "macro-assembler.h"
+#include "platform.h"
+
+using namespace v8::internal;
+
+
+#define __ assm.
+
+ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
+ Register source_reg,
+ Register destination_reg) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles(isolate);
+ MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
+ assm.set_allow_stub_calls(false);
+ int offset =
+ source_reg.is(rsp) ? 0 : (HeapNumber::kValueOffset - kSmiTagSize);
+ DoubleToIStub stub(source_reg, destination_reg, offset, true);
+ byte* start = stub.GetCode(isolate)->instruction_start();
+
+ __ push(rbx);
+ __ push(rcx);
+ __ push(rdx);
+ __ push(rsi);
+ __ push(rdi);
+
+ if (!source_reg.is(rsp)) {
+ __ lea(source_reg, MemOperand(rsp, -8 * kPointerSize - offset));
+ }
+
+ int param_offset = 7 * kPointerSize;
+ // Save registers make sure they don't get clobbered.
+ int reg_num = 0;
+ for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
+ Register reg = Register::from_code(reg_num);
+ if (!reg.is(rsp) && !reg.is(rbp) && !reg.is(destination_reg)) {
+ __ push(reg);
+ param_offset += kPointerSize;
+ }
+ }
+
+ // Re-push the double argument
+ __ subq(rsp, Immediate(kDoubleSize));
+ __ movsd(MemOperand(rsp, 0), xmm0);
+
+ // Call through to the actual stub
+ __ Call(start, RelocInfo::EXTERNAL_REFERENCE);
+
+ __ addq(rsp, Immediate(kDoubleSize));
+
+ // Make sure no registers have been unexpectedly clobbered
+ for (--reg_num; reg_num >= 0; --reg_num) {
+ Register reg = Register::from_code(reg_num);
+ if (!reg.is(rsp) && !reg.is(rbp) && !reg.is(destination_reg)) {
+ __ cmpq(reg, MemOperand(rsp, 0));
+ __ Assert(equal, "register was clobbered");
+ __ addq(rsp, Immediate(kPointerSize));
+ }
+ }
+
+ __ movq(rax, destination_reg);
+
+ __ pop(rdi);
+ __ pop(rsi);
+ __ pop(rdx);
+ __ pop(rcx);
+ __ pop(rbx);
+
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ return reinterpret_cast<ConvertDToIFunc>(
+ reinterpret_cast<intptr_t>(buffer));
+}
+
+#undef __
+
+
+static Isolate* GetIsolateFrom(LocalContext* context) {
+ return reinterpret_cast<Isolate*>((*context)->GetIsolate());
+}
+
+
+TEST(ConvertDToI) {
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ HandleScope scope(isolate);
+
+#if DEBUG
+ // Verify that the tests actually work with the C version. In the release
+ // code, the compiler optimizes it away because it's all constant, but does it
+ // wrong, triggering an assert on gcc.
+ RunAllTruncationTests(&ConvertDToICVersion);
+#endif
+
+ Register source_registers[] = {rsp, rax, rbx, rcx, rdx, rsi, rdi, r8, r9};
+ Register dest_registers[] = {rax, rbx, rcx, rdx, rsi, rdi, r8, r9};
+
+ // Iterate over the element counts; sizeof(*array) would only give
+ // sizeof(Register) and skip most register combinations.
+ for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) {
+ for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
+ RunAllTruncationTests(
+ MakeConvertDToIFuncTrampoline(isolate,
+ source_registers[s],
+ dest_registers[d]));
+ }
+ }
+}
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include <limits>
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "code-stubs.h"
+#include "test-code-stubs.h"
+#include "factory.h"
+#include "macro-assembler.h"
+#include "platform.h"
+
+using namespace v8::internal;
+
+
+int STDCALL ConvertDToICVersion(double d) {
+ Address double_ptr = reinterpret_cast<Address>(&d);
+ uint32_t exponent_bits = Memory::uint32_at(double_ptr + kDoubleSize / 2);
+ int32_t shifted_mask = static_cast<int32_t>(Double::kExponentMask >> 32);
+ int32_t exponent = (((exponent_bits & shifted_mask) >>
+ (Double::kPhysicalSignificandSize - 32)) -
+ HeapNumber::kExponentBias);
+ uint32_t unsigned_exponent = static_cast<uint32_t>(exponent);
+ int result = 0;
+ uint32_t max_exponent =
+ static_cast<uint32_t>(Double::kPhysicalSignificandSize);
+ if (unsigned_exponent >= max_exponent) {
+ if ((exponent - Double::kPhysicalSignificandSize) < 32) {
+ result = Memory::uint32_at(double_ptr) <<
+ (exponent - Double::kPhysicalSignificandSize);
+ }
+ } else {
+ uint64_t big_result =
+ (BitCast<uint64_t>(d) & Double::kSignificandMask) | Double::kHiddenBit;
+ big_result = big_result >> (Double::kPhysicalSignificandSize - exponent);
+ result = static_cast<uint32_t>(big_result);
+ }
+ if (static_cast<int32_t>(exponent_bits) < 0) {
+ return (0 - result);
+ } else {
+ return result;
+ }
+}
+
+
+void RunOneTruncationTestWithTest(ConvertDToIFunc func,
+ double from,
+ double raw) {
+ uint64_t to = static_cast<int64_t>(raw);
+ int result = (*func)(from);
+ CHECK_EQ(static_cast<int>(to), result);
+}
+
+
+// #define NaN and Infinity so that it's possible to cut-and-paste these tests
+// directly to a .js file and run them.
+#define NaN (OS::nan_value())
+#define Infinity (std::numeric_limits<double>::infinity())
+#define RunOneTruncationTest(p1, p2) RunOneTruncationTestWithTest(func, p1, p2)
+
+void RunAllTruncationTests(ConvertDToIFunc func) {
+ RunOneTruncationTest(0, 0);
+ RunOneTruncationTest(0.5, 0);
+ RunOneTruncationTest(-0.5, 0);
+ RunOneTruncationTest(1.5, 1);
+ RunOneTruncationTest(-1.5, -1);
+ RunOneTruncationTest(5.5, 5);
+ RunOneTruncationTest(-5.0, -5);
+ RunOneTruncationTest(NaN, 0);
+ RunOneTruncationTest(Infinity, 0);
+ RunOneTruncationTest(-NaN, 0);
+ RunOneTruncationTest(-Infinity, 0);
+
+ RunOneTruncationTest(4.5036e+15, 0x1635E000);
+ RunOneTruncationTest(-4.5036e+15, -372629504);
+
+ RunOneTruncationTest(4503603922337791.0, -1);
+ RunOneTruncationTest(-4503603922337791.0, 1);
+ RunOneTruncationTest(4503601774854143.0, 2147483647);
+ RunOneTruncationTest(-4503601774854143.0, -2147483647);
+ RunOneTruncationTest(9007207844675582.0, -2);
+ RunOneTruncationTest(-9007207844675582.0, 2);
+
+ RunOneTruncationTest(2.4178527921507624e+24, -536870912);
+ RunOneTruncationTest(-2.4178527921507624e+24, 536870912);
+ RunOneTruncationTest(2.417853945072267e+24, -536870912);
+ RunOneTruncationTest(-2.417853945072267e+24, 536870912);
+
+ RunOneTruncationTest(4.8357055843015248e+24, -1073741824);
+ RunOneTruncationTest(-4.8357055843015248e+24, 1073741824);
+ RunOneTruncationTest(4.8357078901445341e+24, -1073741824);
+ RunOneTruncationTest(-4.8357078901445341e+24, 1073741824);
+
+ RunOneTruncationTest(9.6714111686030497e+24, -2147483648.0);
+ RunOneTruncationTest(-9.6714111686030497e+24, -2147483648.0);
+ RunOneTruncationTest(9.6714157802890681e+24, -2147483648.0);
+ RunOneTruncationTest(-9.6714157802890681e+24, -2147483648.0);
+}
+
+#undef NaN
+#undef Infinity
+#undef RunOneTruncationTest
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_TEST_CODE_STUBS_H_
+#define V8_TEST_CODE_STUBS_H_
+
+#if V8_TARGET_ARCH_IA32
+#if __GNUC__
+#define STDCALL __attribute__((stdcall))
+#else
+#define STDCALL __stdcall
+#endif
+#else
+#define STDCALL
+#endif
+
+typedef int32_t STDCALL ConvertDToIFuncType(double input);
+typedef ConvertDToIFuncType* ConvertDToIFunc;
+
+int STDCALL ConvertDToICVersion(double d);
+
+void RunAllTruncationTests(ConvertDToIFunc func);
+
+#endif