}
+Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
+ Register reg3, Register reg4) {
+ CPURegList regs(reg1, reg2, reg3, reg4);
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (regs.IncludesAliasOf(candidate)) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return NoReg;
+}
+
+
bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
const CPURegister& reg3, const CPURegister& reg4,
const CPURegister& reg5, const CPURegister& reg6,
#undef ALIAS_REGISTER
+
+Register GetAllocatableRegisterThatIsNotOneOf(Register reg1,
+ Register reg2 = NoReg,
+ Register reg3 = NoReg,
+ Register reg4 = NoReg);
+
+
// AreAliased returns true if any of the named registers overlap. Arguments set
// to NoReg are ignored. The system stack pointer may be specified.
bool AreAliased(const CPURegister& reg1,
}
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Label done;
+ Register input = source();
+ Register result = destination();
+ ASSERT(is_truncating());
+
+ ASSERT(result.Is64Bits());
+ ASSERT(jssp.Is(masm->StackPointer()));
+
+ int double_offset = offset();
+
+ DoubleRegister double_scratch = d0; // Only used if !skip_fastpath().
+ Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result);
+ Register scratch2 =
+ GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1);
+
+ __ Push(scratch1, scratch2);
+ // Account for saved regs if input is jssp.
+ if (input.is(jssp)) double_offset += 2 * kPointerSize;
+
+ if (!skip_fastpath()) {
+ __ Push(double_scratch);
+ if (input.is(jssp)) double_offset += 1 * kDoubleSize;
+ __ Ldr(double_scratch, MemOperand(input, double_offset));
+ // Try to convert with an FPU convert instruction. This handles all
+ // non-saturating cases.
+ __ TryInlineTruncateDoubleToI(result, double_scratch, &done);
+ __ Fmov(result, double_scratch);
+ } else {
+ __ Ldr(result, MemOperand(input, double_offset));
+ }
+
+ // If we reach here, we need to manually convert the input to an int32.
+
+ // Extract the exponent.
+ Register exponent = scratch1;
+ __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
+ HeapNumber::kExponentBits);
+
+ // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
+ // the mantissa gets shifted completely out of the int32_t result.
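+ // (The comparison below uses the biased exponent. Any double whose unbiased
+ // exponent is at least 84 is a multiple of 2^32, so it truncates to 0.)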
+ __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
+ __ CzeroX(result, ge);
+ __ B(ge, &done);
+
+ // The Fcvtzs sequence handles all cases except where the conversion causes
+ // signed overflow in the int64_t target. Since we've already handled
+ // exponents >= 84, we can guarantee that 63 <= exponent < 84.
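+ // For example, 0xfffffffffffff800 (2^64 - 2^11, unbiased exponent 63)
+ // saturates Fcvtzs, yet its low 32 bits are 0xfffff800 rather than 0.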
+
+ if (masm->emit_debug_code()) {
+ __ Cmp(exponent, HeapNumber::kExponentBias + 63);
+ // Exponents less than this should have been handled by the Fcvt case.
+ __ Check(ge, kUnexpectedValue);
+ }
+
+ // Isolate the mantissa bits, and set the implicit '1'.
+ Register mantissa = scratch2;
+ __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
+ __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);
+
+ // Negate the mantissa if necessary.
+ __ Tst(result, kXSignMask);
+ __ Cneg(mantissa, mantissa, ne);
+
+ // Shift the mantissa bits into the correct place. We know we have to shift
+ // them left, because exponent >= 63 >= kMantissaBits.
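+ // After the Sub below, 'exponent' holds the shift amount, which lies in the
+ // range [11, 32) because 63 <= (unbiased) exponent < 84.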
+ __ Sub(exponent, exponent,
+ HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
+ __ Lsl(result, mantissa, exponent);
+
+ __ Bind(&done);
+ if (!skip_fastpath()) {
+ __ Pop(double_scratch);
+ }
+ __ Pop(scratch2, scratch1);
+ __ Ret();
+}
+
+
// See call site for description.
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Register left,
LOperand* value = UseRegister(instr->value());
if (instr->CanTruncateToInt32()) {
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
LTruncateDoubleToIntOrSmi* result =
- new(zone()) LTruncateDoubleToIntOrSmi(value, temp1, temp2);
+ new(zone()) LTruncateDoubleToIntOrSmi(value);
return DefineAsRegister(result);
} else {
LDoubleToIntOrSmi* result = new(zone()) LDoubleToIntOrSmi(value);
class LTruncateDoubleToIntOrSmi V8_FINAL
- : public LTemplateInstruction<1, 1, 2> {
+ : public LTemplateInstruction<1, 1, 0> {
public:
- LTruncateDoubleToIntOrSmi(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ explicit LTruncateDoubleToIntOrSmi(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(TruncateDoubleToIntOrSmi,
"truncate-double-to-int-or-smi")
__ JumpIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, &check_bools);
// A heap number: load value and convert to int32 using truncating function.
- __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
- __ ECMA262ToInt32(output, dbl_scratch1, scratch1, scratch2);
+ __ TruncateHeapNumberToI(output, input);
__ B(&done);
__ Bind(&check_bools);
void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
- __ ECMA262ToInt32(result, input,
- ToRegister(instr->temp1()),
- ToRegister(instr->temp2()),
- instr->tag_result()
- ? MacroAssembler::SMI
- : MacroAssembler::INT32_IN_W);
+ __ TruncateDoubleToI(result, input);
+ if (instr->tag_result()) {
+ __ SmiTag(result, result);
+ }
}
}
-void MacroAssembler::ECMA262ToInt32(Register result,
- DoubleRegister input,
- Register scratch1,
- Register scratch2,
- ECMA262ToInt32Result format) {
- ASSERT(!AreAliased(result, scratch1, scratch2));
- ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister double_input,
+ Label* done) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiValueSize == 32);
- Label done, tag, manual_conversion;
-
- // 1. Try to convert with a FPU convert instruction. It's trivial to compute
- // the modulo operation on an integer register so we convert to a 64-bit
- // integer, then find the 32-bit result from that.
+ // Try to convert with an FPU convert instruction. It's trivial to compute
+ // the modulo operation on an integer register so we convert to a 64-bit
+ // integer, then find the 32-bit result from that.
//
// Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
// when the double is out of range. NaNs and infinities will be converted to 0
// (as ECMA-262 requires).
- Fcvtzs(result, input);
+ Fcvtzs(result, double_input);
// The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
// representable using a double, so if the result is one of those then we know
// the conversion saturated. Saturation is easy to detect because adding or
// subtracting 1 will cause signed overflow.
Cmp(result, 1);
Ccmp(result, -1, VFlag, vc);
- B(vc, &tag);
- // 2. Manually convert the input to an int32.
- Fmov(result, input);
+ B(vc, done);
+}
- // Extract the exponent.
- Register exponent = scratch1;
- Ubfx(exponent, result, HeapNumber::kMantissaBits, HeapNumber::kExponentBits);
- // It the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
- // the mantissa gets shifted completely out of the int32_t result.
- Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
- CzeroX(result, ge);
- B(ge, &done);
+void MacroAssembler::TruncateDoubleToI(Register result,
+ DoubleRegister double_input) {
+ Label done;
+ ASSERT(jssp.Is(StackPointer()));
- // The Fcvtzs sequence handles all cases except where the conversion causes
- // signed overflow in the int64_t target. Since we've already handled
- // exponents >= 84, we can guarantee that 63 <= exponent < 84.
+ TryInlineTruncateDoubleToI(result, double_input, &done);
- if (emit_debug_code()) {
- Cmp(exponent, HeapNumber::kExponentBias + 63);
- // Exponents less than this should have been handled by the Fcvt case.
- Check(ge, kUnexpectedValue);
- }
-
- // Isolate the mantissa bits, and set the implicit '1'.
- Register mantissa = scratch2;
- Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
- Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);
-
- // Negate the mantissa if necessary.
- Tst(result, kXSignMask);
- Cneg(mantissa, mantissa, ne);
-
- // Shift the mantissa bits in the correct place. We know that we have to shift
- // it left here, because exponent >= 63 >= kMantissaBits.
- Sub(exponent, exponent,
- HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
- Lsl(result, mantissa, exponent);
-
- Bind(&tag);
- switch (format) {
- case INT32_IN_W:
- // There is nothing to do; the upper 32 bits are undefined.
- if (emit_debug_code()) {
- __ Mov(scratch1, 0x55555555);
- __ Bfi(result, scratch1, 32, 32);
- }
- break;
- case INT32_IN_X:
- Sxtw(result, result);
- break;
- case SMI:
- SmiTag(result);
- break;
- }
+ // If we fell through, the inline version didn't succeed, so call the stub instead.
+ Push(lr);
+ Push(double_input); // Put input on stack.
+
+ DoubleToIStub stub(jssp,
+ result,
+ 0,
+ true, // is_truncating
+ true); // skip_fastpath
+ CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
+
+ Drop(1, kDoubleSize); // Drop the double input from the stack.
+ Pop(lr);
Bind(&done);
+
+ // TODO(rmcilroy): Remove this Sxtw once the following bug is fixed:
+ // https://code.google.com/p/v8/issues/detail?id=3149
+ Sxtw(result, result.W());
}
-void MacroAssembler::HeapNumberECMA262ToInt32(Register result,
- Register heap_number,
- Register scratch1,
- Register scratch2,
- DoubleRegister double_scratch,
- ECMA262ToInt32Result format) {
- if (emit_debug_code()) {
- // Verify we indeed have a HeapNumber.
- Label ok;
- JumpIfHeapNumber(heap_number, &ok);
- Abort(kExpectedHeapNumber);
- Bind(&ok);
- }
+void MacroAssembler::TruncateHeapNumberToI(Register result,
+ Register object) {
+ Label done;
+ ASSERT(!result.is(object));
+ ASSERT(jssp.Is(StackPointer()));
+
+ Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+ TryInlineTruncateDoubleToI(result, fp_scratch, &done);
+
+ // If we fell through, the inline version didn't succeed, so call the stub instead.
+ Push(lr);
+ DoubleToIStub stub(object,
+ result,
+ HeapNumber::kValueOffset - kHeapObjectTag,
+ true, // is_truncating
+ true); // skip_fastpath
+ CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
+ Pop(lr);
+
+ Bind(&done);
- Ldr(double_scratch, FieldMemOperand(heap_number, HeapNumber::kValueOffset));
- ECMA262ToInt32(result, double_scratch, scratch1, scratch2, format);
+ // TODO(rmcilroy): Remove this Sxtw once the following bug is fixed:
+ // https://code.google.com/p/v8/issues/detail?id=3149
+ Sxtw(result, result.W());
}
// ---- Floating point helpers ----
- enum ECMA262ToInt32Result {
- // Provide an untagged int32_t which can be read using result.W(). That is,
- // the upper 32 bits of result are undefined.
- INT32_IN_W,
- // Provide an untagged int32_t which can be read using the 64-bit result
- // register. The int32_t result is sign-extended.
- INT32_IN_X,
-
- // Tag the int32_t result as a smi.
- SMI
- };
-
- // Applies ECMA-262 ToInt32 (see section 9.5) to a double value.
- void ECMA262ToInt32(Register result,
- DoubleRegister input,
- Register scratch1,
- Register scratch2,
- ECMA262ToInt32Result format = INT32_IN_X);
-
- // As ECMA262ToInt32, but operate on a HeapNumber.
- void HeapNumberECMA262ToInt32(Register result,
- Register heap_number,
- Register scratch1,
- Register scratch2,
- DoubleRegister double_scratch,
- ECMA262ToInt32Result format = INT32_IN_X);
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Branches to 'done' if
+ // it succeeds, otherwise falls through if the result saturated. On return,
+ // 'result' either holds the answer or has been clobbered on fall-through.
+ //
+ // Only public for the test code in test-code-stubs-a64.cc.
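+ //
+ // Typical call sequence (a sketch; TruncateDoubleToI shows the real pattern):
+ //   TryInlineTruncateDoubleToI(result, double_input, &done);
+ //   // Fall-through: slow path, e.g. call DoubleToIStub.
+ //   Bind(&done);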
+ void TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister input,
+ Label* done);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToI(Register result, DoubleRegister double_input);
+
+ // Performs a truncating conversion of a heap number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'object'
+ // must be different registers. Exits with 'result' holding the answer.
+ void TruncateHeapNumberToI(Register result, Register object);
+
+ // Converts the smi or heap number in object to an int32 using the rules
+ // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
+ // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'object' must be
+ // different registers.
+ void TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Label* not_int32);
// ---- Code generation helpers ----
Label out_of_range, only_low, negate, done;
Register input_reg = source();
Register result_reg = destination();
+ ASSERT(is_truncating());
int double_offset = offset();
// Account for saved regs if input is sp.
'sources': [
'test-utils-a64.cc',
'test-assembler-a64.cc',
+ 'test-code-stubs.cc',
+ 'test-code-stubs-a64.cc',
'test-disasm-a64.cc',
'test-fuzz-a64.cc',
'test-javascript-a64.cc',
}
-static void ECMA262ToInt32Helper(int32_t expected, double input) {
- SETUP();
- START();
-
- __ Fmov(d0, input);
-
- __ ECMA262ToInt32(x0, d0, x10, x11, MacroAssembler::INT32_IN_W);
- __ ECMA262ToInt32(x1, d0, x10, x11, MacroAssembler::INT32_IN_X);
- __ ECMA262ToInt32(x2, d0, x10, x11, MacroAssembler::SMI);
-
- // The upper bits of INT32_IN_W are undefined, so make sure we don't try to
- // test them.
- __ Mov(w0, w0);
-
- END();
-
- RUN();
-
- int64_t expected64 = expected;
-
- ASSERT_EQUAL_32(expected, w0);
- ASSERT_EQUAL_64(expected64, x1);
- ASSERT_EQUAL_64(expected64 << kSmiShift | kSmiTag, x2);
-
- TEARDOWN();
-}
-
-
-TEST(ecma_262_to_int32) {
- INIT_V8();
- // ==== exponent < 64 ====
-
- ECMA262ToInt32Helper(0, 0.0);
- ECMA262ToInt32Helper(0, -0.0);
- ECMA262ToInt32Helper(1, 1.0);
- ECMA262ToInt32Helper(-1, -1.0);
-
- // The largest representable value that is less than 1.
- ECMA262ToInt32Helper(0, 0x001fffffffffffff * pow(2.0, -53));
- ECMA262ToInt32Helper(0, 0x001fffffffffffff * -pow(2.0, -53));
- ECMA262ToInt32Helper(0, std::numeric_limits<double>::denorm_min());
- ECMA262ToInt32Helper(0, -std::numeric_limits<double>::denorm_min());
-
- // The largest conversion which doesn't require the integer modulo-2^32 step.
- ECMA262ToInt32Helper(0x7fffffff, 0x7fffffff);
- ECMA262ToInt32Helper(-0x80000000, -0x80000000);
-
- // The largest simple conversion, requiring module-2^32, but where the fcvt
- // does not saturate when converting to int64_t.
- ECMA262ToInt32Helper(0xfffffc00, 0x7ffffffffffffc00);
- ECMA262ToInt32Helper(-0xfffffc00, 0x7ffffffffffffc00 * -1.0);
-
- // ==== 64 <= exponent < 84 ====
-
- // The smallest conversion where the fcvt saturates.
- ECMA262ToInt32Helper(0, 0x8000000000000000);
- ECMA262ToInt32Helper(0, 0x8000000000000000 * -1.0);
-
- // The smallest conversion where the fcvt saturates, and where all the
- // mantissa bits are '1' (to check the shift logic).
- ECMA262ToInt32Helper(0xfffff800, 0xfffffffffffff800);
- ECMA262ToInt32Helper(-0xfffff800, 0xfffffffffffff800 * -1.0);
-
- // The largest conversion which doesn't produce a zero result.
- ECMA262ToInt32Helper(0x80000000, 0x001fffffffffffff * pow(2.0, 31));
- ECMA262ToInt32Helper(-0x80000000, 0x001fffffffffffff * -pow(2.0, 31));
-
- // Some large conversions to check the shifting function.
- ECMA262ToInt32Helper(0x6789abcd, 0x001123456789abcd);
- ECMA262ToInt32Helper(0x12345678, 0x001123456789abcd * pow(2.0, -20));
- ECMA262ToInt32Helper(0x891a2b3c, 0x001123456789abcd * pow(2.0, -21));
- ECMA262ToInt32Helper(0x11234567, 0x001123456789abcd * pow(2.0, -24));
- ECMA262ToInt32Helper(-0x6789abcd, 0x001123456789abcd * -1.0);
- ECMA262ToInt32Helper(-0x12345678, 0x001123456789abcd * -pow(2.0, -20));
- ECMA262ToInt32Helper(-0x891a2b3c, 0x001123456789abcd * -pow(2.0, -21));
- ECMA262ToInt32Helper(-0x11234567, 0x001123456789abcd * -pow(2.0, -24));
-
- // ==== 84 <= exponent ====
-
- // The smallest conversion which produces a zero result by shifting the
- // mantissa out of the int32_t range.
- ECMA262ToInt32Helper(0, pow(2.0, 32));
- ECMA262ToInt32Helper(0, -pow(2.0, 32));
-
- // Some very large conversions.
- ECMA262ToInt32Helper(0, 0x001fffffffffffff * pow(2.0, 32));
- ECMA262ToInt32Helper(0, 0x001fffffffffffff * -pow(2.0, 32));
- ECMA262ToInt32Helper(0, DBL_MAX);
- ECMA262ToInt32Helper(0, -DBL_MAX);
-
- // ==== Special values. ====
-
- ECMA262ToInt32Helper(0, std::numeric_limits<double>::infinity());
- ECMA262ToInt32Helper(0, -std::numeric_limits<double>::infinity());
- ECMA262ToInt32Helper(0, std::numeric_limits<double>::quiet_NaN());
- ECMA262ToInt32Helper(0, -std::numeric_limits<double>::quiet_NaN());
- ECMA262ToInt32Helper(0, std::numeric_limits<double>::signaling_NaN());
- ECMA262ToInt32Helper(0, -std::numeric_limits<double>::signaling_NaN());
-}
-
-
static void AbsHelperX(int64_t value) {
int64_t expected;
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "code-stubs.h"
+#include "test-code-stubs.h"
+#include "factory.h"
+#include "macro-assembler.h"
+#include "platform.h"
+#include "simulator.h"
+
+using namespace v8::internal;
+
+#define __ masm.
+
+ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
+ Register source_reg,
+ Register destination_reg,
+ bool inline_fastpath) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles(isolate);
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
+ DoubleToIStub stub(source_reg, destination_reg, 0, true, inline_fastpath);
+
+ byte* start = stub.GetCode(isolate)->instruction_start();
+ Label done;
+
+ __ SetStackPointer(csp);
+ __ PushCalleeSavedRegisters();
+ __ Mov(jssp, csp);
+ __ SetStackPointer(jssp);
+
+ // Push the double argument.
+ __ Push(d0);
+ if (!source_reg.is(jssp)) {
+ __ Mov(source_reg, jssp);
+ }
+
+ // Save registers so we can check later that they don't get clobbered.
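+ // 'source_reg_offset' is the distance from the final jssp to the originally
+ // pushed double, so source_reg can be recomputed below if the inline fast
+ // path clobbers it.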
+ int source_reg_offset = kDoubleSize;
+ int reg_num = 0;
+ for (; reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
+ Register reg = Register::from_code(reg_num);
+ if (!reg.is(destination_reg)) {
+ __ Push(reg);
+ source_reg_offset += kPointerSize;
+ }
+ }
+
+ // Re-push the double argument.
+ __ Push(d0);
+
+ // Call through to the actual stub.
+ if (inline_fastpath) {
+ __ Ldr(d0, MemOperand(source_reg));
+ __ TryInlineTruncateDoubleToI(destination_reg, d0, &done);
+ if (destination_reg.is(source_reg)) {
+ // Restore clobbered source_reg.
+ __ add(source_reg, jssp, Operand(source_reg_offset));
+ }
+ }
+ __ Call(start, RelocInfo::EXTERNAL_REFERENCE);
+ __ bind(&done);
+
+ __ Drop(1, kDoubleSize);
+
+ // Make sure no registers have been unexpectedly clobbered.
+ for (--reg_num; reg_num >= 0; --reg_num) {
+ Register reg = Register::from_code(reg_num);
+ if (!reg.is(destination_reg)) {
+ __ Pop(ip0);
+ __ cmp(reg, ip0);
+ __ Assert(eq, kRegisterWasClobbered);
+ }
+ }
+
+ __ Drop(1, kDoubleSize);
+
+ if (!destination_reg.is(x0))
+ __ Mov(x0, destination_reg);
+
+ // Restore callee-saved registers.
+ __ Mov(csp, jssp);
+ __ SetStackPointer(csp);
+ __ PopCalleeSavedRegisters();
+
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ CPU::FlushICache(buffer, actual_size);
+ return (reinterpret_cast<ConvertDToIFunc>(
+ reinterpret_cast<intptr_t>(buffer)));
+}
+
+#undef __
+
+
+static Isolate* GetIsolateFrom(LocalContext* context) {
+ return reinterpret_cast<Isolate*>((*context)->GetIsolate());
+}
+
+
+int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
+ double from) {
+#ifdef USE_SIMULATOR
+ return Simulator::current(Isolate::Current())->CallInt64(
+ FUNCTION_ADDR(func), Simulator::CallArgument(from),
+ Simulator::CallArgument::End());
+#else
+ return (*func)(from);
+#endif
+}
+
+
+TEST(ConvertDToI) {
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ HandleScope scope(isolate);
+
+#if DEBUG
+ // Verify that the tests actually work with the C version. In release
+ // builds, the compiler optimizes it away because it's all constant, but it
+ // does so incorrectly, triggering an assert on gcc.
+ RunAllTruncationTests(&ConvertDToICVersion);
+#endif
+
+ Register source_registers[] = {jssp, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9,
+ x10, x11, x12, x13, x14, x15, x18, x19, x20,
+ x21, x22, x23, x24};
+ Register dest_registers[] = {x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11,
+ x12, x13, x14, x15, x18, x19, x20, x21, x22, x23,
+ x24};
+
+ for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) {
+ for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
+ RunAllTruncationTests(
+ RunGeneratedCodeCallWrapper,
+ MakeConvertDToIFuncTrampoline(isolate,
+ source_registers[s],
+ dest_registers[d],
+ false));
+ RunAllTruncationTests(
+ RunGeneratedCodeCallWrapper,
+ MakeConvertDToIFuncTrampoline(isolate,
+ source_registers[s],
+ dest_registers[d],
+ true));
+ }
+ }
+}
int32_t exponent = (((exponent_bits & shifted_mask) >>
(Double::kPhysicalSignificandSize - 32)) -
HeapNumber::kExponentBias);
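+ // A negative unbiased exponent means |value| < 1 (or the input is a zero or
+ // denormal), so the truncation result is 0. Returning early also avoids
+ // casting a negative exponent to uint32_t below.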
+ if (exponent < 0) {
+ return 0;
+ }
uint32_t unsigned_exponent = static_cast<uint32_t>(exponent);
int result = 0;
uint32_t max_exponent =
RunOneTruncationTest(Infinity, 0);
RunOneTruncationTest(-NaN, 0);
RunOneTruncationTest(-Infinity, 0);
-
- RunOneTruncationTest(4.5036e+15, 0x1635E000);
+ RunOneTruncationTest(4.94065645841e-324, 0);
+ RunOneTruncationTest(-4.94065645841e-324, 0);
+
+ RunOneTruncationTest(0.9999999999999999, 0);
+ RunOneTruncationTest(-0.9999999999999999, 0);
+ RunOneTruncationTest(4294967296.0, 0);
+ RunOneTruncationTest(-4294967296.0, 0);
+ RunOneTruncationTest(9223372036854775000.0, 4294966272.0);
+ RunOneTruncationTest(-9223372036854775000.0, -4294966272.0);
+ RunOneTruncationTest(4.5036e+15, 372629504);
RunOneTruncationTest(-4.5036e+15, -372629504);
+ RunOneTruncationTest(287524199.5377777, 0x11234567);
+ RunOneTruncationTest(-287524199.5377777, -0x11234567);
+ RunOneTruncationTest(2300193596.302222, 2300193596.0);
+ RunOneTruncationTest(-2300193596.302222, -2300193596.0);
+ RunOneTruncationTest(4600387192.604444, 305419896);
+ RunOneTruncationTest(-4600387192.604444, -305419896);
+ RunOneTruncationTest(4823855600872397.0, 1737075661);
+ RunOneTruncationTest(-4823855600872397.0, -1737075661);
+
RunOneTruncationTest(4503603922337791.0, -1);
RunOneTruncationTest(-4503603922337791.0, 1);
RunOneTruncationTest(4503601774854143.0, 2147483647);
RunOneTruncationTest(4.8357078901445341e+24, -1073741824);
RunOneTruncationTest(-4.8357078901445341e+24, 1073741824);
+ RunOneTruncationTest(2147483647.0, 2147483647.0);
+ RunOneTruncationTest(-2147483648.0, -2147483648.0);
RunOneTruncationTest(9.6714111686030497e+24, -2147483648.0);
RunOneTruncationTest(-9.6714111686030497e+24, -2147483648.0);
RunOneTruncationTest(9.6714157802890681e+24, -2147483648.0);
RunOneTruncationTest(-9.6714157802890681e+24, -2147483648.0);
+ RunOneTruncationTest(1.9342813113834065e+25, 2147483648.0);
+ RunOneTruncationTest(-1.9342813113834065e+25, 2147483648.0);
+
+ RunOneTruncationTest(3.868562622766813e+25, 0);
+ RunOneTruncationTest(-3.868562622766813e+25, 0);
+ RunOneTruncationTest(1.7976931348623157e+308, 0);
+ RunOneTruncationTest(-1.7976931348623157e+308, 0);
}
#undef NaN