From ffe7f6a1baff143c480dc7cfd0e7c00ca937d811 Mon Sep 17 00:00:00 2001 From: "danno@chromium.org" Date: Sun, 7 Apr 2013 04:34:20 +0000 Subject: [PATCH] Remove ARM support for VFP2 R=yangguo@chromium.org Review URL: https://codereview.chromium.org/13560007 git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@14159 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 --- src/arm/assembler-arm-inl.h | 18 +- src/arm/assembler-arm.cc | 75 +-- src/arm/assembler-arm.h | 5 - src/arm/code-stubs-arm.cc | 909 +++++++++++------------------------- src/arm/code-stubs-arm.h | 6 +- src/arm/codegen-arm.cc | 26 +- src/arm/deoptimizer-arm.cc | 71 ++- src/arm/full-codegen-arm.cc | 59 +-- src/arm/lithium-arm.cc | 11 +- src/arm/lithium-codegen-arm.cc | 265 ++--------- src/arm/lithium-gap-resolver-arm.cc | 11 +- src/arm/macro-assembler-arm.cc | 107 +---- src/arm/macro-assembler-arm.h | 20 +- src/arm/stub-cache-arm.cc | 396 +++------------- src/assembler.cc | 6 +- src/code-stubs.h | 2 +- src/flag-definitions.h | 5 +- src/mips/lithium-mips.cc | 2 +- src/platform-linux.cc | 3 - src/v8globals.h | 9 +- test/cctest/test-assembler-arm.cc | 392 ++++++++-------- 21 files changed, 672 insertions(+), 1726 deletions(-) diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h index 123013b..0f9630b 100644 --- a/src/arm/assembler-arm-inl.h +++ b/src/arm/assembler-arm-inl.h @@ -48,29 +48,17 @@ namespace internal { int Register::NumAllocatableRegisters() { - if (CpuFeatures::IsSupported(VFP2)) { - return kMaxNumAllocatableRegisters; - } else { - return kMaxNumAllocatableRegisters - kGPRsPerNonVFP2Double; - } + return kMaxNumAllocatableRegisters; } int DwVfpRegister::NumRegisters() { - if (CpuFeatures::IsSupported(VFP2)) { - return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16; - } else { - return 1; - } + return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16; } int DwVfpRegister::NumAllocatableRegisters() { - if (CpuFeatures::IsSupported(VFP2)) { - return NumRegisters() - kNumReservedRegisters; - } else { - return 1; - } + return NumRegisters() - kNumReservedRegisters; } diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc index 1574d51..3741ac2 100644 --- a/src/arm/assembler-arm.cc +++ b/src/arm/assembler-arm.cc @@ -66,11 +66,8 @@ static unsigned CpuFeaturesImpliedByCompiler() { answer |= 1u << ARMv7; #endif // CAN_USE_ARMV7_INSTRUCTIONS #ifdef CAN_USE_VFP3_INSTRUCTIONS - answer |= 1u << VFP3 | 1u << VFP2 | 1u << ARMv7; + answer |= 1u << VFP3 | 1u << ARMv7; #endif // CAN_USE_VFP3_INSTRUCTIONS -#ifdef CAN_USE_VFP2_INSTRUCTIONS - answer |= 1u << VFP2; -#endif // CAN_USE_VFP2_INSTRUCTIONS #ifdef CAN_USE_VFP32DREGS answer |= 1u << VFP32DREGS; #endif // CAN_USE_VFP32DREGS @@ -81,7 +78,7 @@ static unsigned CpuFeaturesImpliedByCompiler() { // point support implies VFPv3, see ARM DDI 0406B, page A1-6. 
#if defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__) \
 && !defined(__SOFTFP__)
- answer |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
+ answer |= 1u << VFP3 | 1u << ARMv7;
 #endif // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
 // && !defined(__SOFTFP__)
 #endif // __arm__
@@ -94,18 +91,13 @@ static unsigned CpuFeaturesImpliedByCompiler() {

 const char* DwVfpRegister::AllocationIndexToString(int index) {
- if (CpuFeatures::IsSupported(VFP2)) {
- ASSERT(index >= 0 && index < NumAllocatableRegisters());
- ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
- kNumReservedRegisters - 1);
- if (index >= kDoubleRegZero.code())
- index += kNumReservedRegisters;
-
- return VFPRegisters::Name(index, true);
- } else {
- ASSERT(index == 0);
- return "sfpd0";
- }
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
+ ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
+ kNumReservedRegisters - 1);
+ if (index >= kDoubleRegZero.code())
+ index += kNumReservedRegisters;
+
+ return VFPRegisters::Name(index, true);
 }

@@ -133,8 +125,7 @@ void CpuFeatures::Probe() {
 if (FLAG_enable_vfp3) {
 supported_ |= static_cast<uint64_t>(1) << VFP3 |
- static_cast<uint64_t>(1) << ARMv7 |
- static_cast<uint64_t>(1) << VFP2;
+ static_cast<uint64_t>(1) << ARMv7;
 }
 // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
 if (FLAG_enable_armv7) {
@@ -157,14 +148,11 @@ void CpuFeatures::Probe() {
 // Probe for additional features not already known to be available.
 if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
 // This implementation also sets the VFP flags if runtime
- // detection of VFP returns true. VFPv3 implies ARMv7 and VFP2, see ARM DDI
+ // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
 // 0406B, page A1-6.
 found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP3 |
- static_cast<uint64_t>(1) << ARMv7 |
- static_cast<uint64_t>(1) << VFP2;
- } else if (!IsSupported(VFP2) && OS::ArmCpuHasFeature(VFP2)) {
- found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP2;
+ static_cast<uint64_t>(1) << ARMv7;
 }

 if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
@@ -193,8 +181,8 @@ void CpuFeatures::Probe() {
 supported_ |= found_by_runtime_probing_only_;
 #endif

- // Assert that VFP3 implies VFP2 and ARMv7.
- ASSERT(!IsSupported(VFP3) || (IsSupported(VFP2) && IsSupported(ARMv7)));
+ // Assert that VFP3 implies ARMv7.
+ ASSERT(!IsSupported(VFP3) || IsSupported(ARMv7));
 }

@@ -1763,7 +1751,6 @@ void Assembler::vldr(const DwVfpRegister dst,
 // Instruction details available in ARM DDI 0406C.b, A8-924.
 // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
 // Vd(15-12) | 1011(11-8) | offset
- ASSERT(IsEnabled(VFP2));
 int u = 1;
 if (offset < 0) {
 offset = -offset;
@@ -1807,7 +1794,6 @@ void Assembler::vldr(const SwVfpRegister dst,
 // Instruction details available in ARM DDI 0406A, A8-628.
 // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
 // Vdst(15-12) | 1010(11-8) | offset
- ASSERT(IsEnabled(VFP2));
 int u = 1;
 if (offset < 0) {
 offset = -offset;
@@ -1851,7 +1837,6 @@ void Assembler::vstr(const DwVfpRegister src,
 // Instruction details available in ARM DDI 0406C.b, A8-1082.
 // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
 // Vd(15-12) | 1011(11-8) | (offset/4)
- ASSERT(IsEnabled(VFP2));
 int u = 1;
 if (offset < 0) {
 offset = -offset;
@@ -1895,7 +1880,6 @@ void Assembler::vstr(const SwVfpRegister src,
 // Instruction details available in ARM DDI 0406A, A8-786.
// cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) | // Vdst(15-12) | 1010(11-8) | (offset/4) - ASSERT(IsEnabled(VFP2)); int u = 1; if (offset < 0) { offset = -offset; @@ -1938,7 +1922,6 @@ void Assembler::vldm(BlockAddrMode am, // Instruction details available in ARM DDI 0406C.b, A8-922. // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) | // first(15-12) | 1011(11-8) | (count * 2) - ASSERT(IsEnabled(VFP2)); ASSERT_LE(first.code(), last.code()); ASSERT(am == ia || am == ia_w || am == db_w); ASSERT(!base.is(pc)); @@ -1960,7 +1943,6 @@ void Assembler::vstm(BlockAddrMode am, // Instruction details available in ARM DDI 0406C.b, A8-1080. // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) | // first(15-12) | 1011(11-8) | (count * 2) - ASSERT(IsEnabled(VFP2)); ASSERT_LE(first.code(), last.code()); ASSERT(am == ia || am == ia_w || am == db_w); ASSERT(!base.is(pc)); @@ -1981,7 +1963,6 @@ void Assembler::vldm(BlockAddrMode am, // Instruction details available in ARM DDI 0406A, A8-626. // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) | // first(15-12) | 1010(11-8) | (count/2) - ASSERT(IsEnabled(VFP2)); ASSERT_LE(first.code(), last.code()); ASSERT(am == ia || am == ia_w || am == db_w); ASSERT(!base.is(pc)); @@ -2002,7 +1983,6 @@ void Assembler::vstm(BlockAddrMode am, // Instruction details available in ARM DDI 0406A, A8-784. // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) | // first(15-12) | 1011(11-8) | (count/2) - ASSERT(IsEnabled(VFP2)); ASSERT_LE(first.code(), last.code()); ASSERT(am == ia || am == ia_w || am == db_w); ASSERT(!base.is(pc)); @@ -2076,8 +2056,6 @@ static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) { void Assembler::vmov(const DwVfpRegister dst, double imm, const Register scratch) { - ASSERT(IsEnabled(VFP2)); - uint32_t enc; if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) { // The double can be encoded in the instruction. @@ -2148,7 +2126,6 @@ void Assembler::vmov(const SwVfpRegister dst, const Condition cond) { // Sd = Sm // Instruction details available in ARM DDI 0406B, A8-642. - ASSERT(IsEnabled(VFP2)); int sd, d, sm, m; dst.split_code(&sd, &d); src.split_code(&sm, &m); @@ -2163,7 +2140,6 @@ void Assembler::vmov(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-938. // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) | // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0) - ASSERT(IsEnabled(VFP2)); int vd, d; dst.split_code(&vd, &d); int vm, m; @@ -2181,7 +2157,6 @@ void Assembler::vmov(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-940. // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) | // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0) - ASSERT(IsEnabled(VFP2)); ASSERT(index.index == 0 || index.index == 1); int vd, d; dst.split_code(&vd, &d); @@ -2198,7 +2173,6 @@ void Assembler::vmov(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-948. // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) | // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm - ASSERT(IsEnabled(VFP2)); ASSERT(!src1.is(pc) && !src2.is(pc)); int vm, m; dst.split_code(&vm, &m); @@ -2215,7 +2189,6 @@ void Assembler::vmov(const Register dst1, // Instruction details available in ARM DDI 0406C.b, A8-948. 
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) | // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm - ASSERT(IsEnabled(VFP2)); ASSERT(!dst1.is(pc) && !dst2.is(pc)); int vm, m; src.split_code(&vm, &m); @@ -2231,7 +2204,6 @@ void Assembler::vmov(const SwVfpRegister dst, // Instruction details available in ARM DDI 0406A, A8-642. // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) | // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0) - ASSERT(IsEnabled(VFP2)); ASSERT(!src.is(pc)); int sn, n; dst.split_code(&sn, &n); @@ -2246,7 +2218,6 @@ void Assembler::vmov(const Register dst, // Instruction details available in ARM DDI 0406A, A8-642. // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) | // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0) - ASSERT(IsEnabled(VFP2)); ASSERT(!dst.is(pc)); int sn, n; src.split_code(&sn, &n); @@ -2371,7 +2342,6 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond)); } @@ -2380,7 +2350,6 @@ void Assembler::vcvt_f32_s32(const SwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond)); } @@ -2389,7 +2358,6 @@ void Assembler::vcvt_f64_u32(const DwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond)); } @@ -2398,7 +2366,6 @@ void Assembler::vcvt_s32_f64(const SwVfpRegister dst, const DwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond)); } @@ -2407,7 +2374,6 @@ void Assembler::vcvt_u32_f64(const SwVfpRegister dst, const DwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond)); } @@ -2416,7 +2382,6 @@ void Assembler::vcvt_f64_f32(const DwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond)); } @@ -2425,7 +2390,6 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst, const DwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond)); } @@ -2436,7 +2400,6 @@ void Assembler::vneg(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-968. // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) | // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0) - ASSERT(IsEnabled(VFP2)); int vd, d; dst.split_code(&vd, &d); int vm, m; @@ -2453,7 +2416,6 @@ void Assembler::vabs(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-524. // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) | // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0) - ASSERT(IsEnabled(VFP2)); int vd, d; dst.split_code(&vd, &d); int vm, m; @@ -2472,7 +2434,6 @@ void Assembler::vadd(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-830. 
// cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) | // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0) - ASSERT(IsEnabled(VFP2)); int vd, d; dst.split_code(&vd, &d); int vn, n; @@ -2493,7 +2454,6 @@ void Assembler::vsub(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-1086. // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) | // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0) - ASSERT(IsEnabled(VFP2)); int vd, d; dst.split_code(&vd, &d); int vn, n; @@ -2514,7 +2474,6 @@ void Assembler::vmul(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-960. // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) | // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0) - ASSERT(IsEnabled(VFP2)); int vd, d; dst.split_code(&vd, &d); int vn, n; @@ -2571,7 +2530,6 @@ void Assembler::vdiv(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-882. // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) | // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0) - ASSERT(IsEnabled(VFP2)); int vd, d; dst.split_code(&vd, &d); int vn, n; @@ -2590,7 +2548,6 @@ void Assembler::vcmp(const DwVfpRegister src1, // Instruction details available in ARM DDI 0406C.b, A8-864. // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) | // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0) - ASSERT(IsEnabled(VFP2)); int vd, d; src1.split_code(&vd, &d); int vm, m; @@ -2607,7 +2564,6 @@ void Assembler::vcmp(const DwVfpRegister src1, // Instruction details available in ARM DDI 0406C.b, A8-864. // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) | // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0) - ASSERT(IsEnabled(VFP2)); ASSERT(src2 == 0.0); int vd, d; src1.split_code(&vd, &d); @@ -2619,7 +2575,6 @@ void Assembler::vmsr(Register dst, Condition cond) { // Instruction details available in ARM DDI 0406A, A8-652. // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) | // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0) - ASSERT(IsEnabled(VFP2)); emit(cond | 0xE*B24 | 0xE*B20 | B16 | dst.code()*B12 | 0xA*B8 | B4); } @@ -2629,7 +2584,6 @@ void Assembler::vmrs(Register dst, Condition cond) { // Instruction details available in ARM DDI 0406A, A8-652. // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) | // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0) - ASSERT(IsEnabled(VFP2)); emit(cond | 0xE*B24 | 0xF*B20 | B16 | dst.code()*B12 | 0xA*B8 | B4); } @@ -2641,7 +2595,6 @@ void Assembler::vsqrt(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-1058. 
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) | // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0) - ASSERT(IsEnabled(VFP2)); int vd, d; dst.split_code(&vd, &d); int vm, m; diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h index 045638e..eab9852 100644 --- a/src/arm/assembler-arm.h +++ b/src/arm/assembler-arm.h @@ -59,7 +59,6 @@ class CpuFeatures : public AllStatic { static bool IsSupported(CpuFeature f) { ASSERT(initialized_); if (f == VFP3 && !FLAG_enable_vfp3) return false; - if (f == VFP2 && !FLAG_enable_vfp2) return false; if (f == SUDIV && !FLAG_enable_sudiv) return false; if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) { return false; @@ -117,7 +116,6 @@ struct Register { static const int kNumRegisters = 16; static const int kMaxNumAllocatableRegisters = 8; static const int kSizeInBytes = 4; - static const int kGPRsPerNonVFP2Double = 2; inline static int NumAllocatableRegisters(); @@ -370,9 +368,6 @@ const DwVfpRegister d29 = { 29 }; const DwVfpRegister d30 = { 30 }; const DwVfpRegister d31 = { 31 }; -const Register sfpd_lo = { kRegister_r6_Code }; -const Register sfpd_hi = { kRegister_r7_Code }; - // Aliases for double registers. Defined using #define instead of // "static const DwVfpRegister&" because Clang complains otherwise when a // compilation unit that includes this header doesn't use the variables. diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc index f883c4f..0aba7c1 100644 --- a/src/arm/code-stubs-arm.cc +++ b/src/arm/code-stubs-arm.cc @@ -145,7 +145,6 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, Label* lhs_not_nan, Label* slow, bool strict); -static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond); static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs, Register rhs); @@ -515,30 +514,15 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm, FloatingPointHelper::Destination destination, Register scratch1, Register scratch2) { - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - __ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); - __ vmov(d7.high(), scratch1); - __ vcvt_f64_s32(d7, d7.high()); - __ mov(scratch1, Operand(r1, ASR, kSmiTagSize)); - __ vmov(d6.high(), scratch1); - __ vcvt_f64_s32(d6, d6.high()); - if (destination == kCoreRegisters) { - __ vmov(r2, r3, d7); - __ vmov(r0, r1, d6); - } - } else { - ASSERT(destination == kCoreRegisters); - // Write Smi from r0 to r3 and r2 in double format. - __ mov(scratch1, Operand(r0)); - ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2); - __ push(lr); - __ Call(stub1.GetCode(masm->isolate())); - // Write Smi from r1 to r1 and r0 in double format. - __ mov(scratch1, Operand(r1)); - ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2); - __ Call(stub2.GetCode(masm->isolate())); - __ pop(lr); + __ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); + __ vmov(d7.high(), scratch1); + __ vcvt_f64_s32(d7, d7.high()); + __ mov(scratch1, Operand(r1, ASR, kSmiTagSize)); + __ vmov(d6.high(), scratch1); + __ vcvt_f64_s32(d6, d6.high()); + if (destination == kCoreRegisters) { + __ vmov(r2, r3, d7); + __ vmov(r0, r1, d6); } } @@ -565,9 +549,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); // Handle loading a double from a heap number. 
- if (CpuFeatures::IsSupported(VFP2) && - destination == kVFPRegisters) { - CpuFeatureScope scope(masm, VFP2); + if (destination == kVFPRegisters) { // Load the double from tagged HeapNumber to double register. __ sub(scratch1, object, Operand(kHeapObjectTag)); __ vldr(dst, scratch1, HeapNumber::kValueOffset); @@ -580,23 +562,12 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, // Handle loading a double from a smi. __ bind(&is_smi); - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - // Convert smi to double using VFP instructions. - __ vmov(dst.high(), scratch1); - __ vcvt_f64_s32(dst, dst.high()); - if (destination == kCoreRegisters) { - // Load the converted smi to dst1 and dst2 in double format. - __ vmov(dst1, dst2, dst); - } - } else { - ASSERT(destination == kCoreRegisters); - // Write smi to dst1 and dst2 double format. - __ mov(scratch1, Operand(object)); - ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); - __ push(lr); - __ Call(stub.GetCode(masm->isolate())); - __ pop(lr); + // Convert smi to double using VFP instructions. + __ vmov(dst.high(), scratch1); + __ vcvt_f64_s32(dst, dst.high()); + if (destination == kCoreRegisters) { + // Load the converted smi to dst1 and dst2 in double format. + __ vmov(dst1, dst2, dst); } __ bind(&done); @@ -643,62 +614,10 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, Label done; - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - __ vmov(single_scratch, int_scratch); - __ vcvt_f64_s32(double_dst, single_scratch); - if (destination == kCoreRegisters) { - __ vmov(dst_mantissa, dst_exponent, double_dst); - } - } else { - Label fewer_than_20_useful_bits; - // Expected output: - // | dst_exponent | dst_mantissa | - // | s | exp | mantissa | - - // Check for zero. - __ cmp(int_scratch, Operand::Zero()); - __ mov(dst_exponent, int_scratch); - __ mov(dst_mantissa, int_scratch); - __ b(eq, &done); - - // Preload the sign of the value. - __ and_(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask), SetCC); - // Get the absolute value of the object (as an unsigned integer). - __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi); - - // Get mantissa[51:20]. - - // Get the position of the first set bit. - __ CountLeadingZeros(dst_mantissa, int_scratch, scratch2); - __ rsb(dst_mantissa, dst_mantissa, Operand(31)); - - // Set the exponent. - __ add(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias)); - __ Bfi(dst_exponent, scratch2, scratch2, - HeapNumber::kExponentShift, HeapNumber::kExponentBits); - - // Clear the first non null bit. - __ mov(scratch2, Operand(1)); - __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst_mantissa)); - - __ cmp(dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord)); - // Get the number of bits to set in the lower part of the mantissa. - __ sub(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord), - SetCC); - __ b(mi, &fewer_than_20_useful_bits); - // Set the higher 20 bits of the mantissa. - __ orr(dst_exponent, dst_exponent, Operand(int_scratch, LSR, scratch2)); - __ rsb(scratch2, scratch2, Operand(32)); - __ mov(dst_mantissa, Operand(int_scratch, LSL, scratch2)); - __ b(&done); - - __ bind(&fewer_than_20_useful_bits); - __ rsb(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord)); - __ mov(scratch2, Operand(int_scratch, LSL, scratch2)); - __ orr(dst_exponent, dst_exponent, scratch2); - // Set dst1 to 0. 
- __ mov(dst_mantissa, Operand::Zero()); + __ vmov(single_scratch, int_scratch); + __ vcvt_f64_s32(double_dst, single_scratch); + if (destination == kCoreRegisters) { + __ vmov(dst_mantissa, dst_exponent, double_dst); } __ bind(&done); } @@ -737,65 +656,17 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); // Load the number. - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - // Load the double value. - __ sub(scratch1, object, Operand(kHeapObjectTag)); - __ vldr(double_dst, scratch1, HeapNumber::kValueOffset); - - __ TestDoubleIsInt32(double_dst, double_scratch); - // Jump to not_int32 if the operation did not succeed. - __ b(ne, not_int32); + // Load the double value. + __ sub(scratch1, object, Operand(kHeapObjectTag)); + __ vldr(double_dst, scratch1, HeapNumber::kValueOffset); - if (destination == kCoreRegisters) { - __ vmov(dst_mantissa, dst_exponent, double_dst); - } - - } else { - ASSERT(!scratch1.is(object) && !scratch2.is(object)); - // Load the double value in the destination registers. - bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent); - if (save_registers) { - // Save both output registers, because the other one probably holds - // an important value too. - __ Push(dst_exponent, dst_mantissa); - } - __ Ldrd(dst_mantissa, dst_exponent, - FieldMemOperand(object, HeapNumber::kValueOffset)); - - // Check for 0 and -0. - Label zero; - __ bic(scratch1, dst_exponent, Operand(HeapNumber::kSignMask)); - __ orr(scratch1, scratch1, Operand(dst_mantissa)); - __ cmp(scratch1, Operand::Zero()); - __ b(eq, &zero); - - // Check that the value can be exactly represented by a 32-bit integer. - // Jump to not_int32 if that's not the case. - Label restore_input_and_miss; - DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2, - &restore_input_and_miss); - - // dst_* were trashed. Reload the double value. - if (save_registers) { - __ Pop(dst_exponent, dst_mantissa); - } - __ Ldrd(dst_mantissa, dst_exponent, - FieldMemOperand(object, HeapNumber::kValueOffset)); - __ b(&done); - - __ bind(&restore_input_and_miss); - if (save_registers) { - __ Pop(dst_exponent, dst_mantissa); - } - __ b(not_int32); + __ TestDoubleIsInt32(double_dst, double_scratch); + // Jump to not_int32 if the operation did not succeed. + __ b(ne, not_int32); - __ bind(&zero); - if (save_registers) { - __ Drop(2); - } + if (destination == kCoreRegisters) { + __ vmov(dst_mantissa, dst_exponent, double_dst); } - __ bind(&done); } @@ -828,43 +699,13 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, // Object is a heap number. // Convert the floating point value to a 32-bit integer. - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - - // Load the double value. - __ sub(scratch1, object, Operand(kHeapObjectTag)); - __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset); + // Load the double value. + __ sub(scratch1, object, Operand(kHeapObjectTag)); + __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset); - __ TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1); - // Jump to not_int32 if the operation did not succeed. - __ b(ne, not_int32); - } else { - // Load the double value in the destination registers. - __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); - __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); - - // Check for 0 and -0. 
- __ bic(dst, scratch1, Operand(HeapNumber::kSignMask)); - __ orr(dst, scratch2, Operand(dst)); - __ cmp(dst, Operand::Zero()); - __ b(eq, &done); - - DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32); - - // Registers state after DoubleIs32BitInteger. - // dst: mantissa[51:20]. - // scratch2: 1 - - // Shift back the higher bits of the mantissa. - __ mov(dst, Operand(dst, LSR, scratch3)); - // Set the implicit first bit. - __ rsb(scratch3, scratch3, Operand(32)); - __ orr(dst, dst, Operand(scratch2, LSL, scratch3)); - // Set the sign. - __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); - __ tst(scratch1, Operand(HeapNumber::kSignMask)); - __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi); - } + __ TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1); + // Jump to not_int32 if the operation did not succeed. + __ b(ne, not_int32); __ b(&done); __ bind(&maybe_undefined); @@ -958,7 +799,6 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( __ push(lr); __ PrepareCallCFunction(0, 2, scratch); if (masm->use_eabi_hardfloat()) { - CpuFeatureScope scope(masm, VFP2); __ vmov(d0, r0, r1); __ vmov(d1, r2, r3); } @@ -970,7 +810,6 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( // Store answer in the overwritable heap number. Double returned in // registers r0 and r1 or in d0. if (masm->use_eabi_hardfloat()) { - CpuFeatureScope scope(masm, VFP2); __ vstr(d0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); } else { @@ -1183,23 +1022,11 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, } // Lhs is a smi, rhs is a number. - if (CpuFeatures::IsSupported(VFP2)) { - // Convert lhs to a double in d7. - CpuFeatureScope scope(masm, VFP2); - __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); - // Load the double from rhs, tagged HeapNumber r0, to d6. - __ sub(r7, rhs, Operand(kHeapObjectTag)); - __ vldr(d6, r7, HeapNumber::kValueOffset); - } else { - __ push(lr); - // Convert lhs to a double in r2, r3. - __ mov(r7, Operand(lhs)); - ConvertToDoubleStub stub1(r3, r2, r7, r6); - __ Call(stub1.GetCode(masm->isolate())); - // Load rhs to a double in r0, r1. - __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); - __ pop(lr); - } + // Convert lhs to a double in d7. + __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); + // Load the double from rhs, tagged HeapNumber r0, to d6. + __ sub(r7, rhs, Operand(kHeapObjectTag)); + __ vldr(d6, r7, HeapNumber::kValueOffset); // We now have both loaded as doubles but we can skip the lhs nan check // since it's a smi. @@ -1223,23 +1050,11 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, } // Rhs is a smi, lhs is a heap number. - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - // Load the double from lhs, tagged HeapNumber r1, to d7. - __ sub(r7, lhs, Operand(kHeapObjectTag)); - __ vldr(d7, r7, HeapNumber::kValueOffset); - // Convert rhs to a double in d6 . - __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); - } else { - __ push(lr); - // Load lhs to a double in r2, r3. - __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); - // Convert rhs to a double in r0, r1. - __ mov(r7, Operand(rhs)); - ConvertToDoubleStub stub2(r1, r0, r7, r6); - __ Call(stub2.GetCode(masm->isolate())); - __ pop(lr); - } + // Load the double from lhs, tagged HeapNumber r1, to d7. + __ sub(r7, lhs, Operand(kHeapObjectTag)); + __ vldr(d7, r7, HeapNumber::kValueOffset); + // Convert rhs to a double in d6 . 
+ __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); // Fall through to both_loaded_as_doubles. } @@ -1296,60 +1111,6 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) { // See comment at call site. -static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, - Condition cond) { - bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); - Register rhs_exponent = exp_first ? r0 : r1; - Register lhs_exponent = exp_first ? r2 : r3; - Register rhs_mantissa = exp_first ? r1 : r0; - Register lhs_mantissa = exp_first ? r3 : r2; - - // r0, r1, r2, r3 have the two doubles. Neither is a NaN. - if (cond == eq) { - // Doubles are not equal unless they have the same bit pattern. - // Exception: 0 and -0. - __ cmp(rhs_mantissa, Operand(lhs_mantissa)); - __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne); - // Return non-zero if the numbers are unequal. - __ Ret(ne); - - __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC); - // If exponents are equal then return 0. - __ Ret(eq); - - // Exponents are unequal. The only way we can return that the numbers - // are equal is if one is -0 and the other is 0. We already dealt - // with the case where both are -0 or both are 0. - // We start by seeing if the mantissas (that are equal) or the bottom - // 31 bits of the rhs exponent are non-zero. If so we return not - // equal. - __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC); - __ mov(r0, Operand(r4), LeaveCC, ne); - __ Ret(ne); - // Now they are equal if and only if the lhs exponent is zero in its - // low 31 bits. - __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize)); - __ Ret(); - } else { - // Call a native function to do a comparison between two non-NaNs. - // Call C routine that may not cause GC or other trouble. - __ push(lr); - __ PrepareCallCFunction(0, 2, r5); - if (masm->use_eabi_hardfloat()) { - CpuFeatureScope scope(masm, VFP2); - __ vmov(d0, r0, r1); - __ vmov(d1, r2, r3); - } - - AllowExternalCallThatCantCauseGC scope(masm); - __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), - 0, 2); - __ pop(pc); // Return. - } -} - - -// See comment at call site. static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs, Register rhs) { @@ -1412,16 +1173,10 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, // Both are heap numbers. Load them up then jump to the code we have // for that. 
- if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - __ sub(r7, rhs, Operand(kHeapObjectTag)); - __ vldr(d6, r7, HeapNumber::kValueOffset); - __ sub(r7, lhs, Operand(kHeapObjectTag)); - __ vldr(d7, r7, HeapNumber::kValueOffset); - } else { - __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); - __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); - } + __ sub(r7, rhs, Operand(kHeapObjectTag)); + __ vldr(d6, r7, HeapNumber::kValueOffset); + __ sub(r7, lhs, Operand(kHeapObjectTag)); + __ vldr(d7, r7, HeapNumber::kValueOffset); __ jmp(both_loaded_as_doubles); } @@ -1502,42 +1257,37 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, Label load_result_from_cache; if (!object_is_smi) { __ JumpIfSmi(object, &is_smi); - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - __ CheckMap(object, - scratch1, - Heap::kHeapNumberMapRootIndex, - not_found, - DONT_DO_SMI_CHECK); - - STATIC_ASSERT(8 == kDoubleSize); - __ add(scratch1, - object, - Operand(HeapNumber::kValueOffset - kHeapObjectTag)); - __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); - __ eor(scratch1, scratch1, Operand(scratch2)); - __ and_(scratch1, scratch1, Operand(mask)); - - // Calculate address of entry in string cache: each entry consists - // of two pointer sized fields. - __ add(scratch1, - number_string_cache, - Operand(scratch1, LSL, kPointerSizeLog2 + 1)); - - Register probe = mask; - __ ldr(probe, - FieldMemOperand(scratch1, FixedArray::kHeaderSize)); - __ JumpIfSmi(probe, not_found); - __ sub(scratch2, object, Operand(kHeapObjectTag)); - __ vldr(d0, scratch2, HeapNumber::kValueOffset); - __ sub(probe, probe, Operand(kHeapObjectTag)); - __ vldr(d1, probe, HeapNumber::kValueOffset); - __ VFPCompareAndSetFlags(d0, d1); - __ b(ne, not_found); // The cache did not contain this value. - __ b(&load_result_from_cache); - } else { - __ b(not_found); - } + __ CheckMap(object, + scratch1, + Heap::kHeapNumberMapRootIndex, + not_found, + DONT_DO_SMI_CHECK); + + STATIC_ASSERT(8 == kDoubleSize); + __ add(scratch1, + object, + Operand(HeapNumber::kValueOffset - kHeapObjectTag)); + __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); + __ eor(scratch1, scratch1, Operand(scratch2)); + __ and_(scratch1, scratch1, Operand(mask)); + + // Calculate address of entry in string cache: each entry consists + // of two pointer sized fields. + __ add(scratch1, + number_string_cache, + Operand(scratch1, LSL, kPointerSizeLog2 + 1)); + + Register probe = mask; + __ ldr(probe, + FieldMemOperand(scratch1, FixedArray::kHeaderSize)); + __ JumpIfSmi(probe, not_found); + __ sub(scratch2, object, Operand(kHeapObjectTag)); + __ vldr(d0, scratch2, HeapNumber::kValueOffset); + __ sub(probe, probe, Operand(kHeapObjectTag)); + __ vldr(d1, probe, HeapNumber::kValueOffset); + __ VFPCompareAndSetFlags(d0, d1); + __ b(ne, not_found); // The cache did not contain this value. + __ b(&load_result_from_cache); } __ bind(&is_smi); @@ -1652,37 +1402,27 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { // The arguments have been converted to doubles and stored in d6 and d7, if // VFP3 is supported, or in r0, r1, r2, and r3. Isolate* isolate = masm->isolate(); - if (CpuFeatures::IsSupported(VFP2)) { - __ bind(&lhs_not_nan); - CpuFeatureScope scope(masm, VFP2); - Label no_nan; - // ARMv7 VFP3 instructions to implement double precision comparison. 
- __ VFPCompareAndSetFlags(d7, d6);
- Label nan;
- __ b(vs, &nan);
- __ mov(r0, Operand(EQUAL), LeaveCC, eq);
- __ mov(r0, Operand(LESS), LeaveCC, lt);
- __ mov(r0, Operand(GREATER), LeaveCC, gt);
- __ Ret();
+ __ bind(&lhs_not_nan);
+ Label no_nan;
+ // ARMv7 VFP3 instructions to implement double precision comparison.
+ __ VFPCompareAndSetFlags(d7, d6);
+ Label nan;
+ __ b(vs, &nan);
+ __ mov(r0, Operand(EQUAL), LeaveCC, eq);
+ __ mov(r0, Operand(LESS), LeaveCC, lt);
+ __ mov(r0, Operand(GREATER), LeaveCC, gt);
+ __ Ret();

- __ bind(&nan);
- // If one of the sides was a NaN then the v flag is set. Load r0 with
- // whatever it takes to make the comparison fail, since comparisons with NaN
- // always fail.
- if (cc == lt || cc == le) {
- __ mov(r0, Operand(GREATER));
- } else {
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
+ __ bind(&nan);
+ // If one of the sides was a NaN then the v flag is set. Load r0 with
+ // whatever it takes to make the comparison fail, since comparisons with NaN
+ // always fail.
+ if (cc == lt || cc == le) {
+ __ mov(r0, Operand(GREATER));
 } else {
- // Checks for NaN in the doubles we have loaded. Can return the answer or
- // fall through if neither is a NaN. Also binds lhs_not_nan.
- EmitNanCheck(masm, &lhs_not_nan, cc);
- // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
- // answer. Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc);
+ __ mov(r0, Operand(LESS));
 }
+ __ Ret();

 __ bind(&not_smis);
 // At this point we know we are dealing with two different objects,
@@ -1779,7 +1519,6 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
 // we cannot call anything that could cause a GC from this stub.
 Label patch;
 const Register map = r9.is(tos_) ? r7 : r9;
- const Register temp = map;

 // undefined -> false.
 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
@@ -1822,9 +1561,9 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
 if (types_.Contains(STRING)) {
 // String value -> false iff empty.
- __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
- __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt);
- __ Ret(lt); // the string length is OK as the return value
+ __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
+ __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt);
+ __ Ret(lt); // the string length is OK as the return value
 }

 if (types_.Contains(HEAP_NUMBER)) {
 // Heap number -> false iff +0, -0, or NaN.
 Label not_heap_number;
 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
 __ b(ne, &not_heap_number);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm, VFP2);
-
- __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
- __ VFPCompareAndSetFlags(d1, 0.0);
- // "tos_" is a register, and contains a non zero value by default.
- // Hence we only need to overwrite "tos_" with zero to return false for
- // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
- __ mov(tos_, Operand::Zero(), LeaveCC, eq); // for FP_ZERO
- __ mov(tos_, Operand::Zero(), LeaveCC, vs); // for FP_NAN
- } else {
- Label done, not_nan, not_zero;
- __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
- // -0 maps to false:
- __ bic(
- temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE32), SetCC);
- __ b(ne, &not_zero);
- // If exponent word is zero then the answer depends on the mantissa word.
- __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
- __ jmp(&done);
-
- // Check for NaN.
- __ bind(&not_zero);
- // We already zeroed the sign bit, now shift out the mantissa so we only
- // have the exponent left.
- __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord));
- unsigned int shifted_exponent_mask =
- HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord;
- __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE32));
- __ b(ne, &not_nan); // If exponent is not 0x7ff then it can't be a NaN.
-
- // Reload exponent word.
- __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
- __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE32));
- // If mantissa is not zero then we have a NaN, so return 0.
- __ mov(tos_, Operand::Zero(), LeaveCC, ne);
- __ b(ne, &done);
-
- // Load mantissa word.
- __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
- __ cmp(temp, Operand::Zero());
- // If mantissa is not zero then we have a NaN, so return 0.
- __ mov(tos_, Operand::Zero(), LeaveCC, ne);
- __ b(ne, &done);
-
- __ bind(&not_nan);
- __ mov(tos_, Operand(1, RelocInfo::NONE32));
- __ bind(&done);
- }
+ __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
+ __ VFPCompareAndSetFlags(d1, 0.0);
+ // "tos_" is a register, and contains a non zero value by default.
+ // Hence we only need to overwrite "tos_" with zero to return false for
+ // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+ __ mov(tos_, Operand::Zero(), LeaveCC, eq); // for FP_ZERO
+ __ mov(tos_, Operand::Zero(), LeaveCC, vs); // for FP_NAN
 __ Ret();
 __ bind(&not_heap_number);
 }
@@ -1934,7 +1631,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
 const Register scratch = r1;
 if (save_doubles_ == kSaveFPRegs) {
- CpuFeatureScope scope(masm, VFP2);
 // Check CPU flags for number of registers, setting the Z condition flag.
 __ CheckFor32DRegs(scratch);

@@ -1954,8 +1650,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
 ExternalReference::store_buffer_overflow_function(masm->isolate()),
 argument_count);
 if (save_doubles_ == kSaveFPRegs) {
- CpuFeatureScope scope(masm, VFP2);
-
- // Check CPU flags for number of registers, setting the Z condition flag.
 __ CheckFor32DRegs(scratch);

@@ -2180,19 +1874,10 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
 __ bind(&heapnumber_allocated);
 }

- if (CpuFeatures::IsSupported(VFP2)) {
- // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
- CpuFeatureScope scope(masm, VFP2);
- __ vmov(s0, r1);
- __ vcvt_f64_s32(d0, s0);
- __ vstr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ Ret();
- } else {
- // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
- // have to set up a frame.
- WriteInt32ToHeapNumberStub stub(r1, r0, r2);
- __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- }
+ __ vmov(s0, r1);
+ __ vcvt_f64_s32(d0, s0);
+ __ vstr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ Ret();
 }

@@ -2248,7 +1933,7 @@ void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {

 void BinaryOpStub::Initialize() {
- platform_specific_bit_ = CpuFeatures::IsSupported(VFP2);
+ platform_specific_bit_ = true; // VFP2 is a base requirement for V8
 }

@@ -2527,7 +2212,6 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
 // depending on whether VFP3 is available or not.
 FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(VFP2) &&
 op != Token::MOD ?
FloatingPointHelper::kVFPRegisters : FloatingPointHelper::kCoreRegisters; @@ -2571,7 +2255,6 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, // Using VFP registers: // d6: Left value // d7: Right value - CpuFeatureScope scope(masm, VFP2); switch (op) { case Token::ADD: __ vadd(d5, d6, d7); @@ -2662,11 +2345,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, // The code below for writing into heap numbers isn't capable of // writing the register as an unsigned int so we go to slow case if we // hit this case. - if (CpuFeatures::IsSupported(VFP2)) { - __ b(mi, &result_not_a_smi); - } else { - __ b(mi, not_numbers); - } + __ b(mi, &result_not_a_smi); break; case Token::SHL: // Use only the 5 least significant bits of the shift count. @@ -2702,25 +2381,17 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, // result. __ mov(r0, Operand(r5)); - if (CpuFeatures::IsSupported(VFP2)) { - // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As - // mentioned above SHR needs to always produce a positive result. - CpuFeatureScope scope(masm, VFP2); - __ vmov(s0, r2); - if (op == Token::SHR) { - __ vcvt_f64_u32(d0, s0); - } else { - __ vcvt_f64_s32(d0, s0); - } - __ sub(r3, r0, Operand(kHeapObjectTag)); - __ vstr(d0, r3, HeapNumber::kValueOffset); - __ Ret(); + // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As + // mentioned above SHR needs to always produce a positive result. + __ vmov(s0, r2); + if (op == Token::SHR) { + __ vcvt_f64_u32(d0, s0); } else { - // Tail call that writes the int32 in r2 to the heap number in r0, using - // r3 as scratch. r0 is preserved and returned. - WriteInt32ToHeapNumberStub stub(r2, r0, r3); - __ TailCallStub(&stub); + __ vcvt_f64_s32(d0, s0); } + __ sub(r3, r0, Operand(kHeapObjectTag)); + __ vstr(d0, r3, HeapNumber::kValueOffset); + __ Ret(); break; } default: @@ -2866,8 +2537,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { // Load both operands and check that they are 32-bit integer. // Jump to type transition if they are not. The registers r0 and r1 (right // and left) are preserved for the runtime call. - FloatingPointHelper::Destination destination = - (CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD) + FloatingPointHelper::Destination destination = (op_ != Token::MOD) ? FloatingPointHelper::kVFPRegisters : FloatingPointHelper::kCoreRegisters; @@ -2897,7 +2567,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { &transition); if (destination == FloatingPointHelper::kVFPRegisters) { - CpuFeatureScope scope(masm, VFP2); Label return_heap_number; switch (op_) { case Token::ADD: @@ -3065,17 +2734,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { // We only get a negative result if the shift value (r2) is 0. // This result cannot be respresented as a signed 32-bit integer, try // to return a heap number if we can. - // The non vfp2 code does not support this special case, so jump to - // runtime if we don't support it. - if (CpuFeatures::IsSupported(VFP2)) { - __ b(mi, (result_type_ <= BinaryOpIC::INT32) - ? &transition - : &return_heap_number); - } else { - __ b(mi, (result_type_ <= BinaryOpIC::INT32) - ? &transition - : &call_runtime); - } + __ b(mi, (result_type_ <= BinaryOpIC::INT32) + ? 
&transition + : &return_heap_number); break; case Token::SHL: __ and_(r2, r2, Operand(0x1f)); @@ -3103,31 +2764,22 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { &call_runtime, mode_); - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - if (op_ != Token::SHR) { - // Convert the result to a floating point value. - __ vmov(double_scratch.low(), r2); - __ vcvt_f64_s32(double_scratch, double_scratch.low()); - } else { - // The result must be interpreted as an unsigned 32-bit integer. - __ vmov(double_scratch.low(), r2); - __ vcvt_f64_u32(double_scratch, double_scratch.low()); - } - - // Store the result. - __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); - __ vstr(double_scratch, r0, HeapNumber::kValueOffset); - __ mov(r0, heap_number_result); - __ Ret(); + if (op_ != Token::SHR) { + // Convert the result to a floating point value. + __ vmov(double_scratch.low(), r2); + __ vcvt_f64_s32(double_scratch, double_scratch.low()); } else { - // Tail call that writes the int32 in r2 to the heap number in r0, using - // r3 as scratch. r0 is preserved and returned. - __ mov(r0, r5); - WriteInt32ToHeapNumberStub stub(r2, r0, r3); - __ TailCallStub(&stub); + // The result must be interpreted as an unsigned 32-bit integer. + __ vmov(double_scratch.low(), r2); + __ vcvt_f64_u32(double_scratch, double_scratch.low()); } + // Store the result. + __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); + __ vstr(double_scratch, r0, HeapNumber::kValueOffset); + __ mov(r0, heap_number_result); + __ Ret(); + break; } @@ -3306,100 +2958,96 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { const Register cache_entry = r0; const bool tagged = (argument_type_ == TAGGED); - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - if (tagged) { - // Argument is a number and is on stack and in r0. - // Load argument and check if it is a smi. - __ JumpIfNotSmi(r0, &input_not_smi); - - // Input is a smi. Convert to double and load the low and high words - // of the double into r2, r3. - __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); - __ b(&loaded); - - __ bind(&input_not_smi); - // Check if input is a HeapNumber. - __ CheckMap(r0, - r1, - Heap::kHeapNumberMapRootIndex, - &calculate, - DONT_DO_SMI_CHECK); - // Input is a HeapNumber. Load it to a double register and store the - // low and high words into r2, r3. - __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); - __ vmov(r2, r3, d0); - } else { - // Input is untagged double in d2. Output goes to d2. - __ vmov(r2, r3, d2); - } - __ bind(&loaded); - // r2 = low 32 bits of double value - // r3 = high 32 bits of double value - // Compute hash (the shifts are arithmetic): - // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); - __ eor(r1, r2, Operand(r3)); - __ eor(r1, r1, Operand(r1, ASR, 16)); - __ eor(r1, r1, Operand(r1, ASR, 8)); - ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); - __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1)); - - // r2 = low 32 bits of double value. - // r3 = high 32 bits of double value. - // r1 = TranscendentalCache::hash(double value). - Isolate* isolate = masm->isolate(); - ExternalReference cache_array = - ExternalReference::transcendental_cache_array_address(isolate); - __ mov(cache_entry, Operand(cache_array)); - // cache_entry points to cache array. 
+ int cache_array_index
+ = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
+ __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
+ // r0 points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ cmp(cache_entry, Operand::Zero());
+ __ b(eq, &invalid_cache);
 #ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
+ // Check that the layout of cache elements match expectations.
+ { TranscendentalCache::SubCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
+ CHECK_EQ(0, elem_in0 - elem_start); + CHECK_EQ(kIntSize, elem_in1 - elem_start); + CHECK_EQ(2 * kIntSize, elem_out - elem_start); + } #endif - // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. - __ add(r1, r1, Operand(r1, LSL, 1)); - __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); - // Check if cache matches: Double value is stored in uint32_t[2] array. - __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); - __ cmp(r2, r4); - __ cmp(r3, r5, eq); - __ b(ne, &calculate); - // Cache hit. Load result, cleanup and return. - Counters* counters = masm->isolate()->counters(); - __ IncrementCounter( - counters->transcendental_cache_hit(), 1, scratch0, scratch1); - if (tagged) { - // Pop input value from stack and load result into r0. - __ pop(); - __ mov(r0, Operand(r6)); - } else { - // Load result into d2. - __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); - } - __ Ret(); - } // if (CpuFeatures::IsSupported(VFP3)) + // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. + __ add(r1, r1, Operand(r1, LSL, 1)); + __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); + // Check if cache matches: Double value is stored in uint32_t[2] array. + __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); + __ cmp(r2, r4); + __ cmp(r3, r5, eq); + __ b(ne, &calculate); + // Cache hit. Load result, cleanup and return. + Counters* counters = masm->isolate()->counters(); + __ IncrementCounter( + counters->transcendental_cache_hit(), 1, scratch0, scratch1); + if (tagged) { + // Pop input value from stack and load result into r0. + __ pop(); + __ mov(r0, Operand(r6)); + } else { + // Load result into d2. + __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); + } + __ Ret(); __ bind(&calculate); - Counters* counters = masm->isolate()->counters(); __ IncrementCounter( counters->transcendental_cache_miss(), 1, scratch0, scratch1); if (tagged) { @@ -3408,9 +3056,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { ExternalReference(RuntimeFunction(), masm->isolate()); __ TailCallExternalReference(runtime_function, 1, 1); } else { - ASSERT(CpuFeatures::IsSupported(VFP2)); - CpuFeatureScope scope(masm, VFP2); - Label no_update; Label skip_cache; @@ -3470,7 +3115,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, Register scratch) { - ASSERT(masm->IsEnabled(VFP2)); Isolate* isolate = masm->isolate(); __ push(lr); @@ -3531,7 +3175,6 @@ void InterruptStub::Generate(MacroAssembler* masm) { void MathPowStub::Generate(MacroAssembler* masm) { - CpuFeatureScope vfp2_scope(masm, VFP2); const Register base = r1; const Register exponent = r2; const Register heapnumbermap = r5; @@ -3750,9 +3393,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { void CodeStub::GenerateFPStubs(Isolate* isolate) { - SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2) - ? kSaveFPRegs - : kDontSaveFPRegs; + SaveFPRegsMode mode = kSaveFPRegs; CEntryStub save_doubles(1, mode); StoreBufferOverflowStub stub(mode); // These stubs might already be in the snapshot, detect that and don't @@ -4014,13 +3655,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // Save callee-saved registers (incl. cp and fp), sp, and lr __ stm(db_w, sp, kCalleeSaved | lr.bit()); - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - // Save callee-saved vfp registers. 
- __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); - // Set up the reserved register for 0.0. - __ vmov(kDoubleRegZero, 0.0); - } + // Save callee-saved vfp registers. + __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); + // Set up the reserved register for 0.0. + __ vmov(kDoubleRegZero, 0.0); // Get address of argv, see stm above. // r0: code entry @@ -4030,9 +3668,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // Set up argv in r4. int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; - if (CpuFeatures::IsSupported(VFP2)) { - offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; - } + offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; __ ldr(r4, MemOperand(sp, offset_to_argv)); // Push a frame with special values setup to mark it as an entry frame. @@ -4168,11 +3804,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { } #endif - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - // Restore callee-saved vfp registers. - __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); - } + // Restore callee-saved vfp registers. + __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); } @@ -6877,50 +6510,46 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { } // Inlining the double comparison and falling back to the general compare - // stub if NaN is involved or VFP2 is unsupported. - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - - // Load left and right operand. - Label done, left, left_smi, right_smi; - __ JumpIfSmi(r0, &right_smi); - __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, - DONT_DO_SMI_CHECK); - __ sub(r2, r0, Operand(kHeapObjectTag)); - __ vldr(d1, r2, HeapNumber::kValueOffset); - __ b(&left); - __ bind(&right_smi); - __ SmiUntag(r2, r0); // Can't clobber r0 yet. - SwVfpRegister single_scratch = d2.low(); - __ vmov(single_scratch, r2); - __ vcvt_f64_s32(d1, single_scratch); - - __ bind(&left); - __ JumpIfSmi(r1, &left_smi); - __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, - DONT_DO_SMI_CHECK); - __ sub(r2, r1, Operand(kHeapObjectTag)); - __ vldr(d0, r2, HeapNumber::kValueOffset); - __ b(&done); - __ bind(&left_smi); - __ SmiUntag(r2, r1); // Can't clobber r1 yet. - single_scratch = d3.low(); - __ vmov(single_scratch, r2); - __ vcvt_f64_s32(d0, single_scratch); + // stub if NaN is involved. + // Load left and right operand. + Label done, left, left_smi, right_smi; + __ JumpIfSmi(r0, &right_smi); + __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, + DONT_DO_SMI_CHECK); + __ sub(r2, r0, Operand(kHeapObjectTag)); + __ vldr(d1, r2, HeapNumber::kValueOffset); + __ b(&left); + __ bind(&right_smi); + __ SmiUntag(r2, r0); // Can't clobber r0 yet. + SwVfpRegister single_scratch = d2.low(); + __ vmov(single_scratch, r2); + __ vcvt_f64_s32(d1, single_scratch); + + __ bind(&left); + __ JumpIfSmi(r1, &left_smi); + __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, + DONT_DO_SMI_CHECK); + __ sub(r2, r1, Operand(kHeapObjectTag)); + __ vldr(d0, r2, HeapNumber::kValueOffset); + __ b(&done); + __ bind(&left_smi); + __ SmiUntag(r2, r1); // Can't clobber r1 yet. + single_scratch = d3.low(); + __ vmov(single_scratch, r2); + __ vcvt_f64_s32(d0, single_scratch); - __ bind(&done); - // Compare operands. 
- __ VFPCompareAndSetFlags(d0, d1); + __ bind(&done); + // Compare operands. + __ VFPCompareAndSetFlags(d0, d1); - // Don't base result on status bits when a NaN is involved. - __ b(vs, &unordered); + // Don't base result on status bits when a NaN is involved. + __ b(vs, &unordered); - // Return a result of -1, 0, or 1, based on status bits. - __ mov(r0, Operand(EQUAL), LeaveCC, eq); - __ mov(r0, Operand(LESS), LeaveCC, lt); - __ mov(r0, Operand(GREATER), LeaveCC, gt); - __ Ret(); - } + // Return a result of -1, 0, or 1, based on status bits. + __ mov(r0, Operand(EQUAL), LeaveCC, eq); + __ mov(r0, Operand(LESS), LeaveCC, lt); + __ mov(r0, Operand(GREATER), LeaveCC, gt); + __ Ret(); __ bind(&unordered); __ bind(&generic_stub); @@ -7552,7 +7181,7 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) { bool CodeStub::CanUseFPRegisters() { - return CpuFeatures::IsSupported(VFP2); + return true; // VFP2 is a base requirement for V8 } diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h index fe1fff6..741ff9c 100644 --- a/src/arm/code-stubs-arm.h +++ b/src/arm/code-stubs-arm.h @@ -61,9 +61,7 @@ class TranscendentalCacheStub: public PlatformCodeStub { class StoreBufferOverflowStub: public PlatformCodeStub { public: explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) - : save_doubles_(save_fp) { - ASSERT(CpuFeatures::IsSafeForSnapshot(VFP2) || save_fp == kDontSaveFPRegs); - } + : save_doubles_(save_fp) {} void Generate(MacroAssembler* masm); @@ -473,7 +471,6 @@ class RecordWriteStub: public PlatformCodeStub { if (mode == kSaveFPRegs) { // Number of d-regs not known at snapshot time. ASSERT(!Serializer::enabled()); - CpuFeatureScope scope(masm, VFP2); masm->sub(sp, sp, Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1))); @@ -491,7 +488,6 @@ class RecordWriteStub: public PlatformCodeStub { if (mode == kSaveFPRegs) { // Number of d-regs not known at snapshot time. ASSERT(!Serializer::enabled()); - CpuFeatureScope scope(masm, VFP2); // Restore all VFP registers except d0. // TODO(hans): We should probably restore d0 too. And maybe use vldm. 
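The b(vs, ...) above is what keeps NaN out of the three-way result: a VFP compare involving NaN sets the V (unordered) flag, and only ordered outcomes reach the EQUAL/LESS/GREATER materialization. The same contract in plain C++ (the -1/0/1 constants mirror the values the stub loads; treat them as illustrative):

    #include <cassert>
    #include <cmath>
    #include <optional>

    enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };

    // nullopt is the "unordered" case (some input is NaN), matching the
    // b(vs, &unordered) exit; ordered inputs produce the -1/0/1 result.
    std::optional<CompareResult> ThreeWayCompare(double lhs, double rhs) {
      if (std::isnan(lhs) || std::isnan(rhs)) return std::nullopt;
      if (lhs == rhs) return EQUAL;
      return lhs < rhs ? LESS : GREATER;
    }

    int main() {
      assert(ThreeWayCompare(1.0, 2.0) == LESS);
      assert(ThreeWayCompare(2.0, 2.0) == EQUAL);
      assert(!ThreeWayCompare(std::nan(""), 0.0).has_value());
      return 0;
    }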
for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) { diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc index 1c82946..9d773d4 100644 --- a/src/arm/codegen-arm.cc +++ b/src/arm/codegen-arm.cc @@ -62,7 +62,6 @@ double fast_exp_simulator(double x) { UnaryMathFunction CreateExpFunction() { - if (!CpuFeatures::IsSupported(VFP2)) return &exp; if (!FLAG_fast_math) return &exp; size_t actual_size; byte* buffer = static_cast(OS::Allocate(1 * KB, &actual_size, true)); @@ -72,7 +71,6 @@ UnaryMathFunction CreateExpFunction() { MacroAssembler masm(NULL, buffer, static_cast(actual_size)); { - CpuFeatureScope use_vfp(&masm, VFP2); DwVfpRegister input = d0; DwVfpRegister result = d1; DwVfpRegister double_scratch1 = d2; @@ -185,7 +183,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( // -- r4 : scratch (elements) // ----------------------------------- Label loop, entry, convert_hole, gc_required, only_change_map, done; - bool vfp2_supported = CpuFeatures::IsSupported(VFP2); if (mode == TRACK_ALLOCATION_SITE) { __ TestJSArrayForAllocationSiteInfo(r2, r4); @@ -248,7 +245,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( // r5: kHoleNanUpper32 // r6: end of destination FixedDoubleArray, not tagged // r7: begin of FixedDoubleArray element fields, not tagged - if (!vfp2_supported) __ Push(r1, r0); __ b(&entry); @@ -276,23 +272,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole); // Normal smi, convert to double and store. - if (vfp2_supported) { - CpuFeatureScope scope(masm, VFP2); - __ vmov(s0, r9); - __ vcvt_f64_s32(d0, s0); - __ vstr(d0, r7, 0); - __ add(r7, r7, Operand(8)); - } else { - FloatingPointHelper::ConvertIntToDouble(masm, - r9, - FloatingPointHelper::kCoreRegisters, - d0, - r0, - r1, - lr, - s0); - __ Strd(r0, r1, MemOperand(r7, 8, PostIndex)); - } + __ vmov(s0, r9); + __ vcvt_f64_s32(d0, s0); + __ vstr(d0, r7, 0); + __ add(r7, r7, Operand(8)); __ b(&entry); // Hole found, store the-hole NaN. @@ -310,7 +293,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( __ cmp(r7, r6); __ b(lt, &loop); - if (!vfp2_supported) __ Pop(r1, r0); __ pop(lr); __ bind(&done); } diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc index 9bcc1ac..3b50ad4 100644 --- a/src/arm/deoptimizer-arm.cc +++ b/src/arm/deoptimizer-arm.cc @@ -594,23 +594,18 @@ void Deoptimizer::EntryGenerator::Generate() { const int kDoubleRegsSize = kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters; - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); - // Save all allocatable VFP registers before messing with them. - ASSERT(kDoubleRegZero.code() == 14); - ASSERT(kScratchDoubleReg.code() == 15); - - // Check CPU flags for number of registers, setting the Z condition flag. - __ CheckFor32DRegs(ip); - - // Push registers d0-d13, and possibly d16-d31, on the stack. - // If d16-d31 are not pushed, decrease the stack pointer instead. - __ vstm(db_w, sp, d16, d31, ne); - __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq); - __ vstm(db_w, sp, d0, d13); - } else { - __ sub(sp, sp, Operand(kDoubleRegsSize)); - } + // Save all allocatable VFP registers before messing with them. + ASSERT(kDoubleRegZero.code() == 14); + ASSERT(kScratchDoubleReg.code() == 15); + + // Check CPU flags for number of registers, setting the Z condition flag. + __ CheckFor32DRegs(ip); + + // Push registers d0-d13, and possibly d16-d31, on the stack. + // If d16-d31 are not pushed, decrease the stack pointer instead. 
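The SmiToDouble transition above marks holes by writing a NaN with a fixed bit pattern and, on the read side, comparing only the upper word against kHoleNanUpper32. A self-contained illustration of the scheme; the two constants below are placeholder payloads, not V8's actual kHoleNan values:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // Placeholder payload words; V8 defines its own kHoleNan{Upper,Lower}32.
    const uint32_t kHoleNanUpper32 = 0x7FF7FFFF;
    const uint32_t kHoleNanLower32 = 0xFFFFFFFF;

    double MakeHole() {
      uint64_t bits = (uint64_t{kHoleNanUpper32} << 32) | kHoleNanLower32;
      double hole;
      std::memcpy(&hole, &bits, sizeof hole);
      return hole;  // a NaN with a recognizable payload
    }

    bool IsHole(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      // Like the generated hole check, only the upper word is compared.
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
    }

    int main() {
      assert(IsHole(MakeHole()));
      assert(std::isnan(MakeHole()));
      assert(!IsHole(1.5));
      return 0;
    }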
+ __ vstm(db_w, sp, d16, d31, ne); + __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq); + __ vstm(db_w, sp, d0, d13); // Push all 16 registers (needed to populate FrameDescription::registers_). // TODO(1588) Note that using pc with stm is deprecated, so we should perhaps @@ -669,17 +664,14 @@ void Deoptimizer::EntryGenerator::Generate() { __ str(r2, MemOperand(r1, offset)); } - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); - // Copy VFP registers to - // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters] - int double_regs_offset = FrameDescription::double_registers_offset(); - for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) { - int dst_offset = i * kDoubleSize + double_regs_offset; - int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; - __ vldr(d0, sp, src_offset); - __ vstr(d0, r1, dst_offset); - } + // Copy VFP registers to + // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters] + int double_regs_offset = FrameDescription::double_registers_offset(); + for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) { + int dst_offset = i * kDoubleSize + double_regs_offset; + int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; + __ vldr(d0, sp, src_offset); + __ vstr(d0, r1, dst_offset); } // Remove the bailout id, eventually return address, and the saved registers @@ -749,21 +741,18 @@ void Deoptimizer::EntryGenerator::Generate() { __ cmp(r4, r1); __ b(lt, &outer_push_loop); - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); - // Check CPU flags for number of registers, setting the Z condition flag. - __ CheckFor32DRegs(ip); + // Check CPU flags for number of registers, setting the Z condition flag. + __ CheckFor32DRegs(ip); - __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset())); - int src_offset = FrameDescription::double_registers_offset(); - for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) { - if (i == kDoubleRegZero.code()) continue; - if (i == kScratchDoubleReg.code()) continue; + __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset())); + int src_offset = FrameDescription::double_registers_offset(); + for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) { + if (i == kDoubleRegZero.code()) continue; + if (i == kScratchDoubleReg.code()) continue; - const DwVfpRegister reg = DwVfpRegister::from_code(i); - __ vldr(reg, r1, src_offset, i < 16 ? al : ne); - src_offset += kDoubleSize; - } + const DwVfpRegister reg = DwVfpRegister::from_code(i); + __ vldr(reg, r1, src_offset, i < 16 ? al : ne); + src_offset += kDoubleSize; } // Push state, pc, and continuation from the last output frame. diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc index 755143d..ac11041 100644 --- a/src/arm/full-codegen-arm.cc +++ b/src/arm/full-codegen-arm.cc @@ -3027,37 +3027,26 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) { // Convert 32 random bits in r0 to 0.(32 random bits) in a double // by computing: // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)). - if (CpuFeatures::IsSupported(VFP2)) { - __ PrepareCallCFunction(1, r0); - __ ldr(r0, - ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX)); - __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset)); - __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); - - CpuFeatureScope scope(masm(), VFP2); - // 0x41300000 is the top half of 1.0 x 2^20 as a double. 
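The constant assembled above (and re-emitted unconditionally in the lines that follow) encodes the trick the comment describes: with high word 0x41300000, a double's value is 2^20 + lo32 * 2^-32, so subtracting 1.0 x 2^20 leaves exactly 0.(32 random bits). A host-side check of that identity:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    double FromWords(uint32_t hi, uint32_t lo) {
      uint64_t bits = (uint64_t{hi} << 32) | lo;
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d;
    }

    int main() {
      uint32_t random_bits = 0xDEADBEEF;  // any 32-bit value works
      double biased = FromWords(0x41300000, random_bits);  // 1.xxx * 2^20
      double bias   = FromWords(0x41300000, 0);            // 1.0   * 2^20
      double result = biased - bias;                       // in [0, 1)
      assert(result == random_bits / 4294967296.0);        // random * 2^-32
      return 0;
    }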
- // Create this constant using mov/orr to avoid PC relative load. - __ mov(r1, Operand(0x41000000)); - __ orr(r1, r1, Operand(0x300000)); - // Move 0x41300000xxxxxxxx (x = random bits) to VFP. - __ vmov(d7, r0, r1); - // Move 0x4130000000000000 to VFP. - __ mov(r0, Operand::Zero()); - __ vmov(d8, r0, r1); - // Subtract and store the result in the heap number. - __ vsub(d7, d7, d8); - __ sub(r0, r4, Operand(kHeapObjectTag)); - __ vstr(d7, r0, HeapNumber::kValueOffset); - __ mov(r0, r4); - } else { - __ PrepareCallCFunction(2, r0); - __ ldr(r1, - ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX)); - __ mov(r0, Operand(r4)); - __ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset)); - __ CallCFunction( - ExternalReference::fill_heap_number_with_random_function(isolate()), 2); - } + __ PrepareCallCFunction(1, r0); + __ ldr(r0, + ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX)); + __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset)); + __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); + + // 0x41300000 is the top half of 1.0 x 2^20 as a double. + // Create this constant using mov/orr to avoid PC relative load. + __ mov(r1, Operand(0x41000000)); + __ orr(r1, r1, Operand(0x300000)); + // Move 0x41300000xxxxxxxx (x = random bits) to VFP. + __ vmov(d7, r0, r1); + // Move 0x4130000000000000 to VFP. + __ mov(r0, Operand::Zero()); + __ vmov(d8, r0, r1); + // Subtract and store the result in the heap number. + __ vsub(d7, d7, d8); + __ sub(r0, r4, Operand(kHeapObjectTag)); + __ vstr(d7, r0, HeapNumber::kValueOffset); + __ mov(r0, r4); context()->Plug(r0); } @@ -3194,12 +3183,8 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) { ASSERT(args->length() == 2); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); - if (CpuFeatures::IsSupported(VFP2)) { - MathPowStub stub(MathPowStub::ON_STACK); - __ CallStub(&stub); - } else { - __ CallRuntime(Runtime::kMath_pow, 2); - } + MathPowStub stub(MathPowStub::ON_STACK); + __ CallStub(&stub); context()->Plug(r0); } diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc index 32daff0..31c7e8d9 100644 --- a/src/arm/lithium-arm.cc +++ b/src/arm/lithium-arm.cc @@ -2133,16 +2133,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { (instr->representation().IsDouble() && ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); - // float->double conversion on non-VFP2 requires an extra scratch - // register. For convenience, just mark the elements register as "UseTemp" - // so that it can be used as a temp during the float->double conversion - // after it's no longer needed after the float load. - bool needs_temp = - !CpuFeatures::IsSupported(VFP2) && - (elements_kind == EXTERNAL_FLOAT_ELEMENTS); - LOperand* external_pointer = needs_temp - ? 
UseTempRegister(instr->elements()) - : UseRegister(instr->elements()); + LOperand* external_pointer = UseRegister(instr->elements()); result = new(zone()) LLoadKeyed(external_pointer, key); } diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc index 738084a..769764e 100644 --- a/src/arm/lithium-codegen-arm.cc +++ b/src/arm/lithium-codegen-arm.cc @@ -195,8 +195,7 @@ bool LCodeGen::GeneratePrologue() { } } - if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); + if (info()->saves_caller_doubles()) { Comment(";;; Save clobbered callee double registers"); int count = 0; BitVector* doubles = chunk()->allocated_double_registers(); @@ -1209,8 +1208,6 @@ void LCodeGen::DoModI(LModI* instr) { Label vfp_modulo, both_positive, right_negative; - CpuFeatureScope scope(masm(), VFP2); - // Check for x % 0. if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { __ cmp(right, Operand::Zero()); @@ -1615,7 +1612,6 @@ void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map, LOperand* left_argument, LOperand* right_argument, Token::Value op) { - CpuFeatureScope vfp_scope(masm(), VFP2); Register left = ToRegister(left_argument); Register right = ToRegister(right_argument); @@ -1901,7 +1897,6 @@ void LCodeGen::DoConstantI(LConstantI* instr) { void LCodeGen::DoConstantD(LConstantD* instr) { ASSERT(instr->result()->IsDoubleRegister()); DwVfpRegister result = ToDoubleRegister(instr->result()); - CpuFeatureScope scope(masm(), VFP2); double v = instr->value(); __ Vmov(result, v, scratch0()); } @@ -2072,7 +2067,6 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); } else { ASSERT(instr->hydrogen()->representation().IsDouble()); - CpuFeatureScope scope(masm(), VFP2); DwVfpRegister left_reg = ToDoubleRegister(left); DwVfpRegister right_reg = ToDoubleRegister(right); DwVfpRegister result_reg = ToDoubleRegister(instr->result()); @@ -2118,7 +2112,6 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - CpuFeatureScope scope(masm(), VFP2); DwVfpRegister left = ToDoubleRegister(instr->left()); DwVfpRegister right = ToDoubleRegister(instr->right()); DwVfpRegister result = ToDoubleRegister(instr->result()); @@ -2209,7 +2202,6 @@ void LCodeGen::DoBranch(LBranch* instr) { __ cmp(reg, Operand::Zero()); EmitBranch(true_block, false_block, ne); } else if (r.IsDouble()) { - CpuFeatureScope scope(masm(), VFP2); DwVfpRegister reg = ToDoubleRegister(instr->value()); Register scratch = scratch0(); @@ -2301,7 +2293,6 @@ void LCodeGen::DoBranch(LBranch* instr) { } if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { - CpuFeatureScope scope(masm(), VFP2); // heap number -> false iff +0, -0, or NaN. DwVfpRegister dbl_scratch = double_scratch0(); Label not_heap_number; @@ -2381,7 +2372,6 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { EmitGoto(next_block); } else { if (instr->is_double()) { - CpuFeatureScope scope(masm(), VFP2); // Compare left and right operands as doubles and load the // resulting flags into the normal status register. 
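DoBranch's heap-number case above implements the ECMAScript ToBoolean rule for numbers: only +0, -0, and NaN are falsy, which the generated code derives from one compare against 0.0 plus the unordered (vs) branch. The same rule in C++:

    #include <cassert>
    #include <cmath>

    bool NumberToBoolean(double d) {
      // +0 and -0 both compare equal to zero; NaN compares unordered,
      // which the generated code routes to the false branch via "vs".
      return d != 0.0 && !std::isnan(d);
    }

    int main() {
      assert(!NumberToBoolean(0.0) && !NumberToBoolean(-0.0));
      assert(!NumberToBoolean(std::nan("")));
      assert(NumberToBoolean(-2.5));
      return 0;
    }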
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right)); @@ -2936,8 +2926,7 @@ void LCodeGen::DoReturn(LReturn* instr) { __ push(r0); __ CallRuntime(Runtime::kTraceExit, 1); } - if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); + if (info()->saves_caller_doubles()) { ASSERT(NeedsEagerFrame()); BitVector* doubles = chunk()->allocated_double_registers(); BitVector::Iterator save_iterator(doubles); @@ -3319,58 +3308,11 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { ? Operand(constant_key << element_size_shift) : Operand(key, LSL, shift_size); __ add(scratch0(), external_pointer, operand); - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - __ vldr(kScratchDoubleReg.low(), scratch0(), additional_offset); - __ vcvt_f64_f32(result, kScratchDoubleReg.low()); - } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS - __ vldr(result, scratch0(), additional_offset); - } - } else { - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - Register value = external_pointer; - __ ldr(value, MemOperand(scratch0(), additional_offset)); - __ and_(sfpd_lo, value, Operand(kBinary32MantissaMask)); - - __ mov(scratch0(), Operand(value, LSR, kBinary32MantissaBits)); - __ and_(scratch0(), scratch0(), - Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); - - Label exponent_rebiased; - __ teq(scratch0(), Operand(0x00)); - __ b(eq, &exponent_rebiased); - - __ teq(scratch0(), Operand(0xff)); - __ mov(scratch0(), Operand(0x7ff), LeaveCC, eq); - __ b(eq, &exponent_rebiased); - - // Rebias exponent. - __ add(scratch0(), - scratch0(), - Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); - - __ bind(&exponent_rebiased); - __ and_(sfpd_hi, value, Operand(kBinary32SignMask)); - __ orr(sfpd_hi, sfpd_hi, - Operand(scratch0(), LSL, HeapNumber::kMantissaBitsInTopWord)); - - // Shift mantissa. - static const int kMantissaShiftForHiWord = - kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; - - static const int kMantissaShiftForLoWord = - kBitsPerInt - kMantissaShiftForHiWord; - - __ orr(sfpd_hi, sfpd_hi, - Operand(sfpd_lo, LSR, kMantissaShiftForHiWord)); - __ mov(sfpd_lo, Operand(sfpd_lo, LSL, kMantissaShiftForLoWord)); - - } else { - __ ldr(sfpd_lo, MemOperand(scratch0(), additional_offset)); - __ ldr(sfpd_hi, MemOperand(scratch0(), - additional_offset + kPointerSize)); - } + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { + __ vldr(kScratchDoubleReg.low(), scratch0(), additional_offset); + __ vcvt_f64_f32(result, kScratchDoubleReg.low()); + } else { // i.e. 
elements_kind == EXTERNAL_DOUBLE_ELEMENTS + __ vldr(result, scratch0(), additional_offset); } } else { Register result = ToRegister(instr->result()); @@ -3444,23 +3386,12 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { if (!key_is_constant) { __ add(elements, elements, Operand(key, LSL, shift_size)); } - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); - __ add(elements, elements, Operand(base_offset)); - __ vldr(result, elements, 0); - if (instr->hydrogen()->RequiresHoleCheck()) { - __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); - __ cmp(scratch, Operand(kHoleNanUpper32)); - DeoptimizeIf(eq, instr->environment()); - } - } else { - __ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize)); - __ ldr(sfpd_lo, MemOperand(elements, base_offset)); - if (instr->hydrogen()->RequiresHoleCheck()) { - ASSERT(kPointerSize == sizeof(kHoleNanLower32)); - __ cmp(sfpd_hi, Operand(kHoleNanUpper32)); - DeoptimizeIf(eq, instr->environment()); - } + __ add(elements, elements, Operand(base_offset)); + __ vldr(result, elements, 0); + if (instr->hydrogen()->RequiresHoleCheck()) { + __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); + __ cmp(scratch, Operand(kHoleNanUpper32)); + DeoptimizeIf(eq, instr->environment()); } } @@ -3902,7 +3833,6 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { - CpuFeatureScope scope(masm(), VFP2); // Class for deferred case. class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { public: @@ -3939,7 +3869,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { - CpuFeatureScope scope(masm(), VFP2); DwVfpRegister input = ToDoubleRegister(instr->value()); Register result = ToRegister(instr->result()); Register input_high = scratch0(); @@ -3962,7 +3891,6 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { - CpuFeatureScope scope(masm(), VFP2); DwVfpRegister input = ToDoubleRegister(instr->value()); Register result = ToRegister(instr->result()); DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); @@ -4002,7 +3930,6 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { - CpuFeatureScope scope(masm(), VFP2); DwVfpRegister input = ToDoubleRegister(instr->value()); DwVfpRegister result = ToDoubleRegister(instr->result()); __ vsqrt(result, input); @@ -4010,7 +3937,6 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { - CpuFeatureScope scope(masm(), VFP2); DwVfpRegister input = ToDoubleRegister(instr->value()); DwVfpRegister result = ToDoubleRegister(instr->result()); DwVfpRegister temp = ToDoubleRegister(instr->temp()); @@ -4032,7 +3958,6 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { void LCodeGen::DoPower(LPower* instr) { - CpuFeatureScope scope(masm(), VFP2); Representation exponent_type = instr->hydrogen()->right()->representation(); // Having marked this as a call, we can use any registers. // Just make sure that the input/output registers are the expected ones. 
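For EXTERNAL_FLOAT_ELEMENTS the load above goes through a single-precision scratch register and widens with vcvt_f64_f32; float-to-double widening is always exact, so no rounding concerns enter here. An equivalent host-side helper (the offsets and backing store are illustrative):

    #include <cassert>
    #include <cstring>

    // vldr into the single-precision scratch, then vcvt_f64_f32. The
    // widening conversion is exact for every float value.
    double LoadFloatElement(const void* base, int offset_in_bytes) {
      float raw;
      std::memcpy(&raw, static_cast<const char*>(base) + offset_in_bytes,
                  sizeof raw);
      return static_cast<double>(raw);
    }

    int main() {
      float backing[3] = {1.5f, 0.1f, -2.0f};
      assert(LoadFloatElement(backing, 1 * sizeof(float)) ==
             static_cast<double>(0.1f));
      return 0;
    }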
@@ -4065,7 +3990,6 @@ void LCodeGen::DoPower(LPower* instr) { void LCodeGen::DoRandom(LRandom* instr) { - CpuFeatureScope scope(masm(), VFP2); class DeferredDoRandom: public LDeferredCode { public: DeferredDoRandom(LCodeGen* codegen, LRandom* instr) @@ -4144,7 +4068,6 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) { void LCodeGen::DoMathExp(LMathExp* instr) { - CpuFeatureScope scope(masm(), VFP2); DwVfpRegister input = ToDoubleRegister(instr->value()); DwVfpRegister result = ToDoubleRegister(instr->result()); DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); @@ -4442,7 +4365,6 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { - CpuFeatureScope scope(masm(), VFP2); Register external_pointer = ToRegister(instr->elements()); Register key = no_reg; ElementsKind elements_kind = instr->elements_kind(); @@ -4513,7 +4435,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { - CpuFeatureScope scope(masm(), VFP2); DwVfpRegister value = ToDoubleRegister(instr->value()); Register elements = ToRegister(instr->elements()); Register key = no_reg; @@ -4814,7 +4735,6 @@ void LCodeGen::DoStringLength(LStringLength* instr) { void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { - CpuFeatureScope scope(masm(), VFP2); LOperand* input = instr->value(); ASSERT(input->IsRegister() || input->IsStackSlot()); LOperand* output = instr->result(); @@ -4832,7 +4752,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { - CpuFeatureScope scope(masm(), VFP2); LOperand* input = instr->value(); LOperand* output = instr->result(); @@ -4894,43 +4813,6 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) { } -// Convert unsigned integer with specified number of leading zeroes in binary -// representation to IEEE 754 double. -// Integer to convert is passed in register src. -// Resulting double is returned in registers hiword:loword. -// This functions does not work correctly for 0. -static void GenerateUInt2Double(MacroAssembler* masm, - Register src, - Register hiword, - Register loword, - Register scratch, - int leading_zeroes) { - const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; - const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; - - const int mantissa_shift_for_hi_word = - meaningful_bits - HeapNumber::kMantissaBitsInTopWord; - const int mantissa_shift_for_lo_word = - kBitsPerInt - mantissa_shift_for_hi_word; - masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); - if (mantissa_shift_for_hi_word > 0) { - masm->mov(loword, Operand(src, LSL, mantissa_shift_for_lo_word)); - masm->orr(hiword, scratch, - Operand(src, LSR, mantissa_shift_for_hi_word)); - } else { - masm->mov(loword, Operand::Zero()); - masm->orr(hiword, scratch, - Operand(src, LSL, -mantissa_shift_for_hi_word)); - } - - // If least significant bit of biased exponent was not 1 it was corrupted - // by most significant bit of mantissa so we should fix that. 
- if (!(biased_exponent & 1)) { - masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); - } -} - - void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, LOperand* value, IntegerSignedness signedness) { @@ -4952,35 +4834,11 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, __ SmiUntag(src, dst); __ eor(src, src, Operand(0x80000000)); } - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); - __ vmov(flt_scratch, src); - __ vcvt_f64_s32(dbl_scratch, flt_scratch); - } else { - FloatingPointHelper::Destination dest = - FloatingPointHelper::kCoreRegisters; - FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, d0, - sfpd_lo, sfpd_hi, - scratch0(), s0); - } + __ vmov(flt_scratch, src); + __ vcvt_f64_s32(dbl_scratch, flt_scratch); } else { - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); - __ vmov(flt_scratch, src); - __ vcvt_f64_u32(dbl_scratch, flt_scratch); - } else { - Label no_leading_zero, convert_done; - __ tst(src, Operand(0x80000000)); - __ b(ne, &no_leading_zero); - - // Integer has one leading zeros. - GenerateUInt2Double(masm(), src, sfpd_hi, sfpd_lo, r9, 1); - __ b(&convert_done); - - __ bind(&no_leading_zero); - GenerateUInt2Double(masm(), src, sfpd_hi, sfpd_lo, r9, 0); - __ bind(&convert_done); - } + __ vmov(flt_scratch, src); + __ vcvt_f64_u32(dbl_scratch, flt_scratch); } if (FLAG_inline_new) { @@ -4996,30 +4854,16 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, // TODO(3095996): Put a valid pointer value in the stack slot where the result // register is stored, as this register is in the pointer map, but contains an // integer value. - if (!CpuFeatures::IsSupported(VFP2)) { - // Preserve sfpd_lo. - __ mov(r9, sfpd_lo); - } __ mov(ip, Operand::Zero()); __ StoreToSafepointRegisterSlot(ip, dst); CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); __ Move(dst, r0); - if (!CpuFeatures::IsSupported(VFP2)) { - // Restore sfpd_lo. - __ mov(sfpd_lo, r9); - } __ sub(dst, dst, Operand(kHeapObjectTag)); // Done. Put the value in dbl_scratch into the value of the allocated heap // number. 
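The GenerateUInt2Double helper deleted above built an IEEE 754 double by hand: derive the biased exponent from the leading-zero count, shift the mantissa into place, and deal with the implicit bit. A compact C++ restatement, valid (as the original's comment warned) only for nonzero inputs; __builtin_clz is the GCC/Clang stand-in for CountLeadingZeros, and the implicit bit is masked off directly rather than repaired after the fact:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Manual uint32 -> double, restating the removed helper. The input
    // must be nonzero, exactly as the original comment required.
    double UInt2Double(uint32_t src) {
      int leading_zeroes = __builtin_clz(src);    // CountLeadingZeros
      int meaningful_bits = 31 - leading_zeroes;  // index of the top set bit
      uint64_t mantissa =
          (uint64_t{src} << (52 - meaningful_bits)) &
          ((uint64_t{1} << 52) - 1);              // drop the implicit 1
      uint64_t bits =
          (static_cast<uint64_t>(1023 + meaningful_bits) << 52) | mantissa;
      double result;
      std::memcpy(&result, &bits, sizeof result);
      return result;
    }

    int main() {
      assert(UInt2Double(1) == 1.0);
      assert(UInt2Double(12345) == 12345.0);
      assert(UInt2Double(0x80000000u) == 2147483648.0);
      return 0;
    }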
__ bind(&done); - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); - __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); - } else { - __ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset)); - __ str(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset)); - } + __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); __ add(dst, dst, Operand(kHeapObjectTag)); __ StoreToSafepointRegisterSlot(dst, dst); } @@ -5052,45 +4896,19 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { Label no_special_nan_handling; Label done; if (convert_hole) { - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); - DwVfpRegister input_reg = ToDoubleRegister(instr->value()); - __ VFPCompareAndSetFlags(input_reg, input_reg); - __ b(vc, &no_special_nan_handling); - __ vmov(reg, scratch0(), input_reg); - __ cmp(scratch0(), Operand(kHoleNanUpper32)); - Label canonicalize; - __ b(ne, &canonicalize); - __ Move(reg, factory()->the_hole_value()); - __ b(&done); - __ bind(&canonicalize); - __ Vmov(input_reg, - FixedDoubleArray::canonical_not_the_hole_nan_as_double(), - no_reg); - } else { - Label not_hole; - __ cmp(sfpd_hi, Operand(kHoleNanUpper32)); - __ b(ne, ¬_hole); - __ Move(reg, factory()->the_hole_value()); - __ b(&done); - __ bind(¬_hole); - __ and_(scratch, sfpd_hi, Operand(0x7ff00000)); - __ cmp(scratch, Operand(0x7ff00000)); - __ b(ne, &no_special_nan_handling); - Label special_nan_handling; - __ tst(sfpd_hi, Operand(0x000FFFFF)); - __ b(ne, &special_nan_handling); - __ cmp(sfpd_lo, Operand(0)); - __ b(eq, &no_special_nan_handling); - __ bind(&special_nan_handling); - double canonical_nan = - FixedDoubleArray::canonical_not_the_hole_nan_as_double(); - uint64_t casted_nan = BitCast(canonical_nan); - __ mov(sfpd_lo, - Operand(static_cast(casted_nan & 0xFFFFFFFF))); - __ mov(sfpd_hi, - Operand(static_cast(casted_nan >> 32))); - } + DwVfpRegister input_reg = ToDoubleRegister(instr->value()); + __ VFPCompareAndSetFlags(input_reg, input_reg); + __ b(vc, &no_special_nan_handling); + __ vmov(reg, scratch0(), input_reg); + __ cmp(scratch0(), Operand(kHoleNanUpper32)); + Label canonicalize; + __ b(ne, &canonicalize); + __ Move(reg, factory()->the_hole_value()); + __ b(&done); + __ bind(&canonicalize); + __ Vmov(input_reg, + FixedDoubleArray::canonical_not_the_hole_nan_as_double(), + no_reg); } __ bind(&no_special_nan_handling); @@ -5104,13 +4922,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { __ jmp(deferred->entry()); } __ bind(deferred->exit()); - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm(), VFP2); - __ vstr(input_reg, reg, HeapNumber::kValueOffset); - } else { - __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset)); - __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize)); - } + __ vstr(input_reg, reg, HeapNumber::kValueOffset); // Now that we have finished with the object's real address tag it __ add(reg, reg, Operand(kHeapObjectTag)); __ bind(&done); @@ -5160,7 +4972,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, Register scratch = scratch0(); SwVfpRegister flt_scratch = double_scratch0().low(); ASSERT(!result_reg.is(double_scratch0())); - CpuFeatureScope scope(masm(), VFP2); Label load_smi, heap_number, done; @@ -5249,7 +5060,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ cmp(scratch1, Operand(ip)); if (instr->truncating()) { - CpuFeatureScope scope(masm(), VFP2); Register scratch3 = ToRegister(instr->temp2()); ASSERT(!scratch3.is(input_reg) && !scratch3.is(scratch1) && @@ 
-5270,8 +5080,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ sub(scratch1, input_reg, Operand(kHeapObjectTag)); __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset); - __ ECMAToInt32VFP(input_reg, double_scratch2, double_scratch, - scratch1, scratch2, scratch3); + __ ECMAToInt32(input_reg, double_scratch2, double_scratch, + scratch1, scratch2, scratch3); } else { CpuFeatureScope scope(masm(), VFP3); @@ -5369,8 +5179,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { if (instr->truncating()) { Register scratch3 = ToRegister(instr->temp2()); - __ ECMAToInt32VFP(result_reg, double_input, double_scratch, - scratch1, scratch2, scratch3); + __ ECMAToInt32(result_reg, double_input, double_scratch, + scratch1, scratch2, scratch3); } else { __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); // Deoptimize if the input wasn't a int32 (inside a double). @@ -5486,7 +5296,6 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { - CpuFeatureScope vfp_scope(masm(), VFP2); DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); DwVfpRegister temp_reg = ToDoubleRegister(instr->temp()); @@ -5495,7 +5304,6 @@ void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { - CpuFeatureScope scope(masm(), VFP2); Register unclamped_reg = ToRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); __ ClampUint8(result_reg, unclamped_reg); @@ -5503,7 +5311,6 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { - CpuFeatureScope scope(masm(), VFP2); Register scratch = scratch0(); Register input_reg = ToRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); diff --git a/src/arm/lithium-gap-resolver-arm.cc b/src/arm/lithium-gap-resolver-arm.cc index a65ab7e..596d58f 100644 --- a/src/arm/lithium-gap-resolver-arm.cc +++ b/src/arm/lithium-gap-resolver-arm.cc @@ -171,10 +171,8 @@ void LGapResolver::BreakCycle(int index) { } else if (source->IsStackSlot()) { __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source)); } else if (source->IsDoubleRegister()) { - CpuFeatureScope scope(cgen_->masm(), VFP2); __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source)); } else if (source->IsDoubleStackSlot()) { - CpuFeatureScope scope(cgen_->masm(), VFP2); __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source)); } else { UNREACHABLE(); @@ -194,10 +192,8 @@ void LGapResolver::RestoreValue() { } else if (saved_destination_->IsStackSlot()) { __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_)); } else if (saved_destination_->IsDoubleRegister()) { - CpuFeatureScope scope(cgen_->masm(), VFP2); __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg); } else if (saved_destination_->IsDoubleStackSlot()) { - CpuFeatureScope scope(cgen_->masm(), VFP2); __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_)); } else { UNREACHABLE(); @@ -233,8 +229,7 @@ void LGapResolver::EmitMove(int index) { MemOperand destination_operand = cgen_->ToMemOperand(destination); if (in_cycle_) { if (!destination_operand.OffsetIsUint12Encodable()) { - CpuFeatureScope scope(cgen_->masm(), VFP2); - // ip is overwritten while saving the value to the destination. + // ip is overwritten while saving the value to the destination. // Therefore we can't use ip. 
It is OK if the read from the source // destroys ip, since that happens before the value is read. __ vldr(kScratchDoubleReg.low(), source_operand); @@ -272,7 +267,6 @@ void LGapResolver::EmitMove(int index) { } } else if (source->IsDoubleRegister()) { - CpuFeatureScope scope(cgen_->masm(), VFP2); DwVfpRegister source_register = cgen_->ToDoubleRegister(source); if (destination->IsDoubleRegister()) { __ vmov(cgen_->ToDoubleRegister(destination), source_register); @@ -282,8 +276,7 @@ void LGapResolver::EmitMove(int index) { } } else if (source->IsDoubleStackSlot()) { - CpuFeatureScope scope(cgen_->masm(), VFP2); - MemOperand source_operand = cgen_->ToMemOperand(source); + MemOperand source_operand = cgen_->ToMemOperand(source); if (destination->IsDoubleRegister()) { __ vldr(cgen_->ToDoubleRegister(destination), source_operand); } else { diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc index ecedfd8..465bd10 100644 --- a/src/arm/macro-assembler-arm.cc +++ b/src/arm/macro-assembler-arm.cc @@ -291,8 +291,6 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) { void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) { - ASSERT(CpuFeatures::IsSupported(VFP2)); - CpuFeatureScope scope(this, VFP2); if (!dst.is(src)) { vmov(dst, src); } @@ -811,7 +809,6 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, void MacroAssembler::Vmov(const DwVfpRegister dst, const double imm, const Register scratch) { - ASSERT(IsEnabled(VFP2)); static const DoubleRepresentation minus_zero(-0.0); static const DoubleRepresentation zero(0.0); DoubleRepresentation value(imm); @@ -873,7 +870,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { // Optionally save all double registers. if (save_doubles) { - CpuFeatureScope scope(this, VFP2); // Check CPU flags for number of registers, setting the Z condition flag. CheckFor32DRegs(ip); @@ -938,7 +934,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count) { // Optionally restore all double registers. if (save_doubles) { - CpuFeatureScope scope(this, VFP2); // Calculate the stack location of the saved doubles and restore them. 
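The gap-resolver edits above all serve one algorithm: parallel moves can form cycles, which BreakCycle resolves by parking one value in kSavedValueRegister (or kScratchDoubleReg for doubles) so the remaining moves become cycle-free, with RestoreValue writing it back last. A toy two-move version of the idea:

    #include <cassert>

    // Toy cycle break for the parallel moves {a <- b, b <- a}:
    // park one source, perform the now cycle-free move, restore.
    void ResolveSwap(double* a, double* b) {
      double saved = *a;  // BreakCycle: save into the scratch register
      *a = *b;            // the remaining move no longer conflicts
      *b = saved;         // RestoreValue
    }

    int main() {
      double x = 1.0, y = 2.0;
      ResolveSwap(&x, &y);
      assert(x == 2.0 && y == 1.0);
      return 0;
    }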
const int offset = 2 * kPointerSize; sub(r3, fp, @@ -975,7 +970,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, } void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) { - ASSERT(CpuFeatures::IsSupported(VFP2)); if (use_eabi_hardfloat()) { Move(dst, d0); } else { @@ -2046,11 +2040,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, // scratch1 is now effective address of the double element FloatingPointHelper::Destination destination; - if (CpuFeatures::IsSupported(VFP2)) { - destination = FloatingPointHelper::kVFPRegisters; - } else { - destination = FloatingPointHelper::kCoreRegisters; - } + destination = FloatingPointHelper::kVFPRegisters; Register untagged_value = elements_reg; SmiUntag(untagged_value, value_reg); @@ -2063,7 +2053,6 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, scratch4, s2); if (destination == FloatingPointHelper::kVFPRegisters) { - CpuFeatureScope scope(this, VFP2); vstr(d0, scratch1, 0); } else { str(mantissa_reg, MemOperand(scratch1, 0)); @@ -2423,9 +2412,6 @@ void MacroAssembler::SmiToDoubleVFPRegister(Register smi, void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input, DwVfpRegister double_scratch) { ASSERT(!double_input.is(double_scratch)); - ASSERT(CpuFeatures::IsSupported(VFP2)); - CpuFeatureScope scope(this, VFP2); - vcvt_s32_f64(double_scratch.low(), double_input); vcvt_f64_s32(double_scratch, double_scratch.low()); VFPCompareAndSetFlags(double_input, double_scratch); @@ -2436,9 +2422,6 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result, DwVfpRegister double_input, DwVfpRegister double_scratch) { ASSERT(!double_input.is(double_scratch)); - ASSERT(CpuFeatures::IsSupported(VFP2)); - CpuFeatureScope scope(this, VFP2); - vcvt_s32_f64(double_scratch.low(), double_input); vmov(result, double_scratch.low()); vcvt_f64_s32(double_scratch, double_scratch.low()); @@ -2454,8 +2437,6 @@ void MacroAssembler::TryInt32Floor(Register result, Label* exact) { ASSERT(!result.is(input_high)); ASSERT(!double_input.is(double_scratch)); - ASSERT(CpuFeatures::IsSupported(VFP2)); - CpuFeatureScope scope(this, VFP2); Label negative, exception; // Test for NaN and infinities. 
@@ -2500,26 +2481,18 @@ void MacroAssembler::ECMAConvertNumberToInt32(Register source, Register scratch, DwVfpRegister double_scratch1, DwVfpRegister double_scratch2) { - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(this, VFP2); - vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset)); - ECMAToInt32VFP(result, double_scratch1, double_scratch2, - scratch, input_high, input_low); - } else { - Ldrd(input_low, input_high, - FieldMemOperand(source, HeapNumber::kValueOffset)); - ECMAToInt32NoVFP(result, scratch, input_high, input_low); - } + vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset)); + ECMAToInt32(result, double_scratch1, double_scratch2, + scratch, input_high, input_low); } -void MacroAssembler::ECMAToInt32VFP(Register result, - DwVfpRegister double_input, - DwVfpRegister double_scratch, - Register scratch, - Register input_high, - Register input_low) { - CpuFeatureScope scope(this, VFP2); +void MacroAssembler::ECMAToInt32(Register result, + DwVfpRegister double_input, + DwVfpRegister double_scratch, + Register scratch, + Register input_high, + Register input_low) { ASSERT(!input_high.is(result)); ASSERT(!input_low.is(result)); ASSERT(!input_low.is(input_high)); @@ -2559,58 +2532,6 @@ void MacroAssembler::ECMAToInt32VFP(Register result, } -void MacroAssembler::ECMAToInt32NoVFP(Register result, - Register scratch, - Register input_high, - Register input_low) { - ASSERT(!result.is(scratch)); - ASSERT(!result.is(input_high)); - ASSERT(!result.is(input_low)); - ASSERT(!scratch.is(input_high)); - ASSERT(!scratch.is(input_low)); - ASSERT(!input_high.is(input_low)); - - Label both, out_of_range, negate, done; - - Ubfx(scratch, input_high, - HeapNumber::kExponentShift, HeapNumber::kExponentBits); - // Load scratch with exponent. - sub(scratch, scratch, Operand(HeapNumber::kExponentBias)); - // If exponent is negative, 0 < input < 1, the result is 0. - // If exponent is greater than or equal to 84, the 32 less significant - // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits), - // the result is 0. - // This test also catch Nan and infinities which also return 0. - cmp(scratch, Operand(84)); - // We do an unsigned comparison so negative numbers are treated as big - // positive number and the two tests above are done in one test. - b(hs, &out_of_range); - - // Load scratch with 20 - exponent. - rsb(scratch, scratch, Operand(20), SetCC); - b(mi, &both); - - // Test 0 and -0. - bic(result, input_high, Operand(HeapNumber::kSignMask)); - orr(result, result, Operand(input_low), SetCC); - b(eq, &done); - // 0 <= exponent <= 20, shift only input_high. - // Scratch contains: 20 - exponent. - Ubfx(result, input_high, - 0, HeapNumber::kMantissaBitsInTopWord); - // Set the implicit 1 before the mantissa part in input_high. - orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord)); - mov(result, Operand(result, LSR, scratch)); - b(&negate); - - bind(&both); - // Restore scratch to exponent - 1 to be consistent with ECMAToInt32VFP. 
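Both the surviving VFP path (now plain ECMAToInt32) and the deleted core-register path implement the same ECMA-262 9.5 ToInt32 contract: NaN and infinities become 0, and everything else truncates toward zero and wraps modulo 2^32. A reference implementation of the contract (the final narrowing assumes the usual two's-complement behavior):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // ECMA-262 9.5 ToInt32: NaN and +/-Infinity map to 0; other values
    // truncate toward zero and wrap modulo 2^32 into the int32 range.
    int32_t ECMAToInt32(double d) {
      if (!std::isfinite(d)) return 0;
      double wrapped = std::fmod(std::trunc(d), 4294967296.0);  // mod 2^32
      // |wrapped| < 2^32, so it fits in an int64; the uint32 cast wraps
      // and the final cast reinterprets as two's-complement int32.
      return static_cast<int32_t>(
          static_cast<uint32_t>(static_cast<int64_t>(wrapped)));
    }

    int main() {
      assert(ECMAToInt32(3.9) == 3);
      assert(ECMAToInt32(-3.9) == -3);
      assert(ECMAToInt32(2147483648.0) == INT32_MIN);  // 2^31 wraps negative
      assert(ECMAToInt32(std::nan("")) == 0);
      return 0;
    }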
- rsb(scratch, scratch, Operand(19)); - ECMAToInt32Tail(result, scratch, input_high, input_low, - &out_of_range, &negate, &done); -} - - void MacroAssembler::ECMAToInt32Tail(Register result, Register scratch, Register input_high, @@ -2713,10 +2634,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { const Runtime::Function* function = Runtime::FunctionForId(id); mov(r0, Operand(function->nargs)); mov(r1, Operand(ExternalReference(function, isolate()))); - SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2) - ? kSaveFPRegs - : kDontSaveFPRegs; - CEntryStub stub(1, mode); + CEntryStub stub(1, kSaveFPRegs); CallStub(&stub); } @@ -3461,7 +3379,6 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) { - ASSERT(CpuFeatures::IsSupported(VFP2)); if (use_eabi_hardfloat()) { Move(d0, dreg); } else { @@ -3472,7 +3389,6 @@ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) { void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1, DwVfpRegister dreg2) { - ASSERT(CpuFeatures::IsSupported(VFP2)); if (use_eabi_hardfloat()) { if (dreg2.is(d0)) { ASSERT(!dreg1.is(d1)); @@ -3491,7 +3407,6 @@ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1, void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg, Register reg) { - ASSERT(CpuFeatures::IsSupported(VFP2)); if (use_eabi_hardfloat()) { Move(d0, dreg); Move(r0, reg); diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h index 958fcac..e72b676 100644 --- a/src/arm/macro-assembler-arm.h +++ b/src/arm/macro-assembler-arm.h @@ -969,20 +969,12 @@ class MacroAssembler: public Assembler { // Performs a truncating conversion of a floating point number as used by // the JS bitwise operations. See ECMA-262 9.5: ToInt32. // Exits with 'result' holding the answer and all other registers clobbered. - void ECMAToInt32VFP(Register result, - DwVfpRegister double_input, - DwVfpRegister double_scratch, - Register scratch, - Register input_high, - Register input_low); - - // Performs a truncating conversion of a floating point number as used by - // the JS bitwise operations. See ECMA-262 9.5: ToInt32. - // Exits with 'result' holding the answer. - void ECMAToInt32NoVFP(Register result, - Register scratch, - Register input_high, - Register input_low); + void ECMAToInt32(Register result, + DwVfpRegister double_input, + DwVfpRegister double_scratch, + Register scratch, + Register input_high, + Register input_low); // Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz // instruction. On pre-ARM5 hardware this routine gives the wrong answer diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc index 676fa0f..d03eb36 100644 --- a/src/arm/stub-cache-arm.cc +++ b/src/arm/stub-cache-arm.cc @@ -975,66 +975,11 @@ static void StoreIntAsFloat(MacroAssembler* masm, Register dst, Register wordoffset, Register ival, - Register fval, - Register scratch1, - Register scratch2) { - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - __ vmov(s0, ival); - __ add(scratch1, dst, Operand(wordoffset, LSL, 2)); - __ vcvt_f32_s32(s0, s0); - __ vstr(s0, scratch1, 0); - } else { - Label not_special, done; - // Move sign bit from source to destination. This works because the sign - // bit in the exponent word of the double has the same position and polarity - // as the 2's complement sign bit in a Smi. 
- ASSERT(kBinary32SignMask == 0x80000000u); - - __ and_(fval, ival, Operand(kBinary32SignMask), SetCC); - // Negate value if it is negative. - __ rsb(ival, ival, Operand::Zero(), LeaveCC, ne); - - // We have -1, 0 or 1, which we treat specially. Register ival contains - // absolute value: it is either equal to 1 (special case of -1 and 1), - // greater than 1 (not a special case) or less than 1 (special case of 0). - __ cmp(ival, Operand(1)); - __ b(gt, ¬_special); - - // For 1 or -1 we need to or in the 0 exponent (biased). - static const uint32_t exponent_word_for_1 = - kBinary32ExponentBias << kBinary32ExponentShift; - - __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq); - __ b(&done); - - __ bind(¬_special); - // Count leading zeros. - // Gets the wrong answer for 0, but we already checked for that case above. - Register zeros = scratch2; - __ CountLeadingZeros(zeros, ival, scratch1); - - // Compute exponent and or it into the exponent register. - __ rsb(scratch1, - zeros, - Operand((kBitsPerInt - 1) + kBinary32ExponentBias)); - - __ orr(fval, - fval, - Operand(scratch1, LSL, kBinary32ExponentShift)); - - // Shift up the source chopping the top bit off. - __ add(zeros, zeros, Operand(1)); - // This wouldn't work for 1 and -1 as the shift would be 32 which means 0. - __ mov(ival, Operand(ival, LSL, zeros)); - // And the top (top 20 bits). - __ orr(fval, - fval, - Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits)); - - __ bind(&done); - __ str(fval, MemOperand(dst, wordoffset, LSL, 2)); - } + Register scratch1) { + __ vmov(s0, ival); + __ add(scratch1, dst, Operand(wordoffset, LSL, 2)); + __ vcvt_f32_s32(s0, s0); + __ vstr(s0, scratch1, 0); } @@ -2082,11 +2027,6 @@ Handle CallStubCompiler::CompileMathFloorCall( // -- sp[argc * 4] : receiver // ----------------------------------- - if (!CpuFeatures::IsSupported(VFP2)) { - return Handle::null(); - } - - CpuFeatureScope scope_vfp2(masm(), VFP2); const int argc = arguments().immediate(); // If the object is not a JSObject or we got an unexpected number of // arguments, bail out to the regular call. @@ -3126,36 +3066,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( } -static bool IsElementTypeSigned(ElementsKind elements_kind) { - switch (elements_kind) { - case EXTERNAL_BYTE_ELEMENTS: - case EXTERNAL_SHORT_ELEMENTS: - case EXTERNAL_INT_ELEMENTS: - return true; - - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - case EXTERNAL_PIXEL_ELEMENTS: - return false; - - case EXTERNAL_FLOAT_ELEMENTS: - case EXTERNAL_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case DICTIONARY_ELEMENTS: - case NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - return false; - } - return false; -} - - static void GenerateSmiKeyCheck(MacroAssembler* masm, Register key, Register scratch0, @@ -3163,29 +3073,23 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm, DwVfpRegister double_scratch0, DwVfpRegister double_scratch1, Label* fail) { - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - Label key_ok; - // Check for smi or a smi inside a heap number. We convert the heap - // number and check if the conversion is exact and fits into the smi - // range. 
- __ JumpIfSmi(key, &key_ok); - __ CheckMap(key, - scratch0, - Heap::kHeapNumberMapRootIndex, - fail, - DONT_DO_SMI_CHECK); - __ sub(ip, key, Operand(kHeapObjectTag)); - __ vldr(double_scratch0, ip, HeapNumber::kValueOffset); - __ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1); - __ b(ne, fail); - __ TrySmiTag(scratch0, fail, scratch1); - __ mov(key, scratch0); - __ bind(&key_ok); - } else { - // Check that the key is a smi. - __ JumpIfNotSmi(key, fail); - } + Label key_ok; + // Check for smi or a smi inside a heap number. We convert the heap + // number and check if the conversion is exact and fits into the smi + // range. + __ JumpIfSmi(key, &key_ok); + __ CheckMap(key, + scratch0, + Heap::kHeapNumberMapRootIndex, + fail, + DONT_DO_SMI_CHECK); + __ sub(ip, key, Operand(kHeapObjectTag)); + __ vldr(double_scratch0, ip, HeapNumber::kValueOffset); + __ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1); + __ b(ne, fail); + __ TrySmiTag(scratch0, fail, scratch1); + __ mov(key, scratch0); + __ bind(&key_ok); } @@ -3255,28 +3159,18 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: // Perform int-to-float conversion and store to memory. __ SmiUntag(r4, key); - StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9); + StoreIntAsFloat(masm, r3, r4, r5, r7); break; case EXTERNAL_DOUBLE_ELEMENTS: __ add(r3, r3, Operand(key, LSL, 2)); // r3: effective address of the double element FloatingPointHelper::Destination destination; - if (CpuFeatures::IsSupported(VFP2)) { - destination = FloatingPointHelper::kVFPRegisters; - } else { - destination = FloatingPointHelper::kCoreRegisters; - } + destination = FloatingPointHelper::kVFPRegisters; FloatingPointHelper::ConvertIntToDouble( masm, r5, destination, d0, r6, r7, // These are: double_dst, dst_mantissa, dst_exponent. r4, s2); // These are: scratch2, single_scratch. - if (destination == FloatingPointHelper::kVFPRegisters) { - CpuFeatureScope scope(masm, VFP2); - __ vstr(d0, r3, 0); - } else { - __ str(r6, MemOperand(r3, 0)); - __ str(r7, MemOperand(r3, Register::kSizeInBytes)); - } + __ vstr(d0, r3, 0); break; case FAST_ELEMENTS: case FAST_SMI_ELEMENTS: @@ -3306,201 +3200,59 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // The WebGL specification leaves the behavior of storing NaN and // +/-Infinity into integer arrays basically undefined. For more // reproducible behavior, convert these to zero. - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatureScope scope(masm, VFP2); - - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - // vldr requires offset to be a multiple of 4 so we can not - // include -kHeapObjectTag into it. - __ sub(r5, r0, Operand(kHeapObjectTag)); - __ vldr(d0, r5, HeapNumber::kValueOffset); - __ add(r5, r3, Operand(key, LSL, 1)); - __ vcvt_f32_f64(s0, d0); - __ vstr(s0, r5, 0); - } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - __ sub(r5, r0, Operand(kHeapObjectTag)); - __ vldr(d0, r5, HeapNumber::kValueOffset); - __ add(r5, r3, Operand(key, LSL, 2)); - __ vstr(d0, r5, 0); - } else { - // Hoisted load. vldr requires offset to be a multiple of 4 so we can - // not include -kHeapObjectTag into it. 
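The key check above now always accepts heap-number keys by converting with TryDoubleToInt32Exact: convert to int32, convert back, and compare, so only doubles that are exactly an int32 pass and everything else falls through to the miss label. The same round-trip test on the host; the explicit range guard stands in for the saturation the ARM vcvt instruction provides:

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    bool TryDoubleToInt32Exact(double input, int32_t* out) {
      // Host-side guard; the hardware vcvt saturates out-of-range inputs
      // instead, and the round-trip compare then fails for them anyway.
      if (!(input >= -2147483648.0 && input <= 2147483647.0)) return false;
      int32_t candidate = static_cast<int32_t>(input);  // truncates like vcvt
      *out = candidate;
      // vcvt_f64_s32 back, then VFPCompareAndSetFlags: exact iff equal.
      return static_cast<double>(candidate) == input;
    }

    int main() {
      int32_t v;
      assert(TryDoubleToInt32Exact(42.0, &v) && v == 42);
      assert(!TryDoubleToInt32Exact(42.5, &v));  // inexact: not a valid key
      assert(!TryDoubleToInt32Exact(std::nan(""), &v));
      return 0;
    }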
- __ sub(r5, value, Operand(kHeapObjectTag)); - __ vldr(d0, r5, HeapNumber::kValueOffset); - __ ECMAToInt32VFP(r5, d0, d1, r6, r7, r9); - - switch (elements_kind) { - case EXTERNAL_BYTE_ELEMENTS: - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ strb(r5, MemOperand(r3, key, LSR, 1)); - break; - case EXTERNAL_SHORT_ELEMENTS: - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ strh(r5, MemOperand(r3, key, LSL, 0)); - break; - case EXTERNAL_INT_ELEMENTS: - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ str(r5, MemOperand(r3, key, LSL, 1)); - break; - case EXTERNAL_PIXEL_ELEMENTS: - case EXTERNAL_FLOAT_ELEMENTS: - case EXTERNAL_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case DICTIONARY_ELEMENTS: - case NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; - } - } - // Entry registers are intact, r0 holds the value which is the return - // value. - __ Ret(); + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { + // vldr requires offset to be a multiple of 4 so we can not + // include -kHeapObjectTag into it. + __ sub(r5, r0, Operand(kHeapObjectTag)); + __ vldr(d0, r5, HeapNumber::kValueOffset); + __ add(r5, r3, Operand(key, LSL, 1)); + __ vcvt_f32_f64(s0, d0); + __ vstr(s0, r5, 0); + } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { + __ sub(r5, r0, Operand(kHeapObjectTag)); + __ vldr(d0, r5, HeapNumber::kValueOffset); + __ add(r5, r3, Operand(key, LSL, 2)); + __ vstr(d0, r5, 0); } else { - // VFP3 is not available do manual conversions. - __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset)); - __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset)); - - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - Label done, nan_or_infinity_or_zero; - static const int kMantissaInHiWordShift = - kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; - - static const int kMantissaInLoWordShift = - kBitsPerInt - kMantissaInHiWordShift; - - // Test for all special exponent values: zeros, subnormal numbers, NaNs - // and infinities. All these should be converted to 0. - __ mov(r7, Operand(HeapNumber::kExponentMask)); - __ and_(r9, r5, Operand(r7), SetCC); - __ b(eq, &nan_or_infinity_or_zero); - - __ teq(r9, Operand(r7)); - __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq); - __ b(eq, &nan_or_infinity_or_zero); - - // Rebias exponent. - __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); - __ add(r9, - r9, - Operand(kBinary32ExponentBias - HeapNumber::kExponentBias)); - - __ cmp(r9, Operand(kBinary32MaxExponent)); - __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt); - __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt); - __ b(gt, &done); - - __ cmp(r9, Operand(kBinary32MinExponent)); - __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt); - __ b(lt, &done); - - __ and_(r7, r5, Operand(HeapNumber::kSignMask)); - __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); - __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift)); - __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift)); - __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift)); - - __ bind(&done); - __ str(r5, MemOperand(r3, key, LSL, 1)); - // Entry registers are intact, r0 holds the value which is the return - // value. 
- __ Ret(); - - __ bind(&nan_or_infinity_or_zero); - __ and_(r7, r5, Operand(HeapNumber::kSignMask)); - __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); - __ orr(r9, r9, r7); - __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift)); - __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift)); - __ b(&done); - } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - __ add(r7, r3, Operand(key, LSL, 2)); - // r7: effective address of destination element. - __ str(r6, MemOperand(r7, 0)); - __ str(r5, MemOperand(r7, Register::kSizeInBytes)); - __ Ret(); - } else { - bool is_signed_type = IsElementTypeSigned(elements_kind); - int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt; - int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; - - Label done, sign; - - // Test for all special exponent values: zeros, subnormal numbers, NaNs - // and infinities. All these should be converted to 0. - __ mov(r7, Operand(HeapNumber::kExponentMask)); - __ and_(r9, r5, Operand(r7), SetCC); - __ mov(r5, Operand::Zero(), LeaveCC, eq); - __ b(eq, &done); - - __ teq(r9, Operand(r7)); - __ mov(r5, Operand::Zero(), LeaveCC, eq); - __ b(eq, &done); - - // Unbias exponent. - __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); - __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC); - // If exponent is negative then result is 0. - __ mov(r5, Operand::Zero(), LeaveCC, mi); - __ b(mi, &done); - - // If exponent is too big then result is minimal value. - __ cmp(r9, Operand(meaningfull_bits - 1)); - __ mov(r5, Operand(min_value), LeaveCC, ge); - __ b(ge, &done); - - __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC); - __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); - __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); - - __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); - __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl); - __ b(pl, &sign); - - __ rsb(r9, r9, Operand::Zero()); - __ mov(r5, Operand(r5, LSL, r9)); - __ rsb(r9, r9, Operand(meaningfull_bits)); - __ orr(r5, r5, Operand(r6, LSR, r9)); - - __ bind(&sign); - __ teq(r7, Operand::Zero()); - __ rsb(r5, r5, Operand::Zero(), LeaveCC, ne); - - __ bind(&done); - switch (elements_kind) { - case EXTERNAL_BYTE_ELEMENTS: - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ strb(r5, MemOperand(r3, key, LSR, 1)); - break; - case EXTERNAL_SHORT_ELEMENTS: - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ strh(r5, MemOperand(r3, key, LSL, 0)); - break; - case EXTERNAL_INT_ELEMENTS: - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ str(r5, MemOperand(r3, key, LSL, 1)); - break; - case EXTERNAL_PIXEL_ELEMENTS: - case EXTERNAL_FLOAT_ELEMENTS: - case EXTERNAL_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case DICTIONARY_ELEMENTS: - case NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; - } + // Hoisted load. vldr requires offset to be a multiple of 4 so we can + // not include -kHeapObjectTag into it. 
+      __ sub(r5, value, Operand(kHeapObjectTag));
+      __ vldr(d0, r5, HeapNumber::kValueOffset);
+      __ ECMAToInt32(r5, d0, d1, r6, r7, r9);
+
+      switch (elements_kind) {
+        case EXTERNAL_BYTE_ELEMENTS:
+        case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+          __ strb(r5, MemOperand(r3, key, LSR, 1));
+          break;
+        case EXTERNAL_SHORT_ELEMENTS:
+        case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+          __ strh(r5, MemOperand(r3, key, LSL, 0));
+          break;
+        case EXTERNAL_INT_ELEMENTS:
+        case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+          __ str(r5, MemOperand(r3, key, LSL, 1));
+          break;
+        case EXTERNAL_PIXEL_ELEMENTS:
+        case EXTERNAL_FLOAT_ELEMENTS:
+        case EXTERNAL_DOUBLE_ELEMENTS:
+        case FAST_ELEMENTS:
+        case FAST_SMI_ELEMENTS:
+        case FAST_DOUBLE_ELEMENTS:
+        case FAST_HOLEY_ELEMENTS:
+        case FAST_HOLEY_SMI_ELEMENTS:
+        case FAST_HOLEY_DOUBLE_ELEMENTS:
+        case DICTIONARY_ELEMENTS:
+        case NON_STRICT_ARGUMENTS_ELEMENTS:
+          UNREACHABLE();
+          break;
       }
     }
+
+    // Entry registers are intact, r0 holds the value which is the return
+    // value.
+    __ Ret();
   }
 
   // Slow case, key and receiver still in r0 and r1.
diff --git a/src/assembler.cc b/src/assembler.cc
index deef28b..458930b 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -191,11 +191,9 @@ CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f)
   uint64_t mask = static_cast<uint64_t>(1) << f;
   // TODO(svenpanne) This special case below doesn't belong here!
 #if V8_TARGET_ARCH_ARM
-  // VFP2 and ARMv7 are implied by VFP3.
+  // ARMv7 is implied by VFP3.
   if (f == VFP3) {
-    mask |=
-        static_cast<uint64_t>(1) << VFP2 |
-        static_cast<uint64_t>(1) << ARMv7;
+    mask |= static_cast<uint64_t>(1) << ARMv7;
   }
 #endif
   assembler_->set_enabled_cpu_features(old_enabled_ | mask);
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 0e989ec..748f476 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -770,7 +770,7 @@ class BinaryOpStub: public PlatformCodeStub {
  private:
   Token::Value op_;
   OverwriteMode mode_;
-  bool platform_specific_bit_;  // Indicates SSE3 on IA32, VFP2 on ARM.
+  bool platform_specific_bit_;  // Indicates SSE3 on IA32.
 
   // Operand type information determined at runtime.
   BinaryOpIC::TypeInfo left_type_;
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index ea72168..746d8e1 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -309,10 +309,7 @@ DEFINE_bool(enable_rdtsc, true,
 DEFINE_bool(enable_sahf, true,
             "enable use of SAHF instruction if available (X64 only)")
 DEFINE_bool(enable_vfp3, true,
-            "enable use of VFP3 instructions if available - this implies "
-            "enabling ARMv7 and VFP2 instructions (ARM only)")
-DEFINE_bool(enable_vfp2, true,
-            "enable use of VFP2 instructions if available")
+            "enable use of VFP3 instructions if available")
 DEFINE_bool(enable_armv7, true,
             "enable use of ARMv7 instructions if available (ARM only)")
 DEFINE_bool(enable_sudiv, true,
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index 7a2975a..afe658b 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -2008,7 +2008,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
       (instr->representation().IsDouble() &&
        ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
         (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
-  // float->double conversion on non-VFP2 requires an extra scratch
+  // float->double conversion on soft float requires an extra scratch
   // register. For convenience, just mark the elements register as "UseTemp"
   // so that it can be used as a temp during the float->double conversion
   // after it's no longer needed after the float load.
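The ECMAToInt32 macro-assembler helper called in the rewritten store stub above, like the ECMAToInt32VFP helper and the removed manual path it replaces, implements the ECMA-262 ToInt32 conversion. As a rough reference for what those paths compute, not code from this patch, the conversion looks like this in portable C++ (the function name is illustrative only):

    // ECMA-262 ToInt32: NaN and +/-Infinity map to 0; everything else is
    // truncated toward zero and wrapped modulo 2^32 into [-2^31, 2^31).
    #include <cmath>
    #include <cstdint>

    int32_t ECMAToInt32Reference(double value) {
      if (std::isnan(value) || std::isinf(value)) return 0;
      double truncated = std::trunc(value);                // round toward zero
      double modulo = std::fmod(truncated, 4294967296.0);  // 2^32
      if (modulo < 0.0) modulo += 4294967296.0;            // into [0, 2^32)
      // Reinterpret as a signed 32-bit value (two's-complement wrap,
      // as on all V8 targets).
      return static_cast<int32_t>(static_cast<uint32_t>(modulo));
    }
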
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 1f9cde1..2a2284a 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -146,9 +146,6 @@ bool OS::ArmCpuHasFeature(CpuFeature feature) {
   // facility is universally available on the ARM architectures,
   // so it's up to individual OSes to provide such.
   switch (feature) {
-    case VFP2:
-      search_string = "vfp";
-      break;
     case VFP3:
       search_string = "vfpv3";
       break;
diff --git a/src/v8globals.h b/src/v8globals.h
index 588d592..02010b9 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -433,11 +433,10 @@ enum CpuFeature { SSE4_1 = 32 + 19,  // x86
                   CPUID = 10,   // x86
                   VFP3 = 1,     // ARM
                   ARMv7 = 2,    // ARM
-                  VFP2 = 3,     // ARM
-                  SUDIV = 4,    // ARM
-                  UNALIGNED_ACCESSES = 5,  // ARM
-                  MOVW_MOVT_IMMEDIATE_LOADS = 6,  // ARM
-                  VFP32DREGS = 7,  // ARM
+                  SUDIV = 3,    // ARM
+                  UNALIGNED_ACCESSES = 4,  // ARM
+                  MOVW_MOVT_IMMEDIATE_LOADS = 5,  // ARM
+                  VFP32DREGS = 6,  // ARM
                   SAHF = 0,     // x86
                   FPU = 1};     // MIPS
diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc
index 5cb4ab3..1a1bbf0 100644
--- a/test/cctest/test-assembler-arm.cc
+++ b/test/cctest/test-assembler-arm.cc
@@ -654,81 +654,77 @@ TEST(8) {
   // single precision values around in memory.
   Assembler assm(isolate, NULL, 0);
 
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatureScope scope(&assm, VFP2);
-
-    __ mov(ip, Operand(sp));
-    __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
-    __ sub(fp, ip, Operand(4));
+  __ mov(ip, Operand(sp));
+  __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+  __ sub(fp, ip, Operand(4));
 
-    __ add(r4, r0, Operand(OFFSET_OF(D, a)));
-    __ vldm(ia_w, r4, d0, d3);
-    __ vldm(ia_w, r4, d4, d7);
+  __ add(r4, r0, Operand(OFFSET_OF(D, a)));
+  __ vldm(ia_w, r4, d0, d3);
+  __ vldm(ia_w, r4, d4, d7);
 
-    __ add(r4, r0, Operand(OFFSET_OF(D, a)));
-    __ vstm(ia_w, r4, d6, d7);
-    __ vstm(ia_w, r4, d0, d5);
+  __ add(r4, r0, Operand(OFFSET_OF(D, a)));
+  __ vstm(ia_w, r4, d6, d7);
+  __ vstm(ia_w, r4, d0, d5);
 
-    __ add(r4, r1, Operand(OFFSET_OF(F, a)));
-    __ vldm(ia_w, r4, s0, s3);
-    __ vldm(ia_w, r4, s4, s7);
+  __ add(r4, r1, Operand(OFFSET_OF(F, a)));
+  __ vldm(ia_w, r4, s0, s3);
+  __ vldm(ia_w, r4, s4, s7);
 
-    __ add(r4, r1, Operand(OFFSET_OF(F, a)));
-    __ vstm(ia_w, r4, s6, s7);
-    __ vstm(ia_w, r4, s0, s5);
+  __ add(r4, r1, Operand(OFFSET_OF(F, a)));
+  __ vstm(ia_w, r4, s6, s7);
+  __ vstm(ia_w, r4, s0, s5);
 
-    __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+  __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
 
-    CodeDesc desc;
-    assm.GetCode(&desc);
-    Object* code = isolate->heap()->CreateCode(
-        desc,
-        Code::ComputeFlags(Code::STUB),
-        Handle<Code>())->ToObjectChecked();
-    CHECK(code->IsCode());
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Object* code = isolate->heap()->CreateCode(
+      desc,
+      Code::ComputeFlags(Code::STUB),
+      Handle<Code>())->ToObjectChecked();
+  CHECK(code->IsCode());
 #ifdef DEBUG
-    Code::cast(code)->Print();
+  Code::cast(code)->Print();
 #endif
-    F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
-    d.a = 1.1;
-    d.b = 2.2;
-    d.c = 3.3;
-    d.d = 4.4;
-    d.e = 5.5;
-    d.f = 6.6;
-    d.g = 7.7;
-    d.h = 8.8;
-
-    f.a = 1.0;
-    f.b = 2.0;
-    f.c = 3.0;
-    f.d = 4.0;
-    f.e = 5.0;
-    f.f = 6.0;
-    f.g = 7.0;
-    f.h = 8.0;
-
-    Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
-    USE(dummy);
+  F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
+  d.a = 1.1;
+  d.b = 2.2;
+  d.c = 3.3;
+  d.d = 4.4;
+  d.e = 5.5;
+  d.f = 6.6;
+  d.g = 7.7;
+  d.h = 8.8;
+
+  f.a = 1.0;
+  f.b = 2.0;
+  f.c = 3.0;
+  f.d = 4.0;
+  f.e = 5.0;
+  f.f = 6.0;
+  f.g = 7.0;
+  f.h = 8.0;
+
+  Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
+  USE(dummy);
 
-    CHECK_EQ(7.7, d.a);
-    CHECK_EQ(8.8, d.b);
-    CHECK_EQ(1.1, d.c);
-    CHECK_EQ(2.2, d.d);
-    CHECK_EQ(3.3, d.e);
-    CHECK_EQ(4.4, d.f);
-    CHECK_EQ(5.5, d.g);
-    CHECK_EQ(6.6, d.h);
-
-    CHECK_EQ(7.0, f.a);
-    CHECK_EQ(8.0, f.b);
-    CHECK_EQ(1.0, f.c);
-    CHECK_EQ(2.0, f.d);
-    CHECK_EQ(3.0, f.e);
-    CHECK_EQ(4.0, f.f);
-    CHECK_EQ(5.0, f.g);
-    CHECK_EQ(6.0, f.h);
-  }
+  CHECK_EQ(7.7, d.a);
+  CHECK_EQ(8.8, d.b);
+  CHECK_EQ(1.1, d.c);
+  CHECK_EQ(2.2, d.d);
+  CHECK_EQ(3.3, d.e);
+  CHECK_EQ(4.4, d.f);
+  CHECK_EQ(5.5, d.g);
+  CHECK_EQ(6.6, d.h);
+
+  CHECK_EQ(7.0, f.a);
+  CHECK_EQ(8.0, f.b);
+  CHECK_EQ(1.0, f.c);
+  CHECK_EQ(2.0, f.d);
+  CHECK_EQ(3.0, f.e);
+  CHECK_EQ(4.0, f.f);
+  CHECK_EQ(5.0, f.g);
+  CHECK_EQ(6.0, f.h);
 }
 
@@ -766,85 +762,81 @@ TEST(9) {
   // single precision values around in memory.
   Assembler assm(isolate, NULL, 0);
 
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatureScope scope(&assm, VFP2);
-
-    __ mov(ip, Operand(sp));
-    __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
-    __ sub(fp, ip, Operand(4));
+  __ mov(ip, Operand(sp));
+  __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+  __ sub(fp, ip, Operand(4));
 
-    __ add(r4, r0, Operand(OFFSET_OF(D, a)));
-    __ vldm(ia, r4, d0, d3);
-    __ add(r4, r4, Operand(4 * 8));
-    __ vldm(ia, r4, d4, d7);
+  __ add(r4, r0, Operand(OFFSET_OF(D, a)));
+  __ vldm(ia, r4, d0, d3);
+  __ add(r4, r4, Operand(4 * 8));
+  __ vldm(ia, r4, d4, d7);
 
-    __ add(r4, r0, Operand(OFFSET_OF(D, a)));
-    __ vstm(ia, r4, d6, d7);
-    __ add(r4, r4, Operand(2 * 8));
-    __ vstm(ia, r4, d0, d5);
+  __ add(r4, r0, Operand(OFFSET_OF(D, a)));
+  __ vstm(ia, r4, d6, d7);
+  __ add(r4, r4, Operand(2 * 8));
+  __ vstm(ia, r4, d0, d5);
 
-    __ add(r4, r1, Operand(OFFSET_OF(F, a)));
-    __ vldm(ia, r4, s0, s3);
-    __ add(r4, r4, Operand(4 * 4));
-    __ vldm(ia, r4, s4, s7);
+  __ add(r4, r1, Operand(OFFSET_OF(F, a)));
+  __ vldm(ia, r4, s0, s3);
+  __ add(r4, r4, Operand(4 * 4));
+  __ vldm(ia, r4, s4, s7);
 
-    __ add(r4, r1, Operand(OFFSET_OF(F, a)));
-    __ vstm(ia, r4, s6, s7);
-    __ add(r4, r4, Operand(2 * 4));
-    __ vstm(ia, r4, s0, s5);
+  __ add(r4, r1, Operand(OFFSET_OF(F, a)));
+  __ vstm(ia, r4, s6, s7);
+  __ add(r4, r4, Operand(2 * 4));
+  __ vstm(ia, r4, s0, s5);
 
-    __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+  __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
 
-    CodeDesc desc;
-    assm.GetCode(&desc);
-    Object* code = isolate->heap()->CreateCode(
-        desc,
-        Code::ComputeFlags(Code::STUB),
-        Handle<Code>())->ToObjectChecked();
-    CHECK(code->IsCode());
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Object* code = isolate->heap()->CreateCode(
+      desc,
+      Code::ComputeFlags(Code::STUB),
+      Handle<Code>())->ToObjectChecked();
+  CHECK(code->IsCode());
 #ifdef DEBUG
-    Code::cast(code)->Print();
+  Code::cast(code)->Print();
 #endif
-    F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
-    d.a = 1.1;
-    d.b = 2.2;
-    d.c = 3.3;
-    d.d = 4.4;
-    d.e = 5.5;
-    d.f = 6.6;
-    d.g = 7.7;
-    d.h = 8.8;
-
-    f.a = 1.0;
-    f.b = 2.0;
-    f.c = 3.0;
-    f.d = 4.0;
-    f.e = 5.0;
-    f.f = 6.0;
-    f.g = 7.0;
-    f.h = 8.0;
-
-    Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
-    USE(dummy);
+  F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
+  d.a = 1.1;
+  d.b = 2.2;
+  d.c = 3.3;
+  d.d = 4.4;
+  d.e = 5.5;
+  d.f = 6.6;
+  d.g = 7.7;
+  d.h = 8.8;
+
+  f.a = 1.0;
+  f.b = 2.0;
+  f.c = 3.0;
+  f.d = 4.0;
+  f.e = 5.0;
+  f.f = 6.0;
+  f.g = 7.0;
+  f.h = 8.0;
+
+  Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
+  USE(dummy);
 
-    CHECK_EQ(7.7, d.a);
-    CHECK_EQ(8.8, d.b);
-    CHECK_EQ(1.1, d.c);
-    CHECK_EQ(2.2, d.d);
-    CHECK_EQ(3.3, d.e);
-    CHECK_EQ(4.4, d.f);
-    CHECK_EQ(5.5, d.g);
-    CHECK_EQ(6.6, d.h);
-
-    CHECK_EQ(7.0, f.a);
-    CHECK_EQ(8.0, f.b);
-    CHECK_EQ(1.0, f.c);
-    CHECK_EQ(2.0, f.d);
-    CHECK_EQ(3.0, f.e);
-    CHECK_EQ(4.0, f.f);
-    CHECK_EQ(5.0, f.g);
-    CHECK_EQ(6.0, f.h);
-  }
+  CHECK_EQ(7.7, d.a);
+  CHECK_EQ(8.8, d.b);
+  CHECK_EQ(1.1, d.c);
+  CHECK_EQ(2.2, d.d);
+  CHECK_EQ(3.3, d.e);
+  CHECK_EQ(4.4, d.f);
+  CHECK_EQ(5.5, d.g);
+  CHECK_EQ(6.6, d.h);
+
+  CHECK_EQ(7.0, f.a);
+  CHECK_EQ(8.0, f.b);
+  CHECK_EQ(1.0, f.c);
+  CHECK_EQ(2.0, f.d);
+  CHECK_EQ(3.0, f.e);
+  CHECK_EQ(4.0, f.f);
+  CHECK_EQ(5.0, f.g);
+  CHECK_EQ(6.0, f.h);
 }
 
@@ -882,81 +874,77 @@ TEST(10) {
   // single precision values around in memory.
   Assembler assm(isolate, NULL, 0);
 
-  if (CpuFeatures::IsSupported(VFP2)) {
-    CpuFeatureScope scope(&assm, VFP2);
-
-    __ mov(ip, Operand(sp));
-    __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
-    __ sub(fp, ip, Operand(4));
+  __ mov(ip, Operand(sp));
+  __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+  __ sub(fp, ip, Operand(4));
 
-    __ add(r4, r0, Operand(OFFSET_OF(D, h) + 8));
-    __ vldm(db_w, r4, d4, d7);
-    __ vldm(db_w, r4, d0, d3);
+  __ add(r4, r0, Operand(OFFSET_OF(D, h) + 8));
+  __ vldm(db_w, r4, d4, d7);
+  __ vldm(db_w, r4, d0, d3);
 
-    __ add(r4, r0, Operand(OFFSET_OF(D, h) + 8));
-    __ vstm(db_w, r4, d0, d5);
-    __ vstm(db_w, r4, d6, d7);
+  __ add(r4, r0, Operand(OFFSET_OF(D, h) + 8));
+  __ vstm(db_w, r4, d0, d5);
+  __ vstm(db_w, r4, d6, d7);
 
-    __ add(r4, r1, Operand(OFFSET_OF(F, h) + 4));
-    __ vldm(db_w, r4, s4, s7);
-    __ vldm(db_w, r4, s0, s3);
+  __ add(r4, r1, Operand(OFFSET_OF(F, h) + 4));
+  __ vldm(db_w, r4, s4, s7);
+  __ vldm(db_w, r4, s0, s3);
 
-    __ add(r4, r1, Operand(OFFSET_OF(F, h) + 4));
-    __ vstm(db_w, r4, s0, s5);
-    __ vstm(db_w, r4, s6, s7);
+  __ add(r4, r1, Operand(OFFSET_OF(F, h) + 4));
+  __ vstm(db_w, r4, s0, s5);
+  __ vstm(db_w, r4, s6, s7);
 
-    __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+  __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
 
-    CodeDesc desc;
-    assm.GetCode(&desc);
-    Object* code = isolate->heap()->CreateCode(
-        desc,
-        Code::ComputeFlags(Code::STUB),
-        Handle<Code>())->ToObjectChecked();
-    CHECK(code->IsCode());
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Object* code = isolate->heap()->CreateCode(
+      desc,
+      Code::ComputeFlags(Code::STUB),
+      Handle<Code>())->ToObjectChecked();
+  CHECK(code->IsCode());
 #ifdef DEBUG
-    Code::cast(code)->Print();
+  Code::cast(code)->Print();
 #endif
-    F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
-    d.a = 1.1;
-    d.b = 2.2;
-    d.c = 3.3;
-    d.d = 4.4;
-    d.e = 5.5;
-    d.f = 6.6;
-    d.g = 7.7;
-    d.h = 8.8;
-
-    f.a = 1.0;
-    f.b = 2.0;
-    f.c = 3.0;
-    f.d = 4.0;
-    f.e = 5.0;
-    f.f = 6.0;
-    f.g = 7.0;
-    f.h = 8.0;
-
-    Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
-    USE(dummy);
+  F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
+  d.a = 1.1;
+  d.b = 2.2;
+  d.c = 3.3;
+  d.d = 4.4;
+  d.e = 5.5;
+  d.f = 6.6;
+  d.g = 7.7;
+  d.h = 8.8;
+
+  f.a = 1.0;
+  f.b = 2.0;
+  f.c = 3.0;
+  f.d = 4.0;
+  f.e = 5.0;
+  f.f = 6.0;
+  f.g = 7.0;
+  f.h = 8.0;
+
+  Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
+  USE(dummy);
 
-    CHECK_EQ(7.7, d.a);
-    CHECK_EQ(8.8, d.b);
-    CHECK_EQ(1.1, d.c);
-    CHECK_EQ(2.2, d.d);
-    CHECK_EQ(3.3, d.e);
-    CHECK_EQ(4.4, d.f);
-    CHECK_EQ(5.5, d.g);
-    CHECK_EQ(6.6, d.h);
-
-    CHECK_EQ(7.0, f.a);
-    CHECK_EQ(8.0, f.b);
-    CHECK_EQ(1.0, f.c);
-    CHECK_EQ(2.0, f.d);
-    CHECK_EQ(3.0, f.e);
-    CHECK_EQ(4.0, f.f);
-    CHECK_EQ(5.0, f.g);
-    CHECK_EQ(6.0, f.h);
-  }
+  CHECK_EQ(7.7, d.a);
+  CHECK_EQ(8.8, d.b);
+  CHECK_EQ(1.1, d.c);
+  CHECK_EQ(2.2, d.d);
+  CHECK_EQ(3.3, d.e);
+  CHECK_EQ(4.4, d.f);
+  CHECK_EQ(5.5, d.g);
+  CHECK_EQ(6.6, d.h);
+
+  CHECK_EQ(7.0, f.a);
+  CHECK_EQ(8.0, f.b);
+  CHECK_EQ(1.0, f.c);
+  CHECK_EQ(2.0, f.d);
+  CHECK_EQ(3.0, f.e);
+  CHECK_EQ(4.0, f.f);
+  CHECK_EQ(5.0, f.g);
+  CHECK_EQ(6.0, f.h);
 }
-- 
2.7.4
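For reference, the EXTERNAL_FLOAT_ELEMENTS fallback deleted from the store stub above narrowed a heap number to binary32 using only integer instructions: flush special exponents, re-bias the exponent from 1023 to 127, clamp overflow to infinity and underflow to zero, and keep the top 23 mantissa bits. A simplified sketch of that bit-level scheme in portable C++, with an invented name and not the removed assembly itself; like the stub it truncates the mantissa, and unlike the stub it collapses NaNs to infinity and produces no subnormals:

    #include <cstdint>
    #include <cstring>

    uint32_t SoftDoubleToFloatBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));            // binary64 bit pattern
      uint32_t sign = static_cast<uint32_t>(bits >> 63) << 31;
      int32_t exponent = static_cast<int32_t>((bits >> 52) & 0x7FF);
      uint32_t mantissa =
          static_cast<uint32_t>((bits >> 29) & 0x7FFFFF);  // top 23 of 52 bits
      if (exponent == 0) return sign;                   // zero/subnormal -> signed 0
      if (exponent == 0x7FF) return sign | 0x7F800000;  // Inf/NaN -> infinity
      exponent += 127 - 1023;                           // re-bias binary64 -> binary32
      if (exponent >= 0xFF) return sign | 0x7F800000;   // overflow -> infinity
      if (exponent <= 0) return sign;                   // underflow -> signed 0
      return sign | (static_cast<uint32_t>(exponent) << 23) | mantissa;
    }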