From e4edbb05353861e438f8cf02ee6963c31f65434f Mon Sep 17 00:00:00 2001 From: "svenpanne@chromium.org" Date: Tue, 5 Mar 2013 10:48:16 +0000 Subject: [PATCH] Cleaned up CpuFeature scope handling. First of all, it has nothing to do with Isolates, it is related to the assembler at hand. Furthermore, the saving/restoring is platform-independent. Cleaned up some platform-specific stuff on the way. Note that there are some things which still need some cleanup, like e.g. using EnumSet instead of uint64_t, making Probe() more uniform across platforms etc., but the CL is already big enough. BUG=v8:2487 Review URL: https://codereview.chromium.org/12391055 git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13823 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 --- src/arm/assembler-arm.cc | 110 +++++++++++++++++--------------- src/arm/assembler-arm.h | 55 ++-------------- src/arm/code-stubs-arm.cc | 76 +++++++++++----------- src/arm/code-stubs-arm.h | 4 +- src/arm/codegen-arm.cc | 4 +- src/arm/deoptimizer-arm.cc | 6 +- src/arm/full-codegen-arm.cc | 2 +- src/arm/lithium-codegen-arm.cc | 76 +++++++++++----------- src/arm/lithium-gap-resolver-arm.cc | 14 ++--- src/arm/macro-assembler-arm.cc | 20 +++--- src/arm/stub-cache-arm.cc | 10 +-- src/assembler.cc | 31 +++++++++ src/assembler.h | 25 ++++++++ src/ia32/assembler-ia32.cc | 115 +++++++++++++++++----------------- src/ia32/assembler-ia32.h | 57 +++-------------- src/ia32/code-stubs-ia32.cc | 52 +++++++-------- src/ia32/code-stubs-ia32.h | 4 +- src/ia32/codegen-ia32.cc | 14 ++--- src/ia32/deoptimizer-ia32.cc | 6 +- src/ia32/full-codegen-ia32.cc | 2 +- src/ia32/lithium-codegen-ia32.cc | 82 ++++++++++++------------ src/ia32/lithium-gap-resolver-ia32.cc | 10 +-- src/ia32/macro-assembler-ia32.cc | 10 +-- src/ia32/stub-cache-ia32.cc | 10 +-- src/isolate.h | 1 - src/mips/assembler-mips.cc | 30 ++++----- src/mips/assembler-mips.h | 54 ++-------------- src/mips/code-stubs-mips.cc | 78 +++++++++++------------ src/mips/code-stubs-mips.h | 
4 +- src/mips/codegen-mips.cc | 4 +- src/mips/deoptimizer-mips.cc | 6 +- src/mips/full-codegen-mips.cc | 2 +- src/mips/lithium-codegen-mips.cc | 66 +++++++++---------- src/mips/lithium-gap-resolver-mips.cc | 14 ++--- src/mips/macro-assembler-mips.cc | 30 ++++----- src/mips/stub-cache-mips.cc | 10 +-- src/x64/assembler-x64.cc | 25 ++++---- src/x64/assembler-x64.h | 59 +++-------------- src/x64/lithium-codegen-x64.cc | 2 +- src/x64/macro-assembler-x64.cc | 4 +- test/cctest/test-assembler-arm.cc | 16 ++--- test/cctest/test-assembler-ia32.cc | 10 ++- test/cctest/test-disasm-arm.cc | 4 +- test/cctest/test-disasm-ia32.cc | 12 ++-- 44 files changed, 550 insertions(+), 676 deletions(-) diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc index a8c32d9..1574d51 100644 --- a/src/arm/assembler-arm.cc +++ b/src/arm/assembler-arm.cc @@ -48,7 +48,7 @@ namespace internal { bool CpuFeatures::initialized_ = false; #endif unsigned CpuFeatures::supported_ = 0; -unsigned CpuFeatures::found_by_runtime_probing_ = 0; +unsigned CpuFeatures::found_by_runtime_probing_only_ = 0; ExternalReference ExternalReference::cpu_features() { @@ -110,7 +110,7 @@ const char* DwVfpRegister::AllocationIndexToString(int index) { void CpuFeatures::Probe() { - unsigned standard_features = static_cast( + uint64_t standard_features = static_cast( OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler(); ASSERT(supported_ == 0 || supported_ == standard_features); #ifdef DEBUG @@ -131,23 +131,26 @@ void CpuFeatures::Probe() { // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6. 
if (FLAG_enable_vfp3) { - supported_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2; + supported_ |= + static_cast(1) << VFP3 | + static_cast(1) << ARMv7 | + static_cast(1) << VFP2; } // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled if (FLAG_enable_armv7) { - supported_ |= 1u << ARMv7; + supported_ |= static_cast(1) << ARMv7; } if (FLAG_enable_sudiv) { - supported_ |= 1u << SUDIV; + supported_ |= static_cast(1) << SUDIV; } if (FLAG_enable_movw_movt) { - supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS; + supported_ |= static_cast(1) << MOVW_MOVT_IMMEDIATE_LOADS; } if (FLAG_enable_32dregs) { - supported_ |= 1u << VFP32DREGS; + supported_ |= static_cast(1) << VFP32DREGS; } #else // __arm__ @@ -156,33 +159,38 @@ void CpuFeatures::Probe() { // This implementation also sets the VFP flags if runtime // detection of VFP returns true. VFPv3 implies ARMv7 and VFP2, see ARM DDI // 0406B, page A1-6. - found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2; + found_by_runtime_probing_only_ |= + static_cast(1) << VFP3 | + static_cast(1) << ARMv7 | + static_cast(1) << VFP2; } else if (!IsSupported(VFP2) && OS::ArmCpuHasFeature(VFP2)) { - found_by_runtime_probing_ |= 1u << VFP2; + found_by_runtime_probing_only_ |= static_cast(1) << VFP2; } if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) { - found_by_runtime_probing_ |= 1u << ARMv7; + found_by_runtime_probing_only_ |= static_cast(1) << ARMv7; } if (!IsSupported(SUDIV) && OS::ArmCpuHasFeature(SUDIV)) { - found_by_runtime_probing_ |= 1u << SUDIV; + found_by_runtime_probing_only_ |= static_cast(1) << SUDIV; } if (!IsSupported(UNALIGNED_ACCESSES) && OS::ArmCpuHasFeature(ARMv7)) { - found_by_runtime_probing_ |= 1u << UNALIGNED_ACCESSES; + found_by_runtime_probing_only_ |= + static_cast(1) << UNALIGNED_ACCESSES; } if (OS::GetCpuImplementer() == QUALCOMM_IMPLEMENTER && OS::ArmCpuHasFeature(ARMv7)) { - found_by_runtime_probing_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS; + found_by_runtime_probing_only_ |= + 
static_cast(1) << MOVW_MOVT_IMMEDIATE_LOADS; } if (!IsSupported(VFP32DREGS) && OS::ArmCpuHasFeature(VFP32DREGS)) { - found_by_runtime_probing_ |= 1u << VFP32DREGS; + found_by_runtime_probing_only_ |= static_cast(1) << VFP32DREGS; } - supported_ |= found_by_runtime_probing_; + supported_ |= found_by_runtime_probing_only_; #endif // Assert that VFP3 implies VFP2 and ARMv7. @@ -1541,7 +1549,7 @@ void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) { void Assembler::ldrd(Register dst1, Register dst2, const MemOperand& src, Condition cond) { - ASSERT(CpuFeatures::IsEnabled(ARMv7)); + ASSERT(IsEnabled(ARMv7)); ASSERT(src.rm().is(no_reg)); ASSERT(!dst1.is(lr)); // r14. ASSERT_EQ(0, dst1.code() % 2); @@ -1556,7 +1564,7 @@ void Assembler::strd(Register src1, Register src2, ASSERT(!src1.is(lr)); // r14. ASSERT_EQ(0, src1.code() % 2); ASSERT_EQ(src1.code() + 1, src2.code()); - ASSERT(CpuFeatures::IsEnabled(ARMv7)); + ASSERT(IsEnabled(ARMv7)); addrmod3(cond | B7 | B6 | B5 | B4, src1, dst); } @@ -1755,7 +1763,7 @@ void Assembler::vldr(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-924. // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) | // Vd(15-12) | 1011(11-8) | offset - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); int u = 1; if (offset < 0) { offset = -offset; @@ -1799,7 +1807,7 @@ void Assembler::vldr(const SwVfpRegister dst, // Instruction details available in ARM DDI 0406A, A8-628. // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) | // Vdst(15-12) | 1010(11-8) | offset - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); int u = 1; if (offset < 0) { offset = -offset; @@ -1843,7 +1851,7 @@ void Assembler::vstr(const DwVfpRegister src, // Instruction details available in ARM DDI 0406C.b, A8-1082. 
// cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) | // Vd(15-12) | 1011(11-8) | (offset/4) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); int u = 1; if (offset < 0) { offset = -offset; @@ -1887,7 +1895,7 @@ void Assembler::vstr(const SwVfpRegister src, // Instruction details available in ARM DDI 0406A, A8-786. // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) | // Vdst(15-12) | 1010(11-8) | (offset/4) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); int u = 1; if (offset < 0) { offset = -offset; @@ -1930,7 +1938,7 @@ void Assembler::vldm(BlockAddrMode am, // Instruction details available in ARM DDI 0406C.b, A8-922. // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) | // first(15-12) | 1011(11-8) | (count * 2) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); ASSERT_LE(first.code(), last.code()); ASSERT(am == ia || am == ia_w || am == db_w); ASSERT(!base.is(pc)); @@ -1952,7 +1960,7 @@ void Assembler::vstm(BlockAddrMode am, // Instruction details available in ARM DDI 0406C.b, A8-1080. // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) | // first(15-12) | 1011(11-8) | (count * 2) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); ASSERT_LE(first.code(), last.code()); ASSERT(am == ia || am == ia_w || am == db_w); ASSERT(!base.is(pc)); @@ -1973,7 +1981,7 @@ void Assembler::vldm(BlockAddrMode am, // Instruction details available in ARM DDI 0406A, A8-626. // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) | // first(15-12) | 1010(11-8) | (count/2) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); ASSERT_LE(first.code(), last.code()); ASSERT(am == ia || am == ia_w || am == db_w); ASSERT(!base.is(pc)); @@ -1994,7 +2002,7 @@ void Assembler::vstm(BlockAddrMode am, // Instruction details available in ARM DDI 0406A, A8-784. 
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) | // first(15-12) | 1011(11-8) | (count/2) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); ASSERT_LE(first.code(), last.code()); ASSERT(am == ia || am == ia_w || am == db_w); ASSERT(!base.is(pc)); @@ -2068,7 +2076,7 @@ static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) { void Assembler::vmov(const DwVfpRegister dst, double imm, const Register scratch) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); uint32_t enc; if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) { @@ -2140,7 +2148,7 @@ void Assembler::vmov(const SwVfpRegister dst, const Condition cond) { // Sd = Sm // Instruction details available in ARM DDI 0406B, A8-642. - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); int sd, d, sm, m; dst.split_code(&sd, &d); src.split_code(&sm, &m); @@ -2155,7 +2163,7 @@ void Assembler::vmov(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-938. // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) | // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); int vd, d; dst.split_code(&vd, &d); int vm, m; @@ -2173,7 +2181,7 @@ void Assembler::vmov(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-940. // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) | // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); ASSERT(index.index == 0 || index.index == 1); int vd, d; dst.split_code(&vd, &d); @@ -2190,7 +2198,7 @@ void Assembler::vmov(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-948. 
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) | // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); ASSERT(!src1.is(pc) && !src2.is(pc)); int vm, m; dst.split_code(&vm, &m); @@ -2207,7 +2215,7 @@ void Assembler::vmov(const Register dst1, // Instruction details available in ARM DDI 0406C.b, A8-948. // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) | // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); ASSERT(!dst1.is(pc) && !dst2.is(pc)); int vm, m; src.split_code(&vm, &m); @@ -2223,7 +2231,7 @@ void Assembler::vmov(const SwVfpRegister dst, // Instruction details available in ARM DDI 0406A, A8-642. // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) | // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); ASSERT(!src.is(pc)); int sn, n; dst.split_code(&sn, &n); @@ -2238,7 +2246,7 @@ void Assembler::vmov(const Register dst, // Instruction details available in ARM DDI 0406A, A8-642. 
// cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) | // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); ASSERT(!dst.is(pc)); int sn, n; src.split_code(&sn, &n); @@ -2363,7 +2371,7 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond)); } @@ -2372,7 +2380,7 @@ void Assembler::vcvt_f32_s32(const SwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond)); } @@ -2381,7 +2389,7 @@ void Assembler::vcvt_f64_u32(const DwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond)); } @@ -2390,7 +2398,7 @@ void Assembler::vcvt_s32_f64(const SwVfpRegister dst, const DwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond)); } @@ -2399,7 +2407,7 @@ void Assembler::vcvt_u32_f64(const SwVfpRegister dst, const DwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond)); } @@ -2408,7 +2416,7 @@ void Assembler::vcvt_f64_f32(const DwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond)); } @@ -2417,7 +2425,7 @@ void 
Assembler::vcvt_f32_f64(const SwVfpRegister dst, const DwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond)); } @@ -2428,7 +2436,7 @@ void Assembler::vneg(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-968. // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) | // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); int vd, d; dst.split_code(&vd, &d); int vm, m; @@ -2445,7 +2453,7 @@ void Assembler::vabs(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-524. // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) | // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); int vd, d; dst.split_code(&vd, &d); int vm, m; @@ -2464,7 +2472,7 @@ void Assembler::vadd(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-830. // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) | // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); int vd, d; dst.split_code(&vd, &d); int vn, n; @@ -2485,7 +2493,7 @@ void Assembler::vsub(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-1086. // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) | // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); int vd, d; dst.split_code(&vd, &d); int vn, n; @@ -2506,7 +2514,7 @@ void Assembler::vmul(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-960. 
// cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) | // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); int vd, d; dst.split_code(&vd, &d); int vn, n; @@ -2563,7 +2571,7 @@ void Assembler::vdiv(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-882. // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) | // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); int vd, d; dst.split_code(&vd, &d); int vn, n; @@ -2582,7 +2590,7 @@ void Assembler::vcmp(const DwVfpRegister src1, // Instruction details available in ARM DDI 0406C.b, A8-864. // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) | // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); int vd, d; src1.split_code(&vd, &d); int vm, m; @@ -2599,7 +2607,7 @@ void Assembler::vcmp(const DwVfpRegister src1, // Instruction details available in ARM DDI 0406C.b, A8-864. // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) | // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); ASSERT(src2 == 0.0); int vd, d; src1.split_code(&vd, &d); @@ -2611,7 +2619,7 @@ void Assembler::vmsr(Register dst, Condition cond) { // Instruction details available in ARM DDI 0406A, A8-652. // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) | // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); emit(cond | 0xE*B24 | 0xE*B20 | B16 | dst.code()*B12 | 0xA*B8 | B4); } @@ -2621,7 +2629,7 @@ void Assembler::vmrs(Register dst, Condition cond) { // Instruction details available in ARM DDI 0406A, A8-652. 
// cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) | // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); emit(cond | 0xE*B24 | 0xF*B20 | B16 | dst.code()*B12 | 0xA*B8 | B4); } @@ -2633,7 +2641,7 @@ void Assembler::vsqrt(const DwVfpRegister dst, // Instruction details available in ARM DDI 0406C.b, A8-1058. // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) | // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); int vd, d; dst.split_code(&vd, &d); int vm, m; diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h index 12cee54..6b00f32 100644 --- a/src/arm/assembler-arm.h +++ b/src/arm/assembler-arm.h @@ -48,7 +48,7 @@ namespace v8 { namespace internal { // CpuFeatures keeps track of which features are supported by the target CPU. -// Supported features must be enabled by a Scope before use. +// Supported features must be enabled by a CpuFeatureScope before use. class CpuFeatures : public AllStatic { public: // Detect features of the target CPU. Set safe defaults if the serializer @@ -68,56 +68,11 @@ class CpuFeatures : public AllStatic { return (supported_ & (1u << f)) != 0; } -#ifdef DEBUG - // Check whether a feature is currently enabled. - static bool IsEnabled(CpuFeature f) { + static bool IsFoundByRuntimeProbingOnly(CpuFeature f) { ASSERT(initialized_); - Isolate* isolate = Isolate::UncheckedCurrent(); - if (isolate == NULL) { - // When no isolate is available, work as if we're running in - // release mode. - return IsSupported(f); - } - unsigned enabled = static_cast(isolate->enabled_cpu_features()); - return (enabled & (1u << f)) != 0; + return (found_by_runtime_probing_only_ & + (static_cast(1) << f)) != 0; } -#endif - - // Enable a specified feature within a scope. 
- class Scope BASE_EMBEDDED { -#ifdef DEBUG - - public: - explicit Scope(CpuFeature f) { - unsigned mask = 1u << f; - // VFP2 and ARMv7 are implied by VFP3. - if (f == VFP3) mask |= 1u << VFP2 | 1u << ARMv7; - ASSERT(CpuFeatures::IsSupported(f)); - ASSERT(!Serializer::enabled() || - (CpuFeatures::found_by_runtime_probing_ & mask) == 0); - isolate_ = Isolate::UncheckedCurrent(); - old_enabled_ = 0; - if (isolate_ != NULL) { - old_enabled_ = static_cast(isolate_->enabled_cpu_features()); - isolate_->set_enabled_cpu_features(old_enabled_ | mask); - } - } - ~Scope() { - ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_); - if (isolate_ != NULL) { - isolate_->set_enabled_cpu_features(old_enabled_); - } - } - - private: - Isolate* isolate_; - unsigned old_enabled_; -#else - - public: - explicit Scope(CpuFeature f) {} -#endif - }; class TryForceFeatureScope BASE_EMBEDDED { public: @@ -150,7 +105,7 @@ class CpuFeatures : public AllStatic { static bool initialized_; #endif static unsigned supported_; - static unsigned found_by_runtime_probing_; + static unsigned found_by_runtime_probing_only_; friend class ExternalReference; DISALLOW_COPY_AND_ASSIGN(CpuFeatures); diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc index d25406a..cf8fd6a 100644 --- a/src/arm/code-stubs-arm.cc +++ b/src/arm/code-stubs-arm.cc @@ -651,7 +651,7 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm, Register scratch1, Register scratch2) { if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); __ vmov(d7.high(), scratch1); __ vcvt_f64_s32(d7, d7.high()); @@ -702,7 +702,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, // Handle loading a double from a heap number. if (CpuFeatures::IsSupported(VFP2) && destination == kVFPRegisters) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); // Load the double from tagged HeapNumber to double register. 
__ sub(scratch1, object, Operand(kHeapObjectTag)); __ vldr(dst, scratch1, HeapNumber::kValueOffset); @@ -716,7 +716,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, // Handle loading a double from a smi. __ bind(&is_smi); if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); // Convert smi to double using VFP instructions. __ vmov(dst.high(), scratch1); __ vcvt_f64_s32(dst, dst.high()); @@ -792,7 +792,7 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, Label done; if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ vmov(single_scratch, int_scratch); __ vcvt_f64_s32(double_dst, single_scratch); if (destination == kCoreRegisters) { @@ -886,7 +886,7 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, // Load the number. if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); // Load the double value. __ sub(scratch1, object, Operand(kHeapObjectTag)); __ vldr(double_dst, scratch1, HeapNumber::kValueOffset); @@ -983,7 +983,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, // Object is a heap number. // Convert the floating point value to a 32-bit integer. if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); // Load the double value. __ sub(scratch1, object, Operand(kHeapObjectTag)); @@ -1118,7 +1118,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( __ push(lr); __ PrepareCallCFunction(0, 2, scratch); if (masm->use_eabi_hardfloat()) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ vmov(d0, r0, r1); __ vmov(d1, r2, r3); } @@ -1130,7 +1130,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( // Store answer in the overwritable heap number. Double returned in // registers r0 and r1 or in d0. 
if (masm->use_eabi_hardfloat()) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ vstr(d0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); } else { @@ -1345,7 +1345,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, // Lhs is a smi, rhs is a number. if (CpuFeatures::IsSupported(VFP2)) { // Convert lhs to a double in d7. - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); // Load the double from rhs, tagged HeapNumber r0, to d6. __ sub(r7, rhs, Operand(kHeapObjectTag)); @@ -1384,7 +1384,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, // Rhs is a smi, lhs is a heap number. if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); // Load the double from lhs, tagged HeapNumber r1, to d7. __ sub(r7, lhs, Operand(kHeapObjectTag)); __ vldr(d7, r7, HeapNumber::kValueOffset); @@ -1496,7 +1496,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, __ push(lr); __ PrepareCallCFunction(0, 2, r5); if (masm->use_eabi_hardfloat()) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ vmov(d0, r0, r1); __ vmov(d1, r2, r3); } @@ -1573,7 +1573,7 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, // Both are heap numbers. Load them up then jump to the code we have // for that. 
if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ sub(r7, rhs, Operand(kHeapObjectTag)); __ vldr(d6, r7, HeapNumber::kValueOffset); __ sub(r7, lhs, Operand(kHeapObjectTag)); @@ -1663,7 +1663,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, if (!object_is_smi) { __ JumpIfSmi(object, &is_smi); if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, @@ -1814,7 +1814,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { Isolate* isolate = masm->isolate(); if (CpuFeatures::IsSupported(VFP2)) { __ bind(&lhs_not_nan); - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); Label no_nan; // ARMv7 VFP3 instructions to implement double precision comparison. __ VFPCompareAndSetFlags(d7, d6); @@ -1994,7 +1994,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) { __ b(ne, ¬_heap_number); if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset)); __ VFPCompareAndSetFlags(d1, 0.0); @@ -2094,7 +2094,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { const Register scratch = r1; if (save_doubles_ == kSaveFPRegs) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); // Check CPU flags for number of registers, setting the Z condition flag. __ CheckFor32DRegs(scratch); @@ -2114,7 +2114,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { ExternalReference::store_buffer_overflow_function(masm->isolate()), argument_count); if (save_doubles_ == kSaveFPRegs) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); // Check CPU flags for number of registers, setting the Z condition flag. 
__ CheckFor32DRegs(scratch); @@ -2350,7 +2350,7 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot( if (CpuFeatures::IsSupported(VFP2)) { // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ vmov(s0, r1); __ vcvt_f64_s32(d0, s0); __ sub(r2, r0, Operand(kHeapObjectTag)); @@ -2745,7 +2745,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, // Using VFP registers: // d6: Left value // d7: Right value - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); switch (op) { case Token::ADD: __ vadd(d5, d6, d7); @@ -2877,7 +2877,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, if (CpuFeatures::IsSupported(VFP2)) { // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As // mentioned above SHR needs to always produce a positive result. - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ vmov(s0, r2); if (op == Token::SHR) { __ vcvt_f64_u32(d0, s0); @@ -3069,7 +3069,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { &transition); if (destination == FloatingPointHelper::kVFPRegisters) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); Label return_heap_number; switch (op_) { case Token::ADD: @@ -3278,7 +3278,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { mode_); if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); if (op_ != Token::SHR) { // Convert the result to a floating point value. __ vmov(double_scratch.low(), r2); @@ -3481,7 +3481,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { const bool tagged = (argument_type_ == TAGGED); if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); if (tagged) { // Argument is a number and is on stack and in r0. // Load argument and check if it is a smi. 
@@ -3583,7 +3583,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ TailCallExternalReference(runtime_function, 1, 1); } else { ASSERT(CpuFeatures::IsSupported(VFP2)); - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); Label no_update; Label skip_cache; @@ -3644,7 +3644,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, Register scratch) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(masm->IsEnabled(VFP2)); Isolate* isolate = masm->isolate(); __ push(lr); @@ -3705,7 +3705,7 @@ void InterruptStub::Generate(MacroAssembler* masm) { void MathPowStub::Generate(MacroAssembler* masm) { - CpuFeatures::Scope vfp2_scope(VFP2); + CpuFeatureScope vfp2_scope(masm, VFP2); const Register base = r1; const Register exponent = r2; const Register heapnumbermap = r5; @@ -3931,21 +3931,15 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) { // These stubs might already be in the snapshot, detect that and don't // regenerate, which would lead to code stub initialization state being messed // up. 
- Code* save_doubles_code = NULL; - Code* store_buffer_overflow_code = NULL; - if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) { - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope2(VFP2); - save_doubles_code = *save_doubles.GetCode(isolate); - store_buffer_overflow_code = *stub.GetCode(isolate); - } else { - save_doubles_code = *save_doubles.GetCode(isolate); - store_buffer_overflow_code = *stub.GetCode(isolate); - } + Code* save_doubles_code; + if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { + save_doubles_code = *save_doubles.GetCode(isolate); save_doubles_code->set_is_pregenerated(true); + + Code* store_buffer_overflow_code = *stub.GetCode(isolate); store_buffer_overflow_code->set_is_pregenerated(true); } - ISOLATE->set_fp_stubs_generated(true); + isolate->set_fp_stubs_generated(true); } @@ -4192,7 +4186,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ stm(db_w, sp, kCalleeSaved | lr.bit()); if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); // Save callee-saved vfp registers. __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); // Set up the reserved register for 0.0. @@ -4346,7 +4340,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { #endif if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); // Restore callee-saved vfp registers. __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); } @@ -7090,7 +7084,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { // Inlining the double comparison and falling back to the general compare // stub if NaN is involved or VFP2 is unsupported. if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); // Load left and right operand. 
Label done, left, left_smi, right_smi; diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h index 3e37712..a5d08af 100644 --- a/src/arm/code-stubs-arm.h +++ b/src/arm/code-stubs-arm.h @@ -471,7 +471,7 @@ class RecordWriteStub: public PlatformCodeStub { if (mode == kSaveFPRegs) { // Number of d-regs not known at snapshot time. ASSERT(!Serializer::enabled()); - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); masm->sub(sp, sp, Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1))); @@ -489,7 +489,7 @@ class RecordWriteStub: public PlatformCodeStub { if (mode == kSaveFPRegs) { // Number of d-regs not known at snapshot time. ASSERT(!Serializer::enabled()); - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); // Restore all VFP registers except d0. // TODO(hans): We should probably restore d0 too. And maybe use vldm. for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) { diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc index 6e3c635..ff97ab5 100644 --- a/src/arm/codegen-arm.cc +++ b/src/arm/codegen-arm.cc @@ -72,7 +72,7 @@ UnaryMathFunction CreateExpFunction() { MacroAssembler masm(NULL, buffer, static_cast(actual_size)); { - CpuFeatures::Scope use_vfp(VFP2); + CpuFeatureScope use_vfp(&masm, VFP2); DwVfpRegister input = d0; DwVfpRegister result = d1; DwVfpRegister double_scratch1 = d2; @@ -277,7 +277,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( // Normal smi, convert to double and store. 
if (vfp2_supported) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ vmov(s0, r9); __ vcvt_f64_s32(d0, s0); __ vstr(d0, r7, 0); diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc index 2e1e3e3..28b0f0d 100644 --- a/src/arm/deoptimizer-arm.cc +++ b/src/arm/deoptimizer-arm.cc @@ -876,7 +876,7 @@ void Deoptimizer::EntryGenerator::Generate() { kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters; if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); // Save all allocatable VFP registers before messing with them. ASSERT(kDoubleRegZero.code() == 14); ASSERT(kScratchDoubleReg.code() == 15); @@ -951,7 +951,7 @@ void Deoptimizer::EntryGenerator::Generate() { } if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); // Copy VFP registers to // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters] int double_regs_offset = FrameDescription::double_registers_offset(); @@ -1031,7 +1031,7 @@ void Deoptimizer::EntryGenerator::Generate() { __ b(lt, &outer_push_loop); if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); // Check CPU flags for number of registers, setting the Z condition flag. __ CheckFor32DRegs(ip); diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc index d3c1957..eb1f988 100644 --- a/src/arm/full-codegen-arm.cc +++ b/src/arm/full-codegen-arm.cc @@ -3053,7 +3053,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) { __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset)); __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); // 0x41300000 is the top half of 1.0 x 2^20 as a double. // Create this constant using mov/orr to avoid PC relative load. 
__ mov(r1, Operand(0x41000000)); diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc index 8fd1a54..34f5939 100644 --- a/src/arm/lithium-codegen-arm.cc +++ b/src/arm/lithium-codegen-arm.cc @@ -196,7 +196,7 @@ bool LCodeGen::GeneratePrologue() { } if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); Comment(";;; Save clobbered callee double registers"); int count = 0; BitVector* doubles = chunk()->allocated_double_registers(); @@ -1162,7 +1162,7 @@ void LCodeGen::DoModI(LModI* instr) { Label done; if (CpuFeatures::IsSupported(SUDIV)) { - CpuFeatures::Scope scope(SUDIV); + CpuFeatureScope scope(masm(), SUDIV); // Check for x % 0. if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { __ cmp(right, Operand::Zero()); @@ -1208,7 +1208,7 @@ void LCodeGen::DoModI(LModI* instr) { Label vfp_modulo, both_positive, right_negative; - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); // Check for x % 0. if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { @@ -1567,7 +1567,7 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) { __ teq(remainder, Operand(divisor), ne); __ sub(result, result, Operand(1), LeaveCC, mi); } else { - CpuFeatures::Scope scope(SUDIV); + CpuFeatureScope scope(masm(), SUDIV); const Register right = ToRegister(instr->right()); // Check for x / 0. 
@@ -1614,7 +1614,7 @@ void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map, LOperand* left_argument, LOperand* right_argument, Token::Value op) { - CpuFeatures::Scope vfp_scope(VFP2); + CpuFeatureScope vfp_scope(masm(), VFP2); Register left = ToRegister(left_argument); Register right = ToRegister(right_argument); @@ -1900,7 +1900,7 @@ void LCodeGen::DoConstantI(LConstantI* instr) { void LCodeGen::DoConstantD(LConstantD* instr) { ASSERT(instr->result()->IsDoubleRegister()); DwVfpRegister result = ToDoubleRegister(instr->result()); - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); double v = instr->value(); __ Vmov(result, v, scratch0()); } @@ -2078,7 +2078,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); } else { ASSERT(instr->hydrogen()->representation().IsDouble()); - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); DwVfpRegister left_reg = ToDoubleRegister(left); DwVfpRegister right_reg = ToDoubleRegister(right); DwVfpRegister result_reg = ToDoubleRegister(instr->result()); @@ -2124,7 +2124,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); DwVfpRegister left = ToDoubleRegister(instr->left()); DwVfpRegister right = ToDoubleRegister(instr->right()); DwVfpRegister result = ToDoubleRegister(instr->result()); @@ -2215,7 +2215,7 @@ void LCodeGen::DoBranch(LBranch* instr) { __ cmp(reg, Operand::Zero()); EmitBranch(true_block, false_block, ne); } else if (r.IsDouble()) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); DwVfpRegister reg = ToDoubleRegister(instr->value()); Register scratch = scratch0(); @@ -2301,7 +2301,7 @@ void LCodeGen::DoBranch(LBranch* instr) { } if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); // 
heap number -> false iff +0, -0, or NaN. DwVfpRegister dbl_scratch = double_scratch0(); Label not_heap_number; @@ -2381,7 +2381,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { EmitGoto(next_block); } else { if (instr->is_double()) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); // Compare left and right operands as doubles and load the // resulting flags into the normal status register. __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right)); @@ -2937,7 +2937,7 @@ void LCodeGen::DoReturn(LReturn* instr) { __ CallRuntime(Runtime::kTraceExit, 1); } if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); ASSERT(NeedsEagerFrame()); BitVector* doubles = chunk()->allocated_double_registers(); BitVector::Iterator save_iterator(doubles); @@ -3312,7 +3312,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { : Operand(key, LSL, shift_size); __ add(scratch0(), external_pointer, operand); if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { __ vldr(kScratchDoubleReg.low(), scratch0(), additional_offset); __ vcvt_f64_f32(result, kScratchDoubleReg.low()); @@ -3437,7 +3437,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { __ add(elements, elements, Operand(key, LSL, shift_size)); } if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); __ add(elements, elements, Operand(base_offset)); __ vldr(result, elements, 0); if (instr->hydrogen()->RequiresHoleCheck()) { @@ -3894,7 +3894,7 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); // Class for deferred case. 
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { public: @@ -3931,7 +3931,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); DwVfpRegister input = ToDoubleRegister(instr->value()); Register result = ToRegister(instr->result()); Register scratch = scratch0(); @@ -3957,7 +3957,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); DwVfpRegister input = ToDoubleRegister(instr->value()); Register result = ToRegister(instr->result()); DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); @@ -4023,7 +4023,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); DwVfpRegister input = ToDoubleRegister(instr->value()); DwVfpRegister result = ToDoubleRegister(instr->result()); __ vsqrt(result, input); @@ -4031,7 +4031,7 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); DwVfpRegister input = ToDoubleRegister(instr->value()); DwVfpRegister result = ToDoubleRegister(instr->result()); DwVfpRegister temp = ToDoubleRegister(instr->temp()); @@ -4053,7 +4053,7 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { void LCodeGen::DoPower(LPower* instr) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); Representation exponent_type = instr->hydrogen()->right()->representation(); // Having marked this as a call, we can use any registers. // Just make sure that the input/output registers are the expected ones. 
@@ -4086,7 +4086,7 @@ void LCodeGen::DoPower(LPower* instr) { void LCodeGen::DoRandom(LRandom* instr) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); class DeferredDoRandom: public LDeferredCode { public: DeferredDoRandom(LCodeGen* codegen, LRandom* instr) @@ -4165,7 +4165,7 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) { void LCodeGen::DoMathExp(LMathExp* instr) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); DwVfpRegister input = ToDoubleRegister(instr->value()); DwVfpRegister result = ToDoubleRegister(instr->result()); DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); @@ -4456,7 +4456,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); Register external_pointer = ToRegister(instr->elements()); Register key = no_reg; ElementsKind elements_kind = instr->elements_kind(); @@ -4477,7 +4477,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - CpuFeatures::Scope scope(VFP3); + CpuFeatureScope scope(masm(), VFP3); DwVfpRegister value(ToDoubleRegister(instr->value())); Operand operand(key_is_constant ? 
Operand(constant_key << element_size_shift) @@ -4527,7 +4527,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); DwVfpRegister value = ToDoubleRegister(instr->value()); Register elements = ToRegister(instr->elements()); Register key = no_reg; @@ -4828,7 +4828,7 @@ void LCodeGen::DoStringLength(LStringLength* instr) { void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); LOperand* input = instr->value(); ASSERT(input->IsRegister() || input->IsStackSlot()); LOperand* output = instr->result(); @@ -4846,7 +4846,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); LOperand* input = instr->value(); LOperand* output = instr->result(); @@ -4966,7 +4966,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, __ eor(src, src, Operand(0x80000000)); } if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); __ vmov(flt_scratch, src); __ vcvt_f64_s32(dbl_scratch, flt_scratch); } else { @@ -4978,7 +4978,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, } } else { if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); __ vmov(flt_scratch, src); __ vcvt_f64_u32(dbl_scratch, flt_scratch); } else { @@ -5019,7 +5019,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, // number. 
__ bind(&done); if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); } else { __ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset)); @@ -5058,7 +5058,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { Label done; if (convert_hole) { if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); DwVfpRegister input_reg = ToDoubleRegister(instr->value()); __ VFPCompareAndSetFlags(input_reg, input_reg); __ b(vc, &no_special_nan_handling); @@ -5110,7 +5110,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { } __ bind(deferred->exit()); if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); __ vstr(input_reg, reg, HeapNumber::kValueOffset); } else { __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset)); @@ -5165,7 +5165,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, Register scratch = scratch0(); SwVfpRegister flt_scratch = double_scratch0().low(); ASSERT(!result_reg.is(double_scratch0())); - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); Label load_smi, heap_number, done; @@ -5254,7 +5254,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ cmp(scratch1, Operand(ip)); if (instr->truncating()) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); Register scratch3 = ToRegister(instr->temp2()); ASSERT(!scratch3.is(input_reg) && !scratch3.is(scratch1) && @@ -5283,7 +5283,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { scratch3); } else { - CpuFeatures::Scope scope(VFP3); + CpuFeatureScope scope(masm(), VFP3); // Deoptimize if we don't have a heap number. 
DeoptimizeIf(ne, instr->environment()); @@ -5511,7 +5511,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { - CpuFeatures::Scope vfp_scope(VFP2); + CpuFeatureScope vfp_scope(masm(), VFP2); DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); DwVfpRegister temp_reg = ToDoubleRegister(instr->temp()); @@ -5520,7 +5520,7 @@ void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); Register unclamped_reg = ToRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); __ ClampUint8(result_reg, unclamped_reg); @@ -5528,7 +5528,7 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); Register scratch = scratch0(); Register input_reg = ToRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); diff --git a/src/arm/lithium-gap-resolver-arm.cc b/src/arm/lithium-gap-resolver-arm.cc index 4df1338..a65ab7e 100644 --- a/src/arm/lithium-gap-resolver-arm.cc +++ b/src/arm/lithium-gap-resolver-arm.cc @@ -171,10 +171,10 @@ void LGapResolver::BreakCycle(int index) { } else if (source->IsStackSlot()) { __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source)); } else if (source->IsDoubleRegister()) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(cgen_->masm(), VFP2); __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source)); } else if (source->IsDoubleStackSlot()) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(cgen_->masm(), VFP2); __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source)); } else { UNREACHABLE(); @@ -194,10 +194,10 @@ void LGapResolver::RestoreValue() { } else if (saved_destination_->IsStackSlot()) { __ str(kSavedValueRegister, 
cgen_->ToMemOperand(saved_destination_)); } else if (saved_destination_->IsDoubleRegister()) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(cgen_->masm(), VFP2); __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg); } else if (saved_destination_->IsDoubleStackSlot()) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(cgen_->masm(), VFP2); __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_)); } else { UNREACHABLE(); @@ -233,7 +233,7 @@ void LGapResolver::EmitMove(int index) { MemOperand destination_operand = cgen_->ToMemOperand(destination); if (in_cycle_) { if (!destination_operand.OffsetIsUint12Encodable()) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(cgen_->masm(), VFP2); // ip is overwritten while saving the value to the destination. // Therefore we can't use ip. It is OK if the read from the source // destroys ip, since that happens before the value is read. @@ -272,7 +272,7 @@ void LGapResolver::EmitMove(int index) { } } else if (source->IsDoubleRegister()) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(cgen_->masm(), VFP2); DwVfpRegister source_register = cgen_->ToDoubleRegister(source); if (destination->IsDoubleRegister()) { __ vmov(cgen_->ToDoubleRegister(destination), source_register); @@ -282,7 +282,7 @@ void LGapResolver::EmitMove(int index) { } } else if (source->IsDoubleStackSlot()) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(cgen_->masm(), VFP2); MemOperand source_operand = cgen_->ToMemOperand(source); if (destination->IsDoubleRegister()) { __ vldr(cgen_->ToDoubleRegister(destination), source_operand); diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc index e1d121f..400f615 100644 --- a/src/arm/macro-assembler-arm.cc +++ b/src/arm/macro-assembler-arm.cc @@ -292,7 +292,7 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) { void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) { 
ASSERT(CpuFeatures::IsSupported(VFP2)); - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(this, VFP2); if (!dst.is(src)) { vmov(dst, src); } @@ -717,7 +717,7 @@ void MacroAssembler::Ldrd(Register dst1, Register dst2, // Generate two ldr instructions if ldrd is not available. if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { - CpuFeatures::Scope scope(ARMv7); + CpuFeatureScope scope(this, ARMv7); ldrd(dst1, dst2, src, cond); } else { if ((src.am() == Offset) || (src.am() == NegOffset)) { @@ -759,7 +759,7 @@ void MacroAssembler::Strd(Register src1, Register src2, // Generate two str instructions if strd is not available. if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { - CpuFeatures::Scope scope(ARMv7); + CpuFeatureScope scope(this, ARMv7); strd(src1, src2, dst, cond); } else { MemOperand dst2(dst); @@ -813,7 +813,7 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, void MacroAssembler::Vmov(const DwVfpRegister dst, const double imm, const Register scratch) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); static const DoubleRepresentation minus_zero(-0.0); static const DoubleRepresentation zero(0.0); DoubleRepresentation value(imm); @@ -875,7 +875,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { // Optionally save all double registers. if (save_doubles) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(this, VFP2); // Check CPU flags for number of registers, setting the Z condition flag. CheckFor32DRegs(ip); @@ -940,7 +940,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count) { // Optionally restore all double registers. if (save_doubles) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(this, VFP2); // Calculate the stack location of the saved doubles and restore them. 
const int offset = 2 * kPointerSize; sub(r3, fp, @@ -2080,7 +2080,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, scratch4, s2); if (destination == FloatingPointHelper::kVFPRegisters) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(this, VFP2); vstr(d0, scratch1, 0); } else { str(mantissa_reg, MemOperand(scratch1, 0)); @@ -2446,7 +2446,7 @@ void MacroAssembler::ConvertToInt32(Register source, DwVfpRegister double_scratch, Label *not_int32) { if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(this, VFP2); sub(scratch, source, Operand(kHeapObjectTag)); vldr(double_scratch, scratch, HeapNumber::kValueOffset); vcvt_s32_f64(double_scratch.low(), double_scratch); @@ -2559,7 +2559,7 @@ void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode, ASSERT(!double_input.is(double_scratch)); ASSERT(CpuFeatures::IsSupported(VFP2)); - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(this, VFP2); Register prev_fpscr = result; Label done; @@ -2685,7 +2685,7 @@ void MacroAssembler::EmitECMATruncate(Register result, Register scratch, Register input_high, Register input_low) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(this, VFP2); ASSERT(!input_high.is(result)); ASSERT(!input_low.is(result)); ASSERT(!input_low.is(input_high)); diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc index 9603410..bccc139 100644 --- a/src/arm/stub-cache-arm.cc +++ b/src/arm/stub-cache-arm.cc @@ -1004,7 +1004,7 @@ static void StoreIntAsFloat(MacroAssembler* masm, Register scratch1, Register scratch2) { if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ vmov(s0, ival); __ add(scratch1, dst, Operand(wordoffset, LSL, 2)); __ vcvt_f32_s32(s0, s0); @@ -2118,7 +2118,7 @@ Handle CallStubCompiler::CompileMathFloorCall( return Handle::null(); } - CpuFeatures::Scope scope_vfp2(VFP2); + CpuFeatureScope scope_vfp2(masm(), VFP2); 
const int argc = arguments().immediate(); // If the object is not a JSObject or we got an unexpected number of // arguments, bail out to the regular call. @@ -3379,7 +3379,7 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm, DwVfpRegister double_scratch1, Label* fail) { if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); Label key_ok; // Check for smi or a smi inside a heap number. We convert the heap // number and check if the conversion is exact and fits into the smi @@ -3491,7 +3491,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( d0, r6, r7, // These are: double_dst, dst_mantissa, dst_exponent. r4, s2); // These are: scratch2, single_scratch. if (destination == FloatingPointHelper::kVFPRegisters) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ vstr(d0, r3, 0); } else { __ str(r6, MemOperand(r3, 0)); @@ -3527,7 +3527,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // +/-Infinity into integer arrays basically undefined. For more // reproducible behavior, convert these to zero. 
if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { // vldr requires offset to be a multiple of 4 so we can not diff --git a/src/assembler.cc b/src/assembler.cc index b3640c3..fdc000e 100644 --- a/src/assembler.cc +++ b/src/assembler.cc @@ -115,6 +115,7 @@ static double* math_exp_log_table_array = NULL; AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size) : isolate_(isolate), jit_cookie_(0), + enabled_cpu_features_(0), emit_debug_code_(FLAG_debug_code), predictable_code_size_(false) { if (FLAG_mask_constants_with_cookie && isolate != NULL) { @@ -180,6 +181,36 @@ PredictableCodeSizeScope::~PredictableCodeSizeScope() { // ----------------------------------------------------------------------------- +// Implementation of CpuFeatureScope + +#ifdef DEBUG +CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f) + : assembler_(assembler) { + ASSERT(CpuFeatures::IsSupported(f)); + ASSERT(!Serializer::enabled() || + !CpuFeatures::IsFoundByRuntimeProbingOnly(f)); + old_enabled_ = assembler_->enabled_cpu_features(); + uint64_t mask = static_cast(1) << f; + // TODO(svenpanne) This special case below doesn't belong here! +#if V8_TARGET_ARCH_ARM + // VFP2 and ARMv7 are implied by VFP3. 
+ if (f == VFP3) { + mask |= + static_cast(1) << VFP2 | + static_cast(1) << ARMv7; + } +#endif + assembler_->set_enabled_cpu_features(old_enabled_ | mask); +} + + +CpuFeatureScope::~CpuFeatureScope() { + assembler_->set_enabled_cpu_features(old_enabled_); +} +#endif + + +// ----------------------------------------------------------------------------- // Implementation of Label int Label::pos() const { diff --git a/src/assembler.h b/src/assembler.h index 06c3b76..0fee67a 100644 --- a/src/assembler.h +++ b/src/assembler.h @@ -68,6 +68,14 @@ class AssemblerBase: public Malloced { bool predictable_code_size() const { return predictable_code_size_; } void set_predictable_code_size(bool value) { predictable_code_size_ = value; } + uint64_t enabled_cpu_features() const { return enabled_cpu_features_; } + void set_enabled_cpu_features(uint64_t features) { + enabled_cpu_features_ = features; + } + bool IsEnabled(CpuFeature f) { + return (enabled_cpu_features_ & (static_cast(1) << f)) != 0; + } + // Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for // cross-snapshotting. static void QuietNaN(HeapObject* nan) { } @@ -89,6 +97,7 @@ class AssemblerBase: public Malloced { private: Isolate* isolate_; int jit_cookie_; + uint64_t enabled_cpu_features_; bool emit_debug_code_; bool predictable_code_size_; }; @@ -109,6 +118,22 @@ class PredictableCodeSizeScope { }; +// Enable a specified feature within a scope. +class CpuFeatureScope BASE_EMBEDDED { + public: +#ifdef DEBUG + CpuFeatureScope(AssemblerBase* assembler, CpuFeature f); + ~CpuFeatureScope(); + + private: + AssemblerBase* assembler_; + uint64_t old_enabled_; +#else + CpuFeatureScope(AssemblerBase* assembler, CpuFeature f) {} +#endif +}; + + // ----------------------------------------------------------------------------- // Labels represent pc locations; they are typically jump or call targets. 
// After declaration, a label can be freely used to denote known or (yet) diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc index 123383c..d926316 100644 --- a/src/ia32/assembler-ia32.cc +++ b/src/ia32/assembler-ia32.cc @@ -52,7 +52,7 @@ namespace internal { bool CpuFeatures::initialized_ = false; #endif uint64_t CpuFeatures::supported_ = 0; -uint64_t CpuFeatures::found_by_runtime_probing_ = 0; +uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0; ExternalReference ExternalReference::cpu_features() { @@ -146,7 +146,7 @@ void CpuFeatures::Probe() { __ bind(&cpuid); __ mov(eax, 1); supported_ = (1 << CPUID); - { Scope fscope(CPUID); + { CpuFeatureScope fscope(&assm, CPUID); __ cpuid(); } supported_ = 0; @@ -169,11 +169,10 @@ void CpuFeatures::Probe() { typedef uint64_t (*F0)(); F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>
(memory->address())); - supported_ = probe(); - found_by_runtime_probing_ = supported_; - uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform(); - supported_ |= os_guarantees; - found_by_runtime_probing_ &= ~os_guarantees; + uint64_t probed_features = probe(); + uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform(); + supported_ = probed_features | platform_features; + found_by_runtime_probing_only_ = probed_features & ~platform_features; delete memory; } @@ -475,7 +474,7 @@ void Assembler::CodeTargetAlign() { void Assembler::cpuid() { - ASSERT(CpuFeatures::IsEnabled(CPUID)); + ASSERT(IsEnabled(CPUID)); EnsureSpace ensure_space(this); EMIT(0x0F); EMIT(0xA2); @@ -697,7 +696,7 @@ void Assembler::movzx_w(Register dst, const Operand& src) { void Assembler::cmov(Condition cc, Register dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(CMOV)); + ASSERT(IsEnabled(CMOV)); EnsureSpace ensure_space(this); // Opcode: 0f 40 + cc /r. EMIT(0x0F); @@ -1306,7 +1305,7 @@ void Assembler::nop() { void Assembler::rdtsc() { - ASSERT(CpuFeatures::IsEnabled(RDTSC)); + ASSERT(IsEnabled(RDTSC)); EnsureSpace ensure_space(this); EMIT(0x0F); EMIT(0x31); @@ -1660,7 +1659,7 @@ void Assembler::fistp_s(const Operand& adr) { void Assembler::fisttp_s(const Operand& adr) { - ASSERT(CpuFeatures::IsEnabled(SSE3)); + ASSERT(IsEnabled(SSE3)); EnsureSpace ensure_space(this); EMIT(0xDB); emit_operand(ecx, adr); @@ -1668,7 +1667,7 @@ void Assembler::fisttp_s(const Operand& adr) { void Assembler::fisttp_d(const Operand& adr) { - ASSERT(CpuFeatures::IsEnabled(SSE3)); + ASSERT(IsEnabled(SSE3)); EnsureSpace ensure_space(this); EMIT(0xDD); emit_operand(ecx, adr); @@ -1930,7 +1929,7 @@ void Assembler::setcc(Condition cc, Register reg) { void Assembler::cvttss2si(Register dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF3); EMIT(0x0F); @@ -1940,7 +1939,7 @@ void Assembler::cvttss2si(Register dst, const 
Operand& src) { void Assembler::cvttsd2si(Register dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -1950,7 +1949,7 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) { void Assembler::cvtsd2si(Register dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -1960,7 +1959,7 @@ void Assembler::cvtsd2si(Register dst, XMMRegister src) { void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -1970,7 +1969,7 @@ void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) { void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF3); EMIT(0x0F); @@ -1980,7 +1979,7 @@ void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) { void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -1990,7 +1989,7 @@ void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) { void Assembler::addsd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2000,7 +1999,7 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) { void Assembler::addsd(XMMRegister dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2010,7 +2009,7 @@ void Assembler::addsd(XMMRegister dst, const Operand& src) { void Assembler::mulsd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + 
ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2020,7 +2019,7 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) { void Assembler::mulsd(XMMRegister dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2030,7 +2029,7 @@ void Assembler::mulsd(XMMRegister dst, const Operand& src) { void Assembler::subsd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2040,7 +2039,7 @@ void Assembler::subsd(XMMRegister dst, XMMRegister src) { void Assembler::divsd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2050,7 +2049,7 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) { void Assembler::xorpd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2095,7 +2094,7 @@ void Assembler::orpd(XMMRegister dst, XMMRegister src) { void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2105,7 +2104,7 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { void Assembler::ucomisd(XMMRegister dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2115,7 +2114,7 @@ void Assembler::ucomisd(XMMRegister dst, const Operand& src) { void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) { - ASSERT(CpuFeatures::IsEnabled(SSE4_1)); + ASSERT(IsEnabled(SSE4_1)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2127,7 +2126,7 @@ void 
Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) { } void Assembler::movmskpd(Register dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2137,7 +2136,7 @@ void Assembler::movmskpd(Register dst, XMMRegister src) { void Assembler::movmskps(Register dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x0F); EMIT(0x50); @@ -2146,7 +2145,7 @@ void Assembler::movmskps(Register dst, XMMRegister src) { void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2156,7 +2155,7 @@ void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) { void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2167,7 +2166,7 @@ void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) { void Assembler::movaps(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x0F); EMIT(0x28); @@ -2176,7 +2175,7 @@ void Assembler::movaps(XMMRegister dst, XMMRegister src) { void Assembler::movdqa(const Operand& dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2186,7 +2185,7 @@ void Assembler::movdqa(const Operand& dst, XMMRegister src) { void Assembler::movdqa(XMMRegister dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2196,7 +2195,7 @@ void Assembler::movdqa(XMMRegister dst, const Operand& src) { void Assembler::movdqu(const Operand& dst, XMMRegister src ) { - 
ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF3); EMIT(0x0F); @@ -2206,7 +2205,7 @@ void Assembler::movdqu(const Operand& dst, XMMRegister src ) { void Assembler::movdqu(XMMRegister dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF3); EMIT(0x0F); @@ -2216,7 +2215,7 @@ void Assembler::movdqu(XMMRegister dst, const Operand& src) { void Assembler::movntdqa(XMMRegister dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE4_1)); + ASSERT(IsEnabled(SSE4_1)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2227,7 +2226,7 @@ void Assembler::movntdqa(XMMRegister dst, const Operand& src) { void Assembler::movntdq(const Operand& dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2260,7 +2259,7 @@ void Assembler::movdbl(const Operand& dst, XMMRegister src) { void Assembler::movsd(const Operand& dst, XMMRegister src ) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); // double EMIT(0x0F); @@ -2270,7 +2269,7 @@ void Assembler::movsd(const Operand& dst, XMMRegister src ) { void Assembler::movsd(XMMRegister dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); // double EMIT(0x0F); @@ -2280,7 +2279,7 @@ void Assembler::movsd(XMMRegister dst, const Operand& src) { void Assembler::movsd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2290,7 +2289,7 @@ void Assembler::movsd(XMMRegister dst, XMMRegister src) { void Assembler::movss(const Operand& dst, XMMRegister src ) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); 
EMIT(0xF3); // float EMIT(0x0F); @@ -2300,7 +2299,7 @@ void Assembler::movss(const Operand& dst, XMMRegister src ) { void Assembler::movss(XMMRegister dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF3); // float EMIT(0x0F); @@ -2310,7 +2309,7 @@ void Assembler::movss(XMMRegister dst, const Operand& src) { void Assembler::movss(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF3); EMIT(0x0F); @@ -2320,7 +2319,7 @@ void Assembler::movss(XMMRegister dst, XMMRegister src) { void Assembler::movd(XMMRegister dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2330,7 +2329,7 @@ void Assembler::movd(XMMRegister dst, const Operand& src) { void Assembler::movd(const Operand& dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2353,7 +2352,7 @@ void Assembler::extractps(Register dst, XMMRegister src, byte imm8) { void Assembler::pand(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2363,7 +2362,7 @@ void Assembler::pand(XMMRegister dst, XMMRegister src) { void Assembler::pxor(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2373,7 +2372,7 @@ void Assembler::pxor(XMMRegister dst, XMMRegister src) { void Assembler::por(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2383,7 +2382,7 @@ void Assembler::por(XMMRegister dst, XMMRegister src) { void 
Assembler::ptest(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE4_1)); + ASSERT(IsEnabled(SSE4_1)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2394,7 +2393,7 @@ void Assembler::ptest(XMMRegister dst, XMMRegister src) { void Assembler::psllq(XMMRegister reg, int8_t shift) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2405,7 +2404,7 @@ void Assembler::psllq(XMMRegister reg, int8_t shift) { void Assembler::psllq(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2415,7 +2414,7 @@ void Assembler::psllq(XMMRegister dst, XMMRegister src) { void Assembler::psrlq(XMMRegister reg, int8_t shift) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2426,7 +2425,7 @@ void Assembler::psrlq(XMMRegister reg, int8_t shift) { void Assembler::psrlq(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2436,7 +2435,7 @@ void Assembler::psrlq(XMMRegister dst, XMMRegister src) { void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2447,7 +2446,7 @@ void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) { void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) { - ASSERT(CpuFeatures::IsEnabled(SSE4_1)); + ASSERT(IsEnabled(SSE4_1)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2459,7 +2458,7 @@ void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) { void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) { - 
ASSERT(CpuFeatures::IsEnabled(SSE4_1)); + ASSERT(IsEnabled(SSE4_1)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h index 315bc17..9fa092a 100644 --- a/src/ia32/assembler-ia32.h +++ b/src/ia32/assembler-ia32.h @@ -505,10 +505,10 @@ class Displacement BASE_EMBEDDED { // CpuFeatures keeps track of which features are supported by the target CPU. -// Supported features must be enabled by a Scope before use. +// Supported features must be enabled by a CpuFeatureScope before use. // Example: -// if (CpuFeatures::IsSupported(SSE2)) { -// CpuFeatures::Scope fscope(SSE2); +// if (assembler->IsSupported(SSE2)) { +// CpuFeatureScope fscope(assembler, SSE2); // // Generate SSE2 floating point code. // } else { // // Generate standard x87 floating point code. @@ -530,54 +530,11 @@ class CpuFeatures : public AllStatic { return (supported_ & (static_cast<uint64_t>(1) << f)) != 0; } -#ifdef DEBUG - // Check whether a feature is currently enabled. - static bool IsEnabled(CpuFeature f) { + static bool IsFoundByRuntimeProbingOnly(CpuFeature f) { ASSERT(initialized_); - Isolate* isolate = Isolate::UncheckedCurrent(); - if (isolate == NULL) { - // When no isolate is available, work as if we're running in - // release mode. - return IsSupported(f); - } - uint64_t enabled = isolate->enabled_cpu_features(); - return (enabled & (static_cast<uint64_t>(1) << f)) != 0; + return (found_by_runtime_probing_only_ & + (static_cast<uint64_t>(1) << f)) != 0; } -#endif - - // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED { -#ifdef DEBUG - - public: - explicit Scope(CpuFeature f) { - uint64_t mask = static_cast<uint64_t>(1) << f; - ASSERT(CpuFeatures::IsSupported(f)); - ASSERT(!Serializer::enabled() || - (CpuFeatures::found_by_runtime_probing_ & mask) == 0); - isolate_ = Isolate::UncheckedCurrent(); - old_enabled_ = 0; - if (isolate_ != NULL) { - old_enabled_ = isolate_->enabled_cpu_features(); - isolate_->set_enabled_cpu_features(old_enabled_ | mask); - } - } - ~Scope() { - ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_); - if (isolate_ != NULL) { - isolate_->set_enabled_cpu_features(old_enabled_); - } - } - - private: - Isolate* isolate_; - uint64_t old_enabled_; -#else - - public: - explicit Scope(CpuFeature f) {} -#endif - }; class TryForceFeatureScope BASE_EMBEDDED { public: @@ -610,7 +567,7 @@ class CpuFeatures : public AllStatic { static bool initialized_; #endif static uint64_t supported_; - static uint64_t found_by_runtime_probing_; + static uint64_t found_by_runtime_probing_only_; friend class ExternalReference; DISALLOW_COPY_AND_ASSIGN(CpuFeatures); diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc index b0a32ea..9e666e0 100644 --- a/src/ia32/code-stubs-ia32.cc +++ b/src/ia32/code-stubs-ia32.cc @@ -653,7 +653,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { // restore them.
__ pushad(); if (save_doubles_ == kSaveFPRegs) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm, SSE2); __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); for (int i = 0; i < XMMRegister::kNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); @@ -670,7 +670,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { ExternalReference::store_buffer_overflow_function(masm->isolate()), argument_count); if (save_doubles_ == kSaveFPRegs) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm, SSE2); for (int i = 0; i < XMMRegister::kNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); __ movdbl(reg, Operand(esp, i * kDoubleSize)); @@ -820,7 +820,7 @@ static void IntegerConvert(MacroAssembler* masm, __ cmp(scratch2, Immediate(kResultIsZeroExponent)); __ j(above, &done); if (use_sse3) { - CpuFeatures::Scope scope(SSE3); + CpuFeatureScope scope(masm, SSE3); // Check whether the exponent is too big for a 64 bit signed integer. 
static const uint32_t kTooBigExponent = 63; __ cmp(scratch2, Immediate(kTooBigExponent)); @@ -1183,7 +1183,7 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm, __ bind(&heapnumber_allocated); } if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); __ cvtsi2sd(xmm0, ecx); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); } else { @@ -1568,7 +1568,7 @@ static void BinaryOpStub_GenerateSmiCode( } else { ASSERT_EQ(Token::SHL, op); if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); __ cvtsi2sd(xmm0, left); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); } else { @@ -1612,7 +1612,7 @@ static void BinaryOpStub_GenerateSmiCode( } __ AllocateHeapNumber(ecx, ebx, no_reg, slow); if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); FloatingPointHelper::LoadSSE2Smis(masm, ebx); switch (op) { case Token::ADD: __ addsd(xmm0, xmm1); break; @@ -1777,7 +1777,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { Label not_floats; Label not_int32; if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); // It could be that only SMIs have been seen at either the left // or the right operand. For precise type feedback, patch the IC // again if this changes. @@ -1908,7 +1908,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { } // Store the result in the HeapNumber and return. 
if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); __ cvtsi2sd(xmm0, ebx); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); } else { @@ -1998,7 +1998,7 @@ void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { case Token::DIV: { Label not_floats; if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); // It could be that only SMIs have been seen at either the left // or the right operand. For precise type feedback, patch the IC @@ -2125,7 +2125,7 @@ void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { } // Store the result in the HeapNumber and return. if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); __ cvtsi2sd(xmm0, ebx); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); } else { @@ -2205,7 +2205,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { case Token::DIV: { Label not_floats; if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); FloatingPointHelper::LoadSSE2Operands(masm, ¬_floats); switch (op_) { @@ -2306,7 +2306,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { } // Store the result in the HeapNumber and return. if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); __ cvtsi2sd(xmm0, ebx); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); } else { @@ -2476,9 +2476,9 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ bind(&loaded); } else { // UNTAGGED. - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm, SSE2); if (CpuFeatures::IsSupported(SSE4_1)) { - CpuFeatures::Scope sse4_scope(SSE4_1); + CpuFeatureScope sse4_scope(masm, SSE4_1); __ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx. 
} else { __ pshufd(xmm0, xmm1, 0x1); @@ -2549,7 +2549,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ fstp(0); __ ret(kPointerSize); } else { // UNTAGGED. - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm, SSE2); __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); __ Ret(); } @@ -2562,7 +2562,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { if (tagged) { __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack); } else { // UNTAGGED. - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm, SSE2); __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); __ sub(esp, Immediate(kDoubleSize)); __ movdbl(Operand(esp, 0), xmm1); @@ -2577,7 +2577,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { if (tagged) { __ ret(kPointerSize); } else { // UNTAGGED. - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm, SSE2); __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); __ Ret(); @@ -2610,7 +2610,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { ExternalReference(RuntimeFunction(), masm->isolate()); __ TailCallExternalReference(runtime, 1, 1); } else { // UNTAGGED. - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm, SSE2); __ bind(&runtime_call_clear_stack); __ bind(&runtime_call); __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); @@ -2776,7 +2776,7 @@ void FloatingPointHelper::LoadUnknownsAsIntegers( // Get the untagged integer version of the edx heap number in ecx. if (left_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); ConvertHeapNumberToInt32(masm, edx, conversion_failure); } else { IntegerConvert(masm, edx, use_sse3, conversion_failure); @@ -2811,7 +2811,7 @@ void FloatingPointHelper::LoadUnknownsAsIntegers( // Get the untagged integer version of the eax heap number in ecx. 
if (right_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); ConvertHeapNumberToInt32(masm, eax, conversion_failure); } else { IntegerConvert(masm, eax, use_sse3, conversion_failure); @@ -3019,7 +3019,7 @@ void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, void MathPowStub::Generate(MacroAssembler* masm) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); Factory* factory = masm->isolate()->factory(); const Register exponent = eax; const Register base = edx; @@ -4407,7 +4407,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, FixedArray::kHeaderSize)); __ JumpIfSmi(probe, not_found); if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope fscope(SSE2); + CpuFeatureScope fscope(masm, SSE2); __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset)); __ ucomisd(xmm0, xmm1); @@ -4667,8 +4667,8 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { Label non_number_comparison; Label unordered; if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); - CpuFeatures::Scope use_cmov(CMOV); + CpuFeatureScope use_sse2(masm, SSE2); + CpuFeatureScope use_cmov(masm, CMOV); FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison); __ ucomisd(xmm0, xmm1); @@ -6890,8 +6890,8 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { // Inlining the double comparison and falling back to the general compare // stub if NaN is involved or SSE2 or CMOV is unsupported. if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) { - CpuFeatures::Scope scope1(SSE2); - CpuFeatures::Scope scope2(CMOV); + CpuFeatureScope scope1(masm, SSE2); + CpuFeatureScope scope2(masm, CMOV); // Load left and right operand. 
Label done, left, left_smi, right_smi; diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h index af1c999..9d5c0be 100644 --- a/src/ia32/code-stubs-ia32.h +++ b/src/ia32/code-stubs-ia32.h @@ -540,7 +540,7 @@ class RecordWriteStub: public PlatformCodeStub { if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax); if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx); if (mode == kSaveFPRegs) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm, SSE2); masm->sub(esp, Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1))); // Save all XMM registers except XMM0. @@ -554,7 +554,7 @@ class RecordWriteStub: public PlatformCodeStub { inline void RestoreCallerSaveRegisters(MacroAssembler*masm, SaveFPRegsMode mode) { if (mode == kSaveFPRegs) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm, SSE2); // Restore all XMM registers except XMM0. for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) { XMMRegister reg = XMMRegister::from_code(i); diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc index 5368811..b3fce81 100644 --- a/src/ia32/codegen-ia32.cc +++ b/src/ia32/codegen-ia32.cc @@ -114,7 +114,7 @@ UnaryMathFunction CreateExpFunction() { // esp[1 * kPointerSize]: raw double input // esp[0 * kPointerSize]: return address { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(&masm, SSE2); XMMRegister input = xmm1; XMMRegister result = xmm2; __ movdbl(input, Operand(esp, 1 * kPointerSize)); @@ -154,7 +154,7 @@ UnaryMathFunction CreateSqrtFunction() { // esp[0 * kPointerSize]: return address // Move double input into registers. 
{ - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(&masm, SSE2); __ movdbl(xmm0, Operand(esp, 1 * kPointerSize)); __ sqrtsd(xmm0, xmm0); __ movdbl(Operand(esp, 1 * kPointerSize), xmm0); @@ -214,7 +214,7 @@ OS::MemCopyFunction CreateMemCopyFunction() { __ bind(&ok); } if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope enable(SSE2); + CpuFeatureScope enable(&masm, SSE2); __ push(edi); __ push(esi); stack_offset += 2 * kPointerSize; @@ -479,7 +479,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( ExternalReference::address_of_the_hole_nan(); XMMRegister the_hole_nan = xmm1; if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); __ movdbl(the_hole_nan, Operand::StaticVariable(canonical_the_hole_nan_reference)); } @@ -504,7 +504,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( // Normal smi, convert it to double and store. __ SmiUntag(ebx); if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope fscope(SSE2); + CpuFeatureScope fscope(masm, SSE2); __ cvtsi2sd(xmm0, ebx); __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize), xmm0); @@ -525,7 +525,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( } if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize), the_hole_nan); } else { @@ -635,7 +635,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( __ AllocateHeapNumber(edx, esi, no_reg, &gc_required); // edx: new heap number if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope fscope(SSE2); + CpuFeatureScope fscope(masm, SSE2); __ movdbl(xmm0, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize)); __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0); diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc index e27ea4c..2f2430c 100644 --- 
a/src/ia32/deoptimizer-ia32.cc +++ b/src/ia32/deoptimizer-ia32.cc @@ -977,7 +977,7 @@ void Deoptimizer::EntryGenerator::Generate() { XMMRegister::kNumAllocatableRegisters; __ sub(esp, Immediate(kDoubleRegsSize)); if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); int offset = i * kDoubleSize; @@ -1032,7 +1032,7 @@ void Deoptimizer::EntryGenerator::Generate() { int double_regs_offset = FrameDescription::double_registers_offset(); if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); // Fill in the double input registers. for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { int dst_offset = i * kDoubleSize + double_regs_offset; @@ -1133,7 +1133,7 @@ void Deoptimizer::EntryGenerator::Generate() { // In case of OSR or a failed STUB, we have to restore the XMM registers. if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); int src_offset = i * kDoubleSize + double_regs_offset; diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc index 166cb6f..25c5e65 100644 --- a/src/ia32/full-codegen-ia32.cc +++ b/src/ia32/full-codegen-ia32.cc @@ -2994,7 +2994,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) { // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)). // This is implemented on both SSE2 and FPU. if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope fscope(SSE2); + CpuFeatureScope fscope(masm(), SSE2); __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single. 
__ movd(xmm1, ebx); __ movd(xmm0, eax); diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc index 0f2d32b..6fa1c42 100644 --- a/src/ia32/lithium-codegen-ia32.cc +++ b/src/ia32/lithium-codegen-ia32.cc @@ -260,7 +260,7 @@ bool LCodeGen::GeneratePrologue() { if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) { Comment(";;; Save clobbered callee double registers"); - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); int count = 0; BitVector* doubles = chunk()->allocated_double_registers(); BitVector::Iterator save_iterator(doubles); @@ -1695,8 +1695,8 @@ void LCodeGen::DoConstantD(LConstantD* instr) { int32_t lower = static_cast(int_val); int32_t upper = static_cast(int_val >> (kBitsPerInt)); if (CpuFeatures::IsSupported(SSE4_1)) { - CpuFeatures::Scope scope1(SSE2); - CpuFeatures::Scope scope2(SSE4_1); + CpuFeatureScope scope1(masm(), SSE2); + CpuFeatureScope scope2(masm(), SSE4_1); if (lower != 0) { __ Set(temp, Immediate(lower)); __ movd(res, Operand(temp)); @@ -1708,7 +1708,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) { __ pinsrd(res, Operand(temp), 1); } } else { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); __ Set(temp, Immediate(upper)); __ movd(res, Operand(temp)); __ psllq(res, 32); @@ -1871,7 +1871,7 @@ void LCodeGen::DoAddI(LAddI* instr) { void LCodeGen::DoMathMinMax(LMathMinMax* instr) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); LOperand* left = instr->left(); LOperand* right = instr->right(); ASSERT(left->Equals(instr->result())); @@ -1933,7 +1933,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); XMMRegister left = ToDoubleRegister(instr->left()); XMMRegister right = ToDoubleRegister(instr->right()); XMMRegister result = ToDoubleRegister(instr->result()); @@ -2018,7 +2018,7 @@ void 
LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) { void LCodeGen::DoBranch(LBranch* instr) { int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); Representation r = instr->hydrogen()->value()->representation(); if (r.IsInteger32()) { @@ -2188,7 +2188,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { EmitGoto(next_block); } else { if (instr->is_double()) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); // Don't base result on EFLAGS when a NaN is involved. Instead // jump to the false block. __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right)); @@ -2706,7 +2706,7 @@ void LCodeGen::DoReturn(LReturn* instr) { } if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) { ASSERT(NeedsEagerFrame()); - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); BitVector* doubles = chunk()->allocated_double_registers(); BitVector::Iterator save_iterator(doubles); int count = 0; @@ -3119,7 +3119,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { instr->additional_index())); if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); XMMRegister result(ToDoubleRegister(instr->result())); __ movss(result, operand); __ cvtss2sd(result, result); @@ -3129,7 +3129,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { } } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); __ movdbl(ToDoubleRegister(instr->result()), operand); } else { __ fld_d(operand); @@ -3223,7 +3223,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { FixedDoubleArray::kHeaderSize - kHeapObjectTag, 
instr->additional_index()); if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); XMMRegister result = ToDoubleRegister(instr->result()); __ movdbl(result, double_load_operand); } else { @@ -3647,7 +3647,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { ASSERT(instr->value()->Equals(instr->result())); Representation r = instr->hydrogen()->value()->representation(); - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); if (r.IsDouble()) { XMMRegister scratch = xmm0; XMMRegister input_reg = ToDoubleRegister(instr->value()); @@ -3669,13 +3669,13 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); XMMRegister xmm_scratch = xmm0; Register output_reg = ToRegister(instr->result()); XMMRegister input_reg = ToDoubleRegister(instr->value()); if (CpuFeatures::IsSupported(SSE4_1)) { - CpuFeatures::Scope scope(SSE4_1); + CpuFeatureScope scope(masm(), SSE4_1); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { // Deoptimize on negative zero. 
Label non_zero; @@ -3734,7 +3734,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { } void LCodeGen::DoMathRound(LMathRound* instr) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); Register output_reg = ToRegister(instr->result()); XMMRegister input_reg = ToDoubleRegister(instr->value()); XMMRegister xmm_scratch = xmm0; @@ -3795,7 +3795,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) { void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); XMMRegister input_reg = ToDoubleRegister(instr->value()); ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); __ sqrtsd(input_reg, input_reg); @@ -3803,7 +3803,7 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); XMMRegister xmm_scratch = xmm0; XMMRegister input_reg = ToDoubleRegister(instr->value()); Register scratch = ToRegister(instr->temp()); @@ -3880,7 +3880,7 @@ void LCodeGen::DoRandom(LRandom* instr) { DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr); - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); // Having marked this instruction as a call we can use any // registers. 
ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); @@ -3948,7 +3948,7 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) { void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); ASSERT(instr->value()->Equals(instr->result())); XMMRegister input_reg = ToDoubleRegister(instr->value()); Label positive, done, zero; @@ -3980,7 +3980,7 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { void LCodeGen::DoMathExp(LMathExp* instr) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); XMMRegister input = ToDoubleRegister(instr->value()); XMMRegister result = ToDoubleRegister(instr->result()); Register temp1 = ToRegister(instr->temp1()); @@ -4270,11 +4270,11 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { 0, instr->additional_index())); if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value())); __ movss(operand, xmm0); } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); __ movdbl(operand, ToDoubleRegister(instr->value())); } else { Register value = ToRegister(instr->value()); @@ -4310,7 +4310,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); XMMRegister value = ToDoubleRegister(instr->value()); if (instr->NeedsCanonicalization()) { @@ -4585,7 +4585,7 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) { void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); LOperand* input = instr->value(); ASSERT(input->IsRegister() || input->IsStackSlot()); LOperand* output = instr->result(); @@ -4598,7 +4598,7 @@ 
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); LOperand* input = instr->value(); LOperand* output = instr->result(); LOperand* temp = instr->temp(); @@ -4677,7 +4677,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, __ SmiUntag(reg); __ xor_(reg, 0x80000000); if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope feature_scope(SSE2); + CpuFeatureScope feature_scope(masm(), SSE2); __ cvtsi2sd(xmm0, Operand(reg)); } else { __ push(reg); @@ -4686,7 +4686,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, } } else { if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope feature_scope(SSE2); + CpuFeatureScope feature_scope(masm(), SSE2); __ LoadUint32(xmm0, reg, xmm1); } else { // There's no fild variant for unsigned values, so zero-extend to a 64-bit @@ -4726,7 +4726,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, // number. 
__ bind(&done); if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope feature_scope(SSE2); + CpuFeatureScope feature_scope(masm(), SSE2); __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0); } else { __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); @@ -4760,7 +4760,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { if (convert_hole) { bool use_sse2 = CpuFeatures::IsSupported(SSE2); if (use_sse2) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); XMMRegister input_reg = ToDoubleRegister(instr->value()); __ ucomisd(input_reg, input_reg); } else { @@ -4775,7 +4775,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { __ j(parity_odd, &no_special_nan_handling); __ sub(esp, Immediate(kDoubleSize)); if (use_sse2) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); XMMRegister input_reg = ToDoubleRegister(instr->value()); __ movdbl(MemOperand(esp, 0), input_reg); } else { @@ -4794,7 +4794,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { ExternalReference nan = ExternalReference::address_of_canonical_non_hole_nan(); if (use_sse2) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); XMMRegister input_reg = ToDoubleRegister(instr->value()); __ movdbl(input_reg, Operand::StaticVariable(nan)); } else { @@ -4813,7 +4813,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { } __ bind(deferred->exit()); if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); XMMRegister input_reg = ToDoubleRegister(instr->value()); __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); } else { @@ -4956,7 +4956,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ bind(&heap_number); if (CpuFeatures::IsSupported(SSE3)) { - CpuFeatures::Scope scope(SSE3); + CpuFeatureScope scope(masm(), SSE3); Label convert; // Use more powerful conversion when sse3 is available. // Load x87 register with heap number. 
@@ -4981,7 +4981,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result. __ add(Operand(esp), Immediate(kDoubleSize)); } else { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); XMMRegister xmm_temp = ToDoubleRegister(instr->temp()); __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); __ cvttsd2si(input_reg, Operand(xmm0)); @@ -4996,7 +4996,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { DeoptimizeIf(parity_even, instr->environment()); // NaN. } } else if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); // Deoptimize if we don't have a heap number. __ RecordComment("Deferred TaggedToI: not a heap number"); DeoptimizeIf(not_equal, instr->environment()); @@ -5063,7 +5063,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { ASSERT(result->IsDoubleRegister()); if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); Register input_reg = ToRegister(input); XMMRegister result_reg = ToDoubleRegister(result); @@ -5106,7 +5106,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { ASSERT(input->IsDoubleRegister()); LOperand* result = instr->result(); ASSERT(result->IsRegister()); - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); XMMRegister input_reg = ToDoubleRegister(input); Register result_reg = ToRegister(result); @@ -5118,7 +5118,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { __ cmp(result_reg, 0x80000000u); if (CpuFeatures::IsSupported(SSE3)) { // This will deoptimize if the exponent of the input in out of range. 
- CpuFeatures::Scope scope(SSE3); + CpuFeatureScope scope(masm(), SSE3); Label convert, done; __ j(not_equal, &done, Label::kNear); __ sub(Operand(esp), Immediate(kDoubleSize)); @@ -5323,7 +5323,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); __ ClampDoubleToUint8(value_reg, xmm0, result_reg); @@ -5338,7 +5338,7 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm(), SSE2); ASSERT(instr->unclamped()->Equals(instr->result())); Register input_reg = ToRegister(instr->unclamped()); diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc index 6fee7fe..b062ba5 100644 --- a/src/ia32/lithium-gap-resolver-ia32.cc +++ b/src/ia32/lithium-gap-resolver-ia32.cc @@ -324,7 +324,7 @@ void LGapResolver::EmitMove(int index) { } } else if (source->IsDoubleRegister()) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(cgen_->masm(), SSE2); XMMRegister src = cgen_->ToDoubleRegister(source); if (destination->IsDoubleRegister()) { XMMRegister dst = cgen_->ToDoubleRegister(destination); @@ -335,7 +335,7 @@ void LGapResolver::EmitMove(int index) { __ movdbl(dst, src); } } else if (source->IsDoubleStackSlot()) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(cgen_->masm(), SSE2); ASSERT(destination->IsDoubleRegister() || destination->IsDoubleStackSlot()); Operand src = cgen_->ToOperand(source); @@ -411,7 +411,7 @@ void LGapResolver::EmitSwap(int index) { __ mov(src, tmp0); } } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(cgen_->masm(), SSE2); // XMM register-register swap. 
We rely on having xmm0 // available as a fixed scratch register. XMMRegister src = cgen_->ToDoubleRegister(source); @@ -421,7 +421,7 @@ void LGapResolver::EmitSwap(int index) { __ movaps(dst, xmm0); } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(cgen_->masm(), SSE2); // XMM register-memory swap. We rely on having xmm0 // available as a fixed scratch register. ASSERT(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot()); @@ -435,7 +435,7 @@ void LGapResolver::EmitSwap(int index) { __ movdbl(reg, Operand(xmm0)); } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(cgen_->masm(), SSE2); // Double-width memory-to-memory. Spill on demand to use a general // purpose temporary register and also rely on having xmm0 available as // a fixed scratch register. diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc index 3629a40..494a1bf 100644 --- a/src/ia32/macro-assembler-ia32.cc +++ b/src/ia32/macro-assembler-ia32.cc @@ -527,7 +527,7 @@ void MacroAssembler::StoreNumberToDoubleElements( ExternalReference canonical_nan_reference = ExternalReference::address_of_canonical_non_hole_nan(); if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(this, SSE2); movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset)); bind(&have_double_value); movdbl(FieldOperand(elements, key, times_4, @@ -549,7 +549,7 @@ void MacroAssembler::StoreNumberToDoubleElements( j(zero, ¬_nan); bind(&is_nan); if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(this, SSE2); movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference)); } else { fld_d(Operand::StaticVariable(canonical_nan_reference)); @@ -562,7 +562,7 @@ void 
MacroAssembler::StoreNumberToDoubleElements( mov(scratch1, maybe_number); SmiUntag(scratch1); if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { - CpuFeatures::Scope fscope(SSE2); + CpuFeatureScope fscope(this, SSE2); cvtsi2sd(scratch2, scratch1); movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize - elements_offset), @@ -790,7 +790,7 @@ void MacroAssembler::EnterExitFramePrologue() { void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) { // Optionally save all XMM registers. if (save_doubles) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(this, SSE2); int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize; sub(esp, Immediate(space)); const int offset = -2 * kPointerSize; @@ -836,7 +836,7 @@ void MacroAssembler::EnterApiExitFrame(int argc) { void MacroAssembler::LeaveExitFrame(bool save_doubles) { // Optionally restore all XMM registers. if (save_doubles) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(this, SSE2); const int offset = -2 * kPointerSize; for (int i = 0; i < XMMRegister::kNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc index 86caecb..154f18e 100644 --- a/src/ia32/stub-cache-ia32.cc +++ b/src/ia32/stub-cache-ia32.cc @@ -2047,7 +2047,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall( return Handle<Code>::null(); } - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm(), SSE2); const int argc = arguments().immediate(); @@ -3198,7 +3198,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub( __ mov(ebx, edi); __ cmp(eax, arg_number); if (CpuFeatures::IsSupported(CMOV)) { - CpuFeatures::Scope use_cmov(CMOV); + CpuFeatureScope use_cmov(masm(), CMOV); __ cmov(above, ebx, Operand(ecx, arg_number * -kPointerSize)); } else { Label not_passed; @@ -3311,7 +3311,7 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm, // Check that key is a smi and if SSE2 is
available a heap number // containing a smi and branch if the check fails. if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); Label key_ok; __ JumpIfSmi(key, &key_ok); __ cmp(FieldOperand(key, HeapObject::kMapOffset), @@ -3448,7 +3448,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( if ((elements_kind == EXTERNAL_INT_ELEMENTS || elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) && CpuFeatures::IsSupported(SSE3)) { - CpuFeatures::Scope scope(SSE3); + CpuFeatureScope scope(masm, SSE3); // fisttp stores values as signed integers. To represent the // entire range of int and unsigned int arrays, store as a // 64-bit int and discard the high 32 bits. @@ -3473,7 +3473,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( __ mov(Operand(edi, ecx, times_2, 0), ebx); } else { ASSERT(CpuFeatures::IsSupported(SSE2)); - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm, SSE2); __ cvttsd2si(ebx, FieldOperand(eax, HeapNumber::kValueOffset)); __ cmp(ebx, 0x80000000u); __ j(equal, &slow); diff --git a/src/isolate.h b/src/isolate.h index 33e4d3e..3ab2b6a 100644 --- a/src/isolate.h +++ b/src/isolate.h @@ -368,7 +368,6 @@ typedef List DebugObjectCache; V(unsigned, ast_node_count, 0) \ /* SafeStackFrameIterator activations count. 
*/ \ V(int, safe_stack_iterator_counter, 0) \ - V(uint64_t, enabled_cpu_features, 0) \ V(CpuProfiler*, cpu_profiler, NULL) \ V(HeapProfiler*, heap_profiler, NULL) \ V(bool, observer_delivery_pending, false) \ diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc index 962255d..4c11c7f 100644 --- a/src/mips/assembler-mips.cc +++ b/src/mips/assembler-mips.cc @@ -47,7 +47,7 @@ namespace internal { bool CpuFeatures::initialized_ = false; #endif unsigned CpuFeatures::supported_ = 0; -unsigned CpuFeatures::found_by_runtime_probing_ = 0; +unsigned CpuFeatures::found_by_runtime_probing_only_ = 0; ExternalReference ExternalReference::cpu_features() { @@ -63,7 +63,7 @@ ExternalReference ExternalReference::cpu_features() { static uint64_t CpuFeaturesImpliedByCompiler() { uint64_t answer = 0; #ifdef CAN_USE_FPU_INSTRUCTIONS - answer |= 1u << FPU; + answer |= static_cast<uint64_t>(1) << FPU; #endif // def CAN_USE_FPU_INSTRUCTIONS #ifdef __mips__ @@ -71,7 +71,7 @@ static uint64_t CpuFeaturesImpliedByCompiler() { // generation even when generating snapshots. This won't work for cross // compilation. #if(defined(__mips_hard_float) && __mips_hard_float != 0) - answer |= 1u << FPU; + answer |= static_cast<uint64_t>(1) << FPU; #endif // defined(__mips_hard_float) && __mips_hard_float != 0 #endif // def __mips__ @@ -129,15 +129,15 @@ void CpuFeatures::Probe() { #if !defined(__mips__) // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled. if (FLAG_enable_fpu) { - supported_ |= 1u << FPU; + supported_ |= static_cast<uint64_t>(1) << FPU; } #else // Probe for additional features not already known to be available. if (OS::MipsCpuHasFeature(FPU)) { // This implementation also sets the FPU flags if // runtime detection of FPU returns true.
- supported_ |= 1u << FPU; - found_by_runtime_probing_ |= 1u << FPU; + supported_ |= static_cast<uint64_t>(1) << FPU; + found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << FPU; } #endif } @@ -874,7 +874,7 @@ void Assembler::GenInstrRegister(Opcode opcode, FPURegister fd, SecondaryField func) { ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid()); - ASSERT(CpuFeatures::IsEnabled(FPU)); + ASSERT(IsEnabled(FPU)); Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; emit(instr); @@ -888,7 +888,7 @@ void Assembler::GenInstrRegister(Opcode opcode, FPURegister fd, SecondaryField func) { ASSERT(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid()); - ASSERT(CpuFeatures::IsEnabled(FPU)); + ASSERT(IsEnabled(FPU)); Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift) | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; emit(instr); @@ -902,7 +902,7 @@ void Assembler::GenInstrRegister(Opcode opcode, FPURegister fd, SecondaryField func) { ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid()); - ASSERT(CpuFeatures::IsEnabled(FPU)); + ASSERT(IsEnabled(FPU)); Instr instr = opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; emit(instr); @@ -915,7 +915,7 @@ void Assembler::GenInstrRegister(Opcode opcode, FPUControlRegister fs, SecondaryField func) { ASSERT(fs.is_valid() && rt.is_valid()); - ASSERT(CpuFeatures::IsEnabled(FPU)); + ASSERT(IsEnabled(FPU)); Instr instr = opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func; emit(instr); @@ -950,7 +950,7 @@ void Assembler::GenInstrImmediate(Opcode opcode, FPURegister ft, int32_t j) { ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j))); - ASSERT(CpuFeatures::IsEnabled(FPU)); + ASSERT(IsEnabled(FPU)); Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) | (j & kImm16Mask); emit(instr); @@ -1872,7 +1872,7 @@ void Assembler::cvt_d_s(FPURegister
fd, FPURegister fs) { // Conditions. void Assembler::c(FPUCondition cond, SecondaryField fmt, FPURegister fs, FPURegister ft, uint16_t cc) { - ASSERT(CpuFeatures::IsEnabled(FPU)); + ASSERT(IsEnabled(FPU)); ASSERT(is_uint3(cc)); ASSERT((fmt & ~(31 << kRsShift)) == 0); Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift @@ -1883,7 +1883,7 @@ void Assembler::c(FPUCondition cond, SecondaryField fmt, void Assembler::fcmp(FPURegister src1, const double src2, FPUCondition cond) { - ASSERT(CpuFeatures::IsEnabled(FPU)); + ASSERT(IsEnabled(FPU)); ASSERT(src2 == 0.0); mtc1(zero_reg, f14); cvt_d_w(f14, f14); @@ -1892,7 +1892,7 @@ void Assembler::fcmp(FPURegister src1, const double src2, void Assembler::bc1f(int16_t offset, uint16_t cc) { - ASSERT(CpuFeatures::IsEnabled(FPU)); + ASSERT(IsEnabled(FPU)); ASSERT(is_uint3(cc)); Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask); emit(instr); @@ -1900,7 +1900,7 @@ void Assembler::bc1f(int16_t offset, uint16_t cc) { void Assembler::bc1t(int16_t offset, uint16_t cc) { - ASSERT(CpuFeatures::IsEnabled(FPU)); + ASSERT(IsEnabled(FPU)); ASSERT(is_uint3(cc)); Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask); emit(instr); diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h index d108edc..c235a4d 100644 --- a/src/mips/assembler-mips.h +++ b/src/mips/assembler-mips.h @@ -393,7 +393,7 @@ class MemOperand : public Operand { // CpuFeatures keeps track of which features are supported by the target CPU. -// Supported features must be enabled by a Scope before use. +// Supported features must be enabled by a CpuFeatureScope before use. class CpuFeatures : public AllStatic { public: // Detect features of the target CPU. Set safe defaults if the serializer @@ -407,55 +407,11 @@ class CpuFeatures : public AllStatic { return (supported_ & (1u << f)) != 0; } - -#ifdef DEBUG - // Check whether a feature is currently enabled. 
- static bool IsEnabled(CpuFeature f) { + static bool IsFoundByRuntimeProbingOnly(CpuFeature f) { ASSERT(initialized_); - Isolate* isolate = Isolate::UncheckedCurrent(); - if (isolate == NULL) { - // When no isolate is available, work as if we're running in - // release mode. - return IsSupported(f); - } - unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features()); - return (enabled & (1u << f)) != 0; + return (found_by_runtime_probing_only_ & + (static_cast<uint64_t>(1) << f)) != 0; } -#endif - - // Enable a specified feature within a scope. - class Scope BASE_EMBEDDED { -#ifdef DEBUG - - public: - explicit Scope(CpuFeature f) { - unsigned mask = 1u << f; - ASSERT(CpuFeatures::IsSupported(f)); - ASSERT(!Serializer::enabled() || - (CpuFeatures::found_by_runtime_probing_ & mask) == 0); - isolate_ = Isolate::UncheckedCurrent(); - old_enabled_ = 0; - if (isolate_ != NULL) { - old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features()); - isolate_->set_enabled_cpu_features(old_enabled_ | mask); - } - } - ~Scope() { - ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_); - if (isolate_ != NULL) { - isolate_->set_enabled_cpu_features(old_enabled_); - } - } - - private: - Isolate* isolate_; - unsigned old_enabled_; -#else - - public: - explicit Scope(CpuFeature f) {} -#endif - }; class TryForceFeatureScope BASE_EMBEDDED { public: @@ -488,7 +444,7 @@ class CpuFeatures : public AllStatic { static bool initialized_; #endif static unsigned supported_; - static unsigned found_by_runtime_probing_; + static unsigned found_by_runtime_probing_only_; friend class ExternalReference; DISALLOW_COPY_AND_ASSIGN(CpuFeatures); diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc index b60bd29..91203c6 100644 --- a/src/mips/code-stubs-mips.cc +++ b/src/mips/code-stubs-mips.cc @@ -614,7 +614,7 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm, Register scratch1, Register scratch2) { if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope
scope(masm, FPU); __ sra(scratch1, a0, kSmiTagSize); __ mtc1(scratch1, f14); __ cvt_d_w(f14, f14); @@ -665,7 +665,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, // Handle loading a double from a heap number. if (CpuFeatures::IsSupported(FPU) && destination == kFPURegisters) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // Load the double from tagged HeapNumber to double register. // ARM uses a workaround here because of the unaligned HeapNumber @@ -684,7 +684,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, // Handle loading a double from a smi. __ bind(&is_smi); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // Convert smi to double using FPU instructions. __ mtc1(scratch1, dst); __ cvt_d_w(dst, dst); @@ -760,7 +760,7 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, Label done; if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ mtc1(int_scratch, single_scratch); __ cvt_d_w(double_dst, single_scratch); if (destination == kCoreRegisters) { @@ -862,7 +862,7 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, // Load the number. if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // Load the double value. __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset)); @@ -959,7 +959,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, // Object is a heap number. // Convert the floating point value to a 32-bit integer. if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // Load the double value. 
__ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset)); @@ -1097,7 +1097,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( __ push(ra); __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments. if (!IsMipsSoftFloatABI) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // We are not using MIPS FPU instructions, and parameters for the runtime // function call are prepaired in a0-a3 registers, but function we are // calling is compiled with hard-float flag and expecting hard float ABI @@ -1113,7 +1113,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( } // Store answer in the overwritable heap number. if (!IsMipsSoftFloatABI) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // Double returned in register f0. __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); } else { @@ -1337,7 +1337,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, // Rhs is a smi, lhs is a number. // Convert smi rhs to double. if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ sra(at, rhs, kSmiTagSize); __ mtc1(at, f14); __ cvt_d_w(f14, f14); @@ -1376,7 +1376,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, // Lhs is a smi, rhs is a number. // Convert smi lhs to double. if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ sra(at, lhs, kSmiTagSize); __ mtc1(at, f12); __ cvt_d_w(f12, f12); @@ -1404,7 +1404,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, void EmitNanCheck(MacroAssembler* masm, Condition cc) { bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // Lhs and rhs are already loaded to f12 and f14 register pairs. 
__ Move(t0, t1, f14); __ Move(t2, t3, f12); @@ -1471,7 +1471,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { // Exception: 0 and -0. bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // Lhs and rhs are already loaded to f12 and f14 register pairs. __ Move(t0, t1, f14); __ Move(t2, t3, f12); @@ -1527,7 +1527,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { __ pop(ra); // Because this function returns int, result is in v0. __ Ret(); } else { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); Label equal, less_than; __ BranchF(&equal, NULL, eq, f12, f14); __ BranchF(&less_than, NULL, lt, f12, f14); @@ -1603,7 +1603,7 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, // Both are heap numbers. Load them up then jump to the code we have // for that. if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); } else { @@ -1698,7 +1698,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, if (!object_is_smi) { __ JumpIfSmi(object, &is_smi); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, @@ -1851,7 +1851,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { Isolate* isolate = masm->isolate(); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); Label nan; __ li(t0, Operand(LESS)); __ li(t1, Operand(GREATER)); @@ -1986,7 +1986,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { // it, too: zero for false, and a non-zero value for true. 
void ToBooleanStub::Generate(MacroAssembler* masm) { // This stub uses FPU instructions. - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); Label patch; const Register map = t5.is(tos_) ? t3 : t5; @@ -2101,7 +2101,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { // restore them. __ MultiPush(kJSCallerSaved | ra.bit()); if (save_doubles_ == kSaveFPRegs) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ MultiPushFPU(kCallerSavedFPU); } const int argument_count = 1; @@ -2115,7 +2115,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { ExternalReference::store_buffer_overflow_function(masm->isolate()), argument_count); if (save_doubles_ == kSaveFPRegs) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ MultiPopFPU(kCallerSavedFPU); } @@ -2348,7 +2348,7 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot( if (CpuFeatures::IsSupported(FPU)) { // Convert the int32 in a1 to the heap number in v0. a2 is corrupted. - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ mtc1(a1, f0); __ cvt_d_w(f0, f0); __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); @@ -2693,7 +2693,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, // Using FPU registers: // f12: Left value. // f14: Right value. - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); switch (op) { case Token::ADD: __ add_d(f10, f12, f14); @@ -2825,7 +2825,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, if (CpuFeatures::IsSupported(FPU)) { // Convert the int32 in a2 to the heap number in a0. As // mentioned above SHR needs to always produce a positive result. 
- CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ mtc1(a2, f0); if (op == Token::SHR) { __ Cvt_d_uw(f0, f0, f22); @@ -3020,7 +3020,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { &transition); if (destination == FloatingPointHelper::kFPURegisters) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); Label return_heap_number; switch (op_) { case Token::ADD: @@ -3234,7 +3234,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { mode_); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); if (op_ != Token::SHR) { // Convert the result to a floating point value. @@ -3438,7 +3438,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { const bool tagged = (argument_type_ == TAGGED); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); if (tagged) { // Argument is a number and is on stack and in a0. @@ -3548,7 +3548,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { 1); } else { ASSERT(CpuFeatures::IsSupported(FPU)); - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); Label no_update; Label skip_cache; @@ -3676,7 +3676,7 @@ void InterruptStub::Generate(MacroAssembler* masm) { void MathPowStub::Generate(MacroAssembler* masm) { - CpuFeatures::Scope fpu_scope(FPU); + CpuFeatureScope fpu_scope(masm, FPU); const Register base = a1; const Register exponent = a2; const Register heapnumbermap = t1; @@ -3919,21 +3919,15 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) { // These stubs might already be in the snapshot, detect that and don't // regenerate, which would lead to code stub initialization state being messed // up. 
- Code* save_doubles_code = NULL; - Code* store_buffer_overflow_code = NULL; - if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) { - if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope2(FPU); - save_doubles_code = *save_doubles.GetCode(isolate); - store_buffer_overflow_code = *stub.GetCode(isolate); - } else { - save_doubles_code = *save_doubles.GetCode(isolate); - store_buffer_overflow_code = *stub.GetCode(isolate); - } + Code* save_doubles_code; + if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { + save_doubles_code = *save_doubles.GetCode(isolate); save_doubles_code->set_is_pregenerated(true); + + Code* store_buffer_overflow_code = *stub.GetCode(isolate); store_buffer_overflow_code->set_is_pregenerated(true); } - ISOLATE->set_fp_stubs_generated(true); + isolate->set_fp_stubs_generated(true); } @@ -4189,7 +4183,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ MultiPush(kCalleeSaved | ra.bit()); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // Save callee-saved FPU registers. __ MultiPushFPU(kCalleeSavedFPU); // Set up the reserved register for 0.0. @@ -4338,7 +4332,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // Restore callee-saved fpu registers. __ MultiPopFPU(kCalleeSavedFPU); } @@ -7027,7 +7021,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { // Inlining the double comparison and falling back to the general compare // stub if NaN is involved or FPU is unsupported. if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // Load left and right operand. 
Label done, left, left_smi, right_smi; diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h index cc7ac28..37db215 100644 --- a/src/mips/code-stubs-mips.h +++ b/src/mips/code-stubs-mips.h @@ -484,7 +484,7 @@ class RecordWriteStub: public PlatformCodeStub { void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit()); if (mode == kSaveFPRegs) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); masm->MultiPushFPU(kCallerSavedFPU); } } @@ -492,7 +492,7 @@ class RecordWriteStub: public PlatformCodeStub { inline void RestoreCallerSaveRegisters(MacroAssembler*masm, SaveFPRegsMode mode) { if (mode == kSaveFPRegs) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); masm->MultiPopFPU(kCallerSavedFPU); } masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit()); diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc index bbb1a31..f5cb5e4 100644 --- a/src/mips/codegen-mips.cc +++ b/src/mips/codegen-mips.cc @@ -72,7 +72,7 @@ UnaryMathFunction CreateExpFunction() { MacroAssembler masm(NULL, buffer, static_cast(actual_size)); { - CpuFeatures::Scope use_fpu(FPU); + CpuFeatureScope use_fpu(&masm, FPU); DoubleRegister input = f12; DoubleRegister result = f0; DoubleRegister double_scratch1 = f4; @@ -278,7 +278,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( // Normal smi, convert to double and store. 
if (fpu_supported) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ mtc1(t5, f0); __ cvt_d_w(f0, f0); __ sdc1(f0, MemOperand(t3)); diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc index 8e96cd5..09c69c7 100644 --- a/src/mips/deoptimizer-mips.cc +++ b/src/mips/deoptimizer-mips.cc @@ -868,7 +868,7 @@ void Deoptimizer::EntryGenerator::Generate() { kDoubleSize * FPURegister::kMaxNumAllocatableRegisters; if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); // Save all FPU registers before messing with them. __ Subu(sp, sp, Operand(kDoubleRegsSize)); for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) { @@ -951,7 +951,7 @@ void Deoptimizer::EntryGenerator::Generate() { int double_regs_offset = FrameDescription::double_registers_offset(); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); // Copy FPU registers to // double_registers_[DoubleRegister::kNumAllocatableRegisters] for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) { @@ -1029,7 +1029,7 @@ void Deoptimizer::EntryGenerator::Generate() { __ Branch(&outer_push_loop, lt, t0, Operand(a1)); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); __ lw(a1, MemOperand(a0, Deoptimizer::input_offset())); for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) { diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc index 54eeb8d..7f2cf65 100644 --- a/src/mips/full-codegen-mips.cc +++ b/src/mips/full-codegen-mips.cc @@ -3048,7 +3048,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) { __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset)); __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); // 0x41300000 is the top half of 1.0 x 2^20 
as a double. __ li(a1, Operand(0x41300000)); // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU. diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc index fc4ff30..1946525 100644 --- a/src/mips/lithium-codegen-mips.cc +++ b/src/mips/lithium-codegen-mips.cc @@ -65,7 +65,7 @@ bool LCodeGen::GenerateCode() { HPhase phase("Z_Code generation", chunk()); ASSERT(is_unused()); status_ = GENERATING; - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); // Open a frame scope to indicate that there is a frame on the stack. The // NONE indicates that the scope shouldn't actually generate code to set up @@ -194,7 +194,7 @@ bool LCodeGen::GeneratePrologue() { } if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); Comment(";;; Save clobbered callee double registers"); int count = 0; BitVector* doubles = chunk()->allocated_double_registers(); @@ -1474,7 +1474,7 @@ void LCodeGen::DoConstantI(LConstantI* instr) { void LCodeGen::DoConstantD(LConstantD* instr) { ASSERT(instr->result()->IsDoubleRegister()); DoubleRegister result = ToDoubleRegister(instr->result()); - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); double v = instr->value(); __ Move(result, v); } @@ -1672,7 +1672,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { __ bind(&done); } else { ASSERT(instr->hydrogen()->representation().IsDouble()); - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); FPURegister left_reg = ToDoubleRegister(left); FPURegister right_reg = ToDoubleRegister(right); FPURegister result_reg = ToDoubleRegister(instr->result()); @@ -1713,7 +1713,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); DoubleRegister left = ToDoubleRegister(instr->left()); DoubleRegister right = 
ToDoubleRegister(instr->right()); DoubleRegister result = ToDoubleRegister(instr->result()); @@ -1823,7 +1823,7 @@ void LCodeGen::DoBranch(LBranch* instr) { Register reg = ToRegister(instr->value()); EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg)); } else if (r.IsDouble()) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); DoubleRegister reg = ToDoubleRegister(instr->value()); // Test the double value. Zero and NaN are false. EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero); @@ -1901,7 +1901,7 @@ void LCodeGen::DoBranch(LBranch* instr) { } if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); // heap number -> false iff +0, -0, or NaN. DoubleRegister dbl_scratch = double_scratch0(); Label not_heap_number; @@ -1981,7 +1981,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { EmitGoto(next_block); } else { if (instr->is_double()) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); // Compare left and right as doubles and load the // resulting flags into the normal status register. 
FPURegister left_reg = ToDoubleRegister(left); @@ -2544,7 +2544,7 @@ void LCodeGen::DoReturn(LReturn* instr) { __ CallRuntime(Runtime::kTraceExit, 1); } if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); ASSERT(NeedsEagerFrame()); BitVector* doubles = chunk()->allocated_double_registers(); BitVector::Iterator save_iterator(doubles); @@ -2923,7 +2923,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { __ Addu(scratch0(), scratch0(), external_pointer); } if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { __ lwc1(result, MemOperand(scratch0(), additional_offset)); __ cvt_d_s(result, result); @@ -3052,7 +3052,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { __ Addu(elements, elements, scratch); } if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); __ Addu(elements, elements, Operand(base_offset)); __ ldc1(result, MemOperand(elements)); if (instr->hydrogen()->RequiresHoleCheck()) { @@ -3515,7 +3515,7 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); // Class for deferred case. 
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { public: @@ -3552,7 +3552,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); DoubleRegister input = ToDoubleRegister(instr->value()); Register result = ToRegister(instr->result()); Register scratch1 = scratch0(); @@ -3581,7 +3581,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); DoubleRegister input = ToDoubleRegister(instr->value()); Register result = ToRegister(instr->result()); DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); @@ -3658,7 +3658,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); DoubleRegister input = ToDoubleRegister(instr->value()); DoubleRegister result = ToDoubleRegister(instr->result()); __ sqrt_d(result, input); @@ -3666,7 +3666,7 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); DoubleRegister input = ToDoubleRegister(instr->value()); DoubleRegister result = ToDoubleRegister(instr->result()); DoubleRegister temp = ToDoubleRegister(instr->temp()); @@ -3691,7 +3691,7 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { void LCodeGen::DoPower(LPower* instr) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); Representation exponent_type = instr->hydrogen()->right()->representation(); // Having marked this as a call, we can use any registers. // Just make sure that the input/output registers are the expected ones. 
@@ -3722,7 +3722,7 @@ void LCodeGen::DoPower(LPower* instr) { void LCodeGen::DoRandom(LRandom* instr) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); class DeferredDoRandom: public LDeferredCode { public: DeferredDoRandom(LCodeGen* codegen, LRandom* instr) @@ -3799,7 +3799,7 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) { void LCodeGen::DoMathExp(LMathExp* instr) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); DoubleRegister input = ToDoubleRegister(instr->value()); DoubleRegister result = ToDoubleRegister(instr->result()); DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); @@ -4075,7 +4075,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); Register external_pointer = ToRegister(instr->elements()); Register key = no_reg; ElementsKind elements_kind = instr->elements_kind(); @@ -4149,7 +4149,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); DoubleRegister value = ToDoubleRegister(instr->value()); Register elements = ToRegister(instr->elements()); Register key = no_reg; @@ -4453,7 +4453,7 @@ void LCodeGen::DoStringLength(LStringLength* instr) { void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); LOperand* input = instr->value(); ASSERT(input->IsRegister() || input->IsStackSlot()); LOperand* output = instr->result(); @@ -4471,7 +4471,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); LOperand* input = instr->value(); LOperand* output = instr->result(); @@ -4592,7 +4592,7 @@ void 
LCodeGen::DoDeferredNumberTagI(LInstruction* instr, __ Xor(src, src, Operand(0x80000000)); } if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); __ mtc1(src, dbl_scratch); __ cvt_d_w(dbl_scratch, dbl_scratch); } else { @@ -4604,7 +4604,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, } } else { if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); __ mtc1(src, dbl_scratch); __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22); } else { @@ -4644,7 +4644,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, // number. __ bind(&done); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset)); } else { __ sw(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset)); @@ -4683,7 +4683,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { Label done; if (convert_hole) { if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); DoubleRegister input_reg = ToDoubleRegister(instr->value()); __ BranchF(&no_special_nan_handling, NULL, eq, input_reg, input_reg); __ Move(reg, scratch0(), input_reg); @@ -4729,7 +4729,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { } __ bind(deferred->exit()); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset)); } else { __ sw(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset)); @@ -4784,7 +4784,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, LEnvironment* env, NumberUntagDMode mode) { Register scratch = scratch0(); - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); Label load_smi, heap_number, done; @@ -4862,7 +4862,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { // of the if. 
if (instr->truncating()) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); Register scratch3 = ToRegister(instr->temp2()); FPURegister single_scratch = double_scratch.low(); ASSERT(!scratch3.is(input_reg) && @@ -5117,7 +5117,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { - CpuFeatures::Scope vfp_scope(FPU); + CpuFeatureScope vfp_scope(masm(), FPU); DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); @@ -5126,7 +5126,7 @@ void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { - CpuFeatures::Scope vfp_scope(FPU); + CpuFeatureScope vfp_scope(masm(), FPU); Register unclamped_reg = ToRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); __ ClampUint8(result_reg, unclamped_reg); @@ -5134,7 +5134,7 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { - CpuFeatures::Scope vfp_scope(FPU); + CpuFeatureScope vfp_scope(masm(), FPU); Register scratch = scratch0(); Register input_reg = ToRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); diff --git a/src/mips/lithium-gap-resolver-mips.cc b/src/mips/lithium-gap-resolver-mips.cc index a4a4411..b415156 100644 --- a/src/mips/lithium-gap-resolver-mips.cc +++ b/src/mips/lithium-gap-resolver-mips.cc @@ -172,10 +172,10 @@ void LGapResolver::BreakCycle(int index) { } else if (source->IsStackSlot()) { __ lw(kLithiumScratchReg, cgen_->ToMemOperand(source)); } else if (source->IsDoubleRegister()) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(cgen_->masm(), FPU); __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source)); } else if (source->IsDoubleStackSlot()) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(cgen_->masm(), FPU); __ 
ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source)); } else { UNREACHABLE(); @@ -195,11 +195,11 @@ void LGapResolver::RestoreValue() { } else if (saved_destination_->IsStackSlot()) { __ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_)); } else if (saved_destination_->IsDoubleRegister()) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(cgen_->masm(), FPU); __ mov_d(cgen_->ToDoubleRegister(saved_destination_), kLithiumScratchDouble); } else if (saved_destination_->IsDoubleStackSlot()) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(cgen_->masm(), FPU); __ sdc1(kLithiumScratchDouble, cgen_->ToMemOperand(saved_destination_)); } else { @@ -236,7 +236,7 @@ void LGapResolver::EmitMove(int index) { MemOperand destination_operand = cgen_->ToMemOperand(destination); if (in_cycle_) { if (!destination_operand.OffsetIsInt16Encodable()) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(cgen_->masm(), FPU); // 'at' is overwritten while saving the value to the destination. // Therefore we can't use 'at'. It is OK if the read from the source // destroys 'at', since that happens before the value is read. 
@@ -276,7 +276,7 @@ void LGapResolver::EmitMove(int index) { } } else if (source->IsDoubleRegister()) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(cgen_->masm(), FPU); DoubleRegister source_register = cgen_->ToDoubleRegister(source); if (destination->IsDoubleRegister()) { __ mov_d(cgen_->ToDoubleRegister(destination), source_register); @@ -287,7 +287,7 @@ void LGapResolver::EmitMove(int index) { } } else if (source->IsDoubleStackSlot()) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(cgen_->masm(), FPU); MemOperand source_operand = cgen_->ToMemOperand(source); if (destination->IsDoubleRegister()) { __ ldc1(cgen_->ToDoubleRegister(destination), source_operand); diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc index 1829522..62d42fd 100644 --- a/src/mips/macro-assembler-mips.cc +++ b/src/mips/macro-assembler-mips.cc @@ -853,7 +853,7 @@ void MacroAssembler::MultiPopReversed(RegList regs) { void MacroAssembler::MultiPushFPU(RegList regs) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); int16_t num_to_push = NumberOfBitsSet(regs); int16_t stack_offset = num_to_push * kDoubleSize; @@ -868,7 +868,7 @@ void MacroAssembler::MultiPushFPU(RegList regs) { void MacroAssembler::MultiPushReversedFPU(RegList regs) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); int16_t num_to_push = NumberOfBitsSet(regs); int16_t stack_offset = num_to_push * kDoubleSize; @@ -883,7 +883,7 @@ void MacroAssembler::MultiPushReversedFPU(RegList regs) { void MacroAssembler::MultiPopFPU(RegList regs) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); int16_t stack_offset = 0; for (int16_t i = 0; i < kNumRegisters; i++) { @@ -897,7 +897,7 @@ void MacroAssembler::MultiPopFPU(RegList regs) { void MacroAssembler::MultiPopReversedFPU(RegList regs) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); int16_t stack_offset = 0; for (int16_t i = kNumRegisters - 1; i >= 0; 
i--) { @@ -1165,7 +1165,7 @@ void MacroAssembler::BranchF(Label* target, void MacroAssembler::Move(FPURegister dst, double imm) { - ASSERT(CpuFeatures::IsEnabled(FPU)); + ASSERT(IsEnabled(FPU)); static const DoubleRepresentation minus_zero(-0.0); static const DoubleRepresentation zero(0.0); DoubleRepresentation value(imm); @@ -1345,7 +1345,7 @@ void MacroAssembler::ConvertToInt32(Register source, } bind(&right_exponent); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); // MIPS FPU instructions implementing double precision to integer // conversion using round to zero. Since the FP value was qualified // above, the resulting integer should be a legal int32. @@ -1406,7 +1406,7 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode, ASSERT(!except_flag.is(scratch)); ASSERT(CpuFeatures::IsSupported(FPU)); - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); Label done; // Clear the except flag (0 = no exception) @@ -1548,7 +1548,7 @@ void MacroAssembler::EmitECMATruncate(Register result, Register scratch, Register scratch2, Register scratch3) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); ASSERT(!scratch2.is(result)); ASSERT(!scratch3.is(result)); ASSERT(!scratch3.is(scratch2)); @@ -3484,7 +3484,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, scratch4, f2); if (destination == FloatingPointHelper::kFPURegisters) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); sdc1(f0, MemOperand(scratch1, 0)); } else { sw(mantissa_reg, MemOperand(scratch1, 0)); @@ -3577,7 +3577,7 @@ void MacroAssembler::CheckMap(Register obj, void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); if (IsMipsSoftFloatABI) { Move(dst, v0, v1); } else { @@ -3587,7 +3587,7 @@ void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) { void 
MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); if (!IsMipsSoftFloatABI) { Move(f12, dreg); } else { @@ -3598,7 +3598,7 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) { void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); if (!IsMipsSoftFloatABI) { if (dreg2.is(f12)) { ASSERT(!dreg1.is(f14)); @@ -3617,7 +3617,7 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1, void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg, Register reg) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); if (!IsMipsSoftFloatABI) { Move(f12, dreg); Move(a2, reg); @@ -4632,7 +4632,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); if (save_doubles) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); // The stack must be allign to 0 modulo 8 for stores with sdc1. ASSERT(kDoubleSize == frame_alignment); if (frame_alignment > 0) { @@ -4670,7 +4670,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, bool do_return) { // Optionally restore all double registers. if (save_doubles) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); // Remember: we only need to restore every 2nd double FPU value. 
lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset)); for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) { diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc index 72e12f5..6247510 100644 --- a/src/mips/stub-cache-mips.cc +++ b/src/mips/stub-cache-mips.cc @@ -998,7 +998,7 @@ static void StoreIntAsFloat(MacroAssembler* masm, Register scratch1, Register scratch2) { if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ mtc1(ival, f0); __ cvt_s_w(f0, f0); __ sll(scratch1, wordoffset, 2); @@ -2108,7 +2108,7 @@ Handle CallStubCompiler::CompileMathFloorCall( return Handle::null(); } - CpuFeatures::Scope scope_fpu(FPU); + CpuFeatureScope scope_fpu(masm(), FPU); const int argc = arguments().immediate(); // If the object is not a JSObject or we got an unexpected number of // arguments, bail out to the regular call. @@ -3353,7 +3353,7 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm, FPURegister double_scratch1, Label* fail) { if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); Label key_ok; // Check for smi or a smi inside a heap number. We convert the heap // number and check if the conversion is exact and fits into the smi @@ -3489,7 +3489,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( f0, t2, t3, // These are: double_dst, dst_mantissa, dst_exponent. t0, f2); // These are: scratch2, single_scratch. if (destination == FloatingPointHelper::kFPURegisters) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); __ sdc1(f0, MemOperand(a3, 0)); } else { __ sw(t2, MemOperand(a3, 0)); @@ -3527,7 +3527,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // reproducible behavior, convert these to zero. 
if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ ldc1(f0, FieldMemOperand(a0, HeapNumber::kValueOffset)); diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc index 0ac0862..97b9075 100644 --- a/src/x64/assembler-x64.cc +++ b/src/x64/assembler-x64.cc @@ -43,7 +43,7 @@ namespace internal { bool CpuFeatures::initialized_ = false; #endif uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures; -uint64_t CpuFeatures::found_by_runtime_probing_ = 0; +uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0; ExternalReference ExternalReference::cpu_features() { @@ -108,7 +108,7 @@ void CpuFeatures::Probe() { __ bind(&cpuid); __ movl(rax, Immediate(1)); supported_ = kDefaultCpuFeatures | (1 << CPUID); - { Scope fscope(CPUID); + { CpuFeatureScope fscope(&assm, CPUID); __ cpuid(); // Move the result from ecx:edx to rdi. __ movl(rdi, rdx); // Zero-extended to 64 bits. @@ -143,12 +143,13 @@ void CpuFeatures::Probe() { typedef uint64_t (*F0)(); F0 probe = FUNCTION_CAST(reinterpret_cast
(memory->address())); - supported_ = probe(); - found_by_runtime_probing_ = supported_; - found_by_runtime_probing_ &= ~kDefaultCpuFeatures; - uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform(); - supported_ |= os_guarantees; - found_by_runtime_probing_ &= ~os_guarantees; + + uint64_t probed_features = probe(); + uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform(); + supported_ = probed_features | platform_features; + found_by_runtime_probing_only_ + = probed_features & ~kDefaultCpuFeatures & ~platform_features; + // SSE2 and CMOV must be available on an X64 CPU. ASSERT(IsSupported(CPUID)); ASSERT(IsSupported(SSE2)); @@ -978,7 +979,7 @@ void Assembler::cmpb_al(Immediate imm8) { void Assembler::cpuid() { - ASSERT(CpuFeatures::IsEnabled(CPUID)); + ASSERT(IsEnabled(CPUID)); EnsureSpace ensure_space(this); emit(0x0F); emit(0xA2); @@ -2218,7 +2219,7 @@ void Assembler::fistp_s(const Operand& adr) { void Assembler::fisttp_s(const Operand& adr) { - ASSERT(CpuFeatures::IsEnabled(SSE3)); + ASSERT(IsEnabled(SSE3)); EnsureSpace ensure_space(this); emit_optional_rex_32(adr); emit(0xDB); @@ -2227,7 +2228,7 @@ void Assembler::fisttp_s(const Operand& adr) { void Assembler::fisttp_d(const Operand& adr) { - ASSERT(CpuFeatures::IsEnabled(SSE3)); + ASSERT(IsEnabled(SSE3)); EnsureSpace ensure_space(this); emit_optional_rex_32(adr); emit(0xDD); @@ -2943,7 +2944,7 @@ void Assembler::ucomisd(XMMRegister dst, const Operand& src) { void Assembler::roundsd(XMMRegister dst, XMMRegister src, Assembler::RoundingMode mode) { - ASSERT(CpuFeatures::IsEnabled(SSE4_1)); + ASSERT(IsEnabled(SSE4_1)); EnsureSpace ensure_space(this); emit(0x66); emit_optional_rex_32(dst, src); diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h index 69eeb8e..86ae057 100644 --- a/src/x64/assembler-x64.h +++ b/src/x64/assembler-x64.h @@ -442,10 +442,10 @@ class Operand BASE_EMBEDDED { // CpuFeatures keeps track of which features are supported by the target CPU. 
-// Supported features must be enabled by a Scope before use. +// Supported features must be enabled by a CpuFeatureScope before use. // Example: -// if (CpuFeatures::IsSupported(SSE3)) { -// CpuFeatures::Scope fscope(SSE3); +// if (assembler->IsSupported(SSE3)) { +// CpuFeatureScope fscope(assembler, SSE3); // // Generate SSE3 floating point code. // } else { // // Generate standard x87 or SSE2 floating point code. @@ -465,57 +465,14 @@ class CpuFeatures : public AllStatic { if (f == CMOV && !FLAG_enable_cmov) return false; if (f == RDTSC && !FLAG_enable_rdtsc) return false; if (f == SAHF && !FLAG_enable_sahf) return false; - return (supported_ & (V8_UINT64_C(1) << f)) != 0; + return (supported_ & (static_cast(1) << f)) != 0; } -#ifdef DEBUG - // Check whether a feature is currently enabled. - static bool IsEnabled(CpuFeature f) { + static bool IsFoundByRuntimeProbingOnly(CpuFeature f) { ASSERT(initialized_); - Isolate* isolate = Isolate::UncheckedCurrent(); - if (isolate == NULL) { - // When no isolate is available, work as if we're running in - // release mode. - return IsSupported(f); - } - uint64_t enabled = isolate->enabled_cpu_features(); - return (enabled & (V8_UINT64_C(1) << f)) != 0; + return (found_by_runtime_probing_only_ & + (static_cast(1) << f)) != 0; } -#endif - - // Enable a specified feature within a scope. 
- class Scope BASE_EMBEDDED { -#ifdef DEBUG - - public: - explicit Scope(CpuFeature f) { - uint64_t mask = V8_UINT64_C(1) << f; - ASSERT(CpuFeatures::IsSupported(f)); - ASSERT(!Serializer::enabled() || - (CpuFeatures::found_by_runtime_probing_ & mask) == 0); - isolate_ = Isolate::UncheckedCurrent(); - old_enabled_ = 0; - if (isolate_ != NULL) { - old_enabled_ = isolate_->enabled_cpu_features(); - isolate_->set_enabled_cpu_features(old_enabled_ | mask); - } - } - ~Scope() { - ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_); - if (isolate_ != NULL) { - isolate_->set_enabled_cpu_features(old_enabled_); - } - } - - private: - Isolate* isolate_; - uint64_t old_enabled_; -#else - - public: - explicit Scope(CpuFeature f) {} -#endif - }; private: // Safe defaults include SSE2 and CMOV for X64. It is always available, if @@ -528,7 +485,7 @@ class CpuFeatures : public AllStatic { static bool initialized_; #endif static uint64_t supported_; - static uint64_t found_by_runtime_probing_; + static uint64_t found_by_runtime_probing_only_; friend class ExternalReference; DISALLOW_COPY_AND_ASSIGN(CpuFeatures); diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc index de47f6f..5eb15b7 100644 --- a/src/x64/lithium-codegen-x64.cc +++ b/src/x64/lithium-codegen-x64.cc @@ -3474,7 +3474,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { XMMRegister input_reg = ToDoubleRegister(instr->value()); if (CpuFeatures::IsSupported(SSE4_1)) { - CpuFeatures::Scope scope(SSE4_1); + CpuFeatureScope scope(masm(), SSE4_1); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { // Deoptimize if minus zero. __ movq(output_reg, input_reg); diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc index 22a7046..5a263ab 100644 --- a/src/x64/macro-assembler-x64.cc +++ b/src/x64/macro-assembler-x64.cc @@ -898,7 +898,7 @@ void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, } // R12 to r15 are callee save on all platforms. 
if (fp_mode == kSaveFPRegs) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(this, SSE2); subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters)); for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); @@ -913,7 +913,7 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion2, Register exclusion3) { if (fp_mode == kSaveFPRegs) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(this, SSE2); for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); movsd(reg, Operand(rsp, i * kDoubleSize)); diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc index 14bcb1a..cfbdb12 100644 --- a/test/cctest/test-assembler-arm.cc +++ b/test/cctest/test-assembler-arm.cc @@ -252,7 +252,7 @@ TEST(4) { if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); + CpuFeatureScope scope(&assm, VFP3); __ mov(ip, Operand(sp)); __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit()); @@ -370,7 +370,7 @@ TEST(5) { Assembler assm(isolate, NULL, 0); if (CpuFeatures::IsSupported(ARMv7)) { - CpuFeatures::Scope scope(ARMv7); + CpuFeatureScope scope(&assm, ARMv7); // On entry, r0 = 0xAAAAAAAA = 0b10..10101010. __ ubfx(r0, r0, 1, 12); // 0b00..010101010101 = 0x555 __ sbfx(r0, r0, 0, 5); // 0b11..111111110101 = -11 @@ -407,7 +407,7 @@ TEST(6) { Assembler assm(isolate, NULL, 0); if (CpuFeatures::IsSupported(ARMv7)) { - CpuFeatures::Scope scope(ARMv7); + CpuFeatureScope scope(&assm, ARMv7); __ usat(r1, 8, Operand(r0)); // Sat 0xFFFF to 0-255 = 0xFF. __ usat(r2, 12, Operand(r0, ASR, 9)); // Sat (0xFFFF>>9) to 0-4095 = 0x7F. __ usat(r3, 1, Operand(r0, LSL, 16)); // Sat (0xFFFF<<16) to 0-1 = 0x0. 
@@ -451,7 +451,7 @@ static void TestRoundingMode(VCVTTypes types, Assembler assm(isolate, NULL, 0); if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); + CpuFeatureScope scope(&assm, VFP3); Label wrong_exception; @@ -655,7 +655,7 @@ TEST(8) { Assembler assm(isolate, NULL, 0); if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(&assm, VFP2); __ mov(ip, Operand(sp)); __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit()); @@ -767,7 +767,7 @@ TEST(9) { Assembler assm(isolate, NULL, 0); if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(&assm, VFP2); __ mov(ip, Operand(sp)); __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit()); @@ -883,7 +883,7 @@ TEST(10) { Assembler assm(isolate, NULL, 0); if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(&assm, VFP2); __ mov(ip, Operand(sp)); __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit()); @@ -1070,7 +1070,7 @@ TEST(13) { if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); + CpuFeatureScope scope(&assm, VFP3); __ stm(db_w, sp, r4.bit() | lr.bit()); diff --git a/test/cctest/test-assembler-ia32.cc b/test/cctest/test-assembler-ia32.cc index 76eca0d..e92e48f 100644 --- a/test/cctest/test-assembler-ia32.cc +++ b/test/cctest/test-assembler-ia32.cc @@ -180,7 +180,7 @@ TEST(AssemblerIa323) { Assembler assm(isolate, buffer, sizeof buffer); CHECK(CpuFeatures::IsSupported(SSE2)); - { CpuFeatures::Scope fscope(SSE2); + { CpuFeatureScope fscope(&assm, SSE2); __ cvttss2si(eax, Operand(esp, 4)); __ ret(0); } @@ -216,7 +216,7 @@ TEST(AssemblerIa324) { Assembler assm(isolate, buffer, sizeof buffer); CHECK(CpuFeatures::IsSupported(SSE2)); - CpuFeatures::Scope fscope(SSE2); + CpuFeatureScope fscope(&assm, SSE2); __ cvttsd2si(eax, Operand(esp, 4)); __ ret(0); @@ -269,12 +269,11 @@ TEST(AssemblerIa326) { if (!CpuFeatures::IsSupported(SSE2)) return; v8::HandleScope scope; - 
CHECK(CpuFeatures::IsSupported(SSE2)); - CpuFeatures::Scope fscope(SSE2); v8::internal::byte buffer[256]; Isolate* isolate = Isolate::Current(); Assembler assm(isolate, buffer, sizeof buffer); + CpuFeatureScope fscope(&assm, SSE2); __ movdbl(xmm0, Operand(esp, 1 * kPointerSize)); __ movdbl(xmm1, Operand(esp, 3 * kPointerSize)); __ addsd(xmm0, xmm1); @@ -316,11 +315,10 @@ TEST(AssemblerIa328) { if (!CpuFeatures::IsSupported(SSE2)) return; v8::HandleScope scope; - CHECK(CpuFeatures::IsSupported(SSE2)); - CpuFeatures::Scope fscope(SSE2); v8::internal::byte buffer[256]; Isolate* isolate = Isolate::Current(); Assembler assm(isolate, buffer, sizeof buffer); + CpuFeatureScope fscope(&assm, SSE2); __ mov(eax, Operand(esp, 4)); __ cvtsi2sd(xmm0, eax); // Copy xmm0 to st(0) using eight bytes of stack. diff --git a/test/cctest/test-disasm-arm.cc b/test/cctest/test-disasm-arm.cc index 8d0a18e..58e18a6 100644 --- a/test/cctest/test-disasm-arm.cc +++ b/test/cctest/test-disasm-arm.cc @@ -424,7 +424,7 @@ TEST(Vfp) { SET_UP(); if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); + CpuFeatureScope scope(&assm, VFP3); COMPARE(vmov(d0, r2, r3), "ec432b10 vmov d0, r2, r3"); COMPARE(vmov(r2, r3, d0), @@ -834,7 +834,7 @@ TEST(LoadStore) { "e7210002 str r0, [r1, -r2]!"); if (CpuFeatures::IsSupported(ARMv7)) { - CpuFeatures::Scope scope(ARMv7); + CpuFeatureScope scope(&assm, ARMv7); COMPARE(ldrd(r0, r1, MemOperand(r1)), "e1c100d0 ldrd r0, [r1, #+0]"); COMPARE(ldrd(r2, r3, MemOperand(r3, 127)), diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc index 59d1e04..49e6738 100644 --- a/test/cctest/test-disasm-ia32.cc +++ b/test/cctest/test-disasm-ia32.cc @@ -108,12 +108,12 @@ TEST(DisasmIa320) { __ nop(); { CHECK(CpuFeatures::IsSupported(CPUID)); - CpuFeatures::Scope fscope(CPUID); + CpuFeatureScope fscope(&assm, CPUID); __ cpuid(); } { CHECK(CpuFeatures::IsSupported(RDTSC)); - CpuFeatures::Scope fscope(RDTSC); + CpuFeatureScope fscope(&assm, RDTSC); 
__ rdtsc(); } __ movsx_b(edx, ecx); @@ -369,7 +369,7 @@ TEST(DisasmIa320) { __ nop(); { if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope fscope(SSE2); + CpuFeatureScope fscope(&assm, SSE2); __ cvttss2si(edx, Operand(ebx, ecx, times_4, 10000)); __ cvtsi2sd(xmm1, Operand(ebx, ecx, times_4, 10000)); __ addsd(xmm1, xmm0); @@ -391,7 +391,7 @@ TEST(DisasmIa320) { // cmov. { if (CpuFeatures::IsSupported(CMOV)) { - CpuFeatures::Scope use_cmov(CMOV); + CpuFeatureScope use_cmov(&assm, CMOV); __ cmov(overflow, eax, Operand(eax, 0)); __ cmov(no_overflow, eax, Operand(eax, 1)); __ cmov(below, eax, Operand(eax, 2)); @@ -414,7 +414,7 @@ TEST(DisasmIa320) { // andpd, cmpltsd, movaps, psllq, psrlq, por. { if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope fscope(SSE2); + CpuFeatureScope fscope(&assm, SSE2); __ andpd(xmm0, xmm1); __ andpd(xmm1, xmm2); @@ -444,7 +444,7 @@ TEST(DisasmIa320) { { if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(SSE4_1)) { - CpuFeatures::Scope scope(SSE4_1); + CpuFeatureScope scope(&assm, SSE4_1); __ pextrd(eax, xmm0, 1); __ pinsrd(xmm1, eax, 0); } -- 2.7.4