From 78b09625d5f1856d6dbd78fb4449957dfc47f6c4 Mon Sep 17 00:00:00 2001
From: "danno@chromium.org"
Date: Mon, 3 Dec 2012 15:51:05 +0000
Subject: [PATCH] Enable stub generation using Hydrogen/Lithium (again)

This initial implementation generates only KeyedLoadICs using the new
Hydrogen stub infrastructure.

Committed: https://code.google.com/p/v8/source/detail?r=13105

Review URL: https://codereview.chromium.org/10701054

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13117 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 Makefile | 6 +
 src/arm/assembler-arm-inl.h | 9 +
 src/arm/assembler-arm.cc | 27 +
 src/arm/assembler-arm.h | 124 +++-
 src/arm/builtins-arm.cc | 20 +
 src/arm/code-stubs-arm.cc | 56 +-
 src/arm/code-stubs-arm.h | 26 +-
 src/arm/codegen-arm.cc | 16 +-
 src/arm/codegen-arm.h | 14 +-
 src/arm/deoptimizer-arm.cc | 138 +++-
 src/arm/lithium-arm.cc | 32 +-
 src/arm/lithium-arm.h | 10 +-
 src/arm/lithium-codegen-arm.cc | 444 +++++++++----
 src/arm/lithium-codegen-arm.h | 34 +-
 src/arm/lithium-gap-resolver-arm.cc | 13 +-
 src/arm/macro-assembler-arm.cc | 35 +-
 src/arm/macro-assembler-arm.h | 18 +-
 src/arm/stub-cache-arm.cc | 495 +--------------
 src/assembler.cc | 5 +
 src/assembler.h | 2 +
 src/ast.cc | 8 -
 src/ast.h | 47 +-
 src/builtins.h | 3 +
 src/code-stubs-hydrogen.cc | 137 ++++
 src/code-stubs.cc | 99 ++-
 src/code-stubs.h | 163 +++--
 src/codegen.cc | 33 +-
 src/compiler.cc | 60 +-
 src/compiler.h | 50 +-
 src/deoptimizer.cc | 67 +-
 src/deoptimizer.h | 15 +-
 src/disassembler.cc | 10 +-
 src/frames-inl.h | 12 +-
 src/frames.cc | 20 +-
 src/frames.h | 29 +-
 src/full-codegen.cc | 1 +
 src/full-codegen.h | 6 +-
 src/hydrogen.cc | 1128 ++++++++++++++++++---------
 src/hydrogen.h | 146 +++--
 src/ia32/assembler-ia32.cc | 30 +-
 src/ia32/assembler-ia32.h | 107 +++-
 src/ia32/builtins-ia32.cc | 19 +
 src/ia32/code-stubs-ia32.cc | 38 +-
 src/ia32/code-stubs-ia32.h | 20 +-
 src/ia32/deoptimizer-ia32.cc | 121 +++-
 src/ia32/lithium-codegen-ia32.cc | 567 ++++++++++++-----
 src/ia32/lithium-codegen-ia32.h | 38 +-
 src/ia32/lithium-gap-resolver-ia32.cc | 52 +-
 src/ia32/lithium-gap-resolver-ia32.h | 4 +-
 src/ia32/lithium-ia32.cc | 51 +-
 src/ia32/lithium-ia32.h | 23 +-
 src/ia32/macro-assembler-ia32.cc | 3 +-
 src/ia32/macro-assembler-ia32.h | 4 +-
 src/ia32/stub-cache-ia32.cc | 265 +-------
 src/ic.cc | 8 +-
 src/isolate.cc | 22 +-
 src/isolate.h | 6 +
 src/lithium-allocator.cc | 45 +-
 src/lithium-allocator.h | 4 +-
 src/lithium.cc | 4 +-
 src/lithium.h | 2 +-
 src/log.cc | 1 +
 src/mips/codegen-mips.h | 2 +
 src/objects-inl.h | 10 +-
 src/objects.cc | 9 +-
 src/objects.h | 5 +
 src/optimizing-compiler-thread.h | 2 +-
 src/prettyprinter.cc | 1 +
 src/prettyprinter.h | 2 +
 src/rewriter.cc | 6 +-
 src/runtime.cc | 15 +-
 src/runtime.h | 1 +
 src/safepoint-table.cc | 11 +-
 src/serialize.cc | 12 +
 src/serialize.h | 6 +-
 src/smart-pointers.h | 12 +-
 src/spaces.cc | 1 +
 src/stub-cache.h | 7 -
 src/utils.h | 6 +-
 src/x64/assembler-x64.cc | 3 +-
 src/x64/assembler-x64.h | 20 +-
 src/x64/builtins-x64.cc | 25 +-
 src/x64/code-stubs-x64.cc | 18 +
 src/x64/code-stubs-x64.h | 20 +-
 src/x64/codegen-x64.h | 6 +
 src/x64/deoptimizer-x64.cc | 90 ++-
 src/x64/lithium-codegen-x64.cc | 182 ++++--
 src/x64/lithium-codegen-x64.h | 24 +-
 src/x64/lithium-x64.cc | 26 +-
 src/x64/lithium-x64.h | 10 +-
 src/x64/macro-assembler-x64.cc | 4 +-
 src/x64/macro-assembler-x64.h | 4 +-
 src/x64/stub-cache-x64.cc | 243 +------
 test/cctest/test-mark-compact.cc | 2 +-
 test/mjsunit/fuzz-natives-part1.js | 1 +
 tools/gyp/v8.gyp | 1 +
 96 files changed, 3391 insertions(+), 2388
deletions(-) create mode 100644 src/code-stubs-hydrogen.cc diff --git a/Makefile b/Makefile index b65ea4c..cf94838 100644 --- a/Makefile +++ b/Makefile @@ -81,6 +81,12 @@ endif ifeq ($(liveobjectlist), on) GYPFLAGS += -Dv8_use_liveobjectlist=true endif +# vfp2=off +ifeq ($(vfp2), off) + GYPFLAGS += -Dv8_can_use_vfp2_instructions=false +else + GYPFLAGS += -Dv8_can_use_vfp2_instructions=true +endif # vfp3=off ifeq ($(vfp3), off) GYPFLAGS += -Dv8_can_use_vfp3_instructions=false diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h index acd61fe..7b79542 100644 --- a/src/arm/assembler-arm-inl.h +++ b/src/arm/assembler-arm-inl.h @@ -47,6 +47,15 @@ namespace v8 { namespace internal { +ArmDoubleRegister ArmDoubleRegister::FromAllocationIndex(int index) { + if (CpuFeatures::IsSupported(VFP2)) { + return DwVfpRegister::FromAllocationIndex(index); + } else { + return SoftFloatRegister::FromAllocationIndex(index); + } +} + + int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) { ASSERT(!reg.is(kDoubleRegZero)); ASSERT(!reg.is(kScratchDoubleReg)); diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc index 47ea0e2..42990f6 100644 --- a/src/arm/assembler-arm.cc +++ b/src/arm/assembler-arm.cc @@ -85,6 +85,33 @@ static unsigned CpuFeaturesImpliedByCompiler() { } +int Register::NumAllocatableRegisters() { + if (CpuFeatures::IsSupported(VFP2)) { + return kMaxNumAllocatableRegisters; + } else { + return kMaxNumAllocatableRegisters - kGPRsPerNonVFP2Double; + } +} + + +int DoubleRegister::NumAllocatableRegisters() { + if (CpuFeatures::IsSupported(VFP2)) { + return DwVfpRegister::kMaxNumAllocatableRegisters; + } else { + return SoftFloatRegister::kMaxNumAllocatableRegisters; + } +} + + +const char* DoubleRegister::AllocationIndexToString(int index) { + if (CpuFeatures::IsSupported(VFP2)) { + return DwVfpRegister::AllocationIndexToString(index); + } else { + return SoftFloatRegister::AllocationIndexToString(index); + } +} + + void CpuFeatures::Probe() { unsigned standard_features = static_cast( OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler(); diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h index 3b9bb80..18c1376 100644 --- a/src/arm/assembler-arm.h +++ b/src/arm/assembler-arm.h @@ -71,21 +71,23 @@ namespace internal { // Core register struct Register { static const int kNumRegisters = 16; - static const int kNumAllocatableRegisters = 8; + static const int kMaxNumAllocatableRegisters = 8; + static const int kGPRsPerNonVFP2Double = 2; + static int NumAllocatableRegisters(); static const int kSizeInBytes = 4; static int ToAllocationIndex(Register reg) { - ASSERT(reg.code() < kNumAllocatableRegisters); + ASSERT(reg.code() < NumAllocatableRegisters()); return reg.code(); } static Register FromAllocationIndex(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); + ASSERT(index >= 0 && index < NumAllocatableRegisters()); return from_code(index); } static const char* AllocationIndexToString(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); + ASSERT(index >= 0 && index < NumAllocatableRegisters()); const char* const names[] = { "r0", "r1", @@ -188,26 +190,57 @@ struct SwVfpRegister { }; -// Double word VFP register. -struct DwVfpRegister { - static const int kNumRegisters = 16; +struct ArmDoubleRegister { + static const int kMaxNumRegisters = 16; // A few double registers are reserved: one as a scratch register and one to // hold 0.0, that does not fit in the immediate field of vmov instructions. 
// d14: 0.0 // d15: scratch register. static const int kNumReservedRegisters = 2; - static const int kNumAllocatableRegisters = kNumRegisters - + static const int kMaxNumAllocatableRegisters = kMaxNumRegisters - kNumReservedRegisters; + explicit ArmDoubleRegister(int code) { code_ = code; } + static int NumAllocatableRegisters(); + static int NumRegisters() { return kNumRegisters; } + static const char* AllocationIndexToString(int index); + inline static ArmDoubleRegister FromAllocationIndex(int index); + inline static int ToAllocationIndex(ArmDoubleRegister reg) { + return reg.code(); + } + + static ArmDoubleRegister from_code(int code) { + ArmDoubleRegister r = ArmDoubleRegister(code); + return r; + } + + bool is_valid() const { + return 0 <= code_ && code_ < NumRegisters(); + } + bool is(ArmDoubleRegister reg) const { return code_ == reg.code_; } + int code() const { + ASSERT(is_valid()); + return code_; + } + + int code_; +}; + + +// Double word VFP register. +struct DwVfpRegister : ArmDoubleRegister { + static const int kNumRegisters = 16; - inline static int ToAllocationIndex(DwVfpRegister reg); + explicit DwVfpRegister(int code) : ArmDoubleRegister(code) {} + + inline int ToAllocationIndex(DwVfpRegister reg); static DwVfpRegister FromAllocationIndex(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); + ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); return from_code(index); } static const char* AllocationIndexToString(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); + ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); const char* const names[] = { "d0", "d1", @@ -228,8 +261,7 @@ struct DwVfpRegister { } static DwVfpRegister from_code(int code) { - DwVfpRegister r = { code }; - return r; + return DwVfpRegister(code); } // Supporting d0 to d15, can be later extended to d31. @@ -262,12 +294,37 @@ struct DwVfpRegister { *m = (code_ & 0x10) >> 4; *vm = code_ & 0x0F; } +}; - int code_; + +// Double word VFP register. +struct SoftFloatRegister : ArmDoubleRegister { + static const int kNumRegisters = 1; + static const int kMaxNumAllocatableRegisters = kNumRegisters; + + explicit SoftFloatRegister(int code) : ArmDoubleRegister(code) {} + + static SoftFloatRegister FromAllocationIndex(int index) { + ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); + return from_code(index); + } + + static const char* AllocationIndexToString(int index) { + ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); + const char* const names[] = { + "sfpd0" + }; + return names[index]; + } + + static SoftFloatRegister from_code(int code) { + SoftFloatRegister r = SoftFloatRegister(code); + return r; + } }; -typedef DwVfpRegister DoubleRegister; +typedef ArmDoubleRegister DoubleRegister; // Support for the VFP registers s0 to s31 (d0 to d15). 
@@ -305,23 +362,26 @@ const SwVfpRegister s29 = { 29 }; const SwVfpRegister s30 = { 30 }; const SwVfpRegister s31 = { 31 }; -const DwVfpRegister no_dreg = { -1 }; -const DwVfpRegister d0 = { 0 }; -const DwVfpRegister d1 = { 1 }; -const DwVfpRegister d2 = { 2 }; -const DwVfpRegister d3 = { 3 }; -const DwVfpRegister d4 = { 4 }; -const DwVfpRegister d5 = { 5 }; -const DwVfpRegister d6 = { 6 }; -const DwVfpRegister d7 = { 7 }; -const DwVfpRegister d8 = { 8 }; -const DwVfpRegister d9 = { 9 }; -const DwVfpRegister d10 = { 10 }; -const DwVfpRegister d11 = { 11 }; -const DwVfpRegister d12 = { 12 }; -const DwVfpRegister d13 = { 13 }; -const DwVfpRegister d14 = { 14 }; -const DwVfpRegister d15 = { 15 }; +const DwVfpRegister no_dreg = DwVfpRegister(-1); +const DwVfpRegister d0 = DwVfpRegister(0); +const DwVfpRegister d1 = DwVfpRegister(1); +const DwVfpRegister d2 = DwVfpRegister(2); +const DwVfpRegister d3 = DwVfpRegister(3); +const DwVfpRegister d4 = DwVfpRegister(4); +const DwVfpRegister d5 = DwVfpRegister(5); +const DwVfpRegister d6 = DwVfpRegister(6); +const DwVfpRegister d7 = DwVfpRegister(7); +const DwVfpRegister d8 = DwVfpRegister(8); +const DwVfpRegister d9 = DwVfpRegister(9); +const DwVfpRegister d10 = DwVfpRegister(10); +const DwVfpRegister d11 = DwVfpRegister(11); +const DwVfpRegister d12 = DwVfpRegister(12); +const DwVfpRegister d13 = DwVfpRegister(13); +const DwVfpRegister d14 = DwVfpRegister(14); +const DwVfpRegister d15 = DwVfpRegister(15); + +const Register sfpd_lo = { kRegister_r6_Code }; +const Register sfpd_hi = { kRegister_r7_Code }; // Aliases for double registers. Defined using #define instead of // "static const DwVfpRegister&" because Clang complains otherwise when a diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc index 24d14e8..28e00dd 100644 --- a/src/arm/builtins-arm.cc +++ b/src/arm/builtins-arm.cc @@ -1259,6 +1259,26 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR) #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR +void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) { + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Preserve registers across notification, this is important for compiled + // stubs that tail call the runtime on deopts passing their parameters in + // registers. + __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved); + // Pass the function and deoptimization type to the runtime system. 
+ __ CallRuntime(Runtime::kNotifyICMiss, 0); + __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved); + } + + __ mov(ip, lr); // Stash the miss continuation + __ add(sp, sp, Operand(kPointerSize)); // Ignore state + __ pop(lr); // Restore LR to continuation in JSFunction + __ mov(pc, ip); // Jump to miss handler +} + + static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, Deoptimizer::BailoutType type) { { diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc index 9484f85..800c0f1 100644 --- a/src/arm/code-stubs-arm.cc +++ b/src/arm/code-stubs-arm.cc @@ -37,6 +37,23 @@ namespace v8 { namespace internal { +CodeStubInterfaceDescriptor* + KeyedLoadFastElementStub::GetInterfaceDescriptor(Isolate* isolate) { + static CodeStubInterfaceDescriptor* result = NULL; + if (result == NULL) { + Handle miss = isolate->builtins()->KeyedLoadIC_Miss(); + static Register registers[] = { r1, r0 }; + static CodeStubInterfaceDescriptor info = { + 2, + registers, + miss + }; + result = &info; + } + return result; +} + + #define __ ACCESS_MASM(masm) static void EmitIdenticalObjectComparison(MacroAssembler* masm, @@ -503,7 +520,7 @@ void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) { // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a // scratch register. Destroys the source register. No GC occurs during this // stub so you don't have to set up the frame. -class ConvertToDoubleStub : public CodeStub { +class ConvertToDoubleStub : public PlatformCodeStub { public: ConvertToDoubleStub(Register result_reg_1, Register result_reg_2, @@ -3568,10 +3585,10 @@ void MathPowStub::Generate(MacroAssembler* masm) { const Register exponent = r2; const Register heapnumbermap = r5; const Register heapnumber = r0; - const DoubleRegister double_base = d1; - const DoubleRegister double_exponent = d2; - const DoubleRegister double_result = d3; - const DoubleRegister double_scratch = d0; + const DwVfpRegister double_base = d1; + const DwVfpRegister double_exponent = d2; + const DwVfpRegister double_result = d3; + const DwVfpRegister double_scratch = d0; const SwVfpRegister single_scratch = s0; const Register scratch = r9; const Register scratch2 = r7; @@ -3781,12 +3798,29 @@ void CodeStub::GenerateStubsAheadOfTime() { void CodeStub::GenerateFPStubs() { - CEntryStub save_doubles(1, kSaveFPRegs); - Handle code = save_doubles.GetCode(); - code->set_is_pregenerated(true); - StoreBufferOverflowStub stub(kSaveFPRegs); - stub.GetCode()->set_is_pregenerated(true); - code->GetIsolate()->set_fp_stubs_generated(true); + SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2) + ? kSaveFPRegs + : kDontSaveFPRegs; + CEntryStub save_doubles(1, mode); + StoreBufferOverflowStub stub(mode); + // These stubs might already be in the snapshot, detect that and don't + // regenerate, which would lead to code stub initialization state being messed + // up. 
+ Code* save_doubles_code = NULL; + Code* store_buffer_overflow_code = NULL; + if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) { + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatures::Scope scope2(VFP2); + save_doubles_code = *save_doubles.GetCode(); + store_buffer_overflow_code = *stub.GetCode(); + } else { + save_doubles_code = *save_doubles.GetCode(); + store_buffer_overflow_code = *stub.GetCode(); + } + save_doubles_code->set_is_pregenerated(true); + store_buffer_overflow_code->set_is_pregenerated(true); + } + ISOLATE->set_fp_stubs_generated(true); } diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h index 0443cf7..6f964a8 100644 --- a/src/arm/code-stubs-arm.h +++ b/src/arm/code-stubs-arm.h @@ -36,7 +36,7 @@ namespace internal { // Compute a transcendental math function natively, or call the // TranscendentalCache runtime function. -class TranscendentalCacheStub: public CodeStub { +class TranscendentalCacheStub: public PlatformCodeStub { public: enum ArgumentType { TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits, @@ -58,7 +58,7 @@ class TranscendentalCacheStub: public CodeStub { }; -class StoreBufferOverflowStub: public CodeStub { +class StoreBufferOverflowStub: public PlatformCodeStub { public: explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) : save_doubles_(save_fp) { } @@ -77,7 +77,7 @@ class StoreBufferOverflowStub: public CodeStub { }; -class UnaryOpStub: public CodeStub { +class UnaryOpStub: public PlatformCodeStub { public: UnaryOpStub(Token::Value op, UnaryOverwriteMode mode, @@ -219,7 +219,7 @@ enum StringAddFlags { }; -class StringAddStub: public CodeStub { +class StringAddStub: public PlatformCodeStub { public: explicit StringAddStub(StringAddFlags flags) : flags_(flags) {} @@ -242,7 +242,7 @@ class StringAddStub: public CodeStub { }; -class SubStringStub: public CodeStub { +class SubStringStub: public PlatformCodeStub { public: SubStringStub() {} @@ -255,7 +255,7 @@ class SubStringStub: public CodeStub { -class StringCompareStub: public CodeStub { +class StringCompareStub: public PlatformCodeStub { public: StringCompareStub() { } @@ -295,7 +295,7 @@ class StringCompareStub: public CodeStub { // This stub can convert a signed int32 to a heap number (double). It does // not work for int32s that are in Smi range! No GC occurs during this stub // so you don't have to set up the frame. 
-class WriteInt32ToHeapNumberStub : public CodeStub { +class WriteInt32ToHeapNumberStub : public PlatformCodeStub { public: WriteInt32ToHeapNumberStub(Register the_int, Register the_heap_number, @@ -329,7 +329,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub { }; -class NumberToStringStub: public CodeStub { +class NumberToStringStub: public PlatformCodeStub { public: NumberToStringStub() { } @@ -355,7 +355,7 @@ class NumberToStringStub: public CodeStub { }; -class RecordWriteStub: public CodeStub { +class RecordWriteStub: public PlatformCodeStub { public: RecordWriteStub(Register object, Register value, @@ -511,7 +511,7 @@ class RecordWriteStub: public CodeStub { Register GetRegThatIsNotOneOf(Register r1, Register r2, Register r3) { - for (int i = 0; i < Register::kNumAllocatableRegisters; i++) { + for (int i = 0; i < Register::NumAllocatableRegisters(); i++) { Register candidate = Register::FromAllocationIndex(i); if (candidate.is(r1)) continue; if (candidate.is(r2)) continue; @@ -570,7 +570,7 @@ class RecordWriteStub: public CodeStub { // Enter C code from generated RegExp code in a way that allows // the C code to fix the return address in case of a GC. // Currently only needed on ARM. -class RegExpCEntryStub: public CodeStub { +class RegExpCEntryStub: public PlatformCodeStub { public: RegExpCEntryStub() {} virtual ~RegExpCEntryStub() {} @@ -589,7 +589,7 @@ class RegExpCEntryStub: public CodeStub { // keep the code which called into native pinned in the memory. Currently the // simplest approach is to generate such stub early enough so it can never be // moved by GC -class DirectCEntryStub: public CodeStub { +class DirectCEntryStub: public PlatformCodeStub { public: DirectCEntryStub() {} void Generate(MacroAssembler* masm); @@ -739,7 +739,7 @@ class FloatingPointHelper : public AllStatic { }; -class StringDictionaryLookupStub: public CodeStub { +class StringDictionaryLookupStub: public PlatformCodeStub { public: enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc index 300772a..a2762f8 100644 --- a/src/arm/codegen-arm.cc +++ b/src/arm/codegen-arm.cc @@ -73,10 +73,10 @@ UnaryMathFunction CreateExpFunction() { { CpuFeatures::Scope use_vfp(VFP2); - DoubleRegister input = d0; - DoubleRegister result = d1; - DoubleRegister double_scratch1 = d2; - DoubleRegister double_scratch2 = d3; + DwVfpRegister input = d0; + DwVfpRegister result = d1; + DwVfpRegister double_scratch1 = d2; + DwVfpRegister double_scratch2 = d3; Register temp1 = r4; Register temp2 = r5; Register temp3 = r6; @@ -527,10 +527,10 @@ static MemOperand ExpConstant(int index, Register base) { void MathExpGenerator::EmitMathExp(MacroAssembler* masm, - DoubleRegister input, - DoubleRegister result, - DoubleRegister double_scratch1, - DoubleRegister double_scratch2, + DwVfpRegister input, + DwVfpRegister result, + DwVfpRegister double_scratch1, + DwVfpRegister double_scratch2, Register temp1, Register temp2, Register temp3) { diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h index 8f0033e..75899a9 100644 --- a/src/arm/codegen-arm.h +++ b/src/arm/codegen-arm.h @@ -44,6 +44,10 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF }; class CodeGenerator: public AstVisitor { public: + CodeGenerator() { + InitializeAstVisitor(); + } + static bool MakeCode(CompilationInfo* info); // Printing of AST, etc. as requested by flags. 
@@ -68,6 +72,8 @@ class CodeGenerator: public AstVisitor { int pos, bool right_here = false); + DEFINE_AST_VISITOR_SUBCLASS_MEMBERS(); + private: DISALLOW_COPY_AND_ASSIGN(CodeGenerator); }; @@ -92,10 +98,10 @@ class StringCharLoadGenerator : public AllStatic { class MathExpGenerator : public AllStatic { public: static void EmitMathExp(MacroAssembler* masm, - DoubleRegister input, - DoubleRegister result, - DoubleRegister double_scratch1, - DoubleRegister double_scratch2, + DwVfpRegister input, + DwVfpRegister result, + DwVfpRegister double_scratch1, + DwVfpRegister double_scratch2, Register temp1, Register temp2, Register temp3); diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc index 19667b9..8db156d 100644 --- a/src/arm/deoptimizer-arm.cc +++ b/src/arm/deoptimizer-arm.cc @@ -222,7 +222,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) { void Deoptimizer::DoComputeOsrOutputFrame() { DeoptimizationInputData* data = DeoptimizationInputData::cast( - optimized_code_->deoptimization_data()); + compiled_code_->deoptimization_data()); unsigned ast_id = data->OsrAstId()->value(); int bailout_id = LookupBailoutId(data, BailoutId(ast_id)); @@ -256,7 +256,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { unsigned input_frame_size = input_->GetFrameSize(); ASSERT(fixed_size + height_in_bytes == input_frame_size); - unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize; + unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize; unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value(); unsigned outgoing_size = outgoing_height * kPointerSize; unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size; @@ -348,7 +348,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { unsigned pc_offset = data->OsrPcOffset()->value(); uint32_t pc = reinterpret_cast( - optimized_code_->entry() + pc_offset); + compiled_code_->entry() + pc_offset); output_[0]->SetPc(pc); } Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR); @@ -461,6 +461,70 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator, } +void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator, + int frame_index) { + // + // FROM TO <-fp + // | .... | | .... | + // +-------------------------+ +-------------------------+ + // | JSFunction continuation | | JSFunction continuation | + // +-------------------------+ +-------------------------+<-sp + // | | saved frame (fp) | + // | +=========================+<-fp + // | | JSFunction context | + // v +-------------------------+ + // | COMPILED_STUB marker | fp = saved frame + // +-------------------------+ f8 = JSFunction context + // | | + // | ... 
| + // | | + // +-------------------------+<-sp + // + // + int output_frame_size = 1 * kPointerSize; + FrameDescription* output_frame = + new(output_frame_size) FrameDescription(output_frame_size, 0); + Code* notify_miss = + isolate_->builtins()->builtin(Builtins::kNotifyICMiss); + output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS)); + output_frame->SetContinuation( + reinterpret_cast(notify_miss->entry())); + + ASSERT(compiled_code_->kind() == Code::COMPILED_STUB); + int major_key = compiled_code_->major_key(); + CodeStubInterfaceDescriptor* descriptor = + isolate_->code_stub_interface_descriptors()[major_key]; + Handle miss_ic(descriptor->deoptimization_handler); + output_frame->SetPc(reinterpret_cast(miss_ic->instruction_start())); + unsigned input_frame_size = input_->GetFrameSize(); + intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize); + output_frame->SetFrameSlot(0, value); + value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize); + output_frame->SetRegister(fp.code(), value); + output_frame->SetFp(value); + value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize); + output_frame->SetRegister(cp.code(), value); + + Translation::Opcode opcode = + static_cast(iterator->Next()); + ASSERT(opcode == Translation::REGISTER); + USE(opcode); + int input_reg = iterator->Next(); + intptr_t input_value = input_->GetRegister(input_reg); + output_frame->SetRegister(r1.code(), input_value); + + int32_t next = iterator->Next(); + opcode = static_cast(next); + ASSERT(opcode == Translation::REGISTER); + input_reg = iterator->Next(); + input_value = input_->GetRegister(input_reg); + output_frame->SetRegister(r0.code(), input_value); + + ASSERT(frame_index == 0); + output_[frame_index] = output_frame; +} + + void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator, int frame_index) { Builtins* builtins = isolate_->builtins(); @@ -888,7 +952,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { } input_->SetRegister(sp.code(), reinterpret_cast(frame->sp())); input_->SetRegister(fp.code(), reinterpret_cast(frame->fp())); - for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) { + for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) { input_->SetDoubleRegister(i, 0.0); } @@ -908,7 +972,6 @@ void Deoptimizer::EntryGenerator::Generate() { Isolate* isolate = masm()->isolate(); - CpuFeatures::Scope scope(VFP3); // Save all general purpose registers before messing with them. const int kNumberOfRegisters = Register::kNumRegisters; @@ -916,23 +979,29 @@ void Deoptimizer::EntryGenerator::Generate() { RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit(); const int kDoubleRegsSize = - kDoubleSize * DwVfpRegister::kNumAllocatableRegisters; - - // Save all VFP registers before messing with them. - DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0); - DwVfpRegister last = - DwVfpRegister::FromAllocationIndex( - DwVfpRegister::kNumAllocatableRegisters - 1); - ASSERT(last.code() > first.code()); - ASSERT((last.code() - first.code()) == - (DwVfpRegister::kNumAllocatableRegisters - 1)); + kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters; + + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatures::Scope scope(VFP2); + // Save all VFP registers before messing with them. 
+ DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0); + DwVfpRegister last = + DwVfpRegister::FromAllocationIndex( + DwVfpRegister::kMaxNumAllocatableRegisters - 1); + ASSERT(last.code() > first.code()); + ASSERT((last.code() - first.code()) == + (DwVfpRegister::kMaxNumAllocatableRegisters - 1)); #ifdef DEBUG - for (int i = 0; i <= (DwVfpRegister::kNumAllocatableRegisters - 1); i++) { - ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) && - (DwVfpRegister::FromAllocationIndex(i).code() >= first.code())); - } + int max = DwVfpRegister::kMaxNumAllocatableRegisters - 1; + for (int i = 0; i <= max; i++) { + ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) && + (DwVfpRegister::FromAllocationIndex(i).code() >= first.code())); + } #endif - __ vstm(db_w, sp, first, last); + __ vstm(db_w, sp, first, last); + } else { + __ sub(sp, sp, Operand(kDoubleRegsSize)); + } // Push all 16 registers (needed to populate FrameDescription::registers_). // TODO(1588) Note that using pc with stm is deprecated, so we should perhaps @@ -991,14 +1060,17 @@ void Deoptimizer::EntryGenerator::Generate() { __ str(r2, MemOperand(r1, offset)); } - // Copy VFP registers to - // double_registers_[DoubleRegister::kNumAllocatableRegisters] - int double_regs_offset = FrameDescription::double_registers_offset(); - for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) { - int dst_offset = i * kDoubleSize + double_regs_offset; - int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; - __ vldr(d0, sp, src_offset); - __ vstr(d0, r1, dst_offset); + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatures::Scope scope(VFP2); + // Copy VFP registers to + // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters] + int double_regs_offset = FrameDescription::double_registers_offset(); + for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); ++i) { + int dst_offset = i * kDoubleSize + double_regs_offset; + int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; + __ vldr(d0, sp, src_offset); + __ vstr(d0, r1, dst_offset); + } } // Remove the bailout id, eventually return address, and the saved registers @@ -1019,10 +1091,13 @@ void Deoptimizer::EntryGenerator::Generate() { // frame description. __ add(r3, r1, Operand(FrameDescription::frame_content_offset())); Label pop_loop; + Label pop_loop_header; + __ b(&pop_loop_header); __ bind(&pop_loop); __ pop(r4); __ str(r4, MemOperand(r3, 0)); __ add(r3, r3, Operand(sizeof(uint32_t))); + __ bind(&pop_loop_header); __ cmp(r2, sp); __ b(ne, &pop_loop); @@ -1039,24 +1114,29 @@ void Deoptimizer::EntryGenerator::Generate() { __ pop(r0); // Restore deoptimizer object (class Deoptimizer). // Replace the current (input) frame with the output frames. - Label outer_push_loop, inner_push_loop; + Label outer_push_loop, inner_push_loop, + outer_loop_header, inner_loop_header; // Outer loop state: r0 = current "FrameDescription** output_", // r1 = one past the last FrameDescription**. __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset())); __ ldr(r0, MemOperand(r0, Deoptimizer::output_offset())); // r0 is output_. __ add(r1, r0, Operand(r1, LSL, 2)); + __ jmp(&outer_loop_header); __ bind(&outer_push_loop); // Inner loop state: r2 = current FrameDescription*, r3 = loop index. 
__ ldr(r2, MemOperand(r0, 0)); // output_[ix] __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset())); + __ jmp(&inner_loop_header); __ bind(&inner_push_loop); __ sub(r3, r3, Operand(sizeof(uint32_t))); __ add(r6, r2, Operand(r3)); __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset())); __ push(r7); + __ bind(&inner_loop_header); __ cmp(r3, Operand(0)); __ b(ne, &inner_push_loop); // test for gt? __ add(r0, r0, Operand(kPointerSize)); + __ bind(&outer_loop_header); __ cmp(r0, r1); __ b(lt, &outer_push_loop); diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc index 32dda27..400e1fc 100644 --- a/src/arm/lithium-arm.cc +++ b/src/arm/lithium-arm.cc @@ -42,10 +42,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) #undef DEFINE_COMPILE LOsrEntry::LOsrEntry() { - for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) { + for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) { register_spills_[i] = NULL; } - for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) { + for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) { double_register_spills_[i] = NULL; } } @@ -612,6 +612,7 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, HInstruction* hinstr, CanDeoptimize can_deoptimize) { + info()->MarkAsNonDeferredCalling(); #ifdef DEBUG instr->VerifyCall(); #endif @@ -1684,6 +1685,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { Representation to = instr->to(); if (from.IsTagged()) { if (to.IsDouble()) { + info()->MarkAsDeferredCalling(); LOperand* value = UseRegister(instr->value()); LNumberUntagD* res = new(zone()) LNumberUntagD(value); return AssignEnvironment(DefineAsRegister(res)); @@ -1708,6 +1710,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { } } else if (from.IsDouble()) { if (to.IsTagged()) { + info()->MarkAsDeferredCalling(); LOperand* value = UseRegister(instr->value()); LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); @@ -1727,6 +1730,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { return AssignEnvironment(DefineAsRegister(res)); } } else if (from.IsInteger32()) { + info()->MarkAsDeferredCalling(); if (to.IsTagged()) { HValue* val = instr->value(); LOperand* value = UseRegisterAtStart(val); @@ -1964,7 +1968,16 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { (instr->representation().IsDouble() && ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); - LOperand* external_pointer = UseRegister(instr->elements()); + // float->double conversion on non-VFP2 requires an extra scratch + // register. For convenience, just mark the elements register as "UseTemp" + // so that it can be used as a temp during the float->double conversion + // after it's no longer needed after the float load. + bool needs_temp = + !CpuFeatures::IsSupported(VFP2) && + (elements_kind == EXTERNAL_FLOAT_ELEMENTS); + LOperand* external_pointer = needs_temp + ? 
UseTempRegister(instr->elements()) + : UseRegister(instr->elements()); result = new(zone()) LLoadKeyed(external_pointer, key); } @@ -2182,8 +2195,17 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { - int spill_index = chunk()->GetParameterStackSlot(instr->index()); - return DefineAsSpilled(new(zone()) LParameter, spill_index); + LParameter* result = new(zone()) LParameter; + if (info()->IsOptimizing()) { + int spill_index = chunk()->GetParameterStackSlot(instr->index()); + return DefineAsSpilled(result, spill_index); + } else { + ASSERT(info()->IsStub()); + CodeStubInterfaceDescriptor* descriptor = + info()->code_stub()->GetInterfaceDescriptor(info()->isolate()); + Register reg = descriptor->register_params[instr->index()]; + return DefineFixed(result, reg); + } } diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h index b45a3e0..3a9d10b 100644 --- a/src/arm/lithium-arm.h +++ b/src/arm/lithium-arm.h @@ -255,6 +255,11 @@ class LInstruction: public ZoneObject { void MarkAsCall() { is_call_ = true; } // Interface to the register allocator and iterators. + bool ClobbersTemps() const { return is_call_; } + bool ClobbersRegisters() const { return is_call_; } + bool ClobbersDoubleRegisters() const { return is_call_; } + + // Interface to the register allocator and iterators. bool IsMarkedAsCall() const { return is_call_; } virtual bool HasResult() const = 0; @@ -2334,8 +2339,9 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> { // slot, i.e., that must also be restored to the spill slot on OSR entry. // NULL if the register has no assigned spill slot. Indexed by allocation // index. - LOperand* register_spills_[Register::kNumAllocatableRegisters]; - LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters]; + LOperand* register_spills_[Register::kMaxNumAllocatableRegisters]; + LOperand* double_register_spills_[ + DoubleRegister::kMaxNumAllocatableRegisters]; }; diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc index 515a0d0..1c9d0c4 100644 --- a/src/arm/lithium-codegen-arm.cc +++ b/src/arm/lithium-codegen-arm.cc @@ -65,8 +65,6 @@ bool LCodeGen::GenerateCode() { HPhase phase("Z_Code generation", chunk()); ASSERT(is_unused()); status_ = GENERATING; - CpuFeatures::Scope scope1(VFP3); - CpuFeatures::Scope scope2(ARMv7); CodeStub::GenerateFPStubs(); @@ -118,37 +116,38 @@ void LCodeGen::Comment(const char* format, ...) { bool LCodeGen::GeneratePrologue() { ASSERT(is_generating()); - ProfileEntryHookStub::MaybeCallEntryHook(masm_); + if (info()->IsOptimizing()) { + ProfileEntryHookStub::MaybeCallEntryHook(masm_); #ifdef DEBUG - if (strlen(FLAG_stop_at) > 0 && - info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { - __ stop("stop_at"); - } + if (strlen(FLAG_stop_at) > 0 && + info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { + __ stop("stop_at"); + } #endif - // r1: Callee's JS function. - // cp: Callee's context. - // fp: Caller's frame pointer. - // lr: Caller's pc. + // r1: Callee's JS function. + // cp: Callee's context. + // fp: Caller's frame pointer. + // lr: Caller's pc. - // Strict mode functions and builtins need to replace the receiver - // with undefined when called as functions (without an explicit - // receiver object). r5 is zero for method calls and non-zero for - // function calls. 
- if (!info_->is_classic_mode() || info_->is_native()) { - Label ok; - __ cmp(r5, Operand(0)); - __ b(eq, &ok); - int receiver_offset = scope()->num_parameters() * kPointerSize; - __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); - __ str(r2, MemOperand(sp, receiver_offset)); - __ bind(&ok); + // Strict mode functions and builtins need to replace the receiver + // with undefined when called as functions (without an explicit + // receiver object). r5 is zero for method calls and non-zero for + // function calls. + if (!info_->is_classic_mode() || info_->is_native()) { + Label ok; + __ cmp(r5, Operand(0)); + __ b(eq, &ok); + int receiver_offset = scope()->num_parameters() * kPointerSize; + __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); + __ str(r2, MemOperand(sp, receiver_offset)); + __ bind(&ok); + } } - info()->set_prologue_offset(masm_->pc_offset()); - { + if (NeedsEagerFrame()) { PredictableCodeSizeScope predictible_code_size_scope( masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize); // The following three instructions must remain together and unmodified @@ -159,6 +158,7 @@ bool LCodeGen::GeneratePrologue() { __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); // Adjust FP to point to saved FP. __ add(fp, sp, Operand(2 * kPointerSize)); + frame_is_built_ = true; } // Reserve space for the stack slots needed by the code. @@ -178,7 +178,7 @@ bool LCodeGen::GeneratePrologue() { } // Possibly allocate a local context. - int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; if (heap_slots > 0) { Comment(";;; Allocate local context"); // Argument to NewContext is the function, which is in r1. @@ -214,7 +214,7 @@ bool LCodeGen::GeneratePrologue() { } // Trace the call. - if (FLAG_trace) { + if (FLAG_trace && info()->IsOptimizing()) { __ CallRuntime(Runtime::kTraceEnter, 0); } return !is_aborted(); @@ -272,10 +272,31 @@ bool LCodeGen::GenerateDeferredCode() { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; __ bind(code->entry()); + if (NeedsDeferredFrame()) { + Comment(";;; Deferred build frame", + code->instruction_index(), + code->instr()->Mnemonic()); + ASSERT(!frame_is_built_); + ASSERT(info()->IsStub()); + frame_is_built_ = true; + __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); + __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); + __ push(scratch0()); + __ add(fp, sp, Operand(2 * kPointerSize)); + } Comment(";;; Deferred code @%d: %s.", code->instruction_index(), code->instr()->Mnemonic()); code->Generate(); + if (NeedsDeferredFrame()) { + Comment(";;; Deferred destroy frame", + code->instruction_index(), + code->instr()->Mnemonic()); + ASSERT(frame_is_built_); + __ pop(ip); + __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit()); + frame_is_built_ = false; + } __ jmp(code->exit()); } } @@ -297,24 +318,68 @@ bool LCodeGen::GenerateDeoptJumpTable() { // Each entry in the jump table generates one instruction and inlines one // 32bit data after it. if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) + - deopt_jump_table_.length() * 2)) { + deopt_jump_table_.length() * 7)) { Abort("Generated code is too large"); } - // Block the constant pool emission during the jump table emission. 
- __ BlockConstPoolFor(deopt_jump_table_.length()); __ RecordComment("[ Deoptimisation jump table"); Label table_start; __ bind(&table_start); + Label needs_frame_not_call; + Label needs_frame_is_call; for (int i = 0; i < deopt_jump_table_.length(); i++) { __ bind(&deopt_jump_table_[i].label); - __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta)); - __ dd(reinterpret_cast(deopt_jump_table_[i].address)); + Address entry = deopt_jump_table_[i].address; + if (deopt_jump_table_[i].needs_frame) { + __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry))); + if (deopt_jump_table_[i].is_lazy_deopt) { + if (needs_frame_is_call.is_bound()) { + __ b(&needs_frame_is_call); + } else { + __ bind(&needs_frame_is_call); + __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); + // This variant of deopt can only be used with stubs. Since we don't + // have a function pointer to install in the stack frame that we're + // building, install a special marker there instead. + ASSERT(info()->IsStub()); + __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); + __ push(scratch0()); + __ add(fp, sp, Operand(2 * kPointerSize)); + __ mov(lr, Operand(pc), LeaveCC, al); + __ mov(pc, ip); + } + } else { + if (needs_frame_not_call.is_bound()) { + __ b(&needs_frame_not_call); + } else { + __ bind(&needs_frame_not_call); + __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); + // This variant of deopt can only be used with stubs. Since we don't + // have a function pointer to install in the stack frame that we're + // building, install a special marker there instead. + ASSERT(info()->IsStub()); + __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); + __ push(scratch0()); + __ add(fp, sp, Operand(2 * kPointerSize)); + __ mov(pc, ip); + } + } + } else { + if (deopt_jump_table_[i].is_lazy_deopt) { + __ mov(lr, Operand(pc), LeaveCC, al); + __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry))); + } else { + __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry))); + } + } + masm()->CheckConstPool(false, false); } - ASSERT(masm()->InstructionsGeneratedSince(&table_start) == - deopt_jump_table_.length() * 2); __ RecordComment("]"); + // Force constant pool emission at the end of the deopt jump table to make + // sure that no constant pools are emitted after. + masm()->CheckConstPool(true, false); + // The deoptimization jump table is the last part of the instruction // sequence. Mark the generated code as done unless we bailed out. 
if (!is_aborted()) status_ = DONE; @@ -334,8 +399,8 @@ Register LCodeGen::ToRegister(int index) const { } -DoubleRegister LCodeGen::ToDoubleRegister(int index) const { - return DoubleRegister::FromAllocationIndex(index); +DwVfpRegister LCodeGen::ToDoubleRegister(int index) const { + return DwVfpRegister::FromAllocationIndex(index); } @@ -376,15 +441,15 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { } -DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { +DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const { ASSERT(op->IsDoubleRegister()); return ToDoubleRegister(op->index()); } -DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, - SwVfpRegister flt_scratch, - DoubleRegister dbl_scratch) { +DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, + SwVfpRegister flt_scratch, + DwVfpRegister dbl_scratch) { if (op->IsDoubleRegister()) { return ToDoubleRegister(op->index()); } else if (op->IsConstantOperand()) { @@ -520,7 +585,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, translation, arguments_index, arguments_count); - int closure_id = *info()->closure() != *environment->closure() + bool has_closure_id = !info()->closure().is_null() && + *info()->closure() != *environment->closure(); + int closure_id = has_closure_id ? DefineDeoptimizationLiteral(environment->closure()) : Translation::kSelfLiteralId; @@ -541,6 +608,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, ASSERT(height == 0); translation->BeginSetterStubFrame(closure_id); break; + case STUB: + translation->BeginCompiledStubFrame(); + break; case ARGUMENTS_ADAPTOR: translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); break; @@ -736,7 +806,11 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); ASSERT(environment->HasBeenRegistered()); int id = environment->deoptimization_index(); - Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); + + Deoptimizer::BailoutType bailout_type = info()->IsStub() + ? Deoptimizer::LAZY + : Deoptimizer::EAGER; + Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type); if (entry == NULL) { Abort("bailout was not prepared"); return; @@ -752,14 +826,19 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc); - if (cc == al) { + bool needs_lazy_deopt = info()->IsStub(); + ASSERT(info()->IsStub() || frame_is_built_); + if (cc == al && !needs_lazy_deopt) { __ Jump(entry, RelocInfo::RUNTIME_ENTRY); } else { // We often have several deopts to the same entry, reuse the last // jump entry if this is the case. 
if (deopt_jump_table_.is_empty() || - (deopt_jump_table_.last().address != entry)) { - deopt_jump_table_.Add(JumpTableEntry(entry), zone()); + (deopt_jump_table_.last().address != entry) || + (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) || + (deopt_jump_table_.last().needs_frame != !frame_is_built_)) { + JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt); + deopt_jump_table_.Add(table_entry, zone()); } __ b(cc, &deopt_jump_table_.last().label); } @@ -1368,6 +1447,7 @@ void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map, LOperand* left_argument, LOperand* right_argument, Token::Value op) { + CpuFeatures::Scope vfp_scope(VFP2); Register left = ToRegister(left_argument); Register right = ToRegister(right_argument); @@ -1653,6 +1733,7 @@ void LCodeGen::DoConstantI(LConstantI* instr) { void LCodeGen::DoConstantD(LConstantD* instr) { ASSERT(instr->result()->IsDoubleRegister()); DwVfpRegister result = ToDoubleRegister(instr->result()); + CpuFeatures::Scope scope(VFP2); double v = instr->value(); __ Vmov(result, v, scratch0()); } @@ -1821,9 +1902,10 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); } else { ASSERT(instr->hydrogen()->representation().IsDouble()); - DoubleRegister left_reg = ToDoubleRegister(left); - DoubleRegister right_reg = ToDoubleRegister(right); - DoubleRegister result_reg = ToDoubleRegister(instr->result()); + CpuFeatures::Scope scope(VFP2); + DwVfpRegister left_reg = ToDoubleRegister(left); + DwVfpRegister right_reg = ToDoubleRegister(right); + DwVfpRegister result_reg = ToDoubleRegister(instr->result()); Label check_nan_left, check_zero, return_left, return_right, done; __ VFPCompareAndSetFlags(left_reg, right_reg); __ b(vs, &check_nan_left); @@ -1866,9 +1948,10 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - DoubleRegister left = ToDoubleRegister(instr->left()); - DoubleRegister right = ToDoubleRegister(instr->right()); - DoubleRegister result = ToDoubleRegister(instr->result()); + CpuFeatures::Scope scope(VFP2); + DwVfpRegister left = ToDoubleRegister(instr->left()); + DwVfpRegister right = ToDoubleRegister(instr->right()); + DwVfpRegister result = ToDoubleRegister(instr->result()); switch (instr->op()) { case Token::ADD: __ vadd(result, left, right); @@ -1956,7 +2039,8 @@ void LCodeGen::DoBranch(LBranch* instr) { __ cmp(reg, Operand(0)); EmitBranch(true_block, false_block, ne); } else if (r.IsDouble()) { - DoubleRegister reg = ToDoubleRegister(instr->value()); + CpuFeatures::Scope scope(VFP2); + DwVfpRegister reg = ToDoubleRegister(instr->value()); Register scratch = scratch0(); // Test the double value. Zero and NaN are false. @@ -2041,8 +2125,9 @@ void LCodeGen::DoBranch(LBranch* instr) { } if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { + CpuFeatures::Scope scope(VFP2); // heap number -> false iff +0, -0, or NaN. - DoubleRegister dbl_scratch = double_scratch0(); + DwVfpRegister dbl_scratch = double_scratch0(); Label not_heap_number; __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); __ b(ne, ¬_heap_number); @@ -2120,6 +2205,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { EmitGoto(next_block); } else { if (instr->is_double()) { + CpuFeatures::Scope scope(VFP2); // Compare left and right operands as doubles and load the // resulting flags into the normal status register. 
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right)); @@ -2658,16 +2744,21 @@ void LCodeGen::DoCmpT(LCmpT* instr) { void LCodeGen::DoReturn(LReturn* instr) { - if (FLAG_trace) { + if (FLAG_trace && info()->IsOptimizing()) { // Push the return value on the stack as the parameter. // Runtime::TraceExit returns its parameter in r0. __ push(r0); __ CallRuntime(Runtime::kTraceExit, 1); } - int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize; - __ mov(sp, fp); - __ ldm(ia_w, sp, fp.bit() | lr.bit()); - __ add(sp, sp, Operand(sp_delta)); + if (NeedsEagerFrame()) { + int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize; + __ mov(sp, fp); + __ ldm(ia_w, sp, fp.bit() | lr.bit()); + __ add(sp, sp, Operand(sp_delta)); + } + if (info()->IsStub()) { + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + } __ Jump(lr); } @@ -3017,17 +3108,63 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - CpuFeatures::Scope scope(VFP3); DwVfpRegister result = ToDoubleRegister(instr->result()); Operand operand = key_is_constant ? Operand(constant_key << element_size_shift) : Operand(key, LSL, shift_size); __ add(scratch0(), external_pointer, operand); - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - __ vldr(result.low(), scratch0(), additional_offset); - __ vcvt_f64_f32(result, result.low()); - } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS - __ vldr(result, scratch0(), additional_offset); + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatures::Scope scope(VFP2); + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { + __ vldr(result.low(), scratch0(), additional_offset); + __ vcvt_f64_f32(result, result.low()); + } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS + __ vldr(result, scratch0(), additional_offset); + } + } else { + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { + Register value = external_pointer; + __ ldr(value, MemOperand(scratch0(), additional_offset)); + __ and_(sfpd_lo, value, Operand(kBinary32MantissaMask)); + + __ mov(scratch0(), Operand(value, LSR, kBinary32MantissaBits)); + __ and_(scratch0(), scratch0(), + Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); + + Label exponent_rebiased; + __ teq(scratch0(), Operand(0x00)); + __ b(eq, &exponent_rebiased); + + __ teq(scratch0(), Operand(0xff)); + __ mov(scratch0(), Operand(0x7ff), LeaveCC, eq); + __ b(eq, &exponent_rebiased); + + // Rebias exponent. + __ add(scratch0(), + scratch0(), + Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); + + __ bind(&exponent_rebiased); + __ and_(sfpd_hi, value, Operand(kBinary32SignMask)); + __ orr(sfpd_hi, sfpd_hi, + Operand(scratch0(), LSL, HeapNumber::kMantissaBitsInTopWord)); + + // Shift mantissa. + static const int kMantissaShiftForHiWord = + kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; + + static const int kMantissaShiftForLoWord = + kBitsPerInt - kMantissaShiftForHiWord; + + __ orr(sfpd_hi, sfpd_hi, + Operand(sfpd_lo, LSR, kMantissaShiftForHiWord)); + __ mov(sfpd_lo, Operand(sfpd_lo, LSL, kMantissaShiftForLoWord)); + + } else { + __ ldr(sfpd_lo, MemOperand(scratch0(), additional_offset)); + __ ldr(sfpd_hi, MemOperand(scratch0(), + additional_offset + kPointerSize)); + } } } else { Register result = ToRegister(instr->result()); @@ -3096,23 +3233,28 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { key = ToRegister(instr->key()); } - Operand operand = key_is_constant - ? 
Operand(((constant_key + instr->additional_index()) << - element_size_shift) + - FixedDoubleArray::kHeaderSize - kHeapObjectTag) - : Operand(key, LSL, shift_size); - __ add(elements, elements, operand); + int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) + + ((constant_key + instr->additional_index()) << element_size_shift); if (!key_is_constant) { - __ add(elements, elements, - Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) + - (instr->additional_index() << element_size_shift))); - } - - __ vldr(result, elements, 0); - if (instr->hydrogen()->RequiresHoleCheck()) { - __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); - __ cmp(scratch, Operand(kHoleNanUpper32)); - DeoptimizeIf(eq, instr->environment()); + __ add(elements, elements, Operand(key, LSL, shift_size)); + } + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatures::Scope scope(VFP2); + __ add(elements, elements, Operand(base_offset)); + __ vldr(result, elements, 0); + if (instr->hydrogen()->RequiresHoleCheck()) { + __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); + __ cmp(scratch, Operand(kHoleNanUpper32)); + DeoptimizeIf(eq, instr->environment()); + } + } else { + __ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize)); + __ ldr(sfpd_lo, MemOperand(elements, base_offset)); + if (instr->hydrogen()->RequiresHoleCheck()) { + ASSERT(kPointerSize == sizeof(kHoleNanLower32)); + __ cmp(sfpd_hi, Operand(kHoleNanUpper32)); + DeoptimizeIf(eq, instr->environment()); + } } } @@ -3548,6 +3690,7 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { + CpuFeatures::Scope scope(VFP2); // Class for deferred case. class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { public: @@ -3584,7 +3727,8 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); + CpuFeatures::Scope scope(VFP2); + DwVfpRegister input = ToDoubleRegister(instr->value()); Register result = ToRegister(instr->result()); Register scratch = scratch0(); @@ -3609,7 +3753,8 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); + CpuFeatures::Scope scope(VFP2); + DwVfpRegister input = ToDoubleRegister(instr->value()); Register result = ToRegister(instr->result()); DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); Register scratch = scratch0(); @@ -3674,16 +3819,18 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); + CpuFeatures::Scope scope(VFP2); + DwVfpRegister input = ToDoubleRegister(instr->value()); + DwVfpRegister result = ToDoubleRegister(instr->result()); __ vsqrt(result, input); } void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - DoubleRegister temp = ToDoubleRegister(instr->temp()); + CpuFeatures::Scope scope(VFP2); + DwVfpRegister input = ToDoubleRegister(instr->value()); + DwVfpRegister result = ToDoubleRegister(instr->result()); + DwVfpRegister temp = ToDoubleRegister(instr->temp()); // Note that according to ECMA-262 15.8.2.13: // Math.pow(-Infinity, 0.5) == Infinity @@ -3702,6 
+3849,7 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { void LCodeGen::DoPower(LPower* instr) { + CpuFeatures::Scope scope(VFP2); Representation exponent_type = instr->hydrogen()->right()->representation(); // Having marked this as a call, we can use any registers. // Just make sure that the input/output registers are the expected ones. @@ -3734,6 +3882,7 @@ void LCodeGen::DoPower(LPower* instr) { void LCodeGen::DoRandom(LRandom* instr) { + CpuFeatures::Scope scope(VFP2); class DeferredDoRandom: public LDeferredCode { public: DeferredDoRandom(LCodeGen* codegen, LRandom* instr) @@ -3812,10 +3961,11 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) { void LCodeGen::DoMathExp(LMathExp* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); - DoubleRegister double_scratch2 = double_scratch0(); + CpuFeatures::Scope scope(VFP2); + DwVfpRegister input = ToDoubleRegister(instr->value()); + DwVfpRegister result = ToDoubleRegister(instr->result()); + DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); + DwVfpRegister double_scratch2 = double_scratch0(); Register temp1 = ToRegister(instr->temp1()); Register temp2 = ToRegister(instr->temp2()); @@ -4101,6 +4251,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { + CpuFeatures::Scope scope(VFP2); Register external_pointer = ToRegister(instr->elements()); Register key = no_reg; ElementsKind elements_kind = instr->elements_kind(); @@ -4171,6 +4322,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { + CpuFeatures::Scope scope(VFP2); DwVfpRegister value = ToDoubleRegister(instr->value()); Register elements = ToRegister(instr->elements()); Register key = no_reg; @@ -4447,6 +4599,7 @@ void LCodeGen::DoStringLength(LStringLength* instr) { void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { + CpuFeatures::Scope scope(VFP2); LOperand* input = instr->value(); ASSERT(input->IsRegister() || input->IsStackSlot()); LOperand* output = instr->result(); @@ -4464,6 +4617,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { + CpuFeatures::Scope scope(VFP2); LOperand* input = instr->value(); LOperand* output = instr->result(); @@ -4525,13 +4679,49 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) { } +// Convert unsigned integer with specified number of leading zeroes in binary +// representation to IEEE 754 double. +// Integer to convert is passed in register hiword. +// Resulting double is returned in registers hiword:loword. +// This functions does not work correctly for 0. 
+static void GenerateUInt2Double(MacroAssembler* masm, + Register hiword, + Register loword, + Register scratch, + int leading_zeroes) { + const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; + const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; + + const int mantissa_shift_for_hi_word = + meaningful_bits - HeapNumber::kMantissaBitsInTopWord; + const int mantissa_shift_for_lo_word = + kBitsPerInt - mantissa_shift_for_hi_word; + masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); + if (mantissa_shift_for_hi_word > 0) { + masm->mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word)); + masm->orr(hiword, scratch, + Operand(hiword, LSR, mantissa_shift_for_hi_word)); + } else { + masm->mov(loword, Operand(0, RelocInfo::NONE)); + masm->orr(hiword, scratch, + Operand(hiword, LSL, -mantissa_shift_for_hi_word)); + } + + // If least significant bit of biased exponent was not 1 it was corrupted + // by most significant bit of mantissa so we should fix that. + if (!(biased_exponent & 1)) { + masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); + } +} + + void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, LOperand* value, IntegerSignedness signedness) { Label slow; Register src = ToRegister(value); Register dst = ToRegister(instr->result()); - DoubleRegister dbl_scratch = double_scratch0(); + DwVfpRegister dbl_scratch = double_scratch0(); SwVfpRegister flt_scratch = dbl_scratch.low(); // Preserve the value of all registers. @@ -4546,16 +4736,40 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, __ SmiUntag(src, dst); __ eor(src, src, Operand(0x80000000)); } - __ vmov(flt_scratch, src); - __ vcvt_f64_s32(dbl_scratch, flt_scratch); + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatures::Scope scope(VFP2); + __ vmov(flt_scratch, src); + __ vcvt_f64_s32(dbl_scratch, flt_scratch); + } else { + FloatingPointHelper::Destination dest = + FloatingPointHelper::kCoreRegisters; + FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, d0, + sfpd_lo, sfpd_hi, + scratch0(), s0); + } } else { - __ vmov(flt_scratch, src); - __ vcvt_f64_u32(dbl_scratch, flt_scratch); + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatures::Scope scope(VFP2); + __ vmov(flt_scratch, src); + __ vcvt_f64_u32(dbl_scratch, flt_scratch); + } else { + Label no_leading_zero, done; + __ tst(src, Operand(0x80000000)); + __ b(ne, &no_leading_zero); + + // Integer has one leading zeros. + GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 1); + __ b(&done); + + __ bind(&no_leading_zero); + GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 0); + __ b(&done); + } } if (FLAG_inline_new) { - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT); + __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT); __ Move(dst, r5); __ b(&done); } @@ -4575,7 +4789,13 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, // Done. Put the value in dbl_scratch into the value of the allocated heap // number. 
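On the soft-float path the double lives in the sfpd_lo/sfpd_hi register pair instead of a VFP register, so the store that follows writes the two 32-bit halves separately. Assuming the little-endian word order used here, the mapping is simply the one below (illustrative standalone C++, not part of the patch; SplitDouble is a made-up name):

#include <cstdint>
#include <cstring>

// Splits an IEEE 754 double into the word stored at HeapNumber::kMantissaOffset
// (low word) and the word stored at HeapNumber::kExponentOffset (high word).
static void SplitDouble(double value, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  *lo = static_cast<uint32_t>(bits);        // low mantissa bits  -> kMantissaOffset
  *hi = static_cast<uint32_t>(bits >> 32);  // sign, exponent, top mantissa -> kExponentOffset
}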
__ bind(&done); - __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatures::Scope scope(VFP2); + __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); + } else { + __ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset)); + __ str(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset)); + } __ add(dst, dst, Operand(kHeapObjectTag)); __ StoreToSafepointRegisterSlot(dst, dst); } @@ -4592,7 +4812,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { LNumberTagD* instr_; }; - DoubleRegister input_reg = ToDoubleRegister(instr->value()); + DwVfpRegister input_reg = ToDoubleRegister(instr->value()); Register scratch = scratch0(); Register reg = ToRegister(instr->result()); Register temp1 = ToRegister(instr->temp()); @@ -4608,7 +4828,13 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { __ jmp(deferred->entry()); } __ bind(deferred->exit()); - __ vstr(input_reg, reg, HeapNumber::kValueOffset); + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatures::Scope scope(VFP2); + __ vstr(input_reg, reg, HeapNumber::kValueOffset); + } else { + __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset)); + __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize)); + } // Now that we have finished with the object's real address tag it __ add(reg, reg, Operand(kHeapObjectTag)); } @@ -4649,13 +4875,14 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) { void LCodeGen::EmitNumberUntagD(Register input_reg, - DoubleRegister result_reg, + DwVfpRegister result_reg, bool deoptimize_on_undefined, bool deoptimize_on_minus_zero, LEnvironment* env) { Register scratch = scratch0(); SwVfpRegister flt_scratch = double_scratch0().low(); ASSERT(!result_reg.is(double_scratch0())); + CpuFeatures::Scope scope(VFP2); Label load_smi, heap_number, done; @@ -4730,6 +4957,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ cmp(scratch1, Operand(ip)); if (instr->truncating()) { + CpuFeatures::Scope scope(VFP2); Register scratch3 = ToRegister(instr->temp2()); SwVfpRegister single_scratch = double_scratch.low(); ASSERT(!scratch3.is(input_reg) && @@ -4821,7 +5049,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { ASSERT(result->IsDoubleRegister()); Register input_reg = ToRegister(input); - DoubleRegister result_reg = ToDoubleRegister(result); + DwVfpRegister result_reg = ToDoubleRegister(result); EmitNumberUntagD(input_reg, result_reg, instr->hydrogen()->deoptimize_on_undefined(), @@ -4970,14 +5198,16 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { - DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); + CpuFeatures::Scope vfp_scope(VFP2); + DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); - DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); + DwVfpRegister temp_reg = ToDoubleRegister(instr->temp()); __ ClampDoubleToUint8(result_reg, value_reg, temp_reg); } void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { + CpuFeatures::Scope scope(VFP2); Register unclamped_reg = ToRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); __ ClampUint8(result_reg, unclamped_reg); @@ -4985,10 +5215,11 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { + CpuFeatures::Scope scope(VFP2); Register scratch = scratch0(); Register input_reg = ToRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); - DoubleRegister 
temp_reg = ToDoubleRegister(instr->temp()); + DwVfpRegister temp_reg = ToDoubleRegister(instr->temp()); Label is_smi, done, heap_number; // Both smi and heap number cases are handled. @@ -5565,6 +5796,7 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) { void LCodeGen::EnsureSpaceForLazyDeopt() { + if (info()->IsStub()) return; // Ensure that we have enough space after the previous lazy-bailout // instruction for patching the code here. int current_pc = masm()->pc_offset(); diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h index 921285b..e7afcbf 100644 --- a/src/arm/lithium-codegen-arm.h +++ b/src/arm/lithium-codegen-arm.h @@ -61,6 +61,7 @@ class LCodeGen BASE_EMBEDDED { deferred_(8, info->zone()), osr_pc_offset_(-1), last_lazy_deopt_pc_(0), + frame_is_built_(false), safepoints_(info->zone()), resolver_(this), expected_safepoint_kind_(Safepoint::kSimple) { @@ -76,6 +77,15 @@ class LCodeGen BASE_EMBEDDED { Heap* heap() const { return isolate()->heap(); } Zone* zone() const { return zone_; } + bool NeedsEagerFrame() const { + return GetStackSlotCount() > 0 || + info()->is_non_deferred_calling() || + !info()->IsStub(); + } + bool NeedsDeferredFrame() const { + return !NeedsEagerFrame() && info()->is_deferred_calling(); + } + // Support for converting LOperands to assembler types. // LOperand must be a register. Register ToRegister(LOperand* op) const; @@ -84,12 +94,12 @@ class LCodeGen BASE_EMBEDDED { Register EmitLoadRegister(LOperand* op, Register scratch); // LOperand must be a double register. - DoubleRegister ToDoubleRegister(LOperand* op) const; + DwVfpRegister ToDoubleRegister(LOperand* op) const; // LOperand is loaded into dbl_scratch, unless already a double register. - DoubleRegister EmitLoadDoubleRegister(LOperand* op, - SwVfpRegister flt_scratch, - DoubleRegister dbl_scratch); + DwVfpRegister EmitLoadDoubleRegister(LOperand* op, + SwVfpRegister flt_scratch, + DwVfpRegister dbl_scratch); int ToInteger32(LConstantOperand* op) const; double ToDouble(LConstantOperand* op) const; Operand ToOperand(LOperand* op); @@ -193,7 +203,7 @@ class LCodeGen BASE_EMBEDDED { Register temporary2); int GetStackSlotCount() const { return chunk()->spill_slot_count(); } - int GetParameterCount() const { return scope()->num_parameters(); } + int GetParameterCount() const { return info()->num_parameters(); } void Abort(const char* reason); void Comment(const char* format, ...); @@ -275,7 +285,7 @@ class LCodeGen BASE_EMBEDDED { void PopulateDeoptimizationLiteralsWithInlinedFunctions(); Register ToRegister(int index) const; - DoubleRegister ToDoubleRegister(int index) const; + DwVfpRegister ToDoubleRegister(int index) const; // Specific math operations - used from DoUnaryMathOperation. 
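The NeedsEagerFrame()/NeedsDeferredFrame() predicates added above capture the new frame policy: regular optimized code always builds its frame up front, while a hydrogen-generated stub builds one eagerly only if it has spill slots or makes non-deferred calls, and otherwise only builds a frame lazily around deferred code. A standalone restatement for illustration only (names here are hypothetical):

// Mirrors the two predicates above, outside of LCodeGen.
struct FramePolicy {
  int spill_slot_count;
  bool is_stub;
  bool is_non_deferred_calling;
  bool is_deferred_calling;

  bool NeedsEagerFrame() const {
    return spill_slot_count > 0 || is_non_deferred_calling || !is_stub;
  }
  bool NeedsDeferredFrame() const {
    return !NeedsEagerFrame() && is_deferred_calling;
  }
};
// For instance, a stub with no spill slots that only calls out from deferred
// code gets no eager frame but does get a deferred one.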
void EmitIntegerMathAbs(LUnaryMathOperation* instr); @@ -308,7 +318,7 @@ class LCodeGen BASE_EMBEDDED { void EmitGoto(int block); void EmitBranch(int left_block, int right_block, Condition cc); void EmitNumberUntagD(Register input, - DoubleRegister result, + DwVfpRegister result, bool deoptimize_on_undefined, bool deoptimize_on_minus_zero, LEnvironment* env); @@ -369,11 +379,15 @@ class LCodeGen BASE_EMBEDDED { LEnvironment* environment); struct JumpTableEntry { - explicit inline JumpTableEntry(Address entry) + inline JumpTableEntry(Address entry, bool frame, bool is_lazy) : label(), - address(entry) { } + address(entry), + needs_frame(frame), + is_lazy_deopt(is_lazy) { } Label label; Address address; + bool needs_frame; + bool is_lazy_deopt; }; void EnsureSpaceForLazyDeopt(); @@ -402,6 +416,7 @@ class LCodeGen BASE_EMBEDDED { ZoneList deferred_; int osr_pc_offset_; int last_lazy_deopt_pc_; + bool frame_is_built_; // Builder that keeps track of safepoints in the code. The table // itself is emitted at the end of the generated code. @@ -417,6 +432,7 @@ class LCodeGen BASE_EMBEDDED { PushSafepointRegistersScope(LCodeGen* codegen, Safepoint::Kind kind) : codegen_(codegen) { + ASSERT(codegen_->info()->is_calling()); ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); codegen_->expected_safepoint_kind_ = kind; diff --git a/src/arm/lithium-gap-resolver-arm.cc b/src/arm/lithium-gap-resolver-arm.cc index c100720..4df1338 100644 --- a/src/arm/lithium-gap-resolver-arm.cc +++ b/src/arm/lithium-gap-resolver-arm.cc @@ -171,8 +171,10 @@ void LGapResolver::BreakCycle(int index) { } else if (source->IsStackSlot()) { __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source)); } else if (source->IsDoubleRegister()) { + CpuFeatures::Scope scope(VFP2); __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source)); } else if (source->IsDoubleStackSlot()) { + CpuFeatures::Scope scope(VFP2); __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source)); } else { UNREACHABLE(); @@ -192,8 +194,10 @@ void LGapResolver::RestoreValue() { } else if (saved_destination_->IsStackSlot()) { __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_)); } else if (saved_destination_->IsDoubleRegister()) { + CpuFeatures::Scope scope(VFP2); __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg); } else if (saved_destination_->IsDoubleStackSlot()) { + CpuFeatures::Scope scope(VFP2); __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_)); } else { UNREACHABLE(); @@ -229,7 +233,8 @@ void LGapResolver::EmitMove(int index) { MemOperand destination_operand = cgen_->ToMemOperand(destination); if (in_cycle_) { if (!destination_operand.OffsetIsUint12Encodable()) { - // ip is overwritten while saving the value to the destination. + CpuFeatures::Scope scope(VFP2); + // ip is overwritten while saving the value to the destination. // Therefore we can't use ip. It is OK if the read from the source // destroys ip, since that happens before the value is read. 
__ vldr(kScratchDoubleReg.low(), source_operand); @@ -267,7 +272,8 @@ void LGapResolver::EmitMove(int index) { } } else if (source->IsDoubleRegister()) { - DoubleRegister source_register = cgen_->ToDoubleRegister(source); + CpuFeatures::Scope scope(VFP2); + DwVfpRegister source_register = cgen_->ToDoubleRegister(source); if (destination->IsDoubleRegister()) { __ vmov(cgen_->ToDoubleRegister(destination), source_register); } else { @@ -276,7 +282,8 @@ void LGapResolver::EmitMove(int index) { } } else if (source->IsDoubleStackSlot()) { - MemOperand source_operand = cgen_->ToMemOperand(source); + CpuFeatures::Scope scope(VFP2); + MemOperand source_operand = cgen_->ToMemOperand(source); if (destination->IsDoubleRegister()) { __ vldr(cgen_->ToDoubleRegister(destination), source_operand); } else { diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc index 5c064c1..067a05d 100644 --- a/src/arm/macro-assembler-arm.cc +++ b/src/arm/macro-assembler-arm.cc @@ -290,7 +290,7 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) { } -void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) { +void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) { ASSERT(CpuFeatures::IsSupported(VFP2)); CpuFeatures::Scope scope(VFP2); if (!dst.is(src)) { @@ -643,19 +643,19 @@ void MacroAssembler::PopSafepointRegisters() { void MacroAssembler::PushSafepointRegistersAndDoubles() { PushSafepointRegisters(); - sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters * + sub(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() * kDoubleSize)); - for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) { + for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) { vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize); } } void MacroAssembler::PopSafepointRegistersAndDoubles() { - for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) { + for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) { vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize); } - add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters * + add(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() * kDoubleSize)); PopSafepointRegisters(); } @@ -691,7 +691,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { // General purpose registers are pushed last on the stack. - int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize; + int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize; int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; return MemOperand(sp, doubles_size + register_offset); } @@ -967,7 +967,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, } } -void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) { +void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) { ASSERT(CpuFeatures::IsSupported(VFP2)); if (use_eabi_hardfloat()) { Move(dst, d0); @@ -2717,7 +2717,10 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { const Runtime::Function* function = Runtime::FunctionForId(id); mov(r0, Operand(function->nargs)); mov(r1, Operand(ExternalReference(function, isolate()))); - CEntryStub stub(1, kSaveFPRegs); + SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2) + ? 
kSaveFPRegs + : kDontSaveFPRegs; + CEntryStub stub(1, mode); CallStub(&stub); } @@ -3393,9 +3396,9 @@ int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, if (use_eabi_hardfloat()) { // In the hard floating point calling convention, we can use // all double registers to pass doubles. - if (num_double_arguments > DoubleRegister::kNumRegisters) { + if (num_double_arguments > DoubleRegister::NumRegisters()) { stack_passed_words += - 2 * (num_double_arguments - DoubleRegister::kNumRegisters); + 2 * (num_double_arguments - DoubleRegister::NumRegisters()); } } else { // In the soft floating point calling convention, every double @@ -3436,7 +3439,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, } -void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) { +void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) { ASSERT(CpuFeatures::IsSupported(VFP2)); if (use_eabi_hardfloat()) { Move(d0, dreg); @@ -3446,8 +3449,8 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) { } -void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1, - DoubleRegister dreg2) { +void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1, + DwVfpRegister dreg2) { ASSERT(CpuFeatures::IsSupported(VFP2)); if (use_eabi_hardfloat()) { if (dreg2.is(d0)) { @@ -3465,7 +3468,7 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1, } -void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg, +void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg, Register reg) { ASSERT(CpuFeatures::IsSupported(VFP2)); if (use_eabi_hardfloat()) { @@ -3748,8 +3751,8 @@ void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { void MacroAssembler::ClampDoubleToUint8(Register result_reg, - DoubleRegister input_reg, - DoubleRegister temp_double_reg) { + DwVfpRegister input_reg, + DwVfpRegister temp_double_reg) { Label above_zero; Label done; Label in_bounds; diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h index 3c05e00..50c298b 100644 --- a/src/arm/macro-assembler-arm.h +++ b/src/arm/macro-assembler-arm.h @@ -178,7 +178,7 @@ class MacroAssembler: public Assembler { // Register move. May do nothing if the registers are identical. void Move(Register dst, Handle value); void Move(Register dst, Register src, Condition cond = al); - void Move(DoubleRegister dst, DoubleRegister src); + void Move(DwVfpRegister dst, DwVfpRegister src); // Load an object from the root table. void LoadRoot(Register destination, @@ -1058,9 +1058,9 @@ class MacroAssembler: public Assembler { // whether soft or hard floating point ABI is used. These functions // abstract parameter passing for the three different ways we call // C functions from generated code. - void SetCallCDoubleArguments(DoubleRegister dreg); - void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2); - void SetCallCDoubleArguments(DoubleRegister dreg, Register reg); + void SetCallCDoubleArguments(DwVfpRegister dreg); + void SetCallCDoubleArguments(DwVfpRegister dreg1, DwVfpRegister dreg2); + void SetCallCDoubleArguments(DwVfpRegister dreg, Register reg); // Calls a C function and cleans up the space for arguments allocated // by PrepareCallCFunction. 
The called function is not allowed to trigger a @@ -1076,7 +1076,7 @@ class MacroAssembler: public Assembler { int num_reg_arguments, int num_double_arguments); - void GetCFunctionDoubleResult(const DoubleRegister dst); + void GetCFunctionDoubleResult(const DwVfpRegister dst); // Calls an API function. Allocates HandleScope, extracts returned value // from handle and propagates exceptions. Restores context. stack_space @@ -1289,8 +1289,8 @@ class MacroAssembler: public Assembler { void ClampUint8(Register output_reg, Register input_reg); void ClampDoubleToUint8(Register result_reg, - DoubleRegister input_reg, - DoubleRegister temp_double_reg); + DwVfpRegister input_reg, + DwVfpRegister temp_double_reg); void LoadInstanceDescriptors(Register map, Register descriptors); @@ -1365,9 +1365,9 @@ class MacroAssembler: public Assembler { // This handle will be patched with the code object on installation. Handle code_object_; - // Needs access to SafepointRegisterStackIndex for optimized frame + // Needs access to SafepointRegisterStackIndex for compiled frame // traversal. - friend class OptimizedFrame; + friend class CompiledFrame; }; diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc index 4604c33..e79c520 100644 --- a/src/arm/stub-cache-arm.cc +++ b/src/arm/stub-cache-arm.cc @@ -1053,42 +1053,6 @@ static void StoreIntAsFloat(MacroAssembler* masm, } -// Convert unsigned integer with specified number of leading zeroes in binary -// representation to IEEE 754 double. -// Integer to convert is passed in register hiword. -// Resulting double is returned in registers hiword:loword. -// This functions does not work correctly for 0. -static void GenerateUInt2Double(MacroAssembler* masm, - Register hiword, - Register loword, - Register scratch, - int leading_zeroes) { - const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; - const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; - - const int mantissa_shift_for_hi_word = - meaningful_bits - HeapNumber::kMantissaBitsInTopWord; - - const int mantissa_shift_for_lo_word = - kBitsPerInt - mantissa_shift_for_hi_word; - - __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); - if (mantissa_shift_for_hi_word > 0) { - __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word)); - __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word)); - } else { - __ mov(loword, Operand(0, RelocInfo::NONE)); - __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word)); - } - - // If least significant bit of biased exponent was not 1 it was corrupted - // by most significant bit of mantissa so we should fix that. 
- if (!(biased_exponent & 1)) { - __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); - } -} - - #undef __ #define __ ACCESS_MASM(masm()) @@ -3319,9 +3283,17 @@ Handle KeyedLoadStubCompiler::CompileLoadElement( // -- r1 : receiver // ----------------------------------- ElementsKind elements_kind = receiver_map->elements_kind(); - Handle stub = KeyedLoadElementStub(elements_kind).GetCode(); - - __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK); + if (receiver_map->has_fast_elements() || + receiver_map->has_external_array_elements()) { + Handle stub = KeyedLoadFastElementStub( + receiver_map->instance_type() == JS_ARRAY_TYPE, + elements_kind).GetCode(); + __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK); + } else { + Handle stub = + KeyedLoadDictionaryElementStub().GetCode(); + __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK); + } Handle ic = isolate()->builtins()->KeyedLoadIC_Miss(); __ Jump(ic, RelocInfo::CODE_TARGET); @@ -3726,339 +3698,6 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm, } -void KeyedLoadStubCompiler::GenerateLoadExternalArray( - MacroAssembler* masm, - ElementsKind elements_kind) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - Label miss_force_generic, slow, failed_allocation; - - Register key = r0; - Register receiver = r1; - - // This stub is meant to be tail-jumped to, the receiver must already - // have been verified by the caller to not be a smi. - - // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic); - - __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); - // r3: elements array - - // Check that the index is in range. - __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); - __ cmp(key, ip); - // Unsigned comparison catches both negative and too-large values. - __ b(hs, &miss_force_generic); - - __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); - // r3: base pointer of external storage - - // We are not untagging smi key and instead work with it - // as if it was premultiplied by 2. - STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1)); - - Register value = r2; - switch (elements_kind) { - case EXTERNAL_BYTE_ELEMENTS: - __ ldrsb(value, MemOperand(r3, key, LSR, 1)); - break; - case EXTERNAL_PIXEL_ELEMENTS: - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ ldrb(value, MemOperand(r3, key, LSR, 1)); - break; - case EXTERNAL_SHORT_ELEMENTS: - __ ldrsh(value, MemOperand(r3, key, LSL, 0)); - break; - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ ldrh(value, MemOperand(r3, key, LSL, 0)); - break; - case EXTERNAL_INT_ELEMENTS: - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ ldr(value, MemOperand(r3, key, LSL, 1)); - break; - case EXTERNAL_FLOAT_ELEMENTS: - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); - __ add(r2, r3, Operand(key, LSL, 1)); - __ vldr(s0, r2, 0); - } else { - __ ldr(value, MemOperand(r3, key, LSL, 1)); - } - break; - case EXTERNAL_DOUBLE_ELEMENTS: - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); - __ add(r2, r3, Operand(key, LSL, 2)); - __ vldr(d0, r2, 0); - } else { - __ add(r4, r3, Operand(key, LSL, 2)); - // r4: pointer to the beginning of the double we want to load. 
- __ ldr(r2, MemOperand(r4, 0)); - __ ldr(r3, MemOperand(r4, Register::kSizeInBytes)); - } - break; - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case DICTIONARY_ELEMENTS: - case NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; - } - - // For integer array types: - // r2: value - // For float array type: - // s0: value (if VFP3 is supported) - // r2: value (if VFP3 is not supported) - // For double array type: - // d0: value (if VFP3 is supported) - // r2/r3: value (if VFP3 is not supported) - - if (elements_kind == EXTERNAL_INT_ELEMENTS) { - // For the Int and UnsignedInt array types, we need to see whether - // the value can be represented in a Smi. If not, we need to convert - // it to a HeapNumber. - Label box_int; - __ cmp(value, Operand(0xC0000000)); - __ b(mi, &box_int); - // Tag integer as smi and return it. - __ mov(r0, Operand(value, LSL, kSmiTagSize)); - __ Ret(); - - __ bind(&box_int); - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); - // Allocate a HeapNumber for the result and perform int-to-double - // conversion. Don't touch r0 or r1 as they are needed if allocation - // fails. - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - - __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT); - // Now we can use r0 for the result as key is not needed any more. - __ add(r0, r5, Operand(kHeapObjectTag)); - __ vmov(s0, value); - __ vcvt_f64_s32(d0, s0); - __ vstr(d0, r5, HeapNumber::kValueOffset); - __ Ret(); - } else { - // Allocate a HeapNumber for the result and perform int-to-double - // conversion. Don't touch r0 or r1 as they are needed if allocation - // fails. - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT); - // Now we can use r0 for the result as key is not needed any more. - __ mov(r0, r5); - Register dst_mantissa = r1; - Register dst_exponent = r3; - FloatingPointHelper::Destination dest = - FloatingPointHelper::kCoreRegisters; - FloatingPointHelper::ConvertIntToDouble(masm, - value, - dest, - d0, - dst_mantissa, - dst_exponent, - r9, - s0); - __ str(dst_mantissa, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); - __ str(dst_exponent, FieldMemOperand(r0, HeapNumber::kExponentOffset)); - __ Ret(); - } - } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) { - // The test is different for unsigned int values. Since we need - // the value to be in the range of a positive smi, we can't - // handle either of the top two bits being set in the value. - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); - Label box_int, done; - __ tst(value, Operand(0xC0000000)); - __ b(ne, &box_int); - // Tag integer as smi and return it. - __ mov(r0, Operand(value, LSL, kSmiTagSize)); - __ Ret(); - - __ bind(&box_int); - __ vmov(s0, value); - // Allocate a HeapNumber for the result and perform int-to-double - // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all - // registers - also when jumping due to exhausted young space. - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); - - __ vcvt_f64_u32(d0, s0); - __ vstr(d0, r2, HeapNumber::kValueOffset); - - __ add(r0, r2, Operand(kHeapObjectTag)); - __ Ret(); - } else { - // Check whether unsigned integer fits into smi. 
- Label box_int_0, box_int_1, done; - __ tst(value, Operand(0x80000000)); - __ b(ne, &box_int_0); - __ tst(value, Operand(0x40000000)); - __ b(ne, &box_int_1); - // Tag integer as smi and return it. - __ mov(r0, Operand(value, LSL, kSmiTagSize)); - __ Ret(); - - Register hiword = value; // r2. - Register loword = r3; - - __ bind(&box_int_0); - // Integer does not have leading zeros. - GenerateUInt2Double(masm, hiword, loword, r4, 0); - __ b(&done); - - __ bind(&box_int_1); - // Integer has one leading zero. - GenerateUInt2Double(masm, hiword, loword, r4, 1); - - - __ bind(&done); - // Integer was converted to double in registers hiword:loword. - // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber - // clobbers all registers - also when jumping due to exhausted young - // space. - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT); - - __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset)); - __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); - - __ mov(r0, r4); - __ Ret(); - } - } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - // For the floating-point array type, we need to always allocate a - // HeapNumber. - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); - // Allocate a HeapNumber for the result. Don't use r0 and r1 as - // AllocateHeapNumber clobbers all registers - also when jumping due to - // exhausted young space. - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); - __ vcvt_f64_f32(d0, s0); - __ vstr(d0, r2, HeapNumber::kValueOffset); - - __ add(r0, r2, Operand(kHeapObjectTag)); - __ Ret(); - } else { - // Allocate a HeapNumber for the result. Don't use r0 and r1 as - // AllocateHeapNumber clobbers all registers - also when jumping due to - // exhausted young space. - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT); - // VFP is not available, do manual single to double conversion. - - // r2: floating point value (binary32) - // r3: heap number for result - - // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to - // the slow case from here. - __ and_(r0, value, Operand(kBinary32MantissaMask)); - - // Extract exponent to r1. OK to clobber r1 now as there are no jumps to - // the slow case from here. - __ mov(r1, Operand(value, LSR, kBinary32MantissaBits)); - __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); - - Label exponent_rebiased; - __ teq(r1, Operand(0x00)); - __ b(eq, &exponent_rebiased); - - __ teq(r1, Operand(0xff)); - __ mov(r1, Operand(0x7ff), LeaveCC, eq); - __ b(eq, &exponent_rebiased); - - // Rebias exponent. - __ add(r1, - r1, - Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); - - __ bind(&exponent_rebiased); - __ and_(r2, value, Operand(kBinary32SignMask)); - value = no_reg; - __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord)); - - // Shift mantissa. 
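The removed code above and below performs a manual binary32-to-binary64 conversion: copy the sign, rebias the exponent from 127 to 1023 (mapping 0 and 0xff to 0 and 0x7ff), and left-align the 23 mantissa bits within the 52-bit field. As standalone C++ the whole sequence is roughly the sketch below (illustration only; denormals are not treated specially, matching the stub):

#include <cstdint>

// Converts IEEE 754 binary32 bits to the corresponding binary64 bits.
static uint64_t Float32BitsToFloat64Bits(uint32_t bits32) {
  uint64_t sign = static_cast<uint64_t>(bits32 & 0x80000000u) << 32;
  uint32_t exponent32 = (bits32 >> 23) & 0xFF;
  uint64_t mantissa = bits32 & 0x007FFFFFu;

  uint64_t exponent64;
  if (exponent32 == 0) {
    exponent64 = 0;                        // zero
  } else if (exponent32 == 0xFF) {
    exponent64 = 0x7FF;                    // infinity or NaN
  } else {
    exponent64 = exponent32 - 127 + 1023;  // rebias
  }
  return sign | (exponent64 << 52) | (mantissa << (52 - 23));
}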
- static const int kMantissaShiftForHiWord = - kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; - - static const int kMantissaShiftForLoWord = - kBitsPerInt - kMantissaShiftForHiWord; - - __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord)); - __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord)); - - __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset)); - __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); - - __ mov(r0, r3); - __ Ret(); - } - } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); - // Allocate a HeapNumber for the result. Don't use r0 and r1 as - // AllocateHeapNumber clobbers all registers - also when jumping due to - // exhausted young space. - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); - __ vstr(d0, r2, HeapNumber::kValueOffset); - - __ add(r0, r2, Operand(kHeapObjectTag)); - __ Ret(); - } else { - // Allocate a HeapNumber for the result. Don't use r0 and r1 as - // AllocateHeapNumber clobbers all registers - also when jumping due to - // exhausted young space. - __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT); - - __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); - __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset)); - __ mov(r0, r4); - __ Ret(); - } - - } else { - // Tag integer as smi and return it. - __ mov(r0, Operand(value, LSL, kSmiTagSize)); - __ Ret(); - } - - // Slow case, key and receiver still in r0 and r1. - __ bind(&slow); - __ IncrementCounter( - masm->isolate()->counters()->keyed_load_external_array_slow(), - 1, r2, r3); - - // ---------- S t a t e -------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - - __ Push(r1, r0); - - __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); - - __ bind(&miss_force_generic); - Handle stub = - masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); - __ Jump(stub, RelocInfo::CODE_TARGET); -} - - void KeyedStoreStubCompiler::GenerateStoreExternalArray( MacroAssembler* masm, ElementsKind elements_kind) { @@ -4403,118 +4042,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( } -void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - Label miss_force_generic; - - // This stub is meant to be tail-jumped to, the receiver must already - // have been verified by the caller to not be a smi. - - // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, r0, r4, r5, d1, d2, &miss_force_generic); - - // Get the elements array. - __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset)); - __ AssertFastElements(r2); - - // Check that the key is within bounds. - __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset)); - __ cmp(r0, Operand(r3)); - __ b(hs, &miss_force_generic); - - // Load the result and make sure it's not the hole. 
- __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); - __ ldr(r4, - MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r4, ip); - __ b(eq, &miss_force_generic); - __ mov(r0, r4); - __ Ret(); - - __ bind(&miss_force_generic); - Handle stub = - masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); - __ Jump(stub, RelocInfo::CODE_TARGET); -} - - -void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( - MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - Label miss_force_generic, slow_allocate_heapnumber; - - Register key_reg = r0; - Register receiver_reg = r1; - Register elements_reg = r2; - Register heap_number_reg = r2; - Register indexed_double_offset = r3; - Register scratch = r4; - Register scratch2 = r5; - Register scratch3 = r6; - Register heap_number_map = r7; - - // This stub is meant to be tail-jumped to, the receiver must already - // have been verified by the caller to not be a smi. - - // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic); - - // Get the elements array. - __ ldr(elements_reg, - FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); - - // Check that the key is within bounds. - __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); - __ cmp(key_reg, Operand(scratch)); - __ b(hs, &miss_force_generic); - - // Load the upper word of the double in the fixed array and test for NaN. - __ add(indexed_double_offset, elements_reg, - Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); - uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32); - __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset)); - __ cmp(scratch, Operand(kHoleNanUpper32)); - __ b(&miss_force_generic, eq); - - // Non-NaN. Allocate a new heap number and copy the double value into it. - __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3, - heap_number_map, &slow_allocate_heapnumber, TAG_RESULT); - - // Don't need to reload the upper 32 bits of the double, it's already in - // scratch. - __ str(scratch, FieldMemOperand(heap_number_reg, - HeapNumber::kExponentOffset)); - __ ldr(scratch, FieldMemOperand(indexed_double_offset, - FixedArray::kHeaderSize)); - __ str(scratch, FieldMemOperand(heap_number_reg, - HeapNumber::kMantissaOffset)); - - __ mov(r0, heap_number_reg); - __ Ret(); - - __ bind(&slow_allocate_heapnumber); - Handle slow_ic = - masm->isolate()->builtins()->KeyedLoadIC_Slow(); - __ Jump(slow_ic, RelocInfo::CODE_TARGET); - - __ bind(&miss_force_generic); - Handle miss_ic = - masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); - __ Jump(miss_ic, RelocInfo::CODE_TARGET); -} - - void KeyedStoreStubCompiler::GenerateStoreFastElement( MacroAssembler* masm, bool is_js_array, diff --git a/src/assembler.cc b/src/assembler.cc index 25157be..ccaf290 100644 --- a/src/assembler.cc +++ b/src/assembler.cc @@ -1375,6 +1375,11 @@ ExternalReference ExternalReference::page_flags(Page* page) { } +ExternalReference ExternalReference::ForDeoptEntry(Address entry) { + return ExternalReference(entry); +} + + // Helper function to compute x^y, where y is known to be an // integer. 
Uses binary decomposition to limit the number of // multiplications; see the discussion in "Hacker's Delight" by Henry diff --git a/src/assembler.h b/src/assembler.h index 4639374..111c1d9 100644 --- a/src/assembler.h +++ b/src/assembler.h @@ -736,6 +736,8 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference page_flags(Page* page); + static ExternalReference ForDeoptEntry(Address entry); + Address address() const {return reinterpret_cast
(address_);} #ifdef ENABLE_DEBUGGER_SUPPORT diff --git a/src/ast.cc b/src/ast.cc index 232cb73..c43b913 100644 --- a/src/ast.cc +++ b/src/ast.cc @@ -616,14 +616,6 @@ void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) { // ---------------------------------------------------------------------------- // Implementation of AstVisitor -bool AstVisitor::CheckStackOverflow() { - if (stack_overflow_) return true; - StackLimitCheck check(isolate_); - if (!check.HasOverflowed()) return false; - return (stack_overflow_ = true); -} - - void AstVisitor::VisitDeclarations(ZoneList* declarations) { for (int i = 0; i < declarations->length(); i++) { Visit(declarations->at(i)); diff --git a/src/ast.h b/src/ast.h index d299f19..a0a7a73 100644 --- a/src/ast.h +++ b/src/ast.h @@ -2492,40 +2492,51 @@ inline ModuleVariable::ModuleVariable(VariableProxy* proxy) class AstVisitor BASE_EMBEDDED { public: - AstVisitor() : isolate_(Isolate::Current()), stack_overflow_(false) { } + AstVisitor() {} virtual ~AstVisitor() { } // Stack overflow check and dynamic dispatch. - void Visit(AstNode* node) { if (!CheckStackOverflow()) node->Accept(this); } + virtual void Visit(AstNode* node) = 0; // Iteration left-to-right. virtual void VisitDeclarations(ZoneList* declarations); virtual void VisitStatements(ZoneList* statements); virtual void VisitExpressions(ZoneList* expressions); - // Stack overflow tracking support. - bool HasStackOverflow() const { return stack_overflow_; } - bool CheckStackOverflow(); - - // If a stack-overflow exception is encountered when visiting a - // node, calling SetStackOverflow will make sure that the visitor - // bails out without visiting more nodes. - void SetStackOverflow() { stack_overflow_ = true; } - void ClearStackOverflow() { stack_overflow_ = false; } - // Individual AST nodes. 
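With stack-overflow tracking removed from the base class, Visit() becomes pure virtual and each concrete visitor now opts back in through the DEFINE_AST_VISITOR_SUBCLASS_MEMBERS() macro defined below. A hypothetical subclass (illustration only, not part of the patch) would follow this pattern:

// ExampleVisitor is a made-up name; concrete visitors in the tree follow the
// same shape: empty or real Visit##type overrides, the subclass-members macro,
// and a call to the macro-provided InitializeAstVisitor() in the constructor.
class ExampleVisitor: public AstVisitor {
 public:
  ExampleVisitor() { InitializeAstVisitor(); }

#define DECLARE_VISIT(type) virtual void Visit##type(type* node) { }
  AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT

  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
};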
#define DEF_VISIT(type) \ virtual void Visit##type(type* node) = 0; AST_NODE_LIST(DEF_VISIT) #undef DEF_VISIT +}; - protected: - Isolate* isolate() { return isolate_; } - private: - Isolate* isolate_; - bool stack_overflow_; -}; +#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS() \ +public: \ + virtual void Visit(AstNode* node) { \ + if (!CheckStackOverflow()) node->Accept(this); \ + } \ + \ + void SetStackOverflow() { stack_overflow_ = true; } \ + void ClearStackOverflow() { stack_overflow_ = false; } \ + bool HasStackOverflow() const { return stack_overflow_; } \ + \ + bool CheckStackOverflow() { \ + if (stack_overflow_) return true; \ + StackLimitCheck check(isolate_); \ + if (!check.HasOverflowed()) return false; \ + return (stack_overflow_ = true); \ + } \ + \ +private: \ + void InitializeAstVisitor() { \ + isolate_ = Isolate::Current(); \ + stack_overflow_ = false; \ + } \ + Isolate* isolate() { return isolate_; } \ + \ + Isolate* isolate_; \ + bool stack_overflow_ // ---------------------------------------------------------------------------- diff --git a/src/builtins.h b/src/builtins.h index a2f752e..1ca4053 100644 --- a/src/builtins.h +++ b/src/builtins.h @@ -107,6 +107,8 @@ enum BuiltinExtraArguments { Code::kNoExtraICState) \ V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \ Code::kNoExtraICState) \ + V(NotifyICMiss, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ V(NotifyOSR, BUILTIN, UNINITIALIZED, \ Code::kNoExtraICState) \ \ @@ -386,6 +388,7 @@ class Builtins { static void Generate_NotifyDeoptimized(MacroAssembler* masm); static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm); static void Generate_NotifyOSR(MacroAssembler* masm); + static void Generate_NotifyICMiss(MacroAssembler* masm); static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm); static void Generate_FunctionCall(MacroAssembler* masm); diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc new file mode 100644 index 0000000..da9ec6a --- /dev/null +++ b/src/code-stubs-hydrogen.cc @@ -0,0 +1,137 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" + +#include "code-stubs.h" +#include "hydrogen.h" +#include "lithium.h" + +namespace v8 { +namespace internal { + + +Handle HydrogenCodeStub::CodeFromGraph(HGraph* graph) { + graph->OrderBlocks(); + graph->AssignDominators(); + graph->CollectPhis(); + graph->InsertRepresentationChanges(); + graph->EliminateRedundantBoundsChecks(); + LChunk* chunk = LChunk::NewChunk(graph); + ASSERT(chunk != NULL); + Handle stub = chunk->Codegen(Code::COMPILED_STUB); + return stub; +} + + +class CodeStubGraphBuilderBase : public HGraphBuilder { + public: + CodeStubGraphBuilderBase(Isolate* isolate, HydrogenCodeStub* stub) + : HGraphBuilder(&info_), info_(stub, isolate) {} + virtual bool BuildGraph(); + + protected: + virtual void BuildCodeStub() = 0; + HParameter* GetParameter(int parameter) { return parameters_[parameter]; } + HydrogenCodeStub* stub() { return info_.code_stub(); } + + private: + SmartArrayPointer parameters_; + CompilationInfoWithZone info_; +}; + + +bool CodeStubGraphBuilderBase::BuildGraph() { + if (FLAG_trace_hydrogen) { + PrintF("-----------------------------------------------------------\n"); + PrintF("Compiling stub using hydrogen\n"); + HTracer::Instance()->TraceCompilation(&info_); + } + HBasicBlock* next_block = graph()->CreateBasicBlock(); + next_block->SetInitialEnvironment(graph()->start_environment()); + HGoto* jump = new(zone()) HGoto(next_block); + graph()->entry_block()->Finish(jump); + set_current_block(next_block); + + int major_key = stub()->MajorKey(); + CodeStubInterfaceDescriptor** descriptors = + info_.isolate()->code_stub_interface_descriptors(); + if (descriptors[major_key] == NULL) { + descriptors[major_key] = stub()->GetInterfaceDescriptor(info_.isolate()); + } + + CodeStubInterfaceDescriptor* descriptor = descriptors[major_key]; + parameters_.Reset(new HParameter*[descriptor->number_of_register_params]); + + HGraph* graph = this->graph(); + Zone* zone = this->zone(); + for (int i = 0; i < descriptor->number_of_register_params; ++i) { + HParameter* param = new(zone) HParameter(i); + AddInstruction(param); + graph->start_environment()->Push(param); + parameters_[i] = param; + } + AddSimulate(BailoutId::StubEntry()); + + BuildCodeStub(); + + return true; +} + +template +class CodeStubGraphBuilder: public CodeStubGraphBuilderBase { + public: + explicit CodeStubGraphBuilder(Stub* stub) + : CodeStubGraphBuilderBase(Isolate::Current(), stub) {} + + protected: + virtual void BuildCodeStub(); + Stub* casted_stub() { return static_cast(stub()); } +}; + + +template <> +void CodeStubGraphBuilder::BuildCodeStub() { + Zone* zone = this->zone(); + + HInstruction* load = BuildUncheckedMonomorphicElementAccess( + GetParameter(0), GetParameter(1), NULL, NULL, + casted_stub()->is_js_array(), casted_stub()->elements_kind(), false); + AddInstruction(load); + + HReturn* ret = new(zone) HReturn(load); + current_block()->Finish(ret); +} + + +Handle KeyedLoadFastElementStub::GenerateCode() { + CodeStubGraphBuilder builder(this); + 
return CodeFromGraph(builder.CreateGraph()); +} + + +} } // namespace v8::internal diff --git a/src/code-stubs.cc b/src/code-stubs.cc index 276c87e..c7d4c80 100644 --- a/src/code-stubs.cc +++ b/src/code-stubs.cc @@ -48,20 +48,6 @@ bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) { } -void CodeStub::GenerateCode(MacroAssembler* masm) { - // Update the static counter each time a new code stub is generated. - masm->isolate()->counters()->code_stubs()->Increment(); - - // Nested stubs are not allowed for leaves. - AllowStubCallsScope allow_scope(masm, false); - - // Generate the code for the stub. - masm->set_generating_stub(true); - NoCurrentFrameScope scope(masm); - Generate(masm); -} - - SmartArrayPointer CodeStub::GetName() { char buffer[100]; NoAllocationStringAllocator allocator(buffer, @@ -72,8 +58,7 @@ SmartArrayPointer CodeStub::GetName() { } -void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) { - Isolate* isolate = masm->isolate(); +void CodeStub::RecordCodeGeneration(Code* code, Isolate* isolate) { SmartArrayPointer name = GetName(); PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name)); GDBJIT(AddCode(GDBJITInterface::STUB, *name, code)); @@ -87,6 +72,39 @@ int CodeStub::GetCodeKind() { } +Handle PlatformCodeStub::GenerateCode() { + Isolate* isolate = Isolate::Current(); + Factory* factory = isolate->factory(); + + // Generate the new code. + MacroAssembler masm(isolate, NULL, 256); + + { + // Update the static counter each time a new code stub is generated. + isolate->counters()->code_stubs()->Increment(); + + // Nested stubs are not allowed for leaves. + AllowStubCallsScope allow_scope(&masm, false); + + // Generate the code for the stub. + masm.set_generating_stub(true); + NoCurrentFrameScope scope(&masm); + Generate(&masm); + } + + // Create the code object. + CodeDesc desc; + masm.GetCode(&desc); + + // Copy the generated code into a heap object. + Code::Flags flags = Code::ComputeFlags( + static_cast(GetCodeKind()), GetICState()); + Handle new_object = factory->NewCode( + desc, flags, masm.CodeObject(), NeedsImmovableCode()); + return new_object; +} + + Handle CodeStub::GetCode() { Isolate* isolate = Isolate::Current(); Factory* factory = isolate->factory(); @@ -102,23 +120,10 @@ Handle CodeStub::GetCode() { { HandleScope scope(isolate); - // Generate the new code. - MacroAssembler masm(isolate, NULL, 256); - GenerateCode(&masm); - - // Create the code object. - CodeDesc desc; - masm.GetCode(&desc); - - // Copy the generated code into a heap object. 
- Code::Flags flags = Code::ComputeFlags( - static_cast(GetCodeKind()), - GetICState()); - Handle new_object = factory->NewCode( - desc, flags, masm.CodeObject(), NeedsImmovableCode()); + Handle new_object = GenerateCode(); new_object->set_major_key(MajorKey()); FinishCode(new_object); - RecordCodeGeneration(*new_object, &masm); + RecordCodeGeneration(*new_object, isolate); #ifdef ENABLE_DISASSEMBLER if (FLAG_print_code_stubs) { @@ -416,36 +421,8 @@ void JSEntryStub::FinishCode(Handle code) { } -void KeyedLoadElementStub::Generate(MacroAssembler* masm) { - switch (elements_kind_) { - case FAST_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - KeyedLoadStubCompiler::GenerateLoadFastElement(masm); - break; - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm); - break; - case EXTERNAL_BYTE_ELEMENTS: - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - case EXTERNAL_SHORT_ELEMENTS: - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - case EXTERNAL_INT_ELEMENTS: - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - case EXTERNAL_FLOAT_ELEMENTS: - case EXTERNAL_DOUBLE_ELEMENTS: - case EXTERNAL_PIXEL_ELEMENTS: - KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_); - break; - case DICTIONARY_ELEMENTS: - KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm); - break; - case NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; - } +void KeyedLoadDictionaryElementStub::Generate(MacroAssembler* masm) { + KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm); } diff --git a/src/code-stubs.h b/src/code-stubs.h index ae113f5..c6e328c 100644 --- a/src/code-stubs.h +++ b/src/code-stubs.h @@ -162,20 +162,29 @@ class CodeStub BASE_EMBEDDED { // Lookup the code in the (possibly custom) cache. bool FindCodeInCache(Code** code_out, Isolate* isolate); + // Returns information for computing the number key. + virtual Major MajorKey() = 0; + virtual int MinorKey() = 0; + protected: static bool CanUseFPRegisters(); - private: - // Nonvirtual wrapper around the stub-specific Generate function. Call - // this function to set up the macro assembler and generate the code. - void GenerateCode(MacroAssembler* masm); - // Generates the assembler code for the stub. - virtual void Generate(MacroAssembler* masm) = 0; + virtual Handle GenerateCode() = 0; + // BinaryOpStub needs to override this. + virtual InlineCacheState GetICState() { + return UNINITIALIZED; + } + + // Returns whether the code generated for this stub needs to be allocated as + // a fixed (non-moveable) code object. + virtual bool NeedsImmovableCode() { return false; } + + private: // Perform bookkeeping required after code generation when stub code is // initially generated. - void RecordCodeGeneration(Code* code, MacroAssembler* masm); + void RecordCodeGeneration(Code* code, Isolate* isolate); // Finish the code object after it has been generated. virtual void FinishCode(Handle code) { } @@ -184,18 +193,9 @@ class CodeStub BASE_EMBEDDED { // registering stub in the stub cache. virtual void Activate(Code* code) { } - // Returns information for computing the number key. - virtual Major MajorKey() = 0; - virtual int MinorKey() = 0; - // BinaryOpStub needs to override this. virtual int GetCodeKind(); - // BinaryOpStub needs to override this. - virtual InlineCacheState GetICState() { - return UNINITIALIZED; - } - // Add the code to a specialized cache, specific to an individual // stub type. 
Please note, this method must add the code object to a // roots object, otherwise we will remove the code during GC. @@ -213,10 +213,6 @@ class CodeStub BASE_EMBEDDED { SmartArrayPointer GetName(); virtual void PrintName(StringStream* stream); - // Returns whether the code generated for this stub needs to be allocated as - // a fixed (non-moveable) code object. - virtual bool NeedsImmovableCode() { return false; } - // Computes the key based on major and minor. uint32_t GetKey() { ASSERT(static_cast(MajorKey()) < NUMBER_OF_IDS); @@ -232,6 +228,43 @@ class CodeStub BASE_EMBEDDED { }; +class PlatformCodeStub : public CodeStub { + public: + // Retrieve the code for the stub. Generate the code if needed. + virtual Handle GenerateCode(); + + virtual int GetCodeKind() { return Code::STUB; } + + protected: + // Generates the assembler code for the stub. + virtual void Generate(MacroAssembler* masm) = 0; +}; + + +struct CodeStubInterfaceDescriptor { + int number_of_register_params; + Register* register_params; + Handle deoptimization_handler; +}; + + +class HGraph; +struct Register; +class HydrogenCodeStub : public CodeStub { + public: + // Retrieve the code for the stub. Generate the code if needed. + virtual Handle GenerateCode() = 0; + + virtual int GetCodeKind() { return Code::COMPILED_STUB; } + + virtual CodeStubInterfaceDescriptor* GetInterfaceDescriptor( + Isolate* isolate) = 0; + + protected: + Handle CodeFromGraph(HGraph* graph); +}; + + // Helper interface to prepare to/restore after making runtime calls. class RuntimeCallHelper { public: @@ -289,7 +322,7 @@ class NopRuntimeCallHelper : public RuntimeCallHelper { }; -class StackCheckStub : public CodeStub { +class StackCheckStub : public PlatformCodeStub { public: StackCheckStub() { } @@ -301,7 +334,7 @@ class StackCheckStub : public CodeStub { }; -class InterruptStub : public CodeStub { +class InterruptStub : public PlatformCodeStub { public: InterruptStub() { } @@ -313,7 +346,7 @@ class InterruptStub : public CodeStub { }; -class ToNumberStub: public CodeStub { +class ToNumberStub: public PlatformCodeStub { public: ToNumberStub() { } @@ -325,7 +358,7 @@ class ToNumberStub: public CodeStub { }; -class FastNewClosureStub : public CodeStub { +class FastNewClosureStub : public PlatformCodeStub { public: explicit FastNewClosureStub(LanguageMode language_mode) : language_mode_(language_mode) { } @@ -341,7 +374,7 @@ class FastNewClosureStub : public CodeStub { }; -class FastNewContextStub : public CodeStub { +class FastNewContextStub : public PlatformCodeStub { public: static const int kMaximumSlots = 64; @@ -359,7 +392,7 @@ class FastNewContextStub : public CodeStub { }; -class FastNewBlockContextStub : public CodeStub { +class FastNewBlockContextStub : public PlatformCodeStub { public: static const int kMaximumSlots = 64; @@ -377,7 +410,7 @@ class FastNewBlockContextStub : public CodeStub { }; -class FastCloneShallowArrayStub : public CodeStub { +class FastCloneShallowArrayStub : public PlatformCodeStub { public: // Maximum length of copied elements array. static const int kMaximumClonedLength = 8; @@ -410,7 +443,7 @@ class FastCloneShallowArrayStub : public CodeStub { }; -class FastCloneShallowObjectStub : public CodeStub { +class FastCloneShallowObjectStub : public PlatformCodeStub { public: // Maximum number of properties in copied object. 
static const int kMaximumClonedProperties = 6; @@ -430,7 +463,7 @@ class FastCloneShallowObjectStub : public CodeStub { }; -class InstanceofStub: public CodeStub { +class InstanceofStub: public PlatformCodeStub { public: enum Flags { kNoFlags = 0, @@ -468,7 +501,7 @@ class InstanceofStub: public CodeStub { }; -class MathPowStub: public CodeStub { +class MathPowStub: public PlatformCodeStub { public: enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK}; @@ -484,7 +517,7 @@ class MathPowStub: public CodeStub { }; -class BinaryOpStub: public CodeStub { +class BinaryOpStub: public PlatformCodeStub { public: BinaryOpStub(Token::Value op, OverwriteMode mode) : op_(op), @@ -600,7 +633,7 @@ class BinaryOpStub: public CodeStub { }; -class ICCompareStub: public CodeStub { +class ICCompareStub: public PlatformCodeStub { public: ICCompareStub(Token::Value op, CompareIC::State left, @@ -666,7 +699,7 @@ class ICCompareStub: public CodeStub { }; -class CEntryStub : public CodeStub { +class CEntryStub : public PlatformCodeStub { public: explicit CEntryStub(int result_size, SaveFPRegsMode save_doubles = kDontSaveFPRegs) @@ -700,7 +733,7 @@ class CEntryStub : public CodeStub { }; -class JSEntryStub : public CodeStub { +class JSEntryStub : public PlatformCodeStub { public: JSEntryStub() { } @@ -734,7 +767,7 @@ class JSConstructEntryStub : public JSEntryStub { }; -class ArgumentsAccessStub: public CodeStub { +class ArgumentsAccessStub: public PlatformCodeStub { public: enum Type { READ_ELEMENT, @@ -761,7 +794,7 @@ class ArgumentsAccessStub: public CodeStub { }; -class RegExpExecStub: public CodeStub { +class RegExpExecStub: public PlatformCodeStub { public: RegExpExecStub() { } @@ -773,7 +806,7 @@ class RegExpExecStub: public CodeStub { }; -class RegExpConstructResultStub: public CodeStub { +class RegExpConstructResultStub: public PlatformCodeStub { public: RegExpConstructResultStub() { } @@ -785,7 +818,7 @@ class RegExpConstructResultStub: public CodeStub { }; -class CallFunctionStub: public CodeStub { +class CallFunctionStub: public PlatformCodeStub { public: CallFunctionStub(int argc, CallFunctionFlags flags) : argc_(argc), flags_(flags) { } @@ -826,7 +859,7 @@ class CallFunctionStub: public CodeStub { }; -class CallConstructStub: public CodeStub { +class CallConstructStub: public PlatformCodeStub { public: explicit CallConstructStub(CallFunctionFlags flags) : flags_(flags) {} @@ -1017,25 +1050,53 @@ class AllowStubCallsScope { }; -class KeyedLoadElementStub : public CodeStub { +class KeyedLoadDictionaryElementStub : public PlatformCodeStub { public: - explicit KeyedLoadElementStub(ElementsKind elements_kind) - : elements_kind_(elements_kind) - { } + KeyedLoadDictionaryElementStub() {} Major MajorKey() { return KeyedLoadElement; } - int MinorKey() { return elements_kind_; } + int MinorKey() { return DICTIONARY_ELEMENTS; } void Generate(MacroAssembler* masm); private: - ElementsKind elements_kind_; + DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub); +}; + + +class KeyedLoadFastElementStub : public HydrogenCodeStub { + public: + KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) { + bit_field_ = ElementsKindBits::encode(elements_kind) | + IsJSArrayBits::encode(is_js_array); + } + + Major MajorKey() { return KeyedLoadElement; } + int MinorKey() { return bit_field_; } + + bool is_js_array() const { + return IsJSArrayBits::decode(bit_field_); + } + + ElementsKind elements_kind() const { + return ElementsKindBits::decode(bit_field_); + } + + virtual Handle GenerateCode(); + + 
virtual CodeStubInterfaceDescriptor* GetInterfaceDescriptor( + Isolate* isolate); + + private: + class IsJSArrayBits: public BitField {}; + class ElementsKindBits: public BitField {}; + uint32_t bit_field_; - DISALLOW_COPY_AND_ASSIGN(KeyedLoadElementStub); + DISALLOW_COPY_AND_ASSIGN(KeyedLoadFastElementStub); }; -class KeyedStoreElementStub : public CodeStub { +class KeyedStoreElementStub : public PlatformCodeStub { public: KeyedStoreElementStub(bool is_js_array, ElementsKind elements_kind, @@ -1070,7 +1131,7 @@ class KeyedStoreElementStub : public CodeStub { }; -class ToBooleanStub: public CodeStub { +class ToBooleanStub: public PlatformCodeStub { public: enum Type { UNDEFINED, @@ -1140,7 +1201,7 @@ class ToBooleanStub: public CodeStub { }; -class ElementsTransitionAndStoreStub : public CodeStub { +class ElementsTransitionAndStoreStub : public PlatformCodeStub { public: ElementsTransitionAndStoreStub(ElementsKind from, ElementsKind to, @@ -1181,7 +1242,7 @@ class ElementsTransitionAndStoreStub : public CodeStub { }; -class StoreArrayLiteralElementStub : public CodeStub { +class StoreArrayLiteralElementStub : public PlatformCodeStub { public: StoreArrayLiteralElementStub() : fp_registers_(CanUseFPRegisters()) { } @@ -1200,7 +1261,7 @@ class StoreArrayLiteralElementStub : public CodeStub { }; -class ProfileEntryHookStub : public CodeStub { +class ProfileEntryHookStub : public PlatformCodeStub { public: explicit ProfileEntryHookStub() {} diff --git a/src/codegen.cc b/src/codegen.cc index 83ac854..c8bdf68 100644 --- a/src/codegen.cc +++ b/src/codegen.cc @@ -121,19 +121,21 @@ void CodeGenerator::PrintCode(Handle code, CompilationInfo* info) { if (print_code) { // Print the source code if available. FunctionLiteral* function = info->function(); - Handle