From 21fb1401bd17c3049eb1926e0a15072bc63bac0b Mon Sep 17 00:00:00 2001
From: "ulan@chromium.org"
Date: Fri, 22 Nov 2013 10:21:47 +0000
Subject: [PATCH] Restore saved caller FP registers on stub failure and
 preserve FP registers on NotifyStubFailure.

In debug mode, clobber FP registers on each runtime call to increase
the chances of catching such bugs.

R=danno@chromium.org

Review URL: https://chromiumcodereview.appspot.com/78283002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18000 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/arguments.cc                                  |  7 +++
 src/arguments.h                                   | 11 ++++
 src/arm/builtins-arm.cc                           | 15 +++++-
 src/arm/deoptimizer-arm.cc                        |  5 ++
 src/arm/lithium-codegen-arm.cc                    | 64 +++++++++++++++--------
 src/arm/lithium-codegen-arm.h                     |  3 ++
 src/arm/macro-assembler-arm.h                     |  6 ++-
 src/builtins.h                                    |  3 ++
 src/deoptimizer.cc                                |  3 +-
 src/deoptimizer.h                                 |  4 ++
 src/ia32/builtins-ia32.cc                         | 15 +++++-
 src/ia32/deoptimizer-ia32.cc                      |  7 +++
 src/ia32/lithium-codegen-ia32.cc                  | 62 ++++++++++++++--------
 src/ia32/lithium-codegen-ia32.h                   |  3 ++
 src/ia32/macro-assembler-ia32.h                   |  6 ++-
 src/mips/builtins-mips.cc                         | 15 +++++-
 src/mips/deoptimizer-mips.cc                      |  5 ++
 src/mips/lithium-codegen-mips.cc                  | 64 +++++++++++++++--------
 src/mips/macro-assembler-mips.h                   |  6 ++-
 src/x64/builtins-x64.cc                           | 15 +++++-
 src/x64/deoptimizer-x64.cc                        |  5 ++
 src/x64/lithium-codegen-x64.cc                    | 64 +++++++++++++++--------
 src/x64/lithium-codegen-x64.h                     |  4 ++
 src/x64/macro-assembler-x64.h                     |  6 ++-
 test/mjsunit/regress/regress-clobbered-fp-regs.js | 54 +++++++++++++++++++
 25 files changed, 349 insertions(+), 103 deletions(-)
 create mode 100644 test/mjsunit/regress/regress-clobbered-fp-regs.js

diff --git a/src/arguments.cc b/src/arguments.cc
index 3a4d733..ae06bd0 100644
--- a/src/arguments.cc
+++ b/src/arguments.cc
@@ -117,4 +117,11 @@ FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID)
 #undef WRITE_CALL_2_VOID
 
 
+double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
+  // TODO(ulan): This clobbers only a subset of registers depending on the
+  // compiler. Rewrite this in assembly to really clobber all registers.
+  return x1 * 1.01 + x2 * 2.02 + x3 * 3.03 + x4 * 4.04;
+}
+
+
 } }  // namespace v8::internal

diff --git a/src/arguments.h b/src/arguments.h
index 92e5740..b7137c3 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -289,12 +289,23 @@ class FunctionCallbackArguments
 };
 
 
+double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
+
+
+#ifdef DEBUG
+#define CLOBBER_DOUBLE_REGISTERS() ClobberDoubleRegisters(1, 2, 3, 4);
+#else
+#define CLOBBER_DOUBLE_REGISTERS()
+#endif
+
+
 #define DECLARE_RUNTIME_FUNCTION(Type, Name) \
 Type Name(int args_length, Object** args_object, Isolate* isolate)
 
 #define RUNTIME_FUNCTION(Type, Name) \
 static Type __RT_impl_##Name(Arguments args, Isolate* isolate); \
 Type Name(int args_length, Object** args_object, Isolate* isolate) { \
+  CLOBBER_DOUBLE_REGISTERS(); \
   Arguments args(args_length, args_object); \
   return __RT_impl_##Name(args, isolate); \
 } \
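Annotation: the two arguments.* hunks above are the debug-mode tripwire. Every RUNTIME_FUNCTION entry point first calls ClobberDoubleRegisters(), so any code path that fails to preserve FP registers around a runtime call should start failing deterministically in debug builds instead of only under register-allocation luck. A minimal standalone sketch of the same wrapping pattern (plain C++, hypothetical names such as MyRuntimeAdd, not V8 source):

    #include <cstdio>

    // Stand-in for the patch's helper; as its TODO notes, multiplying a few
    // doubles only cycles whatever FP registers the compiler picks.
    static double ClobberDoubleRegisters(double x1, double x2, double x3,
                                         double x4) {
      return x1 * 1.01 + x2 * 2.02 + x3 * 3.03 + x4 * 4.04;
    }

    #ifdef DEBUG
    #define CLOBBER_DOUBLE_REGISTERS() ClobberDoubleRegisters(1, 2, 3, 4)
    #else
    #define CLOBBER_DOUBLE_REGISTERS()  // no-op in release builds
    #endif

    // Simplified analogue of RUNTIME_FUNCTION: the public entry clobbers FP
    // state in debug mode, then forwards to the real implementation.
    #define RUNTIME_FUNCTION(Name)   \
      static double Impl_##Name();   \
      double Name() {                \
        CLOBBER_DOUBLE_REGISTERS();  \
        return Impl_##Name();        \
      }                              \
      static double Impl_##Name()

    RUNTIME_FUNCTION(MyRuntimeAdd) { return 1.0 + 2.0; }

    int main() { std::printf("%f\n", MyRuntimeAdd()); }

The point of putting the clobber in the macro rather than in each runtime function is that it covers every runtime entry uniformly, including ones added later.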
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 19fa9f2..7ae2924 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -857,7 +857,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
 }
 
 
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
@@ -866,7 +867,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
     // registers.
     __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
     // Pass the function and deoptimization type to the runtime system.
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
   }
 
@@ -875,6 +876,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
 }
 
 
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {

diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index c846f98..6031499 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -127,6 +127,11 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
 }
 
 
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
 #define __ masm()->
 
 // This code tries to be close to ia32 code so that any changes can be

diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 0aa8197..244cba1 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -98,6 +98,38 @@ void LCodeGen::Abort(BailoutReason reason) {
 }
 
 
+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
 
@@ -158,16 +190,7 @@ bool LCodeGen::GeneratePrologue() {
   }
 
   if (info()->saves_caller_doubles()) {
-    Comment(";;; Save clobbered callee double registers");
-    int count = 0;
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    while (!save_iterator.Done()) {
-      __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
-              MemOperand(sp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    SaveCallerDoubles();
   }
 
   // Possibly allocate a local context.
@@ -313,6 +336,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
       Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
     }
     if (deopt_jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
       __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
       if (needs_frame.is_bound()) {
         __ b(&needs_frame);
@@ -330,6 +354,10 @@ bool LCodeGen::GenerateDeoptJumpTable() {
         __ mov(pc, ip);
       }
     } else {
+      if (info()->saves_caller_doubles()) {
+        ASSERT(info()->IsStub());
+        RestoreCallerDoubles();
+      }
       __ mov(lr, Operand(pc), LeaveCC, al);
       __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
     }
@@ -828,7 +856,10 @@ void LCodeGen::DeoptimizeIf(Condition condition,
   }
 
   ASSERT(info()->IsStub() || frame_is_built_);
-  if (condition == al && frame_is_built_) {
+  // Go through jump table if we need to handle condition, build frame, or
+  // restore caller doubles.
+  if (condition == al && frame_is_built_ &&
+      !info()->saves_caller_doubles()) {
     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
     // We often have several deopts to the same entry, reuse the last
@@ -2929,16 +2960,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles()) {
-    ASSERT(NeedsEagerFrame());
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    int count = 0;
-    while (!save_iterator.Done()) {
-      __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
-              MemOperand(sp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    RestoreCallerDoubles();
   }
   int no_frame_start = -1;
   if (NeedsEagerFrame()) {

diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 7390a48..3f2ba35 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -186,6 +186,9 @@ class LCodeGen: public LCodeGenBase {
 
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
 
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
   // Code generation passes.  Returns true if code generation should
   // continue.
   bool GeneratePrologue();
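Annotation: SaveCallerDoubles()/RestoreCallerDoubles() factor out loops that were previously inlined in GeneratePrologue and DoReturn; the deopt jump table becomes their new third caller, which is the actual bug fix. The invariant is that both loops walk the same allocated-registers BitVector in the same order, so allocation index i always maps to the same stack slot. A standalone model of that pairing (std::bitset stands in for V8's BitVector; not V8 source):

    #include <bitset>
    #include <cstdio>

    constexpr int kNumDoubleRegs = 16;
    double fp_regs[kNumDoubleRegs];      // stand-in for d0..d15
    double stack_slots[kNumDoubleRegs];  // stand-in for [sp + i * kDoubleSize]

    // Mirror of the prologue loop: store each allocated register to the next
    // compact stack slot (the __ vstr in the patch).
    void SaveCallerDoubles(const std::bitset<kNumDoubleRegs>& allocated) {
      int count = 0;
      for (int i = 0; i < kNumDoubleRegs; i++)
        if (allocated[i]) stack_slots[count++] = fp_regs[i];
    }

    // Mirror-image loop for every exit path (the __ vldr in the patch).
    void RestoreCallerDoubles(const std::bitset<kNumDoubleRegs>& allocated) {
      int count = 0;
      for (int i = 0; i < kNumDoubleRegs; i++)
        if (allocated[i]) fp_regs[i] = stack_slots[count++];
    }

    int main() {
      std::bitset<kNumDoubleRegs> allocated;
      allocated[1] = allocated[7] = true;
      fp_regs[1] = 0.1; fp_regs[7] = 0.7;
      SaveCallerDoubles(allocated);
      fp_regs[1] = fp_regs[7] = -1;  // a runtime call clobbers FP registers
      RestoreCallerDoubles(allocated);
      std::printf("%g %g\n", fp_regs[1], fp_regs[7]);  // 0.1 0.7 again
    }

Because the slots are assigned by iteration order rather than register number, skipping the restore loop on any one path (as the pre-patch deopt path did) silently leaves stale values in live registers.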
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 4897064..5e20eec 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -1054,8 +1054,10 @@ class MacroAssembler: public Assembler {
   }
 
   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }
 
   // Convenience function: call an external reference.

diff --git a/src/builtins.h b/src/builtins.h
index ec78782..1a04ad8 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -114,6 +114,8 @@ enum BuiltinExtraArguments {
                                     Code::kNoExtraICState)              \
   V(NotifyStubFailure,              BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
+  V(NotifyStubFailureSaveDoubles,   BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
                                                                         \
   V(LoadIC_Miss,                    BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
@@ -403,6 +405,7 @@ class Builtins {
   static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
   static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
   static void Generate_NotifyStubFailure(MacroAssembler* masm);
+  static void Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm);
   static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
 
   static void Generate_FunctionCall(MacroAssembler* masm);

diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index e39c345..077337f 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -1649,8 +1649,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
   output_frame->SetPc(reinterpret_cast<intptr_t>(
       trampoline->instruction_start()));
   output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
-  Code* notify_failure =
-      isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
+  Code* notify_failure = NotifyStubFailureBuiltin();
   output_frame->SetContinuation(
       reinterpret_cast<intptr_t>(notify_failure->entry()));
 }

diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 52e4e24..f518546 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -406,6 +406,10 @@ class Deoptimizer : public Malloced {
   // at the dynamic alignment state slot inside the frame.
   bool HasAlignmentPadding(JSFunction* function);
 
+  // Select the version of the NotifyStubFailure builtin that either saves or
+  // doesn't save the double registers, depending on CPU features.
+  Code* NotifyStubFailureBuiltin();
+
   Isolate* isolate_;
   JSFunction* function_;
   Code* compiled_code_;
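Annotation: two small API moves carry the cross-platform plumbing here. MacroAssembler::CallRuntime grows a defaulted SaveFPRegsMode parameter, so the dozens of existing two-argument call sites keep compiling unchanged while NotifyStubFailure can opt in to FP preservation; and Deoptimizer::NotifyStubFailureBuiltin() gives each port a hook to choose which builtin becomes the continuation frame's target. A sketch of the defaulted-parameter shape only (hypothetical free function, not V8 source):

    #include <cstdio>

    enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };

    // Adding a trailing parameter with a default keeps old call sites valid;
    // only callers that care about FP state need to mention it.
    void CallRuntime(const char* fid, int num_arguments,
                     SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
      std::printf("%s(%d args)%s\n", fid, num_arguments,
                  save_doubles == kSaveFPRegs ? " [doubles saved]" : "");
    }

    int main() {
      CallRuntime("kNotifyStubFailure", 0);               // pre-patch style
      CallRuntime("kNotifyStubFailure", 0, kSaveFPRegs);  // new FP-safe call
    }

The per-port implementations of NotifyStubFailureBuiltin() follow below: ARM, MIPS, and x64 always pick the double-saving variant, while ia32 must gate it on SSE2 support.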
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index e5e6ec5..32c3397 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -601,7 +601,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
 }
 
 
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   // Enter an internal frame.
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -610,7 +611,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
     // stubs that tail call the runtime on deopts passing their parameters in
     // registers.
     __ pushad();
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ popad();
     // Tear down internal frame.
   }
 
@@ -620,6 +621,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
 }
 
 
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {

diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index e043aa4..5300dde 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -231,6 +231,13 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
 }
 
 
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  Builtins::Name name = CpuFeatures::IsSupported(SSE2) ?
+      Builtins::kNotifyStubFailureSaveDoubles : Builtins::kNotifyStubFailure;
+  return isolate_->builtins()->builtin(name);
+}
+
+
 #define __ masm()->
 
 void Deoptimizer::EntryGenerator::Generate() {

diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index f316e85..cf5e8e1 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -130,6 +130,40 @@ void LCodeGen::MakeSureStackPagesMapped(int offset) {
 #endif
 
 
+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  CpuFeatureScope scope(masm(), SSE2);
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ movsd(MemOperand(esp, count * kDoubleSize),
+             XMMRegister::FromAllocationIndex(save_iterator.Current()));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  CpuFeatureScope scope(masm(), SSE2);
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+             MemOperand(esp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
 
@@ -244,17 +278,7 @@ bool LCodeGen::GeneratePrologue() {
     }
 
     if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
-      Comment(";;; Save clobbered callee double registers");
-      CpuFeatureScope scope(masm(), SSE2);
-      int count = 0;
-      BitVector* doubles = chunk()->allocated_double_registers();
-      BitVector::Iterator save_iterator(doubles);
-      while (!save_iterator.Done()) {
-        __ movsd(MemOperand(esp, count * kDoubleSize),
-                 XMMRegister::FromAllocationIndex(save_iterator.Current()));
-        save_iterator.Advance();
-        count++;
-      }
+      SaveCallerDoubles();
     }
   }
 
@@ -399,6 +423,7 @@ bool LCodeGen::GenerateJumpTable() {
       Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
     }
     if (jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
       __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
       if (needs_frame.is_bound()) {
         __ jmp(&needs_frame);
@@ -425,6 +450,9 @@ bool LCodeGen::GenerateJumpTable() {
         __ ret(0);  // Call the continuation without clobbering registers.
       }
     } else {
+      if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
+        RestoreCallerDoubles();
+      }
       __ call(entry, RelocInfo::RUNTIME_ENTRY);
     }
   }
@@ -3129,17 +3157,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
-    ASSERT(NeedsEagerFrame());
-    CpuFeatureScope scope(masm(), SSE2);
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    int count = 0;
-    while (!save_iterator.Done()) {
-      __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
-               MemOperand(esp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    RestoreCallerDoubles();
   }
   if (dynamic_frame_alignment_) {
     // Fetch the state of the dynamic frame alignment.

diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 52ca07a..638f80c 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -198,6 +198,9 @@ class LCodeGen: public LCodeGenBase {
 
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
 
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
   // Code generation passes.  Returns true if code generation should
   // continue.
   void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;

diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 58e5907..2941130 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -777,8 +777,10 @@ class MacroAssembler: public Assembler {
   }
 
   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }
 
   // Convenience function: call an external reference.
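Annotation: across all four ports, DeoptimizeIf's fast path now refuses the direct call to the deopt entry whenever caller doubles are live, and the jump-table slow path restores them first. The new ASSERTs encode that needs_frame and saves_caller_doubles are mutually exclusive: only stubs save caller doubles, and stubs always have a frame. A condensed model of that decision (my abstraction of the control flow, not V8 source):

    #include <cstdio>

    struct DeoptSite {
      bool always;                // condition == al / no_condition
      bool frame_is_built;
      bool saves_caller_doubles;  // true only for stubs
    };

    // A direct call is only allowed when nothing extra (condition check,
    // frame build, or double restore) has to happen first.
    const char* EmitDeopt(const DeoptSite& s) {
      if (s.always && s.frame_is_built && !s.saves_caller_doubles)
        return "call deopt entry directly";
      return s.saves_caller_doubles
                 ? "jump table: restore caller doubles, then call entry"
                 : "jump table: branch / build frame, then call entry";
    }

    int main() {
      std::printf("%s\n", EmitDeopt({true, true, true}));   // stub w/ doubles
      std::printf("%s\n", EmitDeopt({true, true, false}));  // plain fast path
    }

On ia32 the restore is additionally guarded by CpuFeatures::IsSupported(SSE2), since without SSE2 no XMM registers were saved in the prologue to begin with.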
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 85588e8..d2641df 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -892,7 +892,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
 }
 
 
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
@@ -901,7 +902,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
     // registers.
     __ MultiPush(kJSCallerSaved | kCalleeSaved);
     // Pass the function and deoptimization type to the runtime system.
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ MultiPop(kJSCallerSaved | kCalleeSaved);
   }
 
@@ -910,6 +911,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
 }
 
 
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {

diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index c66472f..0662b17 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -125,6 +125,11 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
 }
 
 
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
 #define __ masm()->
 
 

diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 43d83bd..6cb8859 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -98,6 +98,38 @@ void LChunkBuilder::Abort(BailoutReason reason) {
 }
 
 
+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
 
@@ -160,16 +192,7 @@ bool LCodeGen::GeneratePrologue() {
   }
 
   if (info()->saves_caller_doubles()) {
-    Comment(";;; Save clobbered callee double registers");
-    int count = 0;
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    while (!save_iterator.Done()) {
-      __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
-              MemOperand(sp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    SaveCallerDoubles();
   }
 
   // Possibly allocate a local context.
@@ -298,6 +321,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
     }
     __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
     if (deopt_jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
       if (needs_frame.is_bound()) {
         __ Branch(&needs_frame);
       } else {
@@ -313,6 +337,10 @@ bool LCodeGen::GenerateDeoptJumpTable() {
         __ Call(t9);
       }
     } else {
+      if (info()->saves_caller_doubles()) {
+        ASSERT(info()->IsStub());
+        RestoreCallerDoubles();
+      }
       __ Call(t9);
     }
   }
@@ -786,7 +814,10 @@ void LCodeGen::DeoptimizeIf(Condition condition,
   }
 
   ASSERT(info()->IsStub() || frame_is_built_);
-  if (condition == al && frame_is_built_) {
+  // Go through jump table if we need to handle condition, build frame, or
+  // restore caller doubles.
+  if (condition == al && frame_is_built_ &&
+      !info()->saves_caller_doubles()) {
     __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
   } else {
     // We often have several deopts to the same entry, reuse the last
@@ -2777,16 +2808,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles()) {
-    ASSERT(NeedsEagerFrame());
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    int count = 0;
-    while (!save_iterator.Done()) {
-      __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
-              MemOperand(sp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    RestoreCallerDoubles();
   }
   int no_frame_start = -1;
   if (NeedsEagerFrame()) {

diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index e9d9f1c..e4187c0 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -1205,8 +1205,10 @@ class MacroAssembler: public Assembler {
   }
 
   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }
 
   // Convenience function: call an external reference.
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 30cd314..f4864f8 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -662,7 +662,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
 }
 
 
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   // Enter an internal frame.
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -671,7 +672,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
     // stubs that tail call the runtime on deopts passing their parameters in
     // registers.
     __ Pushad();
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ Popad();
     // Tear down internal frame.
   }
 
@@ -681,6 +682,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
 }
 
 
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   // Enter an internal frame.

diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 4528077..ae180ec 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -126,6 +126,11 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
 }
 
 
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
 #define __ masm()->
 
 void Deoptimizer::EntryGenerator::Generate() {

diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 0c95745..9fefcc0 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -111,6 +111,38 @@ void LCodeGen::MakeSureStackPagesMapped(int offset) {
 #endif
 
 
+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ movsd(MemOperand(rsp, count * kDoubleSize),
+             XMMRegister::FromAllocationIndex(save_iterator.Current()));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+             MemOperand(rsp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
 
@@ -173,16 +205,7 @@ bool LCodeGen::GeneratePrologue() {
     }
 
     if (info()->saves_caller_doubles()) {
-      Comment(";;; Save clobbered callee double registers");
-      int count = 0;
-      BitVector* doubles = chunk()->allocated_double_registers();
-      BitVector::Iterator save_iterator(doubles);
-      while (!save_iterator.Done()) {
-        __ movsd(MemOperand(rsp, count * kDoubleSize),
-                 XMMRegister::FromAllocationIndex(save_iterator.Current()));
-        save_iterator.Advance();
-        count++;
-      }
+      SaveCallerDoubles();
     }
   }
 
@@ -261,6 +284,7 @@ bool LCodeGen::GenerateJumpTable() {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
      __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
@@ -280,6 +304,10 @@ bool LCodeGen::GenerateJumpTable() {
        __ call(kScratchRegister);
      }
    } else {
+      if (info()->saves_caller_doubles()) {
+        ASSERT(info()->IsStub());
+        RestoreCallerDoubles();
+      }
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
@@ -714,7 +742,10 @@ void LCodeGen::DeoptimizeIf(Condition cc,
   }
 
   ASSERT(info()->IsStub() || frame_is_built_);
-  if (cc == no_condition && frame_is_built_) {
+  // Go through jump table if we need to handle condition, build frame, or
+  // restore caller doubles.
+  if (cc == no_condition && frame_is_built_ &&
+      !info()->saves_caller_doubles()) {
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
     // We often have several deopts to the same entry, reuse the last
@@ -2657,16 +2688,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles()) {
-    ASSERT(NeedsEagerFrame());
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    int count = 0;
-    while (!save_iterator.Done()) {
-      __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
-               MemOperand(rsp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    RestoreCallerDoubles();
  }
   int no_frame_start = -1;
   if (NeedsEagerFrame()) {

diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 51cfe08..53d2646 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -153,6 +153,10 @@ class LCodeGen: public LCodeGenBase {
 
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
 
+
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
   // Code generation passes.  Returns true if code generation should
   // continue.
   bool GeneratePrologue();

diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index f7bf1ca..06b2f80 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -1266,8 +1266,10 @@ class MacroAssembler: public Assembler {
   }
 
   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }
 
   // Convenience function: call an external reference.
diff --git a/test/mjsunit/regress/regress-clobbered-fp-regs.js b/test/mjsunit/regress/regress-clobbered-fp-regs.js
new file mode 100644
index 0000000..7795ae9
--- /dev/null
+++ b/test/mjsunit/regress/regress-clobbered-fp-regs.js
@@ -0,0 +1,54 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Flags: --allow-natives-syntax
+
+function store(a, x, y) {
+  var f1 = 0.1 * y;
+  var f2 = 0.2 * y;
+  var f3 = 0.3 * y;
+  var f4 = 0.4 * y;
+  var f5 = 0.5 * y;
+  var f6 = 0.6 * y;
+  var f7 = 0.7 * y;
+  var f8 = 0.8 * y;
+  a[0] = x;
+  var sum = (f1 + f2 + f3 + f4 + f5 + f6 + f7 + f8);
+  assertEquals(1, y);
+  var expected = 3.6;
+  if (Math.abs(expected - sum) > 0.01) {
+    assertEquals(expected, sum);
+  }
+}
+
+// Generate TransitionElementsKindStub.
+store([1], 1, 1);
+store([1], 1.1, 1);
+store([1], 1.1, 1);
+%OptimizeFunctionOnNextCall(store);
+// This will trap on the allocation site in TransitionElementsKindStub.
+store([1], 1, 1);
-- 
2.7.4