From: mstarzinger@chromium.org
Date: Fri, 12 Apr 2013 08:58:22 +0000 (+0000)
Subject: Support full deoptimization during GC via stack guard.
X-Git-Tag: upstream/4.7.83~14604
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=4a9c3d4a66f8dcb7ce2cfe602428af5f1ff00b4f;p=platform%2Fupstream%2Fv8.git

Support full deoptimization during GC via stack guard.

This adds support to the stack guard to trigger a full deoptimization of
all optimized code when the GC kicks into high promotion mode. Global
pretenuring decisions in optimized code can then be based on the high
promotion mode.

R=ulan@chromium.org

Review URL: https://codereview.chromium.org/14173007

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@14243 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---

diff --git a/src/execution.cc b/src/execution.cc
index dee3112..f343868 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -33,6 +33,7 @@
 #include "bootstrapper.h"
 #include "codegen.h"
 #include "debug.h"
+#include "deoptimizer.h"
 #include "isolate-inl.h"
 #include "runtime-profiler.h"
 #include "simulator.h"
@@ -448,6 +449,19 @@ void StackGuard::RequestGC() {
 }
 
 
+bool StackGuard::IsFullDeopt() {
+  ExecutionAccess access(isolate_);
+  return (thread_local_.interrupt_flags_ & FULL_DEOPT) != 0;
+}
+
+
+void StackGuard::FullDeopt() {
+  ExecutionAccess access(isolate_);
+  thread_local_.interrupt_flags_ |= FULL_DEOPT;
+  set_interrupt_limits(access);
+}
+
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
 bool StackGuard::IsDebugBreak() {
   ExecutionAccess access(isolate_);
@@ -880,7 +894,6 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
     stack_guard->Continue(GC_REQUEST);
   }
 
-  isolate->counters()->stack_interrupts()->Increment();
   isolate->counters()->runtime_profiler_ticks()->Increment();
   isolate->runtime_profiler()->OptimizeNow();
 
@@ -898,6 +911,10 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
     stack_guard->Continue(INTERRUPT);
     return isolate->StackOverflow();
   }
+  if (stack_guard->IsFullDeopt()) {
+    stack_guard->Continue(FULL_DEOPT);
+    Deoptimizer::DeoptimizeAll(isolate);
+  }
   return isolate->heap()->undefined_value();
 }
 
diff --git a/src/execution.h b/src/execution.h
index b104180..9cf8ac6 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -41,7 +41,8 @@ enum InterruptFlag {
   DEBUGCOMMAND = 1 << 2,
   PREEMPT = 1 << 3,
   TERMINATE = 1 << 4,
-  GC_REQUEST = 1 << 5
+  GC_REQUEST = 1 << 5,
+  FULL_DEOPT = 1 << 6
 };
 
 
@@ -197,6 +198,8 @@ class StackGuard {
 #endif
   bool IsGCRequest();
   void RequestGC();
+  bool IsFullDeopt();
+  void FullDeopt();
   void Continue(InterruptFlag after_what);
 
   // This provides an asynchronous read of the stack limits for the current
diff --git a/src/heap.cc b/src/heap.cc
index 76f0a3c..3c88980 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -952,6 +952,13 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
       PrintPID("Limited new space size due to high promotion rate: %d MB\n",
               new_space_.InitialCapacity() / MB);
     }
+    // Support for global pre-tenuring uses the high promotion mode as a
+    // heuristic indicator of whether to pretenure or not; we trigger
+    // deoptimization here to take advantage of pre-tenuring as soon as
+    // possible.
+    if (FLAG_pretenure_literals) {
+      isolate_->stack_guard()->FullDeopt();
+    }
   } else if (new_space_high_promotion_mode_active_ &&
       IsStableOrDecreasingSurvivalTrend() &&
       IsLowSurvivalRate()) {
diff --git a/src/heap.h b/src/heap.h
index 9e758aa..46c75fe 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1498,6 +1498,12 @@ class Heap {
   MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
                                                      PretenureFlag pretenure);
 
+  // Predicate that governs global pre-tenuring decisions based on observed
+  // promotion rates of previous collections.
+  inline bool ShouldGloballyPretenure() {
+    return new_space_high_promotion_mode_active_;
+  }
+
   inline intptr_t PromotedTotalSize() {
     return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
   }
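
For readers unfamiliar with V8's stack-guard machinery, here is a minimal, self-contained
C++ sketch of the interrupt-flag pattern this patch extends: a requester sets a bit and arms
the stack limit, and the interrupt handler clears the bit before servicing it. MiniStackGuard,
ArmStackLimit and DeoptimizeAllStub are hypothetical stand-ins, not V8 APIs.

    // Simplified model of the FULL_DEOPT request/handle cycle (not V8 code).
    #include <cstdio>

    enum InterruptFlag {
      INTERRUPT  = 1 << 0,
      GC_REQUEST = 1 << 5,
      FULL_DEOPT = 1 << 6  // the new flag added by this commit
    };

    class MiniStackGuard {
     public:
      // Called by the heap, e.g. when high promotion mode activates.
      void FullDeopt() {
        interrupt_flags_ |= FULL_DEOPT;
        ArmStackLimit();  // stand-in for set_interrupt_limits(access)
      }
      bool IsFullDeopt() const { return (interrupt_flags_ & FULL_DEOPT) != 0; }
      // Clears a flag once its interrupt has been serviced.
      void Continue(InterruptFlag after_what) { interrupt_flags_ &= ~after_what; }

     private:
      void ArmStackLimit() { /* would force the next stack check to fail */ }
      int interrupt_flags_ = 0;
    };

    // Stand-in for Deoptimizer::DeoptimizeAll(isolate).
    static void DeoptimizeAllStub() { std::printf("deoptimizing all optimized code\n"); }

    // Rough shape of Execution::HandleStackGuardInterrupt for this flag only.
    static void HandleStackGuardInterrupt(MiniStackGuard* guard) {
      if (guard->IsFullDeopt()) {
        guard->Continue(FULL_DEOPT);  // clear first so the request is one-shot
        DeoptimizeAllStub();
      }
    }

    int main() {
      MiniStackGuard guard;
      guard.FullDeopt();                  // GC requests a full deopt...
      HandleStackGuardInterrupt(&guard);  // ...serviced at the next stack check
      return 0;
    }

Clearing the flag via Continue() before doing the work mirrors the real handler and keeps the
request one-shot, so hitting the stack check again does not deoptimize a second time.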
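
And a hedged sketch of the consumer side: once the full deoptimization has dropped code back
to the unoptimized tier, recompiled code could base allocation decisions on the new
Heap::ShouldGloballyPretenure() predicate. MiniHeap, SelectSpace and SpaceName below are
simplified stand-ins for illustration; in V8 the decision is wired through code generation,
not a helper like this.

    #include <cstdio>

    enum AllocationSpace { NEW_SPACE, OLD_POINTER_SPACE };

    class MiniHeap {
     public:
      // Mirrors the predicate added in heap.h: pretenure globally while the
      // GC is in high promotion mode.
      bool ShouldGloballyPretenure() const {
        return new_space_high_promotion_mode_active_;
      }
      void set_high_promotion_mode(bool active) {
        new_space_high_promotion_mode_active_ = active;
      }

     private:
      bool new_space_high_promotion_mode_active_ = false;
    };

    // Hypothetical allocation helper: picks the target space from the heuristic.
    static AllocationSpace SelectSpace(const MiniHeap& heap) {
      return heap.ShouldGloballyPretenure() ? OLD_POINTER_SPACE : NEW_SPACE;
    }

    static const char* SpaceName(AllocationSpace space) {
      return space == NEW_SPACE ? "NEW_SPACE" : "OLD_POINTER_SPACE";
    }

    int main() {
      MiniHeap heap;
      std::printf("before: %s\n", SpaceName(SelectSpace(heap)));  // NEW_SPACE
      heap.set_high_promotion_mode(true);       // GC flips the mode during a collection
      std::printf("after:  %s\n", SpaceName(SelectSpace(heap)));  // OLD_POINTER_SPACE
      return 0;
    }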