i::Context* last_context =
isolate->handle_scope_implementer()->RestoreContext();
isolate->set_context(last_context);
- isolate->set_context_exit_happened(true);
}
if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
if (FLAG_trace_parallel_recompilation) {
- PrintF(" ** Compilation queue, will retry opting on next run.\n");
+ PrintF(" ** Compilation queue full, will retry optimizing ");
+ closure->PrintName();
+ PrintF(" on next run.\n");
}
return;
}
// If preparing for the first break point, make sure to deoptimize all
// functions, as debugging does not work with optimized code.
if (!has_break_points_) {
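+ // Discard pending recompilation tasks first; the optimized code they
+ // would produce cannot be used while debugging.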
+ if (FLAG_parallel_recompilation) {
+ isolate_->optimizing_compiler_thread()->Flush();
+ }
+
Deoptimizer::DeoptimizeAll(isolate_);
Handle<Code> lazy_compile =
shared->set_num_literals(literals_array_size);
if (is_generator) {
shared->set_instance_class_name(isolate()->heap()->Generator_string());
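+ // The optimizing compiler does not support generators.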
+ shared->DisableOptimization(kGenerator);
}
return shared;
}
}
+int Heap::NotifyContextDisposed() {
+ if (FLAG_parallel_recompilation) {
+ // Flush the queued recompilation tasks.
+ isolate()->optimizing_compiler_thread()->Flush();
+ }
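+ // Monomorphic ICs may keep maps from the disposed context alive, so have
+ // the next GC flush them.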
+ flush_monomorphic_ics_ = true;
+ return ++contexts_disposed_;
+}
+
+
void Heap::PerformScavenge() {
GCTracer tracer(this, NULL, NULL);
if (incremental_marking()->IsStopped()) {
void EnsureHeapIsIterable();
// Notify the heap that a context has been disposed.
- int NotifyContextDisposed() {
- flush_monomorphic_ics_ = true;
- return ++contexts_disposed_;
- }
+ int NotifyContextDisposed();
// Utility to invoke the scavenger. This is needed in test code to
// ensure that the correct callback is invoked for weak global handles.
regexp_stack_(NULL),
date_cache_(NULL),
code_stub_interface_descriptors_(NULL),
- context_exit_happened_(false),
initialized_from_snapshot_(false),
cpu_profiler_(NULL),
heap_profiler_(NULL),
thread_local_top_.top_lookup_result_ = top;
}
- bool context_exit_happened() {
- return context_exit_happened_;
- }
- void set_context_exit_happened(bool context_exit_happened) {
- context_exit_happened_ = context_exit_happened;
- }
-
bool initialized_from_snapshot() { return initialized_from_snapshot_; }
double time_millis_since_init() {
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;
- // The garbage collector should be a little more aggressive when it knows
- // that a context was recently exited.
- bool context_exit_happened_;
-
// True if this isolate was initialized from a snapshot.
bool initialized_from_snapshot_;
if (code_scope_info->IsFixedArray()) {
shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info));
}
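+ // Functions patched by LiveEdit are excluded from optimization.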
+ shared_info->DisableOptimization(kLiveEdit);
}
if (shared_info->debug_info()->IsDebugInfo()) {
ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() ||
code()->optimizable());
+ ASSERT(!shared()->is_generator());
set_code_no_write_barrier(
GetIsolate()->builtins()->builtin(Builtins::kLazyRecompile));
// No write barrier required, since the builtin is part of the root set.
ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
- if (!FLAG_parallel_recompilation) {
- JSFunction::MarkForLazyRecompilation();
- return;
- }
+ ASSERT(!shared()->is_generator());
+ ASSERT(FLAG_parallel_recompilation);
if (FLAG_trace_parallel_recompilation) {
PrintF(" ** Marking ");
PrintName();
OS::Sleep(FLAG_parallel_recompilation_delay);
}
- if (Acquire_Load(&stop_thread_)) {
- stop_semaphore_->Signal();
- if (FLAG_trace_parallel_recompilation) {
- time_spent_total_ = OS::Ticks() - epoch;
- }
- return;
+ switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) {
+ case CONTINUE:
+ break;
+ case STOP:
+ if (FLAG_trace_parallel_recompilation) {
+ time_spent_total_ = OS::Ticks() - epoch;
+ }
+ stop_semaphore_->Signal();
+ return;
+ case FLUSH:
+ // The main thread is blocked, waiting for the stop semaphore.
+ { AllowHandleDereference allow_handle_dereference;
+ FlushInputQueue(true);
+ }
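+ // The input queue is now empty; reset its length counter before resuming.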
+ Release_Store(&queue_length_, static_cast<AtomicWord>(0));
+ Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
+ stop_semaphore_->Signal();
+ // Return to start of consumer loop.
+ continue;
}
int64_t compiling_start = 0;
void OptimizingCompilerThread::CompileNext() {
OptimizingCompiler* optimizing_compiler = NULL;
- input_queue_.Dequeue(&optimizing_compiler);
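+ // The queue is known to be non-empty at this point, so the Dequeue below
+ // must succeed.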
+ bool result = input_queue_.Dequeue(&optimizing_compiler);
+ USE(result);
+ ASSERT(result);
Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
// The function may have already been optimized by OSR. Simply continue.
}
+void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
+ OptimizingCompiler* optimizing_compiler;
+ // The optimizing compiler is allocated in the CompilationInfo's zone, so
+ // deleting the info below also frees the compiler.
+ while (input_queue_.Dequeue(&optimizing_compiler)) {
+ // This should not block, since we have one signal on the input queue
+ // semaphore corresponding to each element in the input queue.
+ input_queue_semaphore_->Wait();
+ CompilationInfo* info = optimizing_compiler->info();
+ if (restore_function_code) {
+ Handle<JSFunction> function = info->closure();
+ function->ReplaceCode(function->shared()->code());
+ }
+ delete info;
+ }
+}
+
+
+void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
+ OptimizingCompiler* optimizing_compiler;
+ // The optimizing compiler is allocated in the CompilationInfo's zone, so
+ // deleting the info below also frees the compiler.
+ while (output_queue_.Dequeue(&optimizing_compiler)) {
+ CompilationInfo* info = optimizing_compiler->info();
+ if (restore_function_code) {
+ Handle<JSFunction> function = info->closure();
+ function->ReplaceCode(function->shared()->code());
+ }
+ delete info;
+ }
+}
+
+
+void OptimizingCompilerThread::Flush() {
+ ASSERT(!IsOptimizerThread());
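+ // Ask the compiler thread to drain its input queue, wake it up in case
+ // it is idle, and wait until it acknowledges the flush.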
+ Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
+ input_queue_semaphore_->Signal();
+ stop_semaphore_->Wait();
+ FlushOutputQueue(true);
+}
+
+
void OptimizingCompilerThread::Stop() {
ASSERT(!IsOptimizerThread());
- Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
+ Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
input_queue_semaphore_->Signal();
stop_semaphore_->Wait();
if (FLAG_parallel_recompilation_delay != 0) {
// A barrier when loading the queue length is not necessary, since the
// write happens in CompileNext on the same thread.
+ // This is used only for testing.
while (NoBarrier_Load(&queue_length_) > 0) CompileNext();
InstallOptimizedFunctions();
} else {
- OptimizingCompiler* optimizing_compiler;
- // The optimizing compiler is allocated in the CompilationInfo's zone.
- while (input_queue_.Dequeue(&optimizing_compiler)) {
- delete optimizing_compiler->info();
- }
- while (output_queue_.Dequeue(&optimizing_compiler)) {
- delete optimizing_compiler->info();
- }
+ FlushInputQueue(false);
+ FlushOutputQueue(false);
}
if (FLAG_trace_parallel_recompilation) {
install_mutex_(OS::CreateMutex()),
time_spent_compiling_(0),
time_spent_total_(0) {
- NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
+ NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
}
void Run();
void Stop();
- void CompileNext();
+ void Flush();
void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
void InstallOptimizedFunctions();
}
private:
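+ // CONTINUE keeps the compiler thread running, STOP terminates it, and
+ // FLUSH discards queued tasks before resuming normal operation.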
+ enum StopFlag { CONTINUE, STOP, FLUSH };
+
+ void FlushInputQueue(bool restore_function_code);
+ void FlushOutputQueue(bool restore_function_code);
+
+ void CompileNext();
+
#ifdef DEBUG
int thread_id_;
Mutex* thread_id_mutex_;
JavaScriptFrame* frame = stack_iterator.frame();
ASSERT_EQ(frame->function(), generator_object->function());
+ ASSERT(frame->function()->is_compiled());
STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
}
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
if (FLAG_parallel_recompilation && sync_with_compiler_thread) {
- while (function->IsMarkedForParallelRecompilation() ||
- function->IsInRecompileQueue() ||
+ while (function->IsInRecompileQueue() ||
function->IsMarkedForInstallingRecompiledCode()) {
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
OS::Sleep(50);
// Utility class to set --allow-natives-syntax, --nouse-inlining and
// --noparallel-recompilation when constructed and return to their default
// state when destroyed.
-class AllowNativesSyntaxNoInlining {
+class AllowNativesSyntaxNoInliningNoParallel {
public:
- AllowNativesSyntaxNoInlining()
+ AllowNativesSyntaxNoInliningNoParallel()
: allow_natives_syntax_(i::FLAG_allow_natives_syntax),
- use_inlining_(i::FLAG_use_inlining) {
+ use_inlining_(i::FLAG_use_inlining),
+ parallel_recompilation_(i::FLAG_parallel_recompilation) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_use_inlining = false;
+ i::FLAG_parallel_recompilation = false;
}
- ~AllowNativesSyntaxNoInlining() {
+ ~AllowNativesSyntaxNoInliningNoParallel() {
i::FLAG_allow_natives_syntax = allow_natives_syntax_;
i::FLAG_use_inlining = use_inlining_;
+ i::FLAG_parallel_recompilation = parallel_recompilation_;
}
private:
bool allow_natives_syntax_;
bool use_inlining_;
+ bool parallel_recompilation_;
};
const char* f_source = "function f(x, y) { return x + y; };";
{
- AllowNativesSyntaxNoInlining options;
+ AllowNativesSyntaxNoInliningNoParallel options;
// Compile function f and collect type feedback to insert a binary op stub
// call in the optimized code.
i::FLAG_prepare_always_opt = true;
binary_op);
char* f_source = f_source_buffer.start();
- AllowNativesSyntaxNoInlining options;
+ AllowNativesSyntaxNoInliningNoParallel options;
// Compile function f and collect type feedback to insert a binary op stub
// call in the optimized code.
i::FLAG_prepare_always_opt = true;
const char* f_source = "function f(x, y) { return x < y; };";
{
- AllowNativesSyntaxNoInlining options;
+ AllowNativesSyntaxNoInliningNoParallel options;
// Compile function f and collect type feedback to insert a compare IC
// call in the optimized code.
i::FLAG_prepare_always_opt = true;
const char* g2_source = "function g2(x, y) { x[y] = 1; };";
{
- AllowNativesSyntaxNoInlining options;
+ AllowNativesSyntaxNoInliningNoParallel options;
// Compile functions and collect type feedback to insert IC
// calls in the optimized code.
i::FLAG_prepare_always_opt = true;
const char* g2_source = "function g2(x, y) { x[y] = 1; };";
{
- AllowNativesSyntaxNoInlining options;
+ AllowNativesSyntaxNoInliningNoParallel options;
// Compile functions and collect type feedback to insert IC
// calls in the optimized code.
i::FLAG_prepare_always_opt = true;
// to check whether the data is being released since the external string
// resource's callback is fired when the external string is GC'ed.
FLAG_use_ic = false; // ICs retain objects.
+ FLAG_parallel_recompilation = false;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
SourceResource* resource = new SourceResource(i::StrDup(source));
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --parallel-recompilation-delay=300
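+// The delay keeps the parallel recompilation of foo in flight while the
+// break point below is set and cleared.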
+
+if (!%IsParallelRecompilationSupported()) {
+ print("Parallel recompilation is disabled. Skipping this test.");
+ quit();
+}
+
+Debug = debug.Debug
+
+function foo() {
+ var x = 1;
+ return x;
+}
+
+function bar() {
+ var x = 2;
+ return x;
+}
+
+foo();
+// Mark and trigger parallel optimization.
+%OptimizeFunctionOnNextCall(foo, "parallel");
+foo();
+
+// Set a break point on an unrelated function. This clears both the optimized
+// and the (shared) unoptimized code of foo, and sets both to the lazy-compile
+// builtin. Clear the break point immediately afterwards to deactivate the
+// debugger.
+Debug.setBreakPoint(bar, 0, 0);
+Debug.clearAllBreakPoints();
+
+// Install optimized code when parallel optimization finishes.
+// This needs to be able to deal with shared code being a builtin.
+assertUnoptimized(foo, "sync");
+