// Do a scavenge to put off the next scavenge as far as possible.
// This may ease the issue that GVN blocks the next scavenge.
isolate->heap()->CollectGarbage(NEW_SPACE, "parallel recompile");
- closure->MarkInRecompileQueue();
shared->code()->set_profiler_ticks(0);
info.Detach();
isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
SmartPointer<CompilationInfo> info(optimizing_compiler->info());
- ASSERT(info->closure()->IsMarkedForInstallingRecompiledCode());
- // While waiting for the optimizer thread, OSR may have already done all
- // the work and disabled optimization of this function for some reason.
+ // While this job sat in the queue, OSR may have already optimized the
+ // function; that is fine and we simply continue. But if OSR has disabled
+ // optimization for some reason, abort installing the optimized code below.
if (info->shared_info()->optimization_disabled()) {
info->SetCode(Handle<Code>(info->shared_info()->code()));
InstallFullCode(*info);
+ if (FLAG_trace_parallel_recompilation) {
+ PrintF(" ** aborting optimization for ");
+ info->closure()->PrintName();
+ PrintF(" as it has been disabled.\n");
+ }
+ ASSERT(!info->closure()->IsMarkedForInstallingRecompiledCode());
return;
}
// Optimized code is finally replacing unoptimized code. Reset the latter's
// profiler ticks to prevent too soon re-opt after a deopt.
info->shared_info()->code()->set_profiler_ticks(0);
+ ASSERT(!info->closure()->IsMarkedForInstallingRecompiledCode());
}
input_queue_.Dequeue(&optimizing_compiler);
Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
- ASSERT(optimizing_compiler->info()->closure()->IsInRecompileQueue());
-
+ // The function may have already been optimized by OSR. Simply continue.
OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
+ USE(status); // Prevent an unused-variable error in release mode.
ASSERT(status != OptimizingCompiler::FAILED);
- // Prevent an unused-variable error in release mode.
- USE(status);
-
- output_queue_.Enqueue(optimizing_compiler);
- // The execution thread can call InstallOptimizedFunctions() at any time,
- // including at this point, after queuing for install and before marking
- // for install. To avoid race condition, functions that are queued but not
- // yet marked for install are not processed by InstallOptimizedFunctions().
-
- ASSERT(optimizing_compiler->info()->closure()->IsInRecompileQueue());
- // Mark function to generate and install optimized code. We assume this
- // write to be atomic.
+ // (As above, OSR may have optimized the function already; that is fine.)
+ // Mark the function for installing before queuing it, so the writes happen
+ // in a fixed order: the install mark first, the actual code installation
+ // second (after the compiler has been dequeued on the other side).
optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
+ output_queue_.Enqueue(optimizing_compiler);
}
stop_semaphore_->Wait();
if (FLAG_parallel_recompilation_delay != 0) {
- // Execution ended before we managed to compile and install the remaining
- // functions in the queue. We still want to do that for debugging though.
- // At this point the optimizing thread already stopped, so we finish
- // processing the queue in the main thread.
InstallOptimizedFunctions();
// Barrier when loading queue length is not necessary since the write
// happens in CompileNext on the same thread.
HandleScope handle_scope(isolate_);
int functions_installed = 0;
while (!output_queue_.IsEmpty()) {
- OptimizingCompiler* compiler = *output_queue_.Peek();
-
- if (compiler->info()->closure()->IsInRecompileQueue()) {
- // A function may be queued for install, but not marked as such yet.
- // We continue with the output queue the next to avoid race condition.
- break;
- }
+ OptimizingCompiler* compiler;
output_queue_.Dequeue(&compiler);
-
-#ifdef DEBUG
- // Create new closure handle since the deferred handle is about to die.
- Handle<JSFunction> closure(*compiler->info()->closure());
-#endif // DEBUG
-
Compiler::InstallOptimizedCode(compiler);
- // Assert that the marker builtin has been replaced by actual code.
- ASSERT(!closure->IsInRecompileQueue());
functions_installed++;
}
}
ASSERT(IsQueueAvailable());
ASSERT(!IsOptimizerThread());
Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
+ optimizing_compiler->info()->closure()->MarkInRecompileQueue();
input_queue_.Enqueue(optimizing_compiler);
input_queue_semaphore_->Signal();
}
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
if (!AllowOptimization(isolate, function)) {
function->ReplaceCode(function->shared()->code());
- return function->code();
+ return isolate->heap()->undefined_value();
}
function->shared()->code()->set_profiler_ticks(0);
ASSERT(FLAG_parallel_recompilation);
HandleScope handle_scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (!V8::UseCrankshaft()) return isolate->heap()->undefined_value();
- ASSERT(FLAG_parallel_recompilation);
+ ASSERT(V8::UseCrankshaft() && FLAG_parallel_recompilation);
OptimizingCompilerThread* opt_thread = isolate->optimizing_compiler_thread();
- opt_thread->InstallOptimizedFunctions();
+ do {
+ // The function could have been marked for installing, but not queued just
+ // yet. In this case, retry until installed.
+ opt_thread->InstallOptimizedFunctions();
+ } while (function->IsMarkedForInstallingRecompiledCode());
return function->code();
}