}
-void OptimizingCompiler::RecordOptimizationStats() {
+void RecompileJob::RecordOptimizationStats() {
Handle<JSFunction> function = info()->closure();
int opt_count = function->shared()->opt_count();
function->shared()->set_opt_count(opt_count + 1);
// A return value of true indicates the compilation pipeline is still
// going, not necessarily that we optimized the code.
static bool MakeCrankshaftCode(CompilationInfo* info) {
- OptimizingCompiler compiler(info);
- OptimizingCompiler::Status status = compiler.CreateGraph();
+ RecompileJob job(info);
+ RecompileJob::Status status = job.CreateGraph();
- if (status != OptimizingCompiler::SUCCEEDED) {
- return status != OptimizingCompiler::FAILED;
+ if (status != RecompileJob::SUCCEEDED) {
+ return status != RecompileJob::FAILED;
}
- status = compiler.OptimizeGraph();
- if (status != OptimizingCompiler::SUCCEEDED) {
- status = compiler.AbortOptimization();
- return status != OptimizingCompiler::FAILED;
+ status = job.OptimizeGraph();
+ if (status != RecompileJob::SUCCEEDED) {
+ status = job.AbortOptimization();
+ return status != RecompileJob::FAILED;
}
- status = compiler.GenerateAndInstallCode();
- return status != OptimizingCompiler::FAILED;
+ status = job.GenerateAndInstallCode();
+ return status != RecompileJob::FAILED;
}
-OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
+RecompileJob::Status RecompileJob::CreateGraph() {
ASSERT(isolate()->use_crankshaft());
ASSERT(info()->IsOptimizing());
ASSERT(!info()->IsCompilingForDebugging());
}
-OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
+RecompileJob::Status RecompileJob::OptimizeGraph() {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
}
-OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
+RecompileJob::Status RecompileJob::GenerateAndInstallCode() {
ASSERT(last_status() == SUCCEEDED);
ASSERT(!info()->HasAbortedDueToDependencyChange());
DisallowCodeDependencyChange no_dependency_change;
info->SaveHandles();
if (Rewriter::Rewrite(*info) && Scope::Analyze(*info)) {
- OptimizingCompiler* compiler =
- new(info->zone()) OptimizingCompiler(*info);
- OptimizingCompiler::Status status = compiler->CreateGraph();
- if (status == OptimizingCompiler::SUCCEEDED) {
+ RecompileJob* job = new(info->zone()) RecompileJob(*info);
+ RecompileJob::Status status = job->CreateGraph();
+ if (status == RecompileJob::SUCCEEDED) {
info.Detach();
shared->code()->set_profiler_ticks(0);
- isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
+ isolate->optimizing_compiler_thread()->QueueForOptimization(job);
ASSERT(!isolate->has_pending_exception());
return true;
- } else if (status == OptimizingCompiler::BAILED_OUT) {
+ } else if (status == RecompileJob::BAILED_OUT) {
isolate->clear_pending_exception();
InstallFullCode(*info);
}
}
-Handle<Code> Compiler::InstallOptimizedCode(
- OptimizingCompiler* optimizing_compiler) {
- SmartPointer<CompilationInfo> info(optimizing_compiler->info());
+Handle<Code> Compiler::InstallOptimizedCode(RecompileJob* job) {
+ SmartPointer<CompilationInfo> info(job->info());
// The function may have already been optimized by OSR. Simply continue.
// Except when OSR already disabled optimization for some reason.
if (info->shared_info()->optimization_disabled()) {
isolate, Logger::TimerEventScope::v8_recompile_synchronous);
// If crankshaft succeeded, install the optimized code else install
// the unoptimized code.
- OptimizingCompiler::Status status = optimizing_compiler->last_status();
+ RecompileJob::Status status = job->last_status();
if (info->HasAbortedDueToDependencyChange()) {
info->set_bailout_reason(kBailedOutDueToDependencyChange);
- status = optimizing_compiler->AbortOptimization();
- } else if (status != OptimizingCompiler::SUCCEEDED) {
+ status = job->AbortOptimization();
+ } else if (status != RecompileJob::SUCCEEDED) {
info->set_bailout_reason(kFailedBailedOutLastTime);
- status = optimizing_compiler->AbortOptimization();
+ status = job->AbortOptimization();
} else if (isolate->DebuggerHasBreakPoints()) {
info->set_bailout_reason(kDebuggerIsActive);
- status = optimizing_compiler->AbortOptimization();
+ status = job->AbortOptimization();
} else {
- status = optimizing_compiler->GenerateAndInstallCode();
- ASSERT(status == OptimizingCompiler::SUCCEEDED ||
- status == OptimizingCompiler::BAILED_OUT);
+ status = job->GenerateAndInstallCode();
+ ASSERT(status == RecompileJob::SUCCEEDED ||
+ status == RecompileJob::BAILED_OUT);
}
InstallCodeCommon(*info);
- if (status == OptimizingCompiler::SUCCEEDED) {
+ if (status == RecompileJob::SUCCEEDED) {
Handle<Code> code = info->code();
ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty(isolate));
info->closure()->ReplaceCode(*code);
// profiler ticks to prevent too soon re-opt after a deopt.
info->shared_info()->code()->set_profiler_ticks(0);
ASSERT(!info->closure()->IsInRecompileQueue());
- return (status == OptimizingCompiler::SUCCEEDED) ? info->code()
- : Handle<Code>::null();
+ return (status == RecompileJob::SUCCEEDED) ? info->code()
+ : Handle<Code>::null();
}
// fail, bail-out to the full code generator or succeed. Apart from
// their return value, the status of the phase last run can be checked
// using last_status().
-class OptimizingCompiler: public ZoneObject {
+class RecompileJob: public ZoneObject {
public:
- explicit OptimizingCompiler(CompilationInfo* info)
+ explicit RecompileJob(CompilationInfo* info)
: info_(info),
graph_builder_(NULL),
graph_(NULL),
void RecordOptimizationStats();
struct Timer {
- Timer(OptimizingCompiler* compiler, TimeDelta* location)
- : compiler_(compiler),
- location_(location) {
+ Timer(RecompileJob* job, TimeDelta* location)
+ : job_(job), location_(location) {
ASSERT(location_ != NULL);
timer_.Start();
}
*location_ += timer_.Elapsed();
}
- OptimizingCompiler* compiler_;
+ RecompileJob* job_;
ElapsedTimer timer_;
TimeDelta* location_;
};
bool is_toplevel,
Handle<Script> script);
- static Handle<Code> InstallOptimizedCode(OptimizingCompiler* info);
+ static Handle<Code> InstallOptimizedCode(RecompileJob* job);
#ifdef ENABLE_DEBUGGER_SUPPORT
static bool MakeCodeForLiveEdit(CompilationInfo* info);
void OptimizingCompilerThread::CompileNext() {
- OptimizingCompiler* optimizing_compiler = NULL;
- bool result = input_queue_.Dequeue(&optimizing_compiler);
+ RecompileJob* job = NULL;
+ bool result = input_queue_.Dequeue(&job);
USE(result);
ASSERT(result);
Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
// The function may have already been optimized by OSR. Simply continue.
- OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
+ RecompileJob::Status status = job->OptimizeGraph();
USE(status); // Prevent an unused-variable error in release mode.
- ASSERT(status != OptimizingCompiler::FAILED);
+ ASSERT(status != RecompileJob::FAILED);
// The function may have already been optimized by OSR. Simply continue.
// Use a mutex to make sure that functions marked for install
// are always also queued.
LockGuard<Mutex> access_queue(&queue_mutex_);
- output_queue_.Enqueue(optimizing_compiler);
+ output_queue_.Enqueue(job);
isolate_->stack_guard()->RequestInstallCode();
}
-static void DisposeOptimizingCompiler(OptimizingCompiler* compiler,
- bool restore_function_code) {
+static void DisposeRecompileJob(RecompileJob* compiler,
+ bool restore_function_code) {
+ // The recompile job is allocated in the CompilationInfo's zone.
CompilationInfo* info = compiler->info();
if (restore_function_code) {
Handle<JSFunction> function = info->closure();
void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
- OptimizingCompiler* optimizing_compiler;
- // The optimizing compiler is allocated in the CompilationInfo's zone.
- while (input_queue_.Dequeue(&optimizing_compiler)) {
+ RecompileJob* job;
+ while (input_queue_.Dequeue(&job)) {
// This should not block, since we have one signal on the input queue
// semaphore corresponding to each element in the input queue.
input_queue_semaphore_.Wait();
- if (optimizing_compiler->info()->osr_ast_id().IsNone()) {
+ if (job->info()->osr_ast_id().IsNone()) {
// OSR jobs are dealt with separately.
- DisposeOptimizingCompiler(optimizing_compiler, restore_function_code);
+ DisposeRecompileJob(job, restore_function_code);
}
}
Release_Store(&queue_length_, static_cast<AtomicWord>(0));
void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
- OptimizingCompiler* optimizing_compiler;
- // The optimizing compiler is allocated in the CompilationInfo's zone.
+ RecompileJob* job;
while (true) {
{ LockGuard<Mutex> access_queue(&queue_mutex_);
- if (!output_queue_.Dequeue(&optimizing_compiler)) break;
+ if (!output_queue_.Dequeue(&job)) break;
}
- if (optimizing_compiler->info()->osr_ast_id().IsNone()) {
+ if (job->info()->osr_ast_id().IsNone()) {
// OSR jobs are dealt with separately.
- DisposeOptimizingCompiler(optimizing_compiler, restore_function_code);
+ DisposeRecompileJob(job, restore_function_code);
}
}
}
void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
- OptimizingCompiler* optimizing_compiler;
+ RecompileJob* job;
for (int i = 0; i < osr_buffer_size_; i++) {
- optimizing_compiler = osr_buffer_[i];
- if (optimizing_compiler != NULL) {
- DisposeOptimizingCompiler(optimizing_compiler, restore_function_code);
- }
+ job = osr_buffer_[i];
+ if (job != NULL) DisposeRecompileJob(job, restore_function_code);
}
osr_cursor_ = 0;
}
ASSERT(!IsOptimizerThread());
HandleScope handle_scope(isolate_);
- OptimizingCompiler* compiler;
+ RecompileJob* job;
while (true) {
{ LockGuard<Mutex> access_queue(&queue_mutex_);
- if (!output_queue_.Dequeue(&compiler)) break;
+ if (!output_queue_.Dequeue(&job)) break;
}
- CompilationInfo* info = compiler->info();
+ CompilationInfo* info = job->info();
if (info->osr_ast_id().IsNone()) {
- Compiler::InstallOptimizedCode(compiler);
+ Compiler::InstallOptimizedCode(job);
} else {
if (FLAG_trace_osr) {
PrintF("[COSR - ");
PrintF(" is ready for install and entry at AST id %d]\n",
info->osr_ast_id().ToInt());
}
- compiler->WaitForInstall();
+ job->WaitForInstall();
BackEdgeTable::RemoveStackCheck(info);
}
}
}
-void OptimizingCompilerThread::QueueForOptimization(
- OptimizingCompiler* optimizing_compiler) {
+void OptimizingCompilerThread::QueueForOptimization(RecompileJob* job) {
ASSERT(IsQueueAvailable());
ASSERT(!IsOptimizerThread());
Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
- CompilationInfo* info = optimizing_compiler->info();
+ CompilationInfo* info = job->info();
if (info->osr_ast_id().IsNone()) {
info->closure()->MarkInRecompileQueue();
} else {
info->closure()->PrintName();
PrintF(" for concurrent on-stack replacement.\n");
}
- AddToOsrBuffer(optimizing_compiler);
+ AddToOsrBuffer(job);
osr_attempts_++;
BackEdgeTable::AddStackCheck(info);
}
- input_queue_.Enqueue(optimizing_compiler);
+ input_queue_.Enqueue(job);
input_queue_semaphore_.Signal();
}
-OptimizingCompiler* OptimizingCompilerThread::FindReadyOSRCandidate(
+RecompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
Handle<JSFunction> function, uint32_t osr_pc_offset) {
ASSERT(!IsOptimizerThread());
- OptimizingCompiler* result = NULL;
+ RecompileJob* result = NULL;
for (int i = 0; i < osr_buffer_size_; i++) {
result = osr_buffer_[i];
if (result == NULL) continue;
}
-void OptimizingCompilerThread::AddToOsrBuffer(OptimizingCompiler* compiler) {
+void OptimizingCompilerThread::AddToOsrBuffer(RecompileJob* job) {
ASSERT(!IsOptimizerThread());
// Store into next empty slot or replace next stale OSR job that's waiting
// in vain. Dispose in the latter case.
- OptimizingCompiler* stale;
+ RecompileJob* stale;
while (true) {
stale = osr_buffer_[osr_cursor_];
if (stale == NULL) break;
PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
}
BackEdgeTable::RemoveStackCheck(info);
- DisposeOptimizingCompiler(stale, false);
+ DisposeRecompileJob(stale, false);
break;
}
AdvanceOsrCursor();
}
- osr_buffer_[osr_cursor_] = compiler;
+ osr_buffer_[osr_cursor_] = job;
AdvanceOsrCursor();
}
namespace internal {
class HOptimizedGraphBuilder;
-class OptimizingCompiler;
+class RecompileJob;
class SharedFunctionInfo;
class OptimizingCompilerThread : public Thread {
NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
if (FLAG_concurrent_osr) {
osr_buffer_size_ = FLAG_concurrent_recompilation_queue_length + 4;
- osr_buffer_ = NewArray<OptimizingCompiler*>(osr_buffer_size_);
+ osr_buffer_ = NewArray<RecompileJob*>(osr_buffer_size_);
for (int i = 0; i < osr_buffer_size_; i++) osr_buffer_[i] = NULL;
}
}
void Run();
void Stop();
void Flush();
- void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
+ void QueueForOptimization(RecompileJob* job);
void InstallOptimizedFunctions();
- OptimizingCompiler* FindReadyOSRCandidate(Handle<JSFunction> function,
- uint32_t osr_pc_offset);
+ RecompileJob* FindReadyOSRCandidate(Handle<JSFunction> function,
+ uint32_t osr_pc_offset);
bool IsQueuedForOSR(Handle<JSFunction> function, uint32_t osr_pc_offset);
bool IsQueuedForOSR(JSFunction* function);
// Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry.
// Tasks evicted from the cyclic buffer are discarded.
- void AddToOsrBuffer(OptimizingCompiler* compiler);
+ void AddToOsrBuffer(RecompileJob* job);
void AdvanceOsrCursor() {
osr_cursor_ = (osr_cursor_ + 1) % osr_buffer_size_;
}
Semaphore input_queue_semaphore_;
// Queue of incoming recompilation tasks (including OSR).
- UnboundQueue<OptimizingCompiler*> input_queue_;
+ UnboundQueue<RecompileJob*> input_queue_;
// Queue of recompilation tasks ready to be installed (excluding OSR).
- UnboundQueue<OptimizingCompiler*> output_queue_;
+ UnboundQueue<RecompileJob*> output_queue_;
// Cyclic buffer of recompilation tasks for OSR.
// TODO(yangguo): This may keep zombie tasks indefinitely, holding on to
// a lot of memory. Fix this.
- OptimizingCompiler** osr_buffer_;
+ RecompileJob** osr_buffer_;
// Cursor for the cyclic buffer.
int osr_cursor_;
int osr_buffer_size_;
return NULL;
}
- OptimizingCompiler* compiler = isolate->optimizing_compiler_thread()->
+ RecompileJob* job = isolate->optimizing_compiler_thread()->
FindReadyOSRCandidate(function, pc_offset);
- if (compiler == NULL) {
+ if (job == NULL) {
if (IsSuitableForOnStackReplacement(isolate, function, unoptimized) &&
Compiler::RecompileConcurrent(function, pc_offset)) {
if (function->IsMarkedForLazyRecompilation() ||
// Fall through to the end in case of failure.
} else {
// TODO(titzer): don't install the OSR code into the function.
- ast_id = compiler->info()->osr_ast_id();
- result = Compiler::InstallOptimizedCode(compiler);
+ ast_id = job->info()->osr_ast_id();
+ result = Compiler::InstallOptimizedCode(job);
}
} else if (IsSuitableForOnStackReplacement(isolate, function, unoptimized)) {
ast_id = unoptimized->TranslatePcOffsetToAstId(pc_offset);