// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/optimizing-compiler-thread.h"

#include "src/base/atomicops.h"
#include "src/full-codegen.h"
#include "src/hydrogen.h"
#include "src/isolate.h"
#include "src/v8threads.h"

namespace v8 {
namespace internal {

namespace {

void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
                                bool restore_function_code) {
  // The recompile job is allocated in the CompilationInfo's zone.
  CompilationInfo* info = job->info();
  if (restore_function_code) {
    if (info->is_osr()) {
      if (!job->IsWaitingForInstall()) {
        // Remove stack check that guards OSR entry on original code.
        Handle<Code> code = info->unoptimized_code();
        uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
        BackEdgeTable::RemoveStackCheck(code, offset);
      }
    } else {
      Handle<JSFunction> function = info->closure();
      function->ReplaceCode(function->shared()->code());
    }
  }
  delete info;
}

}  // namespace


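// CompileTask is the unit of work used when job-based recompilation is
// enabled (job_based_recompilation_): instead of the dedicated optimizing
// compiler thread looping over the input queue, each queued job is wrapped in
// a CompileTask and handed to the embedder-provided platform via
// CallOnBackgroundThread (see QueueForOptimization and Unblock below).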
class OptimizingCompilerThread::CompileTask : public v8::Task {
 public:
  explicit CompileTask(Isolate* isolate) : isolate_(isolate) {}
  virtual ~CompileTask() {}

 private:
  // v8::Task overrides.
  void Run() OVERRIDE {
    DisallowHeapAllocation no_allocation;
    DisallowHandleAllocation no_handles;
    DisallowHandleDereference no_deref;

    OptimizingCompilerThread* thread = isolate_->optimizing_compiler_thread();
    {
      TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
      if (thread->recompilation_delay_ != 0) {
        base::OS::Sleep(thread->recompilation_delay_);
      }
      StopFlag flag;
      OptimizedCompileJob* job = thread->NextInput(&flag);
      if (flag == CONTINUE) {
        thread->CompileNext(job);
      } else {
        AllowHandleDereference allow_handle_dereference;
        if (!job->info()->is_osr()) {
          DisposeOptimizedCompileJob(job, true);
        }
      }
    }
    bool signal = false;
    {
      base::LockGuard<base::RecursiveMutex> lock(&thread->task_count_mutex_);
      if (--thread->task_count_ == 0) {
        if (static_cast<StopFlag>(base::Acquire_Load(&thread->stop_thread_)) ==
            FLUSH) {
          base::Release_Store(&thread->stop_thread_,
                              static_cast<base::AtomicWord>(CONTINUE));
          signal = true;
        }
      }
    }
    if (signal) thread->stop_semaphore_.Signal();
  }

  Isolate* isolate_;

  DISALLOW_COPY_AND_ASSIGN(CompileTask);
};
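
// A note on the flush/stop handshake in job-based mode: Flush() and Stop()
// store FLUSH into stop_thread_ and then wait on stop_semaphore_ only while
// tasks are still outstanding (task_count_ > 0 or blocked_jobs_ > 0). The
// last CompileTask to finish sees task_count_ drop to zero, resets the flag
// to CONTINUE and signals the semaphore, unblocking the main thread.
// task_count_mutex_ is a RecursiveMutex, presumably because Flush() and
// Stop() call Unblock() (which locks it again) while still holding it.

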
OptimizingCompilerThread::~OptimizingCompilerThread() {
  DCHECK_EQ(0, input_queue_length_);
  DeleteArray(input_queue_);
  if (FLAG_concurrent_osr) {
#ifdef DEBUG
    for (int i = 0; i < osr_buffer_capacity_; i++) {
      CHECK_NULL(osr_buffer_[i]);
    }
#endif
    DeleteArray(osr_buffer_);
  }
}


void OptimizingCompilerThread::Run() {
#ifdef DEBUG
  { base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_);
    thread_id_ = ThreadId::Current().ToInteger();
  }
#endif
  DisallowHeapAllocation no_allocation;
  DisallowHandleAllocation no_handles;
  DisallowHandleDereference no_deref;

  if (job_based_recompilation_) {
    // Job-based recompilation is handled by CompileTask; nothing to do here.
    return;
  }

  base::ElapsedTimer total_timer;
  if (tracing_enabled_) total_timer.Start();

  while (true) {
    input_queue_semaphore_.Wait();
    TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);

    if (recompilation_delay_ != 0) {
      base::OS::Sleep(recompilation_delay_);
    }

    switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) {
      case CONTINUE:
        break;
      case STOP:
        if (tracing_enabled_) {
          time_spent_total_ = total_timer.Elapsed();
        }
        stop_semaphore_.Signal();
        return;
      case FLUSH:
        // The main thread is blocked, waiting for the stop semaphore.
        { AllowHandleDereference allow_handle_dereference;
          FlushInputQueue(true);
        }
        base::Release_Store(&stop_thread_,
                            static_cast<base::AtomicWord>(CONTINUE));
        stop_semaphore_.Signal();
        // Return to start of consumer loop.
        continue;
    }

    base::ElapsedTimer compiling_timer;
    if (tracing_enabled_) compiling_timer.Start();

    CompileNext(NextInput());
    if (tracing_enabled_) {
      time_spent_compiling_ += compiling_timer.Elapsed();
    }
  }
}


OptimizedCompileJob* OptimizingCompilerThread::NextInput(StopFlag* flag) {
  base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
  if (input_queue_length_ == 0) {
    if (flag) *flag = CONTINUE;
    return NULL;
  }
  OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
  DCHECK_NOT_NULL(job);
  input_queue_shift_ = InputQueueIndex(1);
  input_queue_length_--;
  if (flag) {
    *flag = static_cast<StopFlag>(base::Acquire_Load(&stop_thread_));
  }
  return job;
}


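// The input queue is a fixed-size ring buffer: input_queue_shift_ marks the
// physical slot of the logical front element, and InputQueueIndex() (defined
// in the header, presumably as something like
//   (i + input_queue_shift_) % input_queue_capacity_
// ) maps a logical position to its slot. A purely illustrative example under
// that assumption: with capacity 8 and shift 5, logical positions 0..3 map to
// slots 5, 6, 7, 0; dequeuing advances the shift to InputQueueIndex(1), while
// enqueuing at the front (see QueueForOptimization) moves the shift back by
// one so that the new job lands at InputQueueIndex(0).

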
void OptimizingCompilerThread::CompileNext(OptimizedCompileJob* job) {
  DCHECK_NOT_NULL(job);

  // The function may have already been optimized by OSR.  Simply continue.
  OptimizedCompileJob::Status status = job->OptimizeGraph();
  USE(status);  // Prevent an unused-variable error in release mode.
  DCHECK(status != OptimizedCompileJob::FAILED);

  // The function may have already been optimized by OSR.  Simply continue.
  // Use a mutex to make sure that functions marked for install
  // are always also queued.
  if (job_based_recompilation_) output_queue_mutex_.Lock();
  output_queue_.Enqueue(job);
  if (job_based_recompilation_) output_queue_mutex_.Unlock();
  isolate_->stack_guard()->RequestInstallCode();
}


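// RequestInstallCode() sets an interrupt on the isolate's stack guard, so the
// main thread ends up calling InstallOptimizedFunctions() below at its next
// interrupt check and drains the output queue.

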
void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
  OptimizedCompileJob* job;
  while ((job = NextInput())) {
    DCHECK(!job_based_recompilation_);
    // This should not block, since we have one signal on the input queue
    // semaphore corresponding to each element in the input queue.
    input_queue_semaphore_.Wait();
    // OSR jobs are dealt with separately.
    if (!job->info()->is_osr()) {
      DisposeOptimizedCompileJob(job, restore_function_code);
    }
  }
}


void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
  base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
  OptimizedCompileJob* job;
  while (output_queue_.Dequeue(&job)) {
    // OSR jobs are dealt with separately.
    if (!job->info()->is_osr()) {
      DisposeOptimizedCompileJob(job, restore_function_code);
    }
  }
}


void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    if (osr_buffer_[i] != NULL) {
      DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code);
      osr_buffer_[i] = NULL;
    }
  }
}


void OptimizingCompilerThread::Flush() {
  DCHECK(!IsOptimizerThread());
  bool block = true;
  if (job_based_recompilation_) {
    base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
    block = task_count_ > 0 || blocked_jobs_ > 0;
    if (block) {
      base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
    }
    if (FLAG_block_concurrent_recompilation) Unblock();
  } else {
    base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
    if (FLAG_block_concurrent_recompilation) Unblock();
  }
  if (!job_based_recompilation_) input_queue_semaphore_.Signal();
  if (block) stop_semaphore_.Wait();
  FlushOutputQueue(true);
  if (FLAG_concurrent_osr) FlushOsrBuffer(true);
  if (tracing_enabled_) {
    PrintF("  ** Flushed concurrent recompilation queues.\n");
  }
}


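// Flush() differs from Stop() below in that it leaves the isolate in a usable
// state: discarded jobs are disposed with restore_function_code == true, so
// the affected functions fall back to their unoptimized code while the
// compiler thread (or task pool) keeps running.

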
void OptimizingCompilerThread::Stop() {
  DCHECK(!IsOptimizerThread());
  bool block = true;
  if (job_based_recompilation_) {
    base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
    block = task_count_ > 0 || blocked_jobs_ > 0;
    if (block) {
      base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
    }
    if (FLAG_block_concurrent_recompilation) Unblock();
  } else {
    base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP));
    if (FLAG_block_concurrent_recompilation) Unblock();
  }
  if (!job_based_recompilation_) input_queue_semaphore_.Signal();
  if (block) stop_semaphore_.Wait();

  if (recompilation_delay_ != 0) {
    // At this point the optimizing compiler thread's event loop has stopped.
    // There is no need for a mutex when reading input_queue_length_.
    while (input_queue_length_ > 0) CompileNext(NextInput());
    InstallOptimizedFunctions();
  } else {
    FlushInputQueue(false);
    FlushOutputQueue(false);
  }

  if (FLAG_concurrent_osr) FlushOsrBuffer(false);

  if (tracing_enabled_) {
    double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
    if (job_based_recompilation_) percentage = 100.0;
    PrintF("  ** Compiler thread did %.2f%% useful work\n", percentage);
  }

  if ((FLAG_trace_osr || tracing_enabled_) && FLAG_concurrent_osr) {
    PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
  }

  Join();
}


void OptimizingCompilerThread::InstallOptimizedFunctions() {
  DCHECK(!IsOptimizerThread());
  HandleScope handle_scope(isolate_);

  OptimizedCompileJob* job;
  while (output_queue_.Dequeue(&job)) {
    CompilationInfo* info = job->info();
    Handle<JSFunction> function(*info->closure());
    if (info->is_osr()) {
      if (FLAG_trace_osr) {
        PrintF("[COSR - ");
        function->ShortPrint();
        PrintF(" is ready for install and entry at AST id %d]\n",
               info->osr_ast_id().ToInt());
      }
      job->WaitForInstall();
      // Remove stack check that guards OSR entry on original code.
      Handle<Code> code = info->unoptimized_code();
      uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
      BackEdgeTable::RemoveStackCheck(code, offset);
    } else {
      if (function->IsOptimized()) {
        if (tracing_enabled_) {
          PrintF("  ** Aborting compilation for ");
          function->ShortPrint();
          PrintF(" as it has already been optimized.\n");
        }
        DisposeOptimizedCompileJob(job, false);
      } else {
        Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
        function->ReplaceCode(
            code.is_null() ? function->shared()->code() : *code);
      }
    }
  }
}


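// OSR jobs are not installed from here: the job is only marked as waiting for
// install and stays in osr_buffer_ until the runtime asks for it again via
// FindReadyOSRCandidate() below and performs the actual on-stack replacement.

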
void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
  DCHECK(IsQueueAvailable());
  DCHECK(!IsOptimizerThread());
  CompilationInfo* info = job->info();
  if (info->is_osr()) {
    osr_attempts_++;
    AddToOsrBuffer(job);
    // Add job to the front of the input queue.
    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    // Move shift_ back by one.
    input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
    input_queue_[InputQueueIndex(0)] = job;
    input_queue_length_++;
  } else {
    // Add job to the back of the input queue.
    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  if (FLAG_block_concurrent_recompilation) {
    blocked_jobs_++;
  } else if (job_based_recompilation_) {
    base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
    ++task_count_;
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
  } else {
    input_queue_semaphore_.Signal();
  }
}


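// Dispatch happens in one of three ways: with
// FLAG_block_concurrent_recompilation the job is merely counted and held back
// until Unblock() releases it; in job-based mode a CompileTask is posted to
// the platform; otherwise the dedicated thread is woken via
// input_queue_semaphore_. OSR jobs additionally go to the front of the input
// queue, presumably because a function spinning in a hot loop is actively
// waiting for the result.

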
void OptimizingCompilerThread::Unblock() {
  DCHECK(!IsOptimizerThread());
  if (job_based_recompilation_) {
    base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
    task_count_ += blocked_jobs_;
  }
  while (blocked_jobs_ > 0) {
    if (job_based_recompilation_) {
      V8::GetCurrentPlatform()->CallOnBackgroundThread(
          new CompileTask(isolate_), v8::Platform::kShortRunningTask);
    } else {
      input_queue_semaphore_.Signal();
    }
    blocked_jobs_--;
  }
}


OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
    Handle<JSFunction> function, BailoutId osr_ast_id) {
  DCHECK(!IsOptimizerThread());
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL &&
        current->IsWaitingForInstall() &&
        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
      osr_hits_++;
      osr_buffer_[i] = NULL;
      return current;
    }
  }
  return NULL;
}


bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
                                              BailoutId osr_ast_id) {
  DCHECK(!IsOptimizerThread());
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL &&
        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
      return !current->IsWaitingForInstall();
    }
  }
  return false;
}


bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
  DCHECK(!IsOptimizerThread());
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL && *current->info()->closure() == function) {
      return !current->IsWaitingForInstall();
    }
  }
  return false;
}


void OptimizingCompilerThread::AddToOsrBuffer(OptimizedCompileJob* job) {
  DCHECK(!IsOptimizerThread());
  // Find the next slot that is empty or has a stale job.
  OptimizedCompileJob* stale = NULL;
  while (true) {
    stale = osr_buffer_[osr_buffer_cursor_];
    if (stale == NULL || stale->IsWaitingForInstall()) break;
    osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
  }

  // Add to found slot and dispose the evicted job.
  if (stale != NULL) {
    DCHECK(stale->IsWaitingForInstall());
    CompilationInfo* info = stale->info();
    if (FLAG_trace_osr) {
      PrintF("[COSR - Discarded ");
      info->closure()->PrintName();
      PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
    }
    DisposeOptimizedCompileJob(stale, false);
  }
  osr_buffer_[osr_buffer_cursor_] = job;
  osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
}


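// osr_buffer_ is a small ring buffer indexed by osr_buffer_cursor_. Slots
// whose jobs are already waiting for install count as stale and may be
// evicted (and disposed) to make room, so an OSR result that is never picked
// up does not occupy its slot forever.

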
bool OptimizingCompilerThread::IsOptimizerThread(Isolate* isolate) {
  return isolate->concurrent_recompilation_enabled() &&
         isolate->optimizing_compiler_thread()->IsOptimizerThread();
}


bool OptimizingCompilerThread::IsOptimizerThread() {
  base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_);
  return ThreadId::Current().ToInteger() == thread_id_;
}

} }  // namespace v8::internal