// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/optimizing-compiler-thread.h"

#include "src/v8.h"

#include "src/base/atomicops.h"
#include "src/full-codegen.h"
#include "src/hydrogen.h"
#include "src/isolate.h"
#include "src/v8threads.h"

namespace v8 {
namespace internal {

namespace {

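// Disposes a compile job that will not be installed. If requested, the
// function's code is restored: non-OSR jobs get their unoptimized code put
// back on the closure, while OSR jobs that are not yet waiting for install
// have the stack check guarding the OSR entry removed instead.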
void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
                                bool restore_function_code) {
  // The recompile job is allocated in the CompilationInfo's zone.
  CompilationInfo* info = job->info();
  if (restore_function_code) {
    if (info->is_osr()) {
      if (!job->IsWaitingForInstall()) {
        // Remove stack check that guards OSR entry on original code.
        Handle<Code> code = info->unoptimized_code();
        uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
        BackEdgeTable::RemoveStackCheck(code, offset);
      }
    } else {
      Handle<JSFunction> function = info->closure();
      function->ReplaceCode(function->shared()->code());
    }
  }
  delete info;
}

}  // namespace

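// A short-running task that performs one unit of concurrent recompilation
// work on a background thread. Used instead of the dedicated optimizing
// compiler thread when job_based_recompilation_ is enabled.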
class OptimizingCompilerThread::CompileTask : public v8::Task {
 public:
  explicit CompileTask(Isolate* isolate) : isolate_(isolate) {}

  virtual ~CompileTask() {}

 private:
  // v8::Task overrides.
  void Run() OVERRIDE {
    DisallowHeapAllocation no_allocation;
    DisallowHandleAllocation no_handles;
    DisallowHandleDereference no_deref;

    OptimizingCompilerThread* thread = isolate_->optimizing_compiler_thread();

    {
      TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);

      if (thread->recompilation_delay_ != 0) {
        base::OS::Sleep(thread->recompilation_delay_);
      }

      StopFlag flag;
      OptimizedCompileJob* job = thread->NextInput(&flag);

      if (flag == CONTINUE) {
        thread->CompileNext(job);
      } else {
        AllowHandleDereference allow_handle_dereference;
        if (!job->info()->is_osr()) {
          DisposeOptimizedCompileJob(job, true);
        }
      }
    }

    bool signal = false;
    {
      base::LockGuard<base::RecursiveMutex> lock(&thread->task_count_mutex_);
      if (--thread->task_count_ == 0) {
        if (static_cast<StopFlag>(base::Acquire_Load(&thread->stop_thread_)) ==
            FLUSH) {
          base::Release_Store(&thread->stop_thread_,
                              static_cast<base::AtomicWord>(CONTINUE));
          signal = true;
        }
      }
    }
    if (signal) thread->stop_semaphore_.Signal();
  }

  Isolate* isolate_;

  DISALLOW_COPY_AND_ASSIGN(CompileTask);
};

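// Stop() must have been called before destruction, so the input queue is
// empty and the OSR buffer holds no live jobs.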
OptimizingCompilerThread::~OptimizingCompilerThread() {
  DCHECK_EQ(0, input_queue_length_);
  DeleteArray(input_queue_);
  if (FLAG_concurrent_osr) {
#ifdef DEBUG
    for (int i = 0; i < osr_buffer_capacity_; i++) {
      CHECK_NULL(osr_buffer_[i]);
    }
#endif
    DeleteArray(osr_buffer_);
  }
}

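// The event loop of the dedicated optimizing compiler thread. In job-based
// recompilation mode the work is done by CompileTask instead, so the loop
// exits immediately.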
void OptimizingCompilerThread::Run() {
#ifdef DEBUG
  { base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_);
    thread_id_ = ThreadId::Current().ToInteger();
  }
#endif
  DisallowHeapAllocation no_allocation;
  DisallowHandleAllocation no_handles;
  DisallowHandleDereference no_deref;

  if (job_based_recompilation_) {
    return;
  }

  base::ElapsedTimer total_timer;
  if (tracing_enabled_) total_timer.Start();

  while (true) {
    input_queue_semaphore_.Wait();
    TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);

    if (recompilation_delay_ != 0) {
      base::OS::Sleep(recompilation_delay_);
    }

    switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) {
      case CONTINUE:
        break;
      case STOP:
        if (tracing_enabled_) {
          time_spent_total_ = total_timer.Elapsed();
        }
        stop_semaphore_.Signal();
        return;
      case FLUSH:
        // The main thread is blocked, waiting for the stop semaphore.
        { AllowHandleDereference allow_handle_dereference;
          FlushInputQueue(true);
        }
        base::Release_Store(&stop_thread_,
                            static_cast<base::AtomicWord>(CONTINUE));
        stop_semaphore_.Signal();
        // Return to start of consumer loop.
        continue;
    }

    base::ElapsedTimer compiling_timer;
    if (tracing_enabled_) compiling_timer.Start();

    CompileNext(NextInput());

    if (tracing_enabled_) {
      time_spent_compiling_ += compiling_timer.Elapsed();
    }
  }
}

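// Dequeues the next job from the circular input queue under the input queue
// mutex. If |flag| is non-NULL it receives the current stop flag, which
// CompileTask uses to decide between compiling and disposing the job.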
OptimizedCompileJob* OptimizingCompilerThread::NextInput(StopFlag* flag) {
  base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
  if (input_queue_length_ == 0) {
    if (flag) {
      UNREACHABLE();
      *flag = CONTINUE;
    }
    return NULL;
  }
  OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
  DCHECK_NOT_NULL(job);
  input_queue_shift_ = InputQueueIndex(1);
  input_queue_length_--;
  if (flag) {
    *flag = static_cast<StopFlag>(base::Acquire_Load(&stop_thread_));
  }
  return job;
}

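// Runs the graph optimization phase of |job| and queues the result for
// installation by the main thread.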
void OptimizingCompilerThread::CompileNext(OptimizedCompileJob* job) {
  DCHECK_NOT_NULL(job);

  // The function may have already been optimized by OSR.  Simply continue.
  OptimizedCompileJob::Status status = job->OptimizeGraph();
  USE(status);   // Prevent an unused-variable error in release mode.
  DCHECK(status != OptimizedCompileJob::FAILED);

  // Use a mutex to make sure that functions marked for install
  // are always also queued.
  if (job_based_recompilation_) output_queue_mutex_.Lock();
  output_queue_.Enqueue(job);
  if (job_based_recompilation_) output_queue_mutex_.Unlock();
  isolate_->stack_guard()->RequestInstallCode();
}

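// Disposes all jobs remaining in the input queue. Only the dedicated thread
// flushes the input queue; in job-based mode the tasks themselves drain it.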
void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
  OptimizedCompileJob* job;
  while ((job = NextInput())) {
    DCHECK(!job_based_recompilation_);
    // This should not block, since we have one signal on the input queue
    // semaphore corresponding to each element in the input queue.
    input_queue_semaphore_.Wait();
    // OSR jobs are dealt with separately.
    if (!job->info()->is_osr()) {
      DisposeOptimizedCompileJob(job, restore_function_code);
    }
  }
}

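// Disposes all jobs that finished compiling but were never installed.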
void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
  base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
  OptimizedCompileJob* job;
  while (output_queue_.Dequeue(&job)) {
    // OSR jobs are dealt with separately.
    if (!job->info()->is_osr()) {
      DisposeOptimizedCompileJob(job, restore_function_code);
    }
  }
}

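// Disposes all jobs held in the OSR buffer and clears its slots.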
void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    if (osr_buffer_[i] != NULL) {
      DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code);
      osr_buffer_[i] = NULL;
    }
  }
}

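// Discards all pending recompilation work without shutting down. Blocks
// until in-flight background work has been handed back before flushing the
// output queue and, with concurrent OSR, the OSR buffer.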
void OptimizingCompilerThread::Flush() {
  DCHECK(!IsOptimizerThread());
  bool block = true;
  if (job_based_recompilation_) {
    base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
    block = task_count_ > 0 || blocked_jobs_ > 0;
    if (block) {
      base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
    }
    if (FLAG_block_concurrent_recompilation) Unblock();
  } else {
    base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
    if (FLAG_block_concurrent_recompilation) Unblock();
  }
  if (!job_based_recompilation_) input_queue_semaphore_.Signal();
  if (block) stop_semaphore_.Wait();
  FlushOutputQueue(true);
  if (FLAG_concurrent_osr) FlushOsrBuffer(true);
  if (tracing_enabled_) {
    PrintF("  ** Flushed concurrent recompilation queues.\n");
  }
}

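// Shuts down concurrent recompilation and joins the thread. With an
// artificial recompilation delay, remaining jobs are compiled and installed;
// otherwise pending work is discarded.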
void OptimizingCompilerThread::Stop() {
  DCHECK(!IsOptimizerThread());
  bool block = true;
  if (job_based_recompilation_) {
    base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
    block = task_count_ > 0 || blocked_jobs_ > 0;
    if (block) {
      base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
    }
    if (FLAG_block_concurrent_recompilation) Unblock();
  } else {
    base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP));
    if (FLAG_block_concurrent_recompilation) Unblock();
  }
  if (!job_based_recompilation_) input_queue_semaphore_.Signal();
  if (block) stop_semaphore_.Wait();

  if (recompilation_delay_ != 0) {
    // At this point the optimizing compiler thread's event loop has stopped.
    // There is no need for a mutex when reading input_queue_length_.
    while (input_queue_length_ > 0) CompileNext(NextInput());
    InstallOptimizedFunctions();
  } else {
    FlushInputQueue(false);
    FlushOutputQueue(false);
  }

  if (FLAG_concurrent_osr) FlushOsrBuffer(false);

  if (tracing_enabled_) {
    double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
    if (job_based_recompilation_) percentage = 100.0;
    PrintF("  ** Compiler thread did %.2f%% useful work\n", percentage);
  }

  if ((FLAG_trace_osr || tracing_enabled_) && FLAG_concurrent_osr) {
    PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
  }

  Join();
}

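// Called from the main thread to install completed jobs. OSR jobs are only
// marked as waiting for install here; they are entered later through
// FindReadyOSRCandidate().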
void OptimizingCompilerThread::InstallOptimizedFunctions() {
  DCHECK(!IsOptimizerThread());
  HandleScope handle_scope(isolate_);

  OptimizedCompileJob* job;
  while (output_queue_.Dequeue(&job)) {
    CompilationInfo* info = job->info();
    Handle<JSFunction> function(*info->closure());
    if (info->is_osr()) {
      if (FLAG_trace_osr) {
        PrintF("[COSR - ");
        function->ShortPrint();
        PrintF(" is ready for install and entry at AST id %d]\n",
               info->osr_ast_id().ToInt());
      }
      job->WaitForInstall();
      // Remove stack check that guards OSR entry on original code.
      Handle<Code> code = info->unoptimized_code();
      uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
      BackEdgeTable::RemoveStackCheck(code, offset);
    } else {
      if (function->IsOptimized()) {
        if (tracing_enabled_) {
          PrintF("  ** Aborting compilation for ");
          function->ShortPrint();
          PrintF(" as it has already been optimized.\n");
        }
        DisposeOptimizedCompileJob(job, false);
      } else {
        Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
        function->ReplaceCode(
            code.is_null() ? function->shared()->code() : *code);
      }
    }
  }
}

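// Queues |job| for concurrent recompilation. OSR jobs go to the front of
// the input queue because the running function is waiting to enter its
// optimized code.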
void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
  DCHECK(IsQueueAvailable());
  DCHECK(!IsOptimizerThread());
  CompilationInfo* info = job->info();
  if (info->is_osr()) {
    osr_attempts_++;
    AddToOsrBuffer(job);
    // Add job to the front of the input queue.
    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    // Move shift_ back by one.
    input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
    input_queue_[InputQueueIndex(0)] = job;
    input_queue_length_++;
  } else {
    // Add job to the back of the input queue.
    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  if (FLAG_block_concurrent_recompilation) {
    blocked_jobs_++;
  } else if (job_based_recompilation_) {
    base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
    ++task_count_;
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
  } else {
    input_queue_semaphore_.Signal();
  }
}

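// Releases jobs held back by FLAG_block_concurrent_recompilation, either by
// spawning one CompileTask per job or by signalling the compiler thread.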
void OptimizingCompilerThread::Unblock() {
  DCHECK(!IsOptimizerThread());
  {
    base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
    task_count_ += blocked_jobs_;
  }
  while (blocked_jobs_ > 0) {
    if (job_based_recompilation_) {
      V8::GetCurrentPlatform()->CallOnBackgroundThread(
          new CompileTask(isolate_), v8::Platform::kShortRunningTask);
    } else {
      input_queue_semaphore_.Signal();
    }
    blocked_jobs_--;
  }
}

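// Returns a finished OSR job matching |function| and |osr_ast_id| and
// removes it from the buffer, or NULL if no such job is ready for install.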
OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
    Handle<JSFunction> function, BailoutId osr_ast_id) {
  DCHECK(!IsOptimizerThread());
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL &&
        current->IsWaitingForInstall() &&
        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
      osr_hits_++;
      osr_buffer_[i] = NULL;
      return current;
    }
  }
  return NULL;
}

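// Returns true if an OSR job for the given entry is queued or compiling,
// but not yet ready for install.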
bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
                                              BailoutId osr_ast_id) {
  DCHECK(!IsOptimizerThread());
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL &&
        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
      return !current->IsWaitingForInstall();
    }
  }
  return false;
}

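// As above, but matches any OSR entry of the given function.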
bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
  DCHECK(!IsOptimizerThread());
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL && *current->info()->closure() == function) {
      return !current->IsWaitingForInstall();
    }
  }
  return false;
}

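// Inserts |job| into the circular OSR buffer, evicting and disposing a
// stale job if the chosen slot is occupied.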
void OptimizingCompilerThread::AddToOsrBuffer(OptimizedCompileJob* job) {
  DCHECK(!IsOptimizerThread());
  // Find the next slot that is empty or has a stale job.
  OptimizedCompileJob* stale = NULL;
  while (true) {
    stale = osr_buffer_[osr_buffer_cursor_];
    if (stale == NULL || stale->IsWaitingForInstall()) break;
    osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
  }

  // Add the job to the found slot and dispose the evicted job, if any.
  if (stale != NULL) {
    DCHECK(stale->IsWaitingForInstall());
    CompilationInfo* info = stale->info();
    if (FLAG_trace_osr) {
      PrintF("[COSR - Discarded ");
      info->closure()->PrintName();
      PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
    }
    DisposeOptimizedCompileJob(stale, false);
  }
  osr_buffer_[osr_buffer_cursor_] = job;
  osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
}


#ifdef DEBUG
bool OptimizingCompilerThread::IsOptimizerThread(Isolate* isolate) {
  return isolate->concurrent_recompilation_enabled() &&
         isolate->optimizing_compiler_thread()->IsOptimizerThread();
}


bool OptimizingCompilerThread::IsOptimizerThread() {
  base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_);
  return ThreadId::Current().ToInteger() == thread_id_;
}
#endif


} }  // namespace v8::internal