deps: update v8 to 4.3.61.21
deps/v8/src/optimizing-compiler-thread.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/optimizing-compiler-thread.h"

#include "src/v8.h"

#include "src/base/atomicops.h"
#include "src/full-codegen.h"
#include "src/hydrogen.h"
#include "src/isolate.h"
#include "src/v8threads.h"

namespace v8 {
namespace internal {

namespace {

void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
                                bool restore_function_code) {
  // The recompile job is allocated in the CompilationInfo's zone.
  CompilationInfo* info = job->info();
  if (restore_function_code) {
    if (info->is_osr()) {
      if (!job->IsWaitingForInstall()) {
        // Remove stack check that guards OSR entry on original code.
        Handle<Code> code = info->unoptimized_code();
        uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
        BackEdgeTable::RemoveStackCheck(code, offset);
      }
    } else {
      Handle<JSFunction> function = info->closure();
      function->ReplaceCode(function->shared()->code());
    }
  }
  delete info;
}

}  // namespace


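// Runs a single recompilation job on the platform's worker thread pool
// when job-based recompilation is enabled. Each task holds a reference
// count on the OptimizingCompilerThread so that Flush() and Stop() can
// wait for all tasks in flight to complete.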
class OptimizingCompilerThread::CompileTask : public v8::Task {
 public:
  explicit CompileTask(Isolate* isolate) : isolate_(isolate) {
    OptimizingCompilerThread* thread = isolate_->optimizing_compiler_thread();
    base::LockGuard<base::Mutex> lock_guard(&thread->ref_count_mutex_);
    ++thread->ref_count_;
  }

  virtual ~CompileTask() {}

 private:
  // v8::Task overrides.
  void Run() OVERRIDE {
    DisallowHeapAllocation no_allocation;
    DisallowHandleAllocation no_handles;
    DisallowHandleDereference no_deref;

    OptimizingCompilerThread* thread = isolate_->optimizing_compiler_thread();
    {
      TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);

      if (thread->recompilation_delay_ != 0) {
        base::OS::Sleep(thread->recompilation_delay_);
      }

      thread->CompileNext(thread->NextInput(true));
    }
    {
      base::LockGuard<base::Mutex> lock_guard(&thread->ref_count_mutex_);
      if (--thread->ref_count_ == 0) {
        thread->ref_count_zero_.NotifyOne();
      }
    }
  }

  Isolate* isolate_;

  DISALLOW_COPY_AND_ASSIGN(CompileTask);
};


OptimizingCompilerThread::~OptimizingCompilerThread() {
#ifdef DEBUG
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    DCHECK_EQ(0, ref_count_);
  }
#endif
  DCHECK_EQ(0, input_queue_length_);
  DeleteArray(input_queue_);
  if (FLAG_concurrent_osr) {
#ifdef DEBUG
    for (int i = 0; i < osr_buffer_capacity_; i++) {
      CHECK_NULL(osr_buffer_[i]);
    }
#endif
    DeleteArray(osr_buffer_);
  }
}


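// Consumer loop of the dedicated optimizing compiler thread. Waits on the
// input queue semaphore, honors STOP and FLUSH requests from the main
// thread, and otherwise compiles the next queued job. Not used when
// job-based recompilation runs jobs as background tasks instead.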
void OptimizingCompilerThread::Run() {
#ifdef DEBUG
  { base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_);
    thread_id_ = ThreadId::Current().ToInteger();
  }
#endif
  DisallowHeapAllocation no_allocation;
  DisallowHandleAllocation no_handles;
  DisallowHandleDereference no_deref;

  if (job_based_recompilation_) {
    return;
  }

  base::ElapsedTimer total_timer;
  if (tracing_enabled_) total_timer.Start();

  while (true) {
    input_queue_semaphore_.Wait();
    TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);

    if (recompilation_delay_ != 0) {
      base::OS::Sleep(recompilation_delay_);
    }

    switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) {
      case CONTINUE:
        break;
      case STOP:
        if (tracing_enabled_) {
          time_spent_total_ = total_timer.Elapsed();
        }
        stop_semaphore_.Signal();
        return;
      case FLUSH:
        // The main thread is blocked, waiting for the stop semaphore.
        { AllowHandleDereference allow_handle_dereference;
          FlushInputQueue(true);
        }
        base::Release_Store(&stop_thread_,
                            static_cast<base::AtomicWord>(CONTINUE));
        stop_semaphore_.Signal();
        // Return to start of consumer loop.
        continue;
    }

    base::ElapsedTimer compiling_timer;
    if (tracing_enabled_) compiling_timer.Start();

    CompileNext(NextInput());

    if (tracing_enabled_) {
      time_spent_compiling_ += compiling_timer.Elapsed();
    }
  }
}


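// Dequeues the next job from the circular input queue, or returns NULL if
// the queue is empty. With check_if_flushing set, a pending FLUSH or STOP
// request makes this dispose of non-OSR jobs instead of handing them out;
// OSR jobs are left for FlushOsrBuffer() to clean up.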
OptimizedCompileJob* OptimizingCompilerThread::NextInput(
    bool check_if_flushing) {
  base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
  if (input_queue_length_ == 0) return NULL;
  OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
  DCHECK_NOT_NULL(job);
  input_queue_shift_ = InputQueueIndex(1);
  input_queue_length_--;
  if (check_if_flushing) {
    if (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_)) != CONTINUE) {
      if (!job->info()->is_osr()) {
        AllowHandleDereference allow_handle_dereference;
        DisposeOptimizedCompileJob(job, true);
      }
      return NULL;
    }
  }
  return job;
}


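// Runs the optimization phase of the job off the main thread, enqueues the
// result on the output queue, and requests that the main thread install
// the generated code at its next stack guard check.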
void OptimizingCompilerThread::CompileNext(OptimizedCompileJob* job) {
  if (!job) return;

  // The function may have already been optimized by OSR.  Simply continue.
  OptimizedCompileJob::Status status = job->OptimizeGraph();
  USE(status);   // Prevent an unused-variable error in release mode.
  DCHECK(status != OptimizedCompileJob::FAILED);

  // Use a mutex to make sure that functions marked for install
  // are always also queued.
  if (job_based_recompilation_) output_queue_mutex_.Lock();
  output_queue_.Enqueue(job);
  if (job_based_recompilation_) output_queue_mutex_.Unlock();
  isolate_->stack_guard()->RequestInstallCode();
}


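// Drains the input queue, disposing of every non-OSR job. Only used by the
// dedicated compiler thread; job-based recompilation drains the queue
// through NextInput(true) instead.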
void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
  OptimizedCompileJob* job;
  while ((job = NextInput())) {
    DCHECK(!job_based_recompilation_);
    // This should not block, since we have one signal on the input queue
    // semaphore corresponding to each element in the input queue.
    input_queue_semaphore_.Wait();
    // OSR jobs are dealt with separately.
    if (!job->info()->is_osr()) {
      DisposeOptimizedCompileJob(job, restore_function_code);
    }
  }
}


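// Drains the output queue of finished jobs without installing their code,
// disposing of every non-OSR job.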
void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
  OptimizedCompileJob* job;
  while (output_queue_.Dequeue(&job)) {
    // OSR jobs are dealt with separately.
    if (!job->info()->is_osr()) {
      DisposeOptimizedCompileJob(job, restore_function_code);
    }
  }
}


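// Disposes of every OSR job still held in the OSR buffer and clears the
// corresponding slots.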
void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    if (osr_buffer_[i] != NULL) {
      DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code);
      osr_buffer_[i] = NULL;
    }
  }
}


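// Called on the main thread to discard all pending and finished jobs while
// keeping concurrent recompilation running, restoring the unoptimized code
// of the affected functions. Blocks until the compiler thread (or all
// background tasks) have acknowledged the flush.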
void OptimizingCompilerThread::Flush() {
  DCHECK(!IsOptimizerThread());
  base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  if (!job_based_recompilation_) {
    input_queue_semaphore_.Signal();
    stop_semaphore_.Wait();
  } else {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(CONTINUE));
  }
  FlushOutputQueue(true);
  if (FLAG_concurrent_osr) FlushOsrBuffer(true);
  if (tracing_enabled_) {
    PrintF("  ** Flushed concurrent recompilation queues.\n");
  }
}


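// Called on the main thread to shut concurrent recompilation down. Stops
// the compiler thread (or waits for all background tasks), then either
// finishes or discards whatever is left in the queues, and finally joins
// the thread.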
void OptimizingCompilerThread::Stop() {
  DCHECK(!IsOptimizerThread());
  base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP));
  if (FLAG_block_concurrent_recompilation) Unblock();
  if (!job_based_recompilation_) {
    input_queue_semaphore_.Signal();
    stop_semaphore_.Wait();
  } else {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(CONTINUE));
  }

  if (recompilation_delay_ != 0) {
    // At this point the optimizing compiler thread's event loop has stopped.
    // There is no need for a mutex when reading input_queue_length_.
    while (input_queue_length_ > 0) CompileNext(NextInput());
    InstallOptimizedFunctions();
  } else {
    FlushInputQueue(false);
    FlushOutputQueue(false);
  }

  if (FLAG_concurrent_osr) FlushOsrBuffer(false);

  if (tracing_enabled_) {
    double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
    if (job_based_recompilation_) percentage = 100.0;
    PrintF("  ** Compiler thread did %.2f%% useful work\n", percentage);
  }

  if ((FLAG_trace_osr || tracing_enabled_) && FLAG_concurrent_osr) {
    PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
  }

  Join();
}


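// Called on the main thread to dequeue finished jobs and install their
// code. OSR jobs are only marked as waiting for install and have their
// back-edge stack check removed; they are picked up later through
// FindReadyOSRCandidate(). Jobs whose function was optimized in the
// meantime are discarded.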
void OptimizingCompilerThread::InstallOptimizedFunctions() {
  DCHECK(!IsOptimizerThread());
  HandleScope handle_scope(isolate_);

  OptimizedCompileJob* job;
  while (output_queue_.Dequeue(&job)) {
    CompilationInfo* info = job->info();
    Handle<JSFunction> function(*info->closure());
    if (info->is_osr()) {
      if (FLAG_trace_osr) {
        PrintF("[COSR - ");
        function->ShortPrint();
        PrintF(" is ready for install and entry at AST id %d]\n",
               info->osr_ast_id().ToInt());
      }
      job->WaitForInstall();
      // Remove stack check that guards OSR entry on original code.
      Handle<Code> code = info->unoptimized_code();
      uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
      BackEdgeTable::RemoveStackCheck(code, offset);
    } else {
      if (function->IsOptimized()) {
        if (tracing_enabled_) {
          PrintF("  ** Aborting compilation for ");
          function->ShortPrint();
          PrintF(" as it has already been optimized.\n");
        }
        DisposeOptimizedCompileJob(job, false);
      } else {
        Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
        function->ReplaceCode(
            code.is_null() ? function->shared()->code() : *code);
      }
    }
  }
}


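// Called on the main thread to queue a new recompilation job. OSR jobs go
// to the front of the input queue and into the OSR buffer; other jobs go
// to the back. Depending on the configuration, the job is then handed to
// the compiler thread, posted as a background task, or held back until
// Unblock() is called.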
void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
  DCHECK(IsQueueAvailable());
  DCHECK(!IsOptimizerThread());
  CompilationInfo* info = job->info();
  if (info->is_osr()) {
    osr_attempts_++;
    AddToOsrBuffer(job);
    // Add job to the front of the input queue.
    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    // Move shift_ back by one.
    input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
    input_queue_[InputQueueIndex(0)] = job;
    input_queue_length_++;
  } else {
    // Add job to the back of the input queue.
    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  if (FLAG_block_concurrent_recompilation) {
    blocked_jobs_++;
  } else if (job_based_recompilation_) {
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
  } else {
    input_queue_semaphore_.Signal();
  }
}


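// Releases jobs held back by --block-concurrent-recompilation, one
// semaphore signal or background task per blocked job.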
void OptimizingCompilerThread::Unblock() {
  DCHECK(!IsOptimizerThread());
  while (blocked_jobs_ > 0) {
    if (job_based_recompilation_) {
      V8::GetCurrentPlatform()->CallOnBackgroundThread(
          new CompileTask(isolate_), v8::Platform::kShortRunningTask);
    } else {
      input_queue_semaphore_.Signal();
    }
    blocked_jobs_--;
  }
}


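// Returns a finished OSR job matching the given function and OSR entry,
// removing it from the OSR buffer, or NULL if no such job is ready.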
OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
    Handle<JSFunction> function, BailoutId osr_ast_id) {
  DCHECK(!IsOptimizerThread());
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL &&
        current->IsWaitingForInstall() &&
        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
      osr_hits_++;
      osr_buffer_[i] = NULL;
      return current;
    }
  }
  return NULL;
}


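// The following two predicates report whether an OSR job for the given
// function is still in flight, i.e. buffered but not yet waiting for
// install.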
bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
                                              BailoutId osr_ast_id) {
  DCHECK(!IsOptimizerThread());
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL &&
        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
      return !current->IsWaitingForInstall();
    }
  }
  return false;
}


bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
  DCHECK(!IsOptimizerThread());
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL && *current->info()->closure() == function) {
      return !current->IsWaitingForInstall();
    }
  }
  return false;
}


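// Inserts an OSR job into the circular OSR buffer. Scans forward from the
// cursor for a slot that is empty or holds a stale job (one that finished
// compiling but was never entered) and evicts the stale job if necessary.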
void OptimizingCompilerThread::AddToOsrBuffer(OptimizedCompileJob* job) {
  DCHECK(!IsOptimizerThread());
  // Find the next slot that is empty or has a stale job.
  OptimizedCompileJob* stale = NULL;
  while (true) {
    stale = osr_buffer_[osr_buffer_cursor_];
    if (stale == NULL || stale->IsWaitingForInstall()) break;
    osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
  }

  // Add to the found slot and dispose of the evicted job.
  if (stale != NULL) {
    DCHECK(stale->IsWaitingForInstall());
    CompilationInfo* info = stale->info();
    if (FLAG_trace_osr) {
      PrintF("[COSR - Discarded ");
      info->closure()->PrintName();
      PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
    }
    DisposeOptimizedCompileJob(stale, false);
  }
  osr_buffer_[osr_buffer_cursor_] = job;
  osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
}


#ifdef DEBUG
bool OptimizingCompilerThread::IsOptimizerThread(Isolate* isolate) {
  return isolate->concurrent_recompilation_enabled() &&
         isolate->optimizing_compiler_thread()->IsOptimizerThread();
}


bool OptimizingCompilerThread::IsOptimizerThread() {
  base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_);
  return ThreadId::Current().ToInteger() == thread_id_;
}
#endif


} }  // namespace v8::internal