1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_OPTIMIZING_COMPILER_THREAD_H_
6 #define V8_OPTIMIZING_COMPILER_THREAD_H_
8 #include "src/base/atomicops.h"
9 #include "src/base/platform/mutex.h"
10 #include "src/base/platform/platform.h"
11 #include "src/base/platform/time.h"
12 #include "src/flags.h"
14 #include "src/unbound-queue-inl.h"
19 class HOptimizedGraphBuilder;
20 class OptimizedCompileJob;
21 class SharedFunctionInfo;
// Background thread driving concurrent (optimizing) recompilation.
// Incoming OptimizedCompileJobs arrive on a circular input queue; completed
// jobs end up on an output queue from which the main thread installs the
// optimized code. OSR jobs are additionally tracked in a cyclic buffer so
// that stale ones can be aged out (see AgeBufferedOsrJobs).
// NOTE(review): this excerpt elides several original lines (access
// specifiers, some member initializers, method bodies/closing braces and
// the thread entry point) — comments below describe only what is visible.
23 class OptimizingCompilerThread : public base::Thread {
// Sizes the input queue and (when --concurrent-osr is on) the OSR buffer
// from the recompilation-queue-length flag, and snapshots the flag values
// that the background thread must not read directly (see the flag-copy
// members at the bottom of the class).
25 explicit OptimizingCompilerThread(Isolate* isolate)
26 : Thread(Options("OptimizingCompilerThread")),
32 input_queue_semaphore_(0),
33 input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
34 input_queue_length_(0),
35 input_queue_shift_(0),
36 osr_buffer_capacity_(FLAG_concurrent_recompilation_queue_length + 4),
37 osr_buffer_cursor_(0),
42 tracing_enabled_(FLAG_trace_concurrent_recompilation),
43 job_based_recompilation_(FLAG_job_based_recompilation),
44 recompilation_delay_(FLAG_concurrent_recompilation_delay) {
// Start in the CONTINUE state; stop_thread_ is an atomic word so the
// background thread can poll it without holding a lock.
45 base::NoBarrier_Store(&stop_thread_,
46 static_cast<base::AtomicWord>(CONTINUE));
47 input_queue_ = NewArray<OptimizedCompileJob*>(input_queue_capacity_);
48 if (FLAG_concurrent_osr) {
49 // Allocate and mark OSR buffer slots as empty.
50 osr_buffer_ = NewArray<OptimizedCompileJob*>(osr_buffer_capacity_);
51 for (int i = 0; i < osr_buffer_capacity_; i++) osr_buffer_[i] = NULL;
55 ~OptimizingCompilerThread();
// Hands a compile job to the background thread (presumably by appending
// to input_queue_ — TODO(review): confirm against the .cc file).
60 void QueueForOptimization(OptimizedCompileJob* optimizing_compiler);
// Installs the code of finished jobs; called on the main thread, which is
// why finished jobs live on a separate output queue.
62 void InstallOptimizedFunctions();
// Looks up a buffered OSR job matching (function, osr_ast_id); returns
// NULL-or-job semantics per the pointer return — verify in the .cc file.
63 OptimizedCompileJob* FindReadyOSRCandidate(Handle<JSFunction> function,
64 BailoutId osr_ast_id);
65 bool IsQueuedForOSR(Handle<JSFunction> function, BailoutId osr_ast_id);
// Overload that matches any OSR entry for the given function.
67 bool IsQueuedForOSR(JSFunction* function);
// True iff the circular input queue still has a free slot. Takes the
// queue mutex because length is also mutated from other threads.
69 inline bool IsQueueAvailable() {
70 base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
71 return input_queue_length_ < input_queue_capacity_;
74 inline void AgeBufferedOsrJobs() {
75 // Advance cursor of the cyclic buffer to next empty slot or stale OSR job.
76 // Dispose said OSR job in the latter case. Calling this on every GC
77 // should make sure that we do not hold onto stale jobs indefinitely.
// Concurrent recompilation requires the flag plus at least one spare
// thread beyond the main thread.
81 static bool Enabled(int max_available) {
82 return (FLAG_concurrent_recompilation && max_available > 1);
86 static bool IsOptimizerThread(Isolate* isolate);
87 bool IsOptimizerThread();
// Commands delivered to the background thread via stop_thread_.
93 enum StopFlag { CONTINUE, STOP, FLUSH };
// restore_function_code: when true, flushed functions get their
// unoptimized code reinstated (inferred from the name — confirm in .cc).
95 void FlushInputQueue(bool restore_function_code);
96 void FlushOutputQueue(bool restore_function_code);
97 void FlushOsrBuffer(bool restore_function_code);
98 void CompileNext(OptimizedCompileJob* job);
// Pops the next job from the input queue; NULL flag means the caller does
// not care about the current StopFlag.
99 OptimizedCompileJob* NextInput(StopFlag* flag = NULL);
101 // Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry.
102 // Tasks evicted from the cyclic buffer are discarded.
103 void AddToOsrBuffer(OptimizedCompileJob* compiler);
// Maps logical queue position i to a physical slot of the circular
// input queue: the queue head is offset by input_queue_shift_.
105 inline int InputQueueIndex(int i) {
106 int result = (i + input_queue_shift_) % input_queue_capacity_;
107 DCHECK_LE(0, result);
108 DCHECK_LT(result, input_queue_capacity_);
114 base::Mutex thread_id_mutex_;
118 base::Semaphore stop_semaphore_;
// Signals the background thread that a new input-queue entry is ready.
119 base::Semaphore input_queue_semaphore_;
121 // Circular queue of incoming recompilation tasks (including OSR).
// input_queue_shift_ is the index of the logical head inside the
// physical array (see InputQueueIndex); length/shift are guarded by
// input_queue_mutex_ (see IsQueueAvailable).
122 OptimizedCompileJob** input_queue_;
123 int input_queue_capacity_;
124 int input_queue_length_;
125 int input_queue_shift_;
126 base::Mutex input_queue_mutex_;
128 // Queue of recompilation tasks ready to be installed (excluding OSR).
129 UnboundQueue<OptimizedCompileJob*> output_queue_;
130 // Used for job based recompilation which has multiple producers on
131 // different threads.
132 base::Mutex output_queue_mutex_;
134 // Cyclic buffer of recompilation tasks for OSR.
// Only allocated when FLAG_concurrent_osr is set (see constructor);
// slots hold NULL when empty.
135 OptimizedCompileJob** osr_buffer_;
136 int osr_buffer_capacity_;
137 int osr_buffer_cursor_;
// Holds a StopFlag value; written with base::NoBarrier_Store in the
// constructor and polled by the background thread.
139 volatile base::AtomicWord stop_thread_;
140 base::TimeDelta time_spent_compiling_;
141 base::TimeDelta time_spent_total_;
144 // TODO(jochen): This is currently a RecursiveMutex since both Flush/Stop and
145 // Unblock try to get it, but the former methods both can call Unblock. Once
146 // job based recompilation is on by default, and the dedicated thread can be
147 // removed, this should be refactored to not use a RecursiveMutex.
148 base::RecursiveMutex task_count_mutex_;
155 // Copies of FLAG_trace_concurrent_recompilation,
156 // FLAG_concurrent_recompilation_delay and
157 // FLAG_job_based_recompilation that will be used from the background thread.
159 // Since flags might get modified while the background thread is running, it
160 // is not safe to access them directly.
161 bool tracing_enabled_;
162 bool job_based_recompilation_;
163 int recompilation_delay_;
166 } } // namespace v8::internal
168 #endif // V8_OPTIMIZING_COMPILER_THREAD_H_