1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include "optimizing-compiler-thread.h"
34 #include "v8threads.h"
// Body of the background ("parallel recompilation") thread: a consumer
// loop that pulls compilation jobs off input_queue_ and runs the
// backend optimization for each.
// NOTE(review): this view of the file elides interior lines (the loop
// header, the switch's case labels, closing braces); comments below
// describe only the code that is visible here.
40 void OptimizingCompilerThread::Run() {
// Publish this thread's id under thread_id_mutex_ so that
// IsOptimizerThread() can compare against it from any thread.
42 { ScopedLock lock(thread_id_mutex_);
43 thread_id_ = ThreadId::Current().ToInteger();
// Associate the isolate with this thread's TLS (second argument NULL —
// presumably no PerIsolateThreadData yet; confirm against
// Isolate::SetIsolateThreadLocals).
46 Isolate::SetIsolateThreadLocals(isolate_, NULL);
// Scope guards: while this thread runs concurrently with the main
// thread it must not allocate on the V8 heap, create handles, or
// dereference handles.
47 DisallowHeapAllocation no_allocation;
48 DisallowHandleAllocation no_handles;
49 DisallowHandleDereference no_deref;
// Epoch for the "% useful work" statistic printed in Stop().
52 if (FLAG_trace_parallel_recompilation) epoch = OS::Ticks();
// Block until the main thread either enqueues a job (one Signal() per
// queued element, see QueueForOptimization) or requests a stop/flush.
55 input_queue_semaphore_->Wait();
56 Logger::TimerEventScope timer(
57 isolate_, Logger::TimerEventScope::v8_recompile_parallel);
// Artificial delay, used only for testing (see the matching flag check
// in Stop()).
59 if (FLAG_parallel_recompilation_delay != 0) {
60 OS::Sleep(FLAG_parallel_recompilation_delay);
// Acquire_Load pairs with the Release_Store of stop_thread_ performed
// by Flush()/Stop() on the main thread. Case labels are elided in
// this view; the three paths below appear to be STOP, FLUSH, and the
// normal CONTINUE path — TODO confirm against the full file.
63 switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) {
// (Stop path) Record total thread lifetime for tracing, then wake the
// main thread blocked in Stop() on stop_semaphore_.
67 if (FLAG_trace_parallel_recompilation) {
68 time_spent_total_ = OS::Ticks() - epoch;
70 stop_semaphore_->Signal();
// (Flush path) Safe to touch handles here only because the main thread
// is parked on the stop semaphore and cannot race us.
73 // The main thread is blocked, waiting for the stop semaphore.
74 { AllowHandleDereference allow_handle_dereference;
75 FlushInputQueue(true);
// Reset the queue length and re-arm the flag so the loop resumes
// normal consumption; then wake the main thread blocked in Flush().
77 Release_Store(&queue_length_, static_cast<AtomicWord>(0));
78 Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
79 stop_semaphore_->Signal();
80 // Return to start of consumer loop.
// (Continue path) Time the actual compilation work; CompileNext() is
// presumably invoked between these two timing stanzas (elided here).
84 int64_t compiling_start = 0;
85 if (FLAG_trace_parallel_recompilation) compiling_start = OS::Ticks();
89 if (FLAG_trace_parallel_recompilation) {
90 time_spent_compiling_ += OS::Ticks() - compiling_start;
// Dequeue one job, run the optimizing backend on it, and hand the
// result to the main thread via output_queue_. Runs on the compiler
// thread (and, in the testing-delay path of Stop(), on the main thread).
96 void OptimizingCompilerThread::CompileNext() {
97 OptimizingCompiler* optimizing_compiler = NULL;
// `result` is not used by any visible line — its ASSERT is presumably
// among the elided lines; TODO confirm against the full file.
98 bool result = input_queue_.Dequeue(&optimizing_compiler);
// Decrement with a barrier so main-thread readers of queue_length_
// (e.g. the spin in Stop()) observe the update.
101 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
103 // The function may have already been optimized by OSR. Simply continue.
104 OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
105 USE(status); // Prevent an unused-variable error in release mode.
106 ASSERT(status != OptimizingCompiler::FAILED);
108 // The function may have already been optimized by OSR. Simply continue.
109 // Use a mutex to make sure that functions marked for install
110 // are always also queued.
// This lock pairs with the one taken in InstallOptimizedFunctions():
// marking the closure and enqueueing the result happen atomically with
// respect to the main thread's dequeue.
111 ScopedLock mark_and_queue(install_mutex_);
112 { Heap::RelocationLock relocation_lock(isolate_->heap());
// Handle dereference is normally forbidden on this thread (see Run());
// explicitly re-allowed for the marking call below.
113 AllowHandleDereference ahd;
114 optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
116 output_queue_.Enqueue(optimizing_compiler);
// Drain every pending job from the input queue. If
// restore_function_code is true, each affected function is pointed back
// at its shared (unoptimized) code, discarding the queued optimization.
// (The cleanup of `info` and the closing braces are elided in this view.)
120 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
121 OptimizingCompiler* optimizing_compiler;
122 // The optimizing compiler is allocated in the CompilationInfo's zone.
123 while (input_queue_.Dequeue(&optimizing_compiler)) {
124 // This should not block, since we have one signal on the input queue
125 // semaphore corresponding to each element in the input queue.
126 input_queue_semaphore_->Wait();
127 CompilationInfo* info = optimizing_compiler->info();
128 if (restore_function_code) {
129 Handle<JSFunction> function = info->closure();
// Abandon the in-flight optimization: reinstall the unoptimized code
// from the SharedFunctionInfo.
130 function->ReplaceCode(function->shared()->code());
// Drain every finished-but-uninstalled job from the output queue —
// the mirror of FlushInputQueue(), minus the semaphore Wait() because
// output_queue_ entries never signaled input_queue_semaphore_.
// (Cleanup of `info` and closing braces are elided in this view.)
137 void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
138 OptimizingCompiler* optimizing_compiler;
139 // The optimizing compiler is allocated in the CompilationInfo's zone.
140 while (output_queue_.Dequeue(&optimizing_compiler)) {
141 CompilationInfo* info = optimizing_compiler->info();
142 if (restore_function_code) {
143 Handle<JSFunction> function = info->closure();
// Discard the compiled-but-uninstalled result; fall back to the
// function's shared (unoptimized) code.
144 function->ReplaceCode(function->shared()->code());
// Main-thread request to discard all queued and finished compilation
// jobs without stopping the compiler thread. Blocks until the compiler
// thread has acknowledged the flush via stop_semaphore_.
151 void OptimizingCompilerThread::Flush() {
152 ASSERT(!IsOptimizerThread());
// Release_Store pairs with the Acquire_Load of stop_thread_ in Run().
153 Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
// Wake the compiler thread in case it is blocked on an empty queue.
154 input_queue_semaphore_->Signal();
// Wait until Run() has drained the input queue and signaled back.
155 stop_semaphore_->Wait();
// Finished jobs are discarded here on the main thread; `true` restores
// each function's unoptimized code.
156 FlushOutputQueue(true);
// Main-thread shutdown: ask the compiler thread to stop, wait for the
// acknowledgement, then dispose of whatever is left in the queues.
// (The Join() and closing braces are elided in this view.)
160 void OptimizingCompilerThread::Stop() {
161 ASSERT(!IsOptimizerThread());
// Pairs with the Acquire_Load in Run(); Signal() wakes the thread if it
// is blocked on an empty input queue.
162 Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
163 input_queue_semaphore_->Signal();
164 stop_semaphore_->Wait();
166 if (FLAG_parallel_recompilation_delay != 0) {
167 // Barrier when loading queue length is not necessary since the write
168 // happens in CompileNext on the same thread.
169 // This is used only for testing.
// Test mode: finish the remaining jobs on this (main) thread and
// install them, instead of throwing them away.
170 while (NoBarrier_Load(&queue_length_) > 0) CompileNext();
171 InstallOptimizedFunctions();
// Normal mode (presumably the else-branch; braces elided): drop all
// remaining work without restoring code — TODO confirm against full file.
173 FlushInputQueue(false);
174 FlushOutputQueue(false);
177 if (FLAG_trace_parallel_recompilation) {
178 double compile_time = static_cast<double>(time_spent_compiling_);
179 double total_time = static_cast<double>(time_spent_total_);
// NOTE(review): if the thread never recorded time_spent_total_ this
// divides by zero, printing inf/nan — harmless for a trace flag but
// worth confirming.
180 double percentage = (compile_time * 100) / total_time;
181 PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage);
// Main-thread drain of output_queue_: install each finished compilation
// result. The enclosing loop construct is elided in this view; the
// early `return` on an empty dequeue is what terminates it.
188 void OptimizingCompilerThread::InstallOptimizedFunctions() {
189 ASSERT(!IsOptimizerThread());
190 HandleScope handle_scope(isolate_);
191 OptimizingCompiler* compiler;
193 { // Memory barrier to ensure marked functions are queued.
// Pairs with the mark_and_queue lock in CompileNext(): a closure marked
// for install is guaranteed to be visible in output_queue_ here.
194 ScopedLock marked_and_queued(install_mutex_);
195 if (!output_queue_.Dequeue(&compiler)) return;
// Install outside the lock — only the dequeue needs to be synchronized
// with the compiler thread.
197 Compiler::InstallOptimizedCode(compiler);
// Main-thread producer: hand a compilation job to the compiler thread.
// Caller must have checked IsQueueAvailable() first (asserted below).
202 void OptimizingCompilerThread::QueueForOptimization(
203 OptimizingCompiler* optimizing_compiler) {
204 ASSERT(IsQueueAvailable());
205 ASSERT(!IsOptimizerThread());
// Barrier increment pairs with the barrier decrement in CompileNext().
206 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
// Mark the closure before enqueueing so its state reflects "queued".
207 optimizing_compiler->info()->closure()->MarkInRecompileQueue();
208 input_queue_.Enqueue(optimizing_compiler);
// One Signal() per element — FlushInputQueue() and Run() rely on this
// exact pairing of signals and queue entries.
209 input_queue_semaphore_->Signal();
// Returns true iff the calling thread is the background compiler
// thread. Always false when parallel recompilation is disabled (the
// thread id was never recorded in that case).
214 bool OptimizingCompilerThread::IsOptimizerThread() {
215 if (!FLAG_parallel_recompilation) return false;
// thread_id_ is written under the same mutex in Run(); the lock makes
// the read race-free even before the thread has started.
216 ScopedLock lock(thread_id_mutex_);
217 return ThreadId::Current().ToInteger() == thread_id_;
222 } } // namespace v8::internal