1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include "optimizing-compiler-thread.h"
34 #include "v8threads.h"
// Entry point of the background recompilation thread.  Records this
// thread's id (so IsOptimizerThread() can identify it), detaches the
// isolate from per-thread data, and then services the input queue:
// block on the semaphore, honor a stop request, otherwise compile the
// next queued function.  Timing is collected only when
// --trace-parallel-recompilation is on.
40 void OptimizingCompilerThread::Run() {
42 thread_id_ = ThreadId::Current().ToInteger();
// This thread must not be treated as the isolate's main thread;
// clear the isolate's thread-local slot for it.
44 Isolate::SetIsolateThreadLocals(isolate_, NULL);
// 'epoch' marks thread start so total lifetime can be reported on stop.
47 if (FLAG_trace_parallel_recompilation) epoch = OS::Ticks();
// Block until either work is queued or Stop() signals us.
50 input_queue_semaphore_->Wait();
51 Logger::TimerEventScope timer(
52 isolate_, Logger::TimerEventScope::v8_recompile_parallel);
// Artificial delay, presumably for testing/staging the race with the
// main thread -- see the matching drain loop in Stop().
54 if (FLAG_parallel_recompilation_delay != 0) {
55 OS::Sleep(FLAG_parallel_recompilation_delay);
// Acquire-load pairs with the Release_Store in Stop(); on a stop
// request, acknowledge via stop_semaphore_ and record total time.
58 if (Acquire_Load(&stop_thread_)) {
59 stop_semaphore_->Signal();
60 if (FLAG_trace_parallel_recompilation) {
61 time_spent_total_ = OS::Ticks() - epoch;
// Time the actual compilation step separately from queue waiting.
66 int64_t compiling_start = 0;
67 if (FLAG_trace_parallel_recompilation) compiling_start = OS::Ticks();
71 if (FLAG_trace_parallel_recompilation) {
72 time_spent_compiling_ += OS::Ticks() - compiling_start;
// Pops one OptimizingCompiler job off the input queue, runs the
// (thread-safe) graph-optimization phase on this background thread,
// and enqueues the result on the output queue for the main thread to
// install via InstallOptimizedFunctions().
78 void OptimizingCompilerThread::CompileNext() {
79 OptimizingCompiler* optimizing_compiler = NULL;
80 input_queue_.Dequeue(&optimizing_compiler);
// Decrement the shared queue length with a barrier so the main
// thread's reads in Stop()/IsQueueAvailable() observe it.
81 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
83 // The function may have already been optimized by OSR. Simply continue.
84 OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
85 USE(status); // Prevent an unused-variable error in release mode.
86 ASSERT(status != OptimizingCompiler::FAILED);
89 // Mark it for installing before queuing so that we can be sure of the write
90 // order: marking first and (after being queued) installing code second.
// The relocation lock keeps the heap from moving the closure while we
// flip its mark -- TODO(review): confirm against Heap::RelocationLock.
91 { Heap::RelocationLock relocation_lock(isolate_->heap());
92 optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
94 output_queue_.Enqueue(optimizing_compiler);
98 void OptimizingCompilerThread::Stop() {
99 ASSERT(!IsOptimizerThread());
100 Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
101 input_queue_semaphore_->Signal();
102 stop_semaphore_->Wait();
104 if (FLAG_parallel_recompilation_delay != 0) {
105 InstallOptimizedFunctions();
106 // Barrier when loading queue length is not necessary since the write
107 // happens in CompileNext on the same thread.
108 while (NoBarrier_Load(&queue_length_) > 0) {
110 InstallOptimizedFunctions();
114 if (FLAG_trace_parallel_recompilation) {
115 double compile_time = static_cast<double>(time_spent_compiling_);
116 double total_time = static_cast<double>(time_spent_total_);
117 double percentage = (compile_time * 100) / total_time;
118 PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage);
// Drains the output queue on the main thread, installing the code
// produced by the background thread for each completed job.
123 void OptimizingCompilerThread::InstallOptimizedFunctions() {
124 ASSERT(!IsOptimizerThread());
// Handle scope covers the handles created during code installation.
125 HandleScope handle_scope(isolate_);
// Presumably consumed by tracing code not visible in this excerpt --
// TODO(review): confirm functions_installed is used below.
126 int functions_installed = 0;
127 while (!output_queue_.IsEmpty()) {
128 OptimizingCompiler* compiler;
129 output_queue_.Dequeue(&compiler);
130 Compiler::InstallOptimizedCode(compiler);
131 functions_installed++;
// Hands a compilation job to the background thread; must be called
// from the main thread with spare queue capacity.  The closure is
// marked as queued *before* the job is enqueued, and the semaphore is
// signaled last so the worker only wakes once the job is visible.
136 void OptimizingCompilerThread::QueueForOptimization(
137 OptimizingCompiler* optimizing_compiler) {
138 ASSERT(IsQueueAvailable());
139 ASSERT(!IsOptimizerThread());
// Barrier pairs with the decrement in CompileNext on the worker thread.
140 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
141 optimizing_compiler->info()->closure()->MarkInRecompileQueue();
142 input_queue_.Enqueue(optimizing_compiler);
143 input_queue_semaphore_->Signal();
// Returns true iff the calling thread is the background recompilation
// thread (always false when parallel recompilation is disabled).
// Compares against thread_id_, which Run() records at thread start.
148 bool OptimizingCompilerThread::IsOptimizerThread() {
149 if (!FLAG_parallel_recompilation) return false;
150 return ThreadId::Current().ToInteger() == thread_id_;
155 } } // namespace v8::internal