// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "optimizing-compiler-thread.h"

#include "v8.h"

#include "hydrogen.h"
#include "isolate.h"
#include "v8threads.h"

namespace v8 {
namespace internal {

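// Main loop of the optimizing compiler thread. Each iteration waits for a
// job to be signalled on the input queue, optionally sleeps for an
// artificial testing delay, checks the stop flag set by Stop(), and
// otherwise compiles the next queued job, accumulating timing statistics
// when tracing is enabled.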
void OptimizingCompilerThread::Run() {
  thread_id_ = ThreadId::Current().ToInteger();
  Isolate::SetIsolateThreadLocals(isolate_, NULL);
  DisallowHeapAllocation no_allocation;
  DisallowHandleAllocation no_handles;
  DisallowHandleDereference no_deref;

  int64_t epoch = 0;
  if (FLAG_trace_parallel_recompilation) epoch = OS::Ticks();

  while (true) {
    input_queue_semaphore_->Wait();
    Logger::TimerEventScope timer(
        isolate_, Logger::TimerEventScope::v8_recompile_parallel);

    if (FLAG_parallel_recompilation_delay != 0) {
      OS::Sleep(FLAG_parallel_recompilation_delay);
    }

    if (Acquire_Load(&stop_thread_)) {
      stop_semaphore_->Signal();
      if (FLAG_trace_parallel_recompilation) {
        time_spent_total_ = OS::Ticks() - epoch;
      }
      return;
    }

    int64_t compiling_start = 0;
    if (FLAG_trace_parallel_recompilation) compiling_start = OS::Ticks();

    CompileNext();

    if (FLAG_trace_parallel_recompilation) {
      time_spent_compiling_ += OS::Ticks() - compiling_start;
    }
  }
}


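// Takes one job off the input queue, runs the backend graph optimization,
// marks the function so the main thread knows recompiled code is pending,
// and hands the job over to the output queue for installation.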
void OptimizingCompilerThread::CompileNext() {
  OptimizingCompiler* optimizing_compiler = NULL;
  input_queue_.Dequeue(&optimizing_compiler);
  Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));

  // The function may have already been optimized by OSR.  Simply continue.
  OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
  USE(status);   // Prevent an unused-variable error in release mode.
  ASSERT(status != OptimizingCompiler::FAILED);

  // Mark the function for installing before queuing it, so that we can rely
  // on the write order: marking first and (after being queued) installing
  // the code second.
  { Heap::RelocationLock relocation_lock(isolate_->heap());
    AllowHandleDereference ahd;
    optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
  }
  output_queue_.Enqueue(optimizing_compiler);
}


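// Called from the main thread to shut the compiler thread down. Sets the
// stop flag, wakes the thread via the input queue semaphore, and waits for
// it to acknowledge on the stop semaphore. If an artificial delay is
// configured, any leftover jobs are compiled and installed on the calling
// thread before returning.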
void OptimizingCompilerThread::Stop() {
  ASSERT(!IsOptimizerThread());
  Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
  input_queue_semaphore_->Signal();
  stop_semaphore_->Wait();

  if (FLAG_parallel_recompilation_delay != 0) {
    InstallOptimizedFunctions();
    // A barrier when loading the queue length is not necessary, since the
    // write happens in CompileNext on the same thread.
    while (NoBarrier_Load(&queue_length_) > 0) {
      CompileNext();
      InstallOptimizedFunctions();
    }
  }

  if (FLAG_trace_parallel_recompilation) {
    double compile_time = static_cast<double>(time_spent_compiling_);
    double total_time = static_cast<double>(time_spent_total_);
    double percentage = (compile_time * 100) / total_time;
    PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage);
  }
}


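// Runs on the main thread: drains the output queue and installs the
// optimized code produced by the compiler thread.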
void OptimizingCompilerThread::InstallOptimizedFunctions() {
  ASSERT(!IsOptimizerThread());
  HandleScope handle_scope(isolate_);
  int functions_installed = 0;
  while (!output_queue_.IsEmpty()) {
    OptimizingCompiler* compiler;
    output_queue_.Dequeue(&compiler);
    Compiler::InstallOptimizedCode(compiler);
    functions_installed++;
  }
}


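// Runs on the main thread: bumps the queue length, marks the function as
// queued for recompilation, enqueues the job, and wakes the compiler thread.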
void OptimizingCompilerThread::QueueForOptimization(
    OptimizingCompiler* optimizing_compiler) {
  ASSERT(IsQueueAvailable());
  ASSERT(!IsOptimizerThread());
  Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
  optimizing_compiler->info()->closure()->MarkInRecompileQueue();
  input_queue_.Enqueue(optimizing_compiler);
  input_queue_semaphore_->Signal();
}


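// Returns whether the current thread is the optimizing compiler thread;
// always false when parallel recompilation is disabled.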
bool OptimizingCompilerThread::IsOptimizerThread() {
  if (!FLAG_parallel_recompilation) return false;
  return ThreadId::Current().ToInteger() == thread_id_;
}


} }  // namespace v8::internal