1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "cc/resources/worker_pool.h"
10 #include "base/bind.h"
11 #include "base/containers/hash_tables.h"
12 #include "base/debug/trace_event.h"
13 #include "base/strings/stringprintf.h"
14 #include "base/synchronization/condition_variable.h"
15 #include "base/threading/simple_thread.h"
16 #include "base/threading/thread_restrictions.h"
17 #include "cc/base/scoped_ptr_deque.h"
// All lifecycle-tracking flags start false; presumably DidSchedule()/
// DidRun()/DidComplete() flip them as the task advances (setters not
// visible here -- see the destructor DCHECKs for the invariants).
23 WorkerPoolTask::WorkerPoolTask()
24 : did_schedule_(false),
26 did_complete_(false) {
// Destructor DCHECKs enforce the task lifecycle: scheduling and
// completion always pair up, and a task that ran must have been
// scheduled and must have completed before destruction.
29 WorkerPoolTask::~WorkerPoolTask() {
30 DCHECK_EQ(did_schedule_, did_complete_);
31 DCHECK(!did_run_ || did_schedule_);
32 DCHECK(!did_run_ || did_complete_);
// Marks the task as scheduled (debug-only bookkeeping). A task must not
// be re-scheduled after it has completed.
35 void WorkerPoolTask::DidSchedule() {
36 DCHECK(!did_complete_);
// Called just before the task runs on a worker thread. A task may only
// run after being scheduled and before it has completed.
40 void WorkerPoolTask::WillRun() {
41 DCHECK(did_schedule_);
42 DCHECK(!did_complete_);
46 void WorkerPoolTask::DidRun() {
// Called on the origin thread just before completion is dispatched;
// must not already have completed.
50 void WorkerPoolTask::WillComplete() {
51 DCHECK(!did_complete_);
// Called on the origin thread after the completion callback has run.
// Completion happens exactly once, and only for a scheduled task.
54 void WorkerPoolTask::DidComplete() {
55 DCHECK(did_schedule_);
56 DCHECK(!did_complete_);
60 bool WorkerPoolTask::HasFinishedRunning() const {
64 bool WorkerPoolTask::HasCompleted() const {
// A node in the task dependency graph: wraps |task| with its scheduling
// |priority| and starts with zero unmet dependencies.
68 GraphNode::GraphNode(internal::WorkerPoolTask* task, unsigned priority)
71 num_dependencies_(0) {
74 GraphNode::~GraphNode() {
77 } // namespace internal
79 // Internal to the worker pool. Any data or logic that needs to be
80 // shared between threads lives in this class. All members are guarded
82 class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate {
// Spawns |num_threads| worker threads whose names start with
// |thread_name_prefix| (see the constructor definition below).
84 Inner(size_t num_threads, const std::string& thread_name_prefix);
89 // Schedule running of tasks in |graph|. Tasks previously scheduled but
90 // no longer needed will be canceled unless already running. Canceled
91 // tasks are moved to |completed_tasks_| without being run. The result
92 // is that once scheduled, a task is guaranteed to end up in the
// |completed_tasks_| queue even if it later gets canceled by another
94 // call to SetTaskGraph().
95 void SetTaskGraph(TaskGraph* graph);
97 // Collect all completed tasks in |completed_tasks|.
98 void CollectCompletedTasks(TaskVector* completed_tasks);
// Comparator for |ready_to_run_tasks_| (a std::priority_queue, which
// pops the "largest" element first under this ordering).
101 class PriorityComparator {
103 bool operator()(const internal::GraphNode* a,
104 const internal::GraphNode* b) {
105 // In this system, numerically lower priority is run first.
106 if (a->priority() != b->priority())
107 return a->priority() > b->priority();
109 // Run task with most dependents first when priority is the same.
110 return a->dependents().size() < b->dependents().size();
114 // Overridden from base::DelegateSimpleThread:
// Worker thread entry point; each spawned thread executes this loop.
115 virtual void Run() OVERRIDE;
117 // This lock protects all members of this class except
118 // |worker_pool_on_origin_thread_|. Do not read or modify anything
119 // without holding this lock. Do not block while holding this lock.
120 mutable base::Lock lock_;
122 // Condition variable that is waited on by worker threads until new
123 // tasks are ready to run or shutdown starts.
124 base::ConditionVariable has_ready_to_run_tasks_cv_;
126 // Provides each running thread loop with a unique index. First thread
128 unsigned next_thread_index_;
130 // Set during shutdown. Tells workers to exit when no more tasks
134 // This set contains all pending tasks.
135 GraphNodeMap pending_tasks_;
137 // Ordered set of tasks that are ready to run.
138 typedef std::priority_queue<internal::GraphNode*,
139 std::vector<internal::GraphNode*>,
140 PriorityComparator> TaskQueue;
141 TaskQueue ready_to_run_tasks_;
143 // This set contains all currently running tasks.
144 GraphNodeMap running_tasks_;
146 // Completed tasks not yet collected by origin thread.
147 TaskVector completed_tasks_;
// Worker threads owned by this class; drained (and joined) in
// Shutdown().
149 ScopedPtrDeque<base::DelegateSimpleThread> workers_;
151 DISALLOW_COPY_AND_ASSIGN(Inner);
// Constructs the shared state and spawns all worker threads up front.
// The condition variable is bound to |lock_| so workers can wait for
// ready-to-run tasks.
154 WorkerPool::Inner::Inner(
155 size_t num_threads, const std::string& thread_name_prefix)
157 has_ready_to_run_tasks_cv_(&lock_),
158 next_thread_index_(0),
// Hold |lock_| while spawning so started workers only observe
// fully-initialized state.
160 base::AutoLock lock(lock_);
162 while (workers_.size() < num_threads) {
163 scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr(
164 new base::DelegateSimpleThread(
// Thread names carry a 1-based index (workers_.size() + 1); the
// formatting call itself is not visible in this excerpt.
169 static_cast<unsigned>(workers_.size() + 1)).c_str()));
171 #if defined(OS_ANDROID) || defined(OS_LINUX)
// Lower the workers' scheduling priority where the platform supports it.
172 worker->SetThreadPriority(base::kThreadPriority_Background);
174 workers_.push_back(worker.Pass());
// By destruction time every task container must be empty, i.e. all
// scheduled tasks were run or canceled and their completions collected.
178 WorkerPool::Inner::~Inner() {
179 base::AutoLock lock(lock_);
183 DCHECK_EQ(0u, pending_tasks_.size());
184 DCHECK_EQ(0u, ready_to_run_tasks_.size());
185 DCHECK_EQ(0u, running_tasks_.size());
186 DCHECK_EQ(0u, completed_tasks_.size());
// Signals workers to exit once no more tasks are pending, then joins
// each worker thread in turn.
189 void WorkerPool::Inner::Shutdown() {
191 base::AutoLock lock(lock_);
196 // Wake up a worker so it knows it should exit. This will cause all workers
197 // to exit as each will wake up another worker before exiting.
198 has_ready_to_run_tasks_cv_.Signal();
// Join workers outside |lock_| (the join presumably follows the
// ScopedAllowIO below; the call itself is not visible in this excerpt).
201 while (workers_.size()) {
202 scoped_ptr<base::DelegateSimpleThread> worker = workers_.take_front();
203 // http://crbug.com/240453 - Join() is considered IO and will block this
204 // thread. See also http://crbug.com/239423 for further ideas.
205 base::ThreadRestrictions::ScopedAllowIO allow_io;
// Atomically replaces the scheduled task graph. Tasks already running
// keep running; tasks no longer in |graph| are canceled and moved to
// |completed_tasks_| without being run.
210 void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) {
211 // It is OK to call SetTaskGraph() after shutdown if |graph| is empty.
212 DCHECK(graph->empty() || !shutdown_);
// Build the replacement containers outside the lock where possible.
214 GraphNodeMap new_pending_tasks;
215 GraphNodeMap new_running_tasks;
216 TaskQueue new_ready_to_run_tasks;
// Take ownership of the caller's graph; |graph| is left empty.
218 new_pending_tasks.swap(*graph);
221 base::AutoLock lock(lock_);
223 // First remove all completed tasks from |new_pending_tasks| and
224 // adjust number of dependencies.
225 for (TaskVector::iterator it = completed_tasks_.begin();
226 it != completed_tasks_.end(); ++it) {
227 internal::WorkerPoolTask* task = it->get();
229 scoped_ptr<internal::GraphNode> node = new_pending_tasks.take_and_erase(
// Each dependent of an already-completed task has one fewer unmet
// dependency. (Inner loop shadows the outer |it| intentionally.)
232 for (internal::GraphNode::Vector::const_iterator it =
233 node->dependents().begin();
234 it != node->dependents().end(); ++it) {
235 internal::GraphNode* dependent_node = *it;
236 dependent_node->remove_dependency();
241 // Build new running task set.
242 for (GraphNodeMap::iterator it = running_tasks_.begin();
243 it != running_tasks_.end(); ++it) {
244 internal::WorkerPoolTask* task = it->first;
245 // Transfer scheduled task value from |new_pending_tasks| to
246 // |new_running_tasks| if currently running. Value must be set to
// NULL if |new_pending_tasks| doesn't contain task. This does
// the right thing in both cases.
249 new_running_tasks.set(task, new_pending_tasks.take_and_erase(task));
252 // Build new "ready to run" tasks queue.
253 // TODO(reveman): Create this queue when building the task graph instead.
254 for (GraphNodeMap::iterator it = new_pending_tasks.begin();
255 it != new_pending_tasks.end(); ++it) {
256 internal::WorkerPoolTask* task = it->first;
258 internal::GraphNode* node = it->second;
260 // Completed tasks should not exist in |new_pending_tasks|.
261 DCHECK(!task->HasFinishedRunning());
263 // Call DidSchedule() to indicate that this task has been scheduled.
264 // Note: This is only for debugging purposes.
// A task with no unmet dependencies can run immediately.
267 if (!node->num_dependencies())
268 new_ready_to_run_tasks.push(node);
270 // Erase the task from old pending tasks.
271 pending_tasks_.erase(task);
// Reserve up front: every remaining old pending task becomes a
// completed (canceled) task below.
274 completed_tasks_.reserve(completed_tasks_.size() + pending_tasks_.size());
276 // The items left in |pending_tasks_| need to be canceled.
277 for (GraphNodeMap::const_iterator it = pending_tasks_.begin();
278 it != pending_tasks_.end();
280 completed_tasks_.push_back(it->first);
284 // Note: old tasks are intentionally destroyed after releasing |lock_|.
285 pending_tasks_.swap(new_pending_tasks);
286 running_tasks_.swap(new_running_tasks);
287 std::swap(ready_to_run_tasks_, new_ready_to_run_tasks);
289 // If |ready_to_run_tasks_| is empty, it means we either have
290 // running tasks, or we have no pending tasks.
291 DCHECK(!ready_to_run_tasks_.empty() ||
292 (pending_tasks_.empty() || !running_tasks_.empty()));
294 // If there is more work available, wake up worker thread.
295 if (!ready_to_run_tasks_.empty())
296 has_ready_to_run_tasks_cv_.Signal();
// Moves all accumulated completed tasks into the caller-supplied
// (and required-empty) |completed_tasks| vector; O(1) via swap.
300 void WorkerPool::Inner::CollectCompletedTasks(TaskVector* completed_tasks) {
301 base::AutoLock lock(lock_);
303 DCHECK_EQ(0u, completed_tasks->size());
304 completed_tasks->swap(completed_tasks_);
// Worker thread loop: repeatedly wait for a ready task, run it with
// |lock_| released, then publish its completion and unblock dependents.
// The lock is held for all shared-state manipulation.
307 void WorkerPool::Inner::Run() {
308 base::AutoLock lock(lock_);
310 // Get a unique thread index.
311 int thread_index = next_thread_index_++;
// (Loop header elided in this excerpt; the body below iterates until
// shutdown with no pending work.)
314 if (ready_to_run_tasks_.empty()) {
315 // Exit when shutdown is set and no more tasks are pending.
316 if (shutdown_ && pending_tasks_.empty())
319 // Wait for more tasks.
// Wait() atomically releases |lock_| while blocked and reacquires it
// before returning.
320 has_ready_to_run_tasks_cv_.Wait();
324 // Take top priority task from |ready_to_run_tasks_|.
// Hold a reference so the task outlives any concurrent SetTaskGraph().
325 scoped_refptr<internal::WorkerPoolTask> task(
326 ready_to_run_tasks_.top()->task());
327 ready_to_run_tasks_.pop();
329 // Move task from |pending_tasks_| to |running_tasks_|.
330 DCHECK(pending_tasks_.contains(task.get()));
331 DCHECK(!running_tasks_.contains(task.get()));
332 running_tasks_.set(task.get(), pending_tasks_.take_and_erase(task.get()));
334 // There may be more work available, so wake up another worker thread.
335 has_ready_to_run_tasks_cv_.Signal();
337 // Call WillRun() before releasing |lock_| and running task.
// Drop the lock only while the task body executes.
341 base::AutoUnlock unlock(lock_);
343 task->RunOnWorkerThread(thread_index);
346 // This will mark task as finished running.
349 // Now iterate over all dependents to remove dependency and check
350 // if they are ready to run.
351 scoped_ptr<internal::GraphNode> node = running_tasks_.take_and_erase(
354 for (internal::GraphNode::Vector::const_iterator it =
355 node->dependents().begin();
356 it != node->dependents().end(); ++it) {
357 internal::GraphNode* dependent_node = *it;
359 dependent_node->remove_dependency();
360 // Task is ready if it has no dependencies. Add it to
361 // |ready_to_run_tasks_|.
362 if (!dependent_node->num_dependencies())
363 ready_to_run_tasks_.push(dependent_node);
367 // Finally add task to |completed_tasks_|.
368 completed_tasks_.push_back(task);
371 // We noticed we should exit. Wake up the next worker so it knows it should
372 // exit as well (because the Shutdown() code only signals once).
373 has_ready_to_run_tasks_cv_.Signal();
// Public wrapper: all thread-shared state and worker threads live in
// |inner_|, which is created (and its threads started) here.
376 WorkerPool::WorkerPool(size_t num_threads,
377 const std::string& thread_name_prefix)
378 : in_dispatch_completion_callbacks_(false),
379 inner_(make_scoped_ptr(new Inner(num_threads, thread_name_prefix))) {
382 WorkerPool::~WorkerPool() {
// Shuts the pool down; must not be called while completion callbacks
// are being dispatched. Presumably forwards to inner_->Shutdown()
// (the forwarding line is not visible in this excerpt).
385 void WorkerPool::Shutdown() {
386 TRACE_EVENT0("cc", "WorkerPool::Shutdown");
388 DCHECK(!in_dispatch_completion_callbacks_);
// Origin-thread poll: drains completed tasks from |inner_| and
// dispatches their completion callbacks. Not reentrant.
393 void WorkerPool::CheckForCompletedTasks() {
394 TRACE_EVENT0("cc", "WorkerPool::CheckForCompletedTasks");
396 DCHECK(!in_dispatch_completion_callbacks_);
398 TaskVector completed_tasks;
399 inner_->CollectCompletedTasks(&completed_tasks);
400 ProcessCompletedTasks(completed_tasks);
// Runs the origin-thread completion callback for each task in
// |completed_tasks|, guarding against reentrant calls into this
// WorkerPool while callbacks execute.
403 void WorkerPool::ProcessCompletedTasks(
404 const TaskVector& completed_tasks) {
405 TRACE_EVENT1("cc", "WorkerPool::ProcessCompletedTasks",
406 "completed_task_count", completed_tasks.size());
408 // Worker pool instance is not reentrant while processing completed tasks.
409 in_dispatch_completion_callbacks_ = true;
411 for (TaskVector::const_iterator it = completed_tasks.begin();
412 it != completed_tasks.end();
414 internal::WorkerPoolTask* task = it->get();
// WillComplete()/CompleteOnOriginThread() bracket the user-visible
// completion; DidComplete() presumably follows (elided in this excerpt).
416 task->WillComplete();
417 task->CompleteOnOriginThread();
421 in_dispatch_completion_callbacks_ = false;
// Public entry point for (re)scheduling work: forwards |graph| to
// Inner::SetTaskGraph(). Must not be called from within completion
// callback dispatch.
424 void WorkerPool::SetTaskGraph(TaskGraph* graph) {
425 TRACE_EVENT1("cc", "WorkerPool::SetTaskGraph",
426 "num_tasks", graph->size());
428 DCHECK(!in_dispatch_completion_callbacks_);
430 inner_->SetTaskGraph(graph);