/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#ifndef SkThreadPool_DEFINED
#define SkThreadPool_DEFINED

#include "SkCondVar.h"
#include "SkRunnable.h"
#include "SkTDArray.h"
#include "SkTInternalLList.h"
#include "SkThreadUtils.h"

// sysconf(_SC_NPROCESSORS_ONLN) in num_cores() needs <unistd.h> on POSIX targets.
#if defined(SK_BUILD_FOR_UNIX) || defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_ANDROID)
#include <unistd.h>
#endif
// Returns the number of logical cores on this machine, falling back to 1 when
// the platform provides no way to ask.  Used to size kThreadPerCore pools.
static inline int num_cores() {
#if defined(SK_BUILD_FOR_WIN32)
    SYSTEM_INFO sysinfo;
    GetSystemInfo(&sysinfo);
    return sysinfo.dwNumberOfProcessors;
#elif defined(SK_BUILD_FOR_UNIX) || defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_ANDROID)
    // _SC_NPROCESSORS_ONLN counts processors currently online, not configured.
    return (int) sysconf(_SC_NPROCESSORS_ONLN);
#else
    // Unknown platform: be conservative.
    return 1;
#endif
}
39 * Create a threadpool with count threads, or one thread per core if kThreadPerCore.
41 static const int kThreadPerCore = -1;
42 explicit SkTThreadPool(int count);
46 * Queues up an SkRunnable to run when a thread is available, or synchronously if count is 0.
47 * Does not take ownership. NULL is a safe no-op. If T is not void, the runnable will be passed
48 * a reference to a T on the thread's local stack.
50 void add(SkTRunnable<T>*);
53 * Same as add, but adds the runnable as the very next to run rather than enqueueing it.
55 void addNext(SkTRunnable<T>*);
58 * Block until all added SkRunnables have completed. Once called, calling add() is undefined.
63 struct LinkedRunnable {
64 SkTRunnable<T>* fRunnable; // Unowned.
65 SK_DECLARE_INTERNAL_LLIST_INTERFACE(LinkedRunnable);
69 kRunning_State, // Normal case. We've been constructed and no one has called wait().
70 kWaiting_State, // wait has been called, but there still might be work to do or being done.
71 kHalting_State, // There's no work to do and no thread is busy. All threads can shut down.
74 void addSomewhere(SkTRunnable<T>* r,
75 void (SkTInternalLList<LinkedRunnable>::*)(LinkedRunnable*));
77 SkTInternalLList<LinkedRunnable> fQueue;
79 SkTDArray<SkThread*> fThreads;
83 static void Loop(void*); // Static because we pass in this.
87 SkTThreadPool<T>::SkTThreadPool(int count) : fState(kRunning_State), fBusyThreads(0) {
91 // Create count threads, all running SkTThreadPool::Loop.
92 for (int i = 0; i < count; i++) {
93 SkThread* thread = SkNEW_ARGS(SkThread, (&SkTThreadPool::Loop, this));
94 *fThreads.append() = thread;
100 SkTThreadPool<T>::~SkTThreadPool() {
101 if (kRunning_State == fState) {
106 namespace SkThreadPoolPrivate {
108 template <typename T>
110 void run(SkTRunnable<T>* r) { r->run(data); }
115 struct ThreadLocal<void> {
116 void run(SkTRunnable<void>* r) { r->run(); }
119 } // namespace SkThreadPoolPrivate
121 template <typename T>
122 void SkTThreadPool<T>::addSomewhere(SkTRunnable<T>* r,
123 void (SkTInternalLList<LinkedRunnable>::* f)(LinkedRunnable*)) {
128 if (fThreads.isEmpty()) {
129 SkThreadPoolPrivate::ThreadLocal<T> threadLocal;
134 LinkedRunnable* linkedRunnable = SkNEW(LinkedRunnable);
135 linkedRunnable->fRunnable = r;
137 SkASSERT(fState != kHalting_State); // Shouldn't be able to add work when we're halting.
138 (fQueue.*f)(linkedRunnable);
143 template <typename T>
144 void SkTThreadPool<T>::add(SkTRunnable<T>* r) {
145 this->addSomewhere(r, &SkTInternalLList<LinkedRunnable>::addToTail);
148 template <typename T>
149 void SkTThreadPool<T>::addNext(SkTRunnable<T>* r) {
150 this->addSomewhere(r, &SkTInternalLList<LinkedRunnable>::addToHead);
154 template <typename T>
155 void SkTThreadPool<T>::wait() {
157 fState = kWaiting_State;
161 // Wait for all threads to stop.
162 for (int i = 0; i < fThreads.count(); i++) {
164 SkDELETE(fThreads[i]);
166 SkASSERT(fQueue.isEmpty());
169 template <typename T>
170 /*static*/ void SkTThreadPool<T>::Loop(void* arg) {
171 // The SkTThreadPool passes itself as arg to each thread as they're created.
172 SkTThreadPool<T>* pool = static_cast<SkTThreadPool<T>*>(arg);
173 SkThreadPoolPrivate::ThreadLocal<T> threadLocal;
176 // We have to be holding the lock to read the queue and to call wait.
178 while(pool->fQueue.isEmpty()) {
179 // Does the client want to stop and are all the threads ready to stop?
180 // If so, we move into the halting state, and whack all the threads so they notice.
181 if (kWaiting_State == pool->fState && pool->fBusyThreads == 0) {
182 pool->fState = kHalting_State;
183 pool->fReady.broadcast();
185 // Any time we find ourselves in the halting state, it's quitting time.
186 if (kHalting_State == pool->fState) {
187 pool->fReady.unlock();
190 // wait yields the lock while waiting, but will have it again when awoken.
193 // We've got the lock back here, no matter if we ran wait or not.
195 // The queue is not empty, so we have something to run. Claim it.
196 LinkedRunnable* r = pool->fQueue.head();
198 pool->fQueue.remove(r);
200 // Having claimed our SkRunnable, we now give up the lock while we run it.
201 // Otherwise, we'd only ever do work on one thread at a time, which rather
202 // defeats the point of this code.
203 pool->fBusyThreads++;
204 pool->fReady.unlock();
206 // OK, now really do the work.
207 threadLocal.run(r->fRunnable);
210 // Let everyone know we're not busy.
212 pool->fBusyThreads--;
213 pool->fReady.unlock();
216 SkASSERT(false); // Unreachable. The only exit happens when pool->fState is kHalting_State.
219 typedef SkTThreadPool<void> SkThreadPool;