1 /* Copyright (C) 2005 Free Software Foundation, Inc.
2 Contributed by Richard Henderson <rth@redhat.com>.
4 This file is part of the GNU OpenMP Library (libgomp).
6 Libgomp is free software; you can redistribute it and/or modify it
7 under the terms of the GNU Lesser General Public License as published by
8 the Free Software Foundation; either version 2.1 of the License, or
9 (at your option) any later version.
11 Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
13 FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
16 You should have received a copy of the GNU Lesser General Public License
17 along with libgomp; see the file COPYING.LIB. If not, write to the
18 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
19 MA 02110-1301, USA. */
21 /* As a special exception, if you link this library with other files, some
22 of which are compiled with GCC, to produce an executable, this library
23 does not by itself cause the resulting executable to be covered by the
24 GNU General Public License. This exception does not however invalidate
25 any other reasons why the executable file might be covered by the GNU
26 General Public License. */
28 /* This file handles the maintenance of threads in response to team
29 creation and termination. */
40 /* This array manages threads spawned from the top level, which will
41 return to the idle loop once the current PARALLEL construct ends. */
42 static struct gomp_thread **gomp_threads;
/* Allocated capacity of the gomp_threads array, in elements (grown in
   gomp_team_start when nthreads outgrows it).  */
43 static unsigned gomp_threads_size;
/* Number of leading entries of gomp_threads that refer to live threads,
   i.e. those parked on gomp_threads_dock awaiting reuse.  */
44 static unsigned gomp_threads_used;
46 /* This attribute contains PTHREAD_CREATE_DETACHED. */
47 static pthread_attr_t gomp_thread_attr;
49 /* This barrier holds and releases threads waiting in gomp_threads. */
50 static gomp_barrier_t gomp_threads_dock;
52 /* This is the libgomp per-thread data structure. */
54 __thread struct gomp_thread gomp_tls_data;
/* Fallback route to the per-thread data when compiler-level TLS (__thread)
   is not available; set via pthread_setspecific in gomp_thread_start and
   initialize_team.  NOTE(review): the #ifdef lines that select between the
   __thread and pthread-key mechanisms appear to be missing from this copy
   of the file -- confirm against the original source.  */
56 pthread_key_t gomp_tls_key;
60 /* This structure is used to communicate across pthread_create. */
62 struct gomp_thread_start_data
/* Team state the newly created thread adopts as its own on startup.  */
64 struct gomp_team_state ts;
/* NOTE(review): this struct is truncated in this copy -- the opening
   brace and the remaining members (the code below reads data->fn_data,
   data->nested, and a function pointer) are not visible here.  Restore
   from the original source before relying on this definition.  */
71 /* This function is a pthread_create entry point. This contains the idle
72 loop in which a thread waits to be called up to become part of a team.
   NOTE(review): several lines of this function (the return type, braces,
   and the conditionals separating the nested-team path from the top-level
   idle-loop path) are missing from this copy -- the comments below describe
   only what the visible statements establish.  */
75 gomp_thread_start (void *xdata)
77 struct gomp_thread_start_data *data = xdata;
78 struct gomp_thread *thr;
79 void (*local_fn) (void *);
/* Per-thread data kept on this thread's own stack; presumably used when
   __thread TLS is unavailable -- the selecting #ifdef is not visible here.  */
85 struct gomp_thread local_thr;
/* Make the per-thread data reachable through the pthread key as well.  */
87 pthread_setspecific (gomp_tls_key, thr);
/* Semaphore on which this thread blocks when waiting its turn in an
   ordered section; registered with the team just below.  */
89 gomp_sem_init (&thr->release, 0);
91 /* Extract what we need from data. */
93 local_data = data->fn_data;
/* Publish our release semaphore so the team can wake us for ORDERED.  */
96 thr->ts.team->ordered_release[thr->ts.team_id] = &thr->release;
/* Nested-team case: one-shot -- synchronize entry, run the parallel
   region body, synchronize exit.  */
100 gomp_barrier_wait (&thr->ts.team->barrier);
101 local_fn (local_data);
102 gomp_barrier_wait (&thr->ts.team->barrier);
/* Top-level case: register ourselves for reuse, then loop on the dock.  */
106 gomp_threads[thr->ts.team_id] = thr;
/* Park until the master releases the dock with new work.  */
108 gomp_barrier_wait (&gomp_threads_dock);
111 struct gomp_team *team;
/* Run the parallel region body we were handed by gomp_team_start.  */
113 local_fn (local_data);
115 /* Clear out the team and function data. This is a debugging
116 signal that we're in fact back in the dock. */
121 thr->ts.work_share = NULL;
123 thr->ts.work_share_generation = 0;
124 thr->ts.static_trip = 0;
/* End-of-team barrier, then back to the dock for the next PARALLEL.  */
126 gomp_barrier_wait (&team->barrier);
127 gomp_barrier_wait (&gomp_threads_dock);
/* Pick up the next function/data pair the master stored on us.  */
130 local_data = thr->data;
139 /* Create a new team data structure for NTHREADS threads, optionally
   seeded with an initial WORK_SHARE (may be NULL).  The team and its
   trailing ordered_release array are allocated as one block; entry 0 of
   that array is wired to the master's release semaphore.  The caller
   owns the result and frees it with free_team.
   NOTE(review): the opening brace, the declaration of `size', and the
   return statement are missing from this copy of the file.  */
141 static struct gomp_team *
142 new_team (unsigned nthreads, struct gomp_work_share *work_share)
144 struct gomp_team *team;
/* One allocation: struct plus flexible trailing array of NTHREADS
   semaphore pointers used for ORDERED hand-off.  */
147 size = sizeof (*team) + nthreads * sizeof (team->ordered_release[0]);
148 team = gomp_malloc (size);
149 gomp_mutex_init (&team->work_share_lock);
/* Start with room for 4 work shares; generation_mask = 3 makes the
   work_shares array act as a size-4 circular buffer.  */
151 team->work_shares = gomp_malloc (4 * sizeof (struct gomp_work_share *));
152 team->generation_mask = 3;
/* If a work share was supplied it occupies generation 0; otherwise the
   oldest live generation starts at 1 with zero live generations.  */
153 team->oldest_live_gen = work_share == NULL;
154 team->num_live_gen = work_share != NULL;
155 team->work_shares[0] = work_share;
157 team->nthreads = nthreads;
158 gomp_barrier_init (&team->barrier, nthreads);
/* Slot 0 of ordered_release belongs to the team master.  */
160 gomp_sem_init (&team->master_release, 0);
161 team->ordered_release[0] = &team->master_release;
167 /* Free a team data structure allocated by new_team: destroy its
   synchronization objects and release its work-share array.
   NOTE(review): the function signature line, braces, and the final
   free (team) are missing from this copy of the file.  */
170 free_team (struct gomp_team *team)
172 free (team->work_shares);
173 gomp_mutex_destroy (&team->work_share_lock);
174 gomp_barrier_destroy (&team->barrier);
175 gomp_sem_destroy (&team->master_release);
/* Launch a team of NTHREADS threads (including the calling master, which
   becomes team_id 0) to execute FN (DATA), optionally seeded with
   WORK_SHARE.  Non-nested teams reuse idle threads parked on
   gomp_threads_dock and only pthread_create the shortfall; nested teams
   always create fresh threads.  Called by the master thread only.
   NOTE(review): the return type line, braces, several declarations
   (pt, err, nested), and parts of the control flow are missing from this
   copy -- comments below describe only the visible statements.  */
183 gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
184 struct gomp_work_share *work_share)
186 struct gomp_thread_start_data *start_data;
187 struct gomp_thread *thr, *nthr;
188 struct gomp_team *team;
190 unsigned i, n, old_threads_used = 0;
192 thr = gomp_thread ();
/* We are nested iff the caller is already inside a team.  */
193 nested = thr->ts.team != NULL;
195 team = new_team (nthreads, work_share);
197 /* Always save the previous state, even if this isn't a nested team.
198 In particular, we should save any work share state from an outer
199 orphaned work share construct. */
200 team->prev_ts = thr->ts;
/* The master's own team state: it is member 0 of the new team.  */
203 thr->ts.work_share = work_share;
205 thr->ts.work_share_generation = 0;
206 thr->ts.static_trip = 0;
213 /* We only allow the reuse of idle threads for non-nested PARALLEL
214 regions. This appears to be implied by the semantics of
215 threadprivate variables, but perhaps that's reading too much into
216 things. Certainly it does prevent any locking problems, since
217 only the initial program thread will modify gomp_threads. */
220 old_threads_used = gomp_threads_used;
/* Enough idle threads already exist -- no barrier resize needed.  */
222 if (nthreads <= old_threads_used)
/* First team ever: the dock barrier has never been initialized.  */
224 else if (old_threads_used == 0)
227 gomp_barrier_init (&gomp_threads_dock, nthreads);
231 n = old_threads_used;
233 /* Increase the barrier threshold to make sure all new
234 threads arrive before the team is released. */
235 gomp_barrier_reinit (&gomp_threads_dock, nthreads);
238 /* Not true yet, but soon will be. We're going to release all
239 threads from the dock, and those that aren't part of the
   team will exit. */
241 gomp_threads_used = nthreads;
243 /* Release existing idle threads. */
/* Hand each parked thread its slice of the new team's state; it picks
   this up when the dock barrier opens below.  */
246 nthr = gomp_threads[i];
247 nthr->ts.team = team;
248 nthr->ts.work_share = work_share;
249 nthr->ts.team_id = i;
250 nthr->ts.work_share_generation = 0;
251 nthr->ts.static_trip = 0;
254 team->ordered_release[i] = &nthr->release;
260 /* If necessary, expand the size of the gomp_threads array. It is
261 expected that changes in the number of threads is rare, thus we
262 make no effort to expand gomp_threads_size geometrically. */
263 if (nthreads >= gomp_threads_size)
265 gomp_threads_size = nthreads + 1;
267 = gomp_realloc (gomp_threads,
269 * sizeof (struct gomp_thread_data *));
/* Start data for the (nthreads - i) threads we must actually create;
   alloca is safe because gomp_thread_start copies what it needs before
   the entry barrier below releases this frame.  */
273 start_data = alloca (sizeof (struct gomp_thread_start_data) * (nthreads-i));
275 /* Launch new threads. */
276 for (; i < nthreads; ++i, ++start_data)
281 start_data->ts.team = team;
282 start_data->ts.work_share = work_share;
283 start_data->ts.team_id = i;
284 start_data->ts.work_share_generation = 0;
285 start_data->ts.static_trip = 0;
287 start_data->fn_data = data;
288 start_data->nested = nested;
/* Threads are created detached (see gomp_thread_attr); they are never
   joined, only synchronized through barriers.  */
290 err = pthread_create (&pt, &gomp_thread_attr,
291 gomp_thread_start, start_data);
293 gomp_fatal ("Thread creation failed: %s", strerror (err));
/* Release the team: nested teams rendezvous on their private barrier,
   top-level teams on the global dock.  */
297 gomp_barrier_wait (nested ? &team->barrier : &gomp_threads_dock);
299 /* Decrease the barrier threshold to match the number of threads
300 that should arrive back at the end of this team. The extra
301 threads should be exiting. Note that we arrange for this test
302 to never be true for nested teams. */
303 if (nthreads < old_threads_used)
304 gomp_barrier_reinit (&gomp_threads_dock, nthreads);
308 /* Terminate the current team. This is only to be called by the master
309 thread. We assume that we must wait for the other threads.
   NOTE(review): the `void gomp_team_end (void)' signature line, braces,
   and the trailing cleanup (restoring nesting state / freeing the team)
   are missing from this copy of the file.  */
314 struct gomp_thread *thr = gomp_thread ();
315 struct gomp_team *team = thr->ts.team;
/* End-of-region barrier: wait for every team member to finish.  */
317 gomp_barrier_wait (&team->barrier);
/* Pop back to the state saved by gomp_team_start.  */
319 thr->ts = team->prev_ts;
325 /* Constructors for this file. */
/* Runs automatically before main: sets up the initial (master) thread's
   per-thread data, its release semaphore, and the detached-thread
   attribute used by every pthread_create in gomp_team_start.
   NOTE(review): the #ifdef HAVE_TLS lines selecting between the __thread
   path and the pthread-key path, plus braces, are missing from this copy
   -- both alternatives appear interleaved below.  */
327 static void __attribute__((constructor))
328 initialize_team (void)
330 struct gomp_thread *thr;
/* Static storage stands in for TLS for the initial thread when __thread
   is unavailable.  */
333 static struct gomp_thread initial_thread_tls_data;
/* No destructor: per-thread data for the initial thread lives for the
   whole process.  */
335 pthread_key_create (&gomp_tls_key, NULL);
336 pthread_setspecific (gomp_tls_key, &initial_thread_tls_data);
/* __thread path: per-thread data comes from compiler TLS.  */
340 thr = &gomp_tls_data;
/* pthread-key path: use the static block registered above.  */
342 thr = &initial_thread_tls_data;
344 gomp_sem_init (&thr->release, 0);
/* All worker threads are detached; the library never pthread_joins.  */
346 pthread_attr_init (&gomp_thread_attr);
347 pthread_attr_setdetachstate (&gomp_thread_attr, PTHREAD_CREATE_DETACHED);