1 /* GLIB - Library of useful routines for C programming
2 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
4 * gthread.c: posix thread system implementation
5 * Copyright 1998 Sebastian Wilhelmi; University of Karlsruhe
7 * SPDX-License-Identifier: LGPL-2.1-or-later
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
24 * Modified by the GLib Team and others 1997-2000. See the AUTHORS
25 * file for a list of people on the GLib Team. See the ChangeLog
26 * files for a list of changes. These files are distributed with
27 * GLib at ftp://ftp.gtk.org/pub/gtk/.
30 /* The GMutex, GCond and GPrivate implementations in this file are some
31 * of the lowest-level code in GLib. All other parts of GLib (messages,
32 * memory, slices, etc) assume that they can freely use these facilities
33 * without risking recursion.
35 * As such, these functions are NOT permitted to call any other part of
38 * The thread manipulation functions (create, exit, join, etc.) have
39 * more freedom -- they can do as they please.
47 #include "gmessages.h"
49 #include "gstrfuncs.h"
50 #include "gtestutils.h"
51 #include "gthreadprivate.h"
63 #ifdef HAVE_PTHREAD_SET_NAME_NP
64 #include <pthread_np.h>
73 #if defined(HAVE_SYS_SCHED_GETATTR)
74 #include <sys/syscall.h>
77 #if (defined(HAVE_FUTEX) || defined(HAVE_FUTEX_TIME64)) && \
78 (defined(HAVE_STDATOMIC_H) || defined(__ATOMIC_SEQ_CST))
79 #define USE_NATIVE_MUTEX
83 g_thread_abort (gint status,
84 const gchar *function)
86 fprintf (stderr, "GLib (gthread-posix.c): Unexpected error from C library during '%s': %s. Aborting.\n",
87 function, strerror (status));
93 #if !defined(USE_NATIVE_MUTEX)
95 static pthread_mutex_t *
96 g_mutex_impl_new (void)
98 pthread_mutexattr_t *pattr = NULL;
99 pthread_mutex_t *mutex;
101 #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
102 pthread_mutexattr_t attr;
105 mutex = malloc (sizeof (pthread_mutex_t));
106 if G_UNLIKELY (mutex == NULL)
107 g_thread_abort (errno, "malloc");
109 #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
110 pthread_mutexattr_init (&attr);
111 pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
115 if G_UNLIKELY ((status = pthread_mutex_init (mutex, pattr)) != 0)
116 g_thread_abort (status, "pthread_mutex_init");
118 #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
119 pthread_mutexattr_destroy (&attr);
126 g_mutex_impl_free (pthread_mutex_t *mutex)
128 pthread_mutex_destroy (mutex);
132 static inline pthread_mutex_t *
133 g_mutex_get_impl (GMutex *mutex)
135 pthread_mutex_t *impl = g_atomic_pointer_get (&mutex->p);
137 if G_UNLIKELY (impl == NULL)
139 impl = g_mutex_impl_new ();
140 if (!g_atomic_pointer_compare_and_exchange (&mutex->p, NULL, impl))
141 g_mutex_impl_free (impl);
151 * @mutex: an uninitialized #GMutex
153 * Initializes a #GMutex so that it can be used.
155 * This function is useful to initialize a mutex that has been
156 * allocated on the stack, or as part of a larger structure.
157 * It is not necessary to initialize a mutex that has been
158 * statically allocated.
160 * |[<!-- language="C" -->
168 * b = g_new (Blob, 1);
169 * g_mutex_init (&b->m);
172 * To undo the effect of g_mutex_init() when a mutex is no longer
173 * needed, use g_mutex_clear().
175 * Calling g_mutex_init() on an already initialized #GMutex leads
176 * to undefined behaviour.
181 g_mutex_init (GMutex *mutex)
183 mutex->p = g_mutex_impl_new ();
188 * @mutex: an initialized #GMutex
190 * Frees the resources allocated to a mutex with g_mutex_init().
192 * This function should not be used with a #GMutex that has been
193 * statically allocated.
195 * Calling g_mutex_clear() on a locked mutex leads to undefined
201 g_mutex_clear (GMutex *mutex)
203 g_mutex_impl_free (mutex->p);
210 * Locks @mutex. If @mutex is already locked by another thread, the
211 * current thread will block until @mutex is unlocked by the other
214 * #GMutex is neither guaranteed to be recursive nor to be
215 * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
216 * already been locked by the same thread results in undefined behaviour
217 * (including but not limited to deadlocks).
220 g_mutex_lock (GMutex *mutex)
224 if G_UNLIKELY ((status = pthread_mutex_lock (g_mutex_get_impl (mutex))) != 0)
225 g_thread_abort (status, "pthread_mutex_lock");
232 * Unlocks @mutex. If another thread is blocked in a g_mutex_lock()
233 * call for @mutex, it will become unblocked and can lock @mutex itself.
235 * Calling g_mutex_unlock() on a mutex that is not locked by the
236 * current thread leads to undefined behaviour.
239 g_mutex_unlock (GMutex *mutex)
243 if G_UNLIKELY ((status = pthread_mutex_unlock (g_mutex_get_impl (mutex))) != 0)
244 g_thread_abort (status, "pthread_mutex_unlock");
251 * Tries to lock @mutex. If @mutex is already locked by another thread,
252 * it immediately returns %FALSE. Otherwise it locks @mutex and returns
255 * #GMutex is neither guaranteed to be recursive nor to be
256 * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
257 * already been locked by the same thread results in undefined behaviour
258 * (including but not limited to deadlocks or arbitrary return values).
260 * Returns: %TRUE if @mutex could be locked
263 g_mutex_trylock (GMutex *mutex)
267 if G_LIKELY ((status = pthread_mutex_trylock (g_mutex_get_impl (mutex))) == 0)
270 if G_UNLIKELY (status != EBUSY)
271 g_thread_abort (status, "pthread_mutex_trylock");
276 #endif /* !defined(USE_NATIVE_MUTEX) */
280 static pthread_mutex_t *
281 g_rec_mutex_impl_new (void)
283 pthread_mutexattr_t attr;
284 pthread_mutex_t *mutex;
286 mutex = malloc (sizeof (pthread_mutex_t));
287 if G_UNLIKELY (mutex == NULL)
288 g_thread_abort (errno, "malloc");
290 pthread_mutexattr_init (&attr);
291 pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
292 pthread_mutex_init (mutex, &attr);
293 pthread_mutexattr_destroy (&attr);
299 g_rec_mutex_impl_free (pthread_mutex_t *mutex)
301 pthread_mutex_destroy (mutex);
305 static inline pthread_mutex_t *
306 g_rec_mutex_get_impl (GRecMutex *rec_mutex)
308 pthread_mutex_t *impl = g_atomic_pointer_get (&rec_mutex->p);
310 if G_UNLIKELY (impl == NULL)
312 impl = g_rec_mutex_impl_new ();
313 if (!g_atomic_pointer_compare_and_exchange (&rec_mutex->p, NULL, impl))
314 g_rec_mutex_impl_free (impl);
323 * @rec_mutex: an uninitialized #GRecMutex
325 * Initializes a #GRecMutex so that it can be used.
327 * This function is useful to initialize a recursive mutex
328 * that has been allocated on the stack, or as part of a larger
331 * It is not necessary to initialise a recursive mutex that has been
332 * statically allocated.
334 * |[<!-- language="C" -->
342 * b = g_new (Blob, 1);
343 * g_rec_mutex_init (&b->m);
346 * Calling g_rec_mutex_init() on an already initialized #GRecMutex
347 * leads to undefined behaviour.
349 * To undo the effect of g_rec_mutex_init() when a recursive mutex
350 * is no longer needed, use g_rec_mutex_clear().
355 g_rec_mutex_init (GRecMutex *rec_mutex)
357 rec_mutex->p = g_rec_mutex_impl_new ();
362 * @rec_mutex: an initialized #GRecMutex
364 * Frees the resources allocated to a recursive mutex with
365 * g_rec_mutex_init().
367 * This function should not be used with a #GRecMutex that has been
368 * statically allocated.
370 * Calling g_rec_mutex_clear() on a locked recursive mutex leads
371 * to undefined behaviour.
376 g_rec_mutex_clear (GRecMutex *rec_mutex)
378 g_rec_mutex_impl_free (rec_mutex->p);
383 * @rec_mutex: a #GRecMutex
385 * Locks @rec_mutex. If @rec_mutex is already locked by another
386 * thread, the current thread will block until @rec_mutex is
387 * unlocked by the other thread. If @rec_mutex is already locked
388 * by the current thread, the 'lock count' of @rec_mutex is increased.
389 * The mutex will only become available again when it is unlocked
390 * as many times as it has been locked.
395 g_rec_mutex_lock (GRecMutex *mutex)
397 pthread_mutex_lock (g_rec_mutex_get_impl (mutex));
401 * g_rec_mutex_unlock:
402 * @rec_mutex: a #GRecMutex
404 * Unlocks @rec_mutex. If another thread is blocked in a
405 * g_rec_mutex_lock() call for @rec_mutex, it will become unblocked
406 * and can lock @rec_mutex itself.
408 * Calling g_rec_mutex_unlock() on a recursive mutex that is not
409 * locked by the current thread leads to undefined behaviour.
414 g_rec_mutex_unlock (GRecMutex *rec_mutex)
416 pthread_mutex_unlock (rec_mutex->p);
420 * g_rec_mutex_trylock:
421 * @rec_mutex: a #GRecMutex
423 * Tries to lock @rec_mutex. If @rec_mutex is already locked
424 * by another thread, it immediately returns %FALSE. Otherwise
425 * it locks @rec_mutex and returns %TRUE.
427 * Returns: %TRUE if @rec_mutex could be locked
432 g_rec_mutex_trylock (GRecMutex *rec_mutex)
434 if (pthread_mutex_trylock (g_rec_mutex_get_impl (rec_mutex)) != 0)
442 static pthread_rwlock_t *
443 g_rw_lock_impl_new (void)
445 pthread_rwlock_t *rwlock;
448 rwlock = malloc (sizeof (pthread_rwlock_t));
449 if G_UNLIKELY (rwlock == NULL)
450 g_thread_abort (errno, "malloc");
452 if G_UNLIKELY ((status = pthread_rwlock_init (rwlock, NULL)) != 0)
453 g_thread_abort (status, "pthread_rwlock_init");
459 g_rw_lock_impl_free (pthread_rwlock_t *rwlock)
461 pthread_rwlock_destroy (rwlock);
465 static inline pthread_rwlock_t *
466 g_rw_lock_get_impl (GRWLock *lock)
468 pthread_rwlock_t *impl = g_atomic_pointer_get (&lock->p);
470 if G_UNLIKELY (impl == NULL)
472 impl = g_rw_lock_impl_new ();
473 if (!g_atomic_pointer_compare_and_exchange (&lock->p, NULL, impl))
474 g_rw_lock_impl_free (impl);
483 * @rw_lock: an uninitialized #GRWLock
485 * Initializes a #GRWLock so that it can be used.
487 * This function is useful to initialize a lock that has been
488 * allocated on the stack, or as part of a larger structure. It is not
489 * necessary to initialise a reader-writer lock that has been statically
492 * |[<!-- language="C" -->
500 * b = g_new (Blob, 1);
501 * g_rw_lock_init (&b->l);
504 * To undo the effect of g_rw_lock_init() when a lock is no longer
505 * needed, use g_rw_lock_clear().
507 * Calling g_rw_lock_init() on an already initialized #GRWLock leads
508 * to undefined behaviour.
513 g_rw_lock_init (GRWLock *rw_lock)
515 rw_lock->p = g_rw_lock_impl_new ();
520 * @rw_lock: an initialized #GRWLock
522 * Frees the resources allocated to a lock with g_rw_lock_init().
524 * This function should not be used with a #GRWLock that has been
525 * statically allocated.
527 * Calling g_rw_lock_clear() when any thread holds the lock
528 * leads to undefined behaviour.
533 g_rw_lock_clear (GRWLock *rw_lock)
535 g_rw_lock_impl_free (rw_lock->p);
539 * g_rw_lock_writer_lock:
540 * @rw_lock: a #GRWLock
542 * Obtain a write lock on @rw_lock. If another thread currently holds
543 * a read or write lock on @rw_lock, the current thread will block
544 * until all other threads have dropped their locks on @rw_lock.
546 * Calling g_rw_lock_writer_lock() while the current thread already
547 * owns a read or write lock on @rw_lock leads to undefined behaviour.
552 g_rw_lock_writer_lock (GRWLock *rw_lock)
554 int retval = pthread_rwlock_wrlock (g_rw_lock_get_impl (rw_lock));
557 g_critical ("Failed to get RW lock %p: %s", rw_lock, g_strerror (retval));
561 * g_rw_lock_writer_trylock:
562 * @rw_lock: a #GRWLock
564 * Tries to obtain a write lock on @rw_lock. If another thread
565 * currently holds a read or write lock on @rw_lock, it immediately
567 * Otherwise it locks @rw_lock and returns %TRUE.
569 * Returns: %TRUE if @rw_lock could be locked
574 g_rw_lock_writer_trylock (GRWLock *rw_lock)
576 if (pthread_rwlock_trywrlock (g_rw_lock_get_impl (rw_lock)) != 0)
583 * g_rw_lock_writer_unlock:
584 * @rw_lock: a #GRWLock
586 * Release a write lock on @rw_lock.
588 * Calling g_rw_lock_writer_unlock() on a lock that is not held
589 * by the current thread leads to undefined behaviour.
594 g_rw_lock_writer_unlock (GRWLock *rw_lock)
596 pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
600 * g_rw_lock_reader_lock:
601 * @rw_lock: a #GRWLock
603 * Obtain a read lock on @rw_lock. If another thread currently holds
604 * the write lock on @rw_lock, the current thread will block until the
605 * write lock was (held and) released. If another thread does not hold
606 * the write lock, but is waiting for it, it is implementation defined
607 * whether the reader or writer will block. Read locks can be taken
610 * Calling g_rw_lock_reader_lock() while the current thread already
611 * owns a write lock leads to undefined behaviour. Read locks however
612 * can be taken recursively, in which case you need to make sure to
613 * call g_rw_lock_reader_unlock() the same amount of times.
615 * It is implementation-defined how many read locks are allowed to be
616 * held on the same lock simultaneously. If the limit is hit,
617 * or if a deadlock is detected, a critical warning will be emitted.
622 g_rw_lock_reader_lock (GRWLock *rw_lock)
624 int retval = pthread_rwlock_rdlock (g_rw_lock_get_impl (rw_lock));
627 g_critical ("Failed to get RW lock %p: %s", rw_lock, g_strerror (retval));
631 * g_rw_lock_reader_trylock:
632 * @rw_lock: a #GRWLock
634 * Tries to obtain a read lock on @rw_lock and returns %TRUE if
635 * the read lock was successfully obtained. Otherwise it
638 * Returns: %TRUE if @rw_lock could be locked
643 g_rw_lock_reader_trylock (GRWLock *rw_lock)
645 if (pthread_rwlock_tryrdlock (g_rw_lock_get_impl (rw_lock)) != 0)
652 * g_rw_lock_reader_unlock:
653 * @rw_lock: a #GRWLock
655 * Release a read lock on @rw_lock.
657 * Calling g_rw_lock_reader_unlock() on a lock that is not held
658 * by the current thread leads to undefined behaviour.
663 g_rw_lock_reader_unlock (GRWLock *rw_lock)
665 pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
670 #if !defined(USE_NATIVE_MUTEX)
672 static pthread_cond_t *
673 g_cond_impl_new (void)
675 pthread_condattr_t attr;
676 pthread_cond_t *cond;
679 pthread_condattr_init (&attr);
681 #ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
682 #elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
683 if G_UNLIKELY ((status = pthread_condattr_setclock (&attr, CLOCK_MONOTONIC)) != 0)
684 g_thread_abort (status, "pthread_condattr_setclock");
686 #error Cannot support GCond on your platform.
689 cond = malloc (sizeof (pthread_cond_t));
690 if G_UNLIKELY (cond == NULL)
691 g_thread_abort (errno, "malloc");
693 if G_UNLIKELY ((status = pthread_cond_init (cond, &attr)) != 0)
694 g_thread_abort (status, "pthread_cond_init");
696 pthread_condattr_destroy (&attr);
702 g_cond_impl_free (pthread_cond_t *cond)
704 pthread_cond_destroy (cond);
708 static inline pthread_cond_t *
709 g_cond_get_impl (GCond *cond)
711 pthread_cond_t *impl = g_atomic_pointer_get (&cond->p);
713 if G_UNLIKELY (impl == NULL)
715 impl = g_cond_impl_new ();
716 if (!g_atomic_pointer_compare_and_exchange (&cond->p, NULL, impl))
717 g_cond_impl_free (impl);
726 * @cond: an uninitialized #GCond
728 * Initialises a #GCond so that it can be used.
730 * This function is useful to initialise a #GCond that has been
731 * allocated as part of a larger structure. It is not necessary to
732 * initialise a #GCond that has been statically allocated.
734 * To undo the effect of g_cond_init() when a #GCond is no longer
735 * needed, use g_cond_clear().
737 * Calling g_cond_init() on an already-initialised #GCond leads
738 * to undefined behaviour.
743 g_cond_init (GCond *cond)
745 cond->p = g_cond_impl_new ();
750 * @cond: an initialised #GCond
752 * Frees the resources allocated to a #GCond with g_cond_init().
754 * This function should not be used with a #GCond that has been
755 * statically allocated.
757 * Calling g_cond_clear() for a #GCond on which threads are
758 * blocking leads to undefined behaviour.
763 g_cond_clear (GCond *cond)
765 g_cond_impl_free (cond->p);
771 * @mutex: a #GMutex that is currently locked
773 * Atomically releases @mutex and waits until @cond is signalled.
774 * When this function returns, @mutex is locked again and owned by the
777 * When using condition variables, it is possible that a spurious wakeup
778 * may occur (ie: g_cond_wait() returns even though g_cond_signal() was
779 * not called). It's also possible that a stolen wakeup may occur.
780 * This is when g_cond_signal() is called, but another thread acquires
781 * @mutex before this thread and modifies the state of the program in
782 * such a way that when g_cond_wait() is able to return, the expected
783 * condition is no longer met.
785 * For this reason, g_cond_wait() must always be used in a loop. See
786 * the documentation for #GCond for a complete example.
789 g_cond_wait (GCond *cond,
794 if G_UNLIKELY ((status = pthread_cond_wait (g_cond_get_impl (cond), g_mutex_get_impl (mutex))) != 0)
795 g_thread_abort (status, "pthread_cond_wait");
802 * If threads are waiting for @cond, at least one of them is unblocked.
803 * If no threads are waiting for @cond, this function has no effect.
804 * It is good practice to hold the same lock as the waiting thread
805 * while calling this function, though not required.
808 g_cond_signal (GCond *cond)
812 if G_UNLIKELY ((status = pthread_cond_signal (g_cond_get_impl (cond))) != 0)
813 g_thread_abort (status, "pthread_cond_signal");
820 * If threads are waiting for @cond, all of them are unblocked.
821 * If no threads are waiting for @cond, this function has no effect.
822 * It is good practice to lock the same mutex as the waiting threads
823 * while calling this function, though not required.
826 g_cond_broadcast (GCond *cond)
830 if G_UNLIKELY ((status = pthread_cond_broadcast (g_cond_get_impl (cond))) != 0)
831 g_thread_abort (status, "pthread_cond_broadcast");
837 * @mutex: a #GMutex that is currently locked
838 * @end_time: the monotonic time to wait until
840 * Waits until either @cond is signalled or @end_time has passed.
842 * As with g_cond_wait() it is possible that a spurious or stolen wakeup
843 * could occur. For that reason, waiting on a condition variable should
844 * always be in a loop, based on an explicitly-checked predicate.
846 * %TRUE is returned if the condition variable was signalled (or in the
847 * case of a spurious wakeup). %FALSE is returned if @end_time has
850 * The following code shows how to correctly perform a timed wait on a
851 * condition variable (extending the example presented in the
852 * documentation for #GCond):
854 * |[<!-- language="C" -->
856 * pop_data_timed (void)
861 * g_mutex_lock (&data_mutex);
863 * end_time = g_get_monotonic_time () + 5 * G_TIME_SPAN_SECOND;
864 * while (!current_data)
865 * if (!g_cond_wait_until (&data_cond, &data_mutex, end_time))
867 * // timeout has passed.
868 * g_mutex_unlock (&data_mutex);
872 * // there is data for us
873 * data = current_data;
874 * current_data = NULL;
876 * g_mutex_unlock (&data_mutex);
882 * Notice that the end time is calculated once, before entering the
883 * loop and reused. This is the motivation behind the use of absolute
884 * time on this API -- if a relative time of 5 seconds were passed
885 * directly to the call and a spurious wakeup occurred, the program would
886 * have to start over waiting again (which would lead to a total wait
887 * time of more than 5 seconds).
889 * Returns: %TRUE on a signal, %FALSE on a timeout
893 g_cond_wait_until (GCond *cond,
900 #ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
901 /* end_time is given relative to the monotonic clock as returned by
902 * g_get_monotonic_time().
904 * Since this pthreads wants the relative time, convert it back again.
907 gint64 now = g_get_monotonic_time ();
913 relative = end_time - now;
915 ts.tv_sec = relative / 1000000;
916 ts.tv_nsec = (relative % 1000000) * 1000;
918 if ((status = pthread_cond_timedwait_relative_np (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
921 #elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
922 /* This is the exact check we used during init to set the clock to
923 * monotonic, so if we're in this branch, timedwait() will already be
924 * expecting a monotonic clock.
927 ts.tv_sec = end_time / 1000000;
928 ts.tv_nsec = (end_time % 1000000) * 1000;
930 if ((status = pthread_cond_timedwait (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
934 #error Cannot support GCond on your platform.
937 if G_UNLIKELY (status != ETIMEDOUT)
938 g_thread_abort (status, "pthread_cond_timedwait");
943 #endif /* defined(USE_NATIVE_MUTEX) */
950 * The #GPrivate struct is an opaque data structure to represent a
951 * thread-local data key. It is approximately equivalent to the
952 * pthread_setspecific()/pthread_getspecific() APIs on POSIX and to
953 * TlsSetValue()/TlsGetValue() on Windows.
955 * If you don't already know why you might want this functionality,
956 * then you probably don't need it.
958 * #GPrivate is a very limited resource (as far as 128 per program,
959 * shared between all libraries). It is also not possible to destroy a
960 * #GPrivate after it has been used. As such, it is only ever acceptable
961 * to use #GPrivate in static scope, and even then sparingly so.
963 * See G_PRIVATE_INIT() for a couple of examples.
965 * The #GPrivate structure should be considered opaque. It should only
966 * be accessed via the g_private_ functions.
971 * @notify: a #GDestroyNotify
973 * A macro to assist with the static initialisation of a #GPrivate.
975 * This macro is useful for the case that a #GDestroyNotify function
976 * should be associated with the key. This is needed when the key will be
977 * used to point at memory that should be deallocated when the thread
980 * Additionally, the #GDestroyNotify will also be called on the previous
981 * value stored in the key when g_private_replace() is used.
983 * If no #GDestroyNotify is needed, then use of this macro is not
984 * required -- if the #GPrivate is declared in static scope then it will
985 * be properly initialised by default (ie: to all zeros). See the
988 * |[<!-- language="C" -->
989 * static GPrivate name_key = G_PRIVATE_INIT (g_free);
991 * // return value should not be freed
993 * get_local_name (void)
995 * return g_private_get (&name_key);
999 * set_local_name (const gchar *name)
1001 * g_private_replace (&name_key, g_strdup (name));
1005 * static GPrivate count_key; // no free function
1008 * get_local_count (void)
1010 * return GPOINTER_TO_INT (g_private_get (&count_key));
1014 * set_local_count (gint count)
1016 * g_private_set (&count_key, GINT_TO_POINTER (count));
1023 static pthread_key_t *
1024 g_private_impl_new (GDestroyNotify notify)
1029 key = malloc (sizeof (pthread_key_t));
1030 if G_UNLIKELY (key == NULL)
1031 g_thread_abort (errno, "malloc");
1032 status = pthread_key_create (key, notify);
1033 if G_UNLIKELY (status != 0)
1034 g_thread_abort (status, "pthread_key_create");
1040 g_private_impl_free (pthread_key_t *key)
1044 status = pthread_key_delete (*key);
1045 if G_UNLIKELY (status != 0)
1046 g_thread_abort (status, "pthread_key_delete");
1051 g_private_impl_new_direct (GDestroyNotify notify)
1053 gpointer impl = (void *) (gssize) -1;
1057 status = pthread_key_create (&key, notify);
1058 if G_UNLIKELY (status != 0)
1059 g_thread_abort (status, "pthread_key_create");
1061 memcpy (&impl, &key, sizeof (pthread_key_t));
1063 /* pthread_key_create could theoretically put a NULL value into key.
1064 * If that happens, waste the result and create a new one, since we
1065 * use NULL to mean "not yet allocated".
1067 * This will only happen once per program run.
1069 * We completely avoid this problem for the case where pthread_key_t
1070 * is smaller than void* (for example, on 64 bit Linux) by putting
1071 * some high bits in the value of 'impl' to start with. Since we only
1072 * overwrite part of the pointer, we will never end up with NULL.
1074 if (sizeof (pthread_key_t) == sizeof (gpointer))
1076 if G_UNLIKELY (impl == NULL)
1078 status = pthread_key_create (&key, notify);
1079 if G_UNLIKELY (status != 0)
1080 g_thread_abort (status, "pthread_key_create");
1082 memcpy (&impl, &key, sizeof (pthread_key_t));
1084 if G_UNLIKELY (impl == NULL)
1085 g_thread_abort (status, "pthread_key_create (gave NULL result twice)");
1093 g_private_impl_free_direct (gpointer impl)
1098 memcpy (&tmp, &impl, sizeof (pthread_key_t));
1100 status = pthread_key_delete (tmp);
1101 if G_UNLIKELY (status != 0)
1102 g_thread_abort (status, "pthread_key_delete");
1105 static inline pthread_key_t
1106 g_private_get_impl (GPrivate *key)
1108 if (sizeof (pthread_key_t) > sizeof (gpointer))
1110 pthread_key_t *impl = g_atomic_pointer_get (&key->p);
1112 if G_UNLIKELY (impl == NULL)
1114 impl = g_private_impl_new (key->notify);
1115 if (!g_atomic_pointer_compare_and_exchange (&key->p, NULL, impl))
1117 g_private_impl_free (impl);
1126 gpointer impl = g_atomic_pointer_get (&key->p);
1129 if G_UNLIKELY (impl == NULL)
1131 impl = g_private_impl_new_direct (key->notify);
1132 if (!g_atomic_pointer_compare_and_exchange (&key->p, NULL, impl))
1134 g_private_impl_free_direct (impl);
1139 memcpy (&tmp, &impl, sizeof (pthread_key_t));
1149 * Returns the current value of the thread local variable @key.
1151 * If the value has not yet been set in this thread, %NULL is returned.
1152 * Values are never copied between threads (when a new thread is
1153 * created, for example).
1155 * Returns: the thread-local value
1158 g_private_get (GPrivate *key)
1160 /* quote POSIX: No errors are returned from pthread_getspecific(). */
1161 return pthread_getspecific (g_private_get_impl (key));
1167 * @value: the new value
1169 * Sets the thread local variable @key to have the value @value in the
1172 * This function differs from g_private_replace() in the following way:
1173 * the #GDestroyNotify for @key is not called on the old value.
1176 g_private_set (GPrivate *key,
1181 if G_UNLIKELY ((status = pthread_setspecific (g_private_get_impl (key), value)) != 0)
1182 g_thread_abort (status, "pthread_setspecific");
1186 * g_private_replace:
1188 * @value: the new value
1190 * Sets the thread local variable @key to have the value @value in the
1193 * This function differs from g_private_set() in the following way: if
1194 * the previous value was non-%NULL then the #GDestroyNotify handler for
1195 * @key is run on it.
1200 g_private_replace (GPrivate *key,
1203 pthread_key_t impl = g_private_get_impl (key);
1207 old = pthread_getspecific (impl);
1209 if G_UNLIKELY ((status = pthread_setspecific (impl, value)) != 0)
1210 g_thread_abort (status, "pthread_setspecific");
1212 if (old && key->notify)
/* Abort with a diagnostic if a pthreads call returned non-zero.
 * @err is the return value of the call; @name describes it. */
#define posix_check_err(err, name) G_STMT_START{                        \
  int error = (err);                                                    \
  if (error)                                                            \
    g_error ("file %s: line %d (%s): error '%s' during '%s'",           \
             __FILE__, __LINE__, G_STRFUNC,                             \
             g_strerror (error), name);                                 \
  }G_STMT_END

/* Run @cmd and abort if it fails, using its own text as the name. */
#define posix_check_cmd(cmd) posix_check_err (cmd, #cmd)
1232 pthread_t system_thread;
1236 void *(*proxy) (void *);
1240 g_system_thread_free (GRealThread *thread)
1242 GThreadPosix *pt = (GThreadPosix *) thread;
1245 pthread_detach (pt->system_thread);
1247 g_mutex_clear (&pt->lock);
1249 g_slice_free (GThreadPosix, pt);
1253 g_system_thread_new (GThreadFunc proxy,
1260 GThreadPosix *thread;
1261 GRealThread *base_thread;
1262 pthread_attr_t attr;
1265 thread = g_slice_new0 (GThreadPosix);
1266 base_thread = (GRealThread*)thread;
1267 base_thread->ref_count = 2;
1268 base_thread->ours = TRUE;
1269 base_thread->thread.joinable = TRUE;
1270 base_thread->thread.func = func;
1271 base_thread->thread.data = data;
1272 base_thread->name = g_strdup (name);
1273 thread->proxy = proxy;
1275 posix_check_cmd (pthread_attr_init (&attr));
1277 #ifdef HAVE_PTHREAD_ATTR_SETSTACKSIZE
1280 #ifdef _SC_THREAD_STACK_MIN
1281 long min_stack_size = sysconf (_SC_THREAD_STACK_MIN);
1282 if (min_stack_size >= 0)
1283 stack_size = MAX ((gulong) min_stack_size, stack_size);
1284 #endif /* _SC_THREAD_STACK_MIN */
1285 /* No error check here, because some systems can't do it and
1286 * we simply don't want threads to fail because of that. */
1287 pthread_attr_setstacksize (&attr, stack_size);
1289 #endif /* HAVE_PTHREAD_ATTR_SETSTACKSIZE */
1291 #ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
1293 /* While this is the default, better be explicit about it */
1294 pthread_attr_setinheritsched (&attr, PTHREAD_INHERIT_SCHED);
1296 #endif /* HAVE_PTHREAD_ATTR_SETINHERITSCHED */
1298 ret = pthread_create (&thread->system_thread, &attr, (void* (*)(void*))proxy, thread);
1300 posix_check_cmd (pthread_attr_destroy (&attr));
1304 g_set_error (error, G_THREAD_ERROR, G_THREAD_ERROR_AGAIN,
1305 "Error creating thread: %s", g_strerror (ret));
1306 g_free (thread->thread.name);
1307 g_slice_free (GThreadPosix, thread);
1311 posix_check_err (ret, "pthread_create");
1313 g_mutex_init (&thread->lock);
1315 return (GRealThread *) thread;
/**
 * g_thread_yield:
 *
 * Causes the calling thread to voluntarily relinquish the CPU, so
 * that other threads can run.
 *
 * This function is often used as a method to make busy wait less evil.
 */
void
g_thread_yield (void)
{
  sched_yield ();
}
1333 g_system_thread_wait (GRealThread *thread)
1335 GThreadPosix *pt = (GThreadPosix *) thread;
1337 g_mutex_lock (&pt->lock);
1341 posix_check_cmd (pthread_join (pt->system_thread, NULL));
1345 g_mutex_unlock (&pt->lock);
/* Terminate the calling thread immediately. */
void
g_system_thread_exit (void)
{
  pthread_exit (NULL);
}
1355 g_system_thread_set_name (const gchar *name)
1357 #if defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID)
1358 pthread_setname_np (name); /* on OS X and iOS */
1359 #elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID)
1360 pthread_setname_np (pthread_self (), name); /* on Linux and Solaris */
1361 #elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID_AND_ARG)
1362 pthread_setname_np (pthread_self (), "%s", (gchar *) name); /* on NetBSD */
1363 #elif defined(HAVE_PTHREAD_SET_NAME_NP)
1364 pthread_set_name_np (pthread_self (), name); /* on FreeBSD, DragonFlyBSD, OpenBSD */
1368 /* {{{1 GMutex and GCond futex implementation */
1370 #if defined(USE_NATIVE_MUTEX)
1371 /* We should expand the set of operations available in gatomic once we
1372 * have better C11 support in GCC in common distributions (ie: 4.9).
1374 * Before then, let's define a couple of useful things for our own
1378 #ifdef HAVE_STDATOMIC_H
1380 #include <stdatomic.h>
1382 #define exchange_acquire(ptr, new) \
1383 atomic_exchange_explicit((atomic_uint *) (ptr), (new), __ATOMIC_ACQUIRE)
1384 #define compare_exchange_acquire(ptr, old, new) \
1385 atomic_compare_exchange_strong_explicit((atomic_uint *) (ptr), (old), (new), \
1386 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
1388 #define exchange_release(ptr, new) \
1389 atomic_exchange_explicit((atomic_uint *) (ptr), (new), __ATOMIC_RELEASE)
1390 #define store_release(ptr, new) \
1391 atomic_store_explicit((atomic_uint *) (ptr), (new), __ATOMIC_RELEASE)
1395 #define exchange_acquire(ptr, new) \
1396 __atomic_exchange_4((ptr), (new), __ATOMIC_ACQUIRE)
1397 #define compare_exchange_acquire(ptr, old, new) \
1398 __atomic_compare_exchange_4((ptr), (old), (new), 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
1400 #define exchange_release(ptr, new) \
1401 __atomic_exchange_4((ptr), (new), __ATOMIC_RELEASE)
1402 #define store_release(ptr, new) \
1403 __atomic_store_4((ptr), (new), __ATOMIC_RELEASE)
1407 /* Our strategy for the mutex is pretty simple:
1411 * 1: acquired by one thread only, no contention
/* States of the futex-based GMutex word (mutex->i[0]). */
typedef enum {
  G_MUTEX_STATE_EMPTY = 0,     /* unlocked */
  G_MUTEX_STATE_OWNED,         /* locked, no waiters */
  G_MUTEX_STATE_CONTENDED,     /* locked, wake needed on unlock */
} GMutexState;
1423 * As such, attempting to acquire the lock should involve an increment.
1424 * If we find that the previous value was 0 then we can return
1427 * On unlock, we always store 0 to indicate that the lock is available.
1428 * If the value there was 1 before then we didn't have contention and
1429 * can return immediately. If the value was something other than 1 then
1430 * we have the contended case and need to wake a waiter.
1432 * If it was not 0 then there is another thread holding it and we must
1433 * wait. We must always ensure that we mark a value >1 while we are
1434 * waiting in order to instruct the holder to do a wake operation on
g_mutex_init (GMutex *mutex)
  /* g_mutex_init:
   * @mutex: an uninitialised #GMutex
   *
   * Put the futex-based mutex into the unlocked state: EMPTY (0) means
   * available with no waiters. */
  mutex->i[0] = G_MUTEX_STATE_EMPTY;
/* g_mutex_clear:
 * @mutex: a #GMutex
 *
 * A futex-based mutex owns no kernel object, so there is nothing to free;
 * this only diagnoses API misuse.  Clearing a mutex that is not in the
 * EMPTY (unlocked) state is a caller bug. */
g_mutex_clear (GMutex *mutex)
  if G_UNLIKELY (mutex->i[0] != G_MUTEX_STATE_EMPTY)
      fprintf (stderr, "g_mutex_clear() called on uninitialised or locked mutex\n");
/* g_mutex_lock_slowpath:
 * @mutex: a #GMutex
 *
 * Contended path of g_mutex_lock(): repeatedly exchange the lock word to
 * CONTENDED and sleep on the futex while it stays CONTENDED.  The loop
 * exits only when the exchange observed EMPTY, i.e. we took the lock
 * ourselves — and we deliberately leave it marked CONTENDED so that our
 * eventual unlock performs a futex wake for any other sleeper. */
g_mutex_lock_slowpath (GMutex *mutex)
  /* Set to contended. If it was empty before then we
   * just acquired the lock.
   * Otherwise, sleep for as long as the contended state remains...
  while (exchange_acquire (&mutex->i[0], G_MUTEX_STATE_CONTENDED) != G_MUTEX_STATE_EMPTY)
    g_futex_simple (&mutex->i[0], (gsize) FUTEX_WAIT_PRIVATE,
                    G_MUTEX_STATE_CONTENDED, NULL);
/* g_mutex_unlock_slowpath:
 * @mutex: a #GMutex
 * @prev: the value the releasing exchange found in the lock word
 *
 * Out-of-line part of g_mutex_unlock(): diagnose unlocking of an unlocked
 * mutex, then wake one futex waiter (the waiter re-marks the lock
 * CONTENDED itself in g_mutex_lock_slowpath()). */
g_mutex_unlock_slowpath (GMutex *mutex,
  /* We seem to get better code for the uncontended case by splitting
  if G_UNLIKELY (prev == G_MUTEX_STATE_EMPTY)
      fprintf (stderr, "Attempt to unlock mutex that was not locked\n");
  g_futex_simple (&mutex->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL); /* wake one waiter */
/* g_mutex_lock:
 * @mutex: a #GMutex
 *
 * Fast path: a single compare-and-exchange moving the lock word from
 * EMPTY to OWNED.  Any other observed state means contention (or the lock
 * being held) and we fall into the futex-based slow path. */
g_mutex_lock (GMutex *mutex)
  /* empty -> owned and we're done. Anything else, and we need to wait... */
  if G_UNLIKELY (!g_atomic_int_compare_and_exchange (&mutex->i[0],
                                                     G_MUTEX_STATE_EMPTY,
                                                     G_MUTEX_STATE_OWNED))
    g_mutex_lock_slowpath (mutex);
/* g_mutex_unlock:
 * @mutex: a #GMutex
 *
 * Release-exchange the lock word back to EMPTY (0).  If the previous
 * value was exactly OWNED there was no contention and we are done;
 * anything else means a waiter may be asleep, so take the slow path to
 * issue a futex wake. */
g_mutex_unlock (GMutex *mutex)
  prev = exchange_release (&mutex->i[0], G_MUTEX_STATE_EMPTY);
  /* 1-> 0 and we're done. Anything else and we need to signal... */
  if G_UNLIKELY (prev != G_MUTEX_STATE_OWNED)
    g_mutex_unlock_slowpath (mutex, prev);
/* g_mutex_trylock:
 * @mutex: a #GMutex
 *
 * Returns: %TRUE iff the lock word moved from exactly EMPTY to OWNED;
 * %FALSE (without blocking) in every other case.  On failure the CAS
 * writes the current value into @empty, which is deliberately ignored. */
g_mutex_trylock (GMutex *mutex)
  GMutexState empty = G_MUTEX_STATE_EMPTY;
  /* We don't want to touch the value at all unless we can move it from
   * exactly empty to owned.
  return compare_exchange_acquire (&mutex->i[0], &empty, G_MUTEX_STATE_OWNED);
1520 /* Condition variables are implemented in a rather simple way as well.
1521 * In many ways, futex() as an abstraction is even more ideally suited
1522 * to condition variables than it is to mutexes.
1524 * We store a generation counter. We sample it with the lock held and
1525 * unlock before sleeping on the futex.
1527 * Signalling simply involves increasing the counter and making the
1528 * appropriate futex call.
1530 * The only thing that is the slightest bit complicated is timed waits
1531 * because we must convert our absolute time to relative.
g_cond_init (GCond *cond)
  /* NOTE(review): body elided in this chunk.  Per the block comment above,
   * a GCond is just a generation counter, so initialisation presumably
   * resets that counter — confirm against the full source. */
g_cond_clear (GCond *cond)
  /* NOTE(review): body elided in this chunk.  A futex-based GCond appears
   * to hold no kernel resources in the visible code, so clearing likely
   * has nothing to release — confirm against the full source. */
/* g_cond_wait:
 * Release @mutex and sleep until @cond is signalled, then re-acquire
 * @mutex before returning. */
g_cond_wait (GCond *cond,
  guint sampled = (guint) g_atomic_int_get (&cond->i[0]);
  /* The generation counter is sampled while the mutex is still held: if a
   * signal/broadcast slips in between the unlock and the futex call, it
   * bumps the counter, FUTEX_WAIT sees a mismatch with @sampled and
   * returns immediately — no lost wakeups.  Wakeups may still be
   * spurious; callers re-check their predicate as usual for condvars. */
  g_mutex_unlock (mutex);
  g_futex_simple (&cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, NULL);
  g_mutex_lock (mutex);
/* g_cond_signal:
 * @cond: a #GCond
 *
 * Bump the generation counter, then wake at most one futex waiter.  The
 * increment comes first so a racing waiter that already sampled the old
 * value will fail its FUTEX_WAIT comparison instead of sleeping. */
g_cond_signal (GCond *cond)
  g_atomic_int_inc (&cond->i[0]);
  g_futex_simple (&cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL);
/* g_cond_broadcast:
 * @cond: a #GCond
 *
 * Same as g_cond_signal() but with a wake count of INT_MAX, i.e. wake
 * every thread currently sleeping on the futex. */
g_cond_broadcast (GCond *cond)
  g_atomic_int_inc (&cond->i[0]);
  g_futex_simple (&cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) INT_MAX, NULL);
1573 g_cond_wait_until (GCond *cond,
1577 struct timespec now;
1578 struct timespec span;
1587 clock_gettime (CLOCK_MONOTONIC, &now);
1588 span.tv_sec = (end_time / 1000000) - now.tv_sec;
1589 span.tv_nsec = ((end_time % 1000000) * 1000) - now.tv_nsec;
1590 if (span.tv_nsec < 0)
1592 span.tv_nsec += 1000000000;
1596 if (span.tv_sec < 0)
1599 /* `struct timespec` as defined by the libc headers does not necessarily
1600 * have any relation to the one used by the kernel for the `futex` syscall.
1602 * Specifically, the libc headers might use 64-bit `time_t` while the kernel
1603 * headers use 32-bit types on certain systems.
1605 * To get around this problem we
1606 * a) check if `futex_time64` is available, which only exists on 32-bit
1607 * platforms and always uses 64-bit `time_t`.
1608 * b) otherwise (or if that returns `ENOSYS`), we call the normal `futex`
1609 * syscall with the `struct timespec` used by the kernel. By default, we
1610 * use `__kernel_long_t` for both its fields, which is equivalent to
1611 * `__kernel_old_time_t` and is available in the kernel headers for a
1613 * c) With very old headers (~2.6.x), `__kernel_long_t` is not available, and
1614 * we use an older definition that uses `__kernel_time_t` and `long`.
1616 * Also some 32-bit systems do not define `__NR_futex` at all and only
1617 * define `__NR_futex_time64`.
1620 sampled = cond->i[0];
1621 g_mutex_unlock (mutex);
1623 #ifdef __NR_futex_time64
1631 span_arg.tv_sec = span.tv_sec;
1632 span_arg.tv_nsec = span.tv_nsec;
1634 res = syscall (__NR_futex_time64, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, &span_arg);
1636 /* If the syscall does not exist (`ENOSYS`), we retry again below with the
1637 * normal `futex` syscall. This can happen if newer kernel headers are
1638 * used than the kernel that is actually running.
1641 if (res >= 0 || errno != ENOSYS)
1642 # endif /* defined(__NR_futex) */
1644 success = (res < 0 && errno == ETIMEDOUT) ? FALSE : TRUE;
1645 g_mutex_lock (mutex);
1654 # ifdef __kernel_long_t
1655 # define KERNEL_SPAN_SEC_TYPE __kernel_long_t
1658 __kernel_long_t tv_sec;
1659 __kernel_long_t tv_nsec;
1662 /* Very old kernel headers: version 2.6.32 and thereabouts */
1663 # define KERNEL_SPAN_SEC_TYPE __kernel_time_t
1666 __kernel_time_t tv_sec;
1670 /* Make sure to only ever call this if the end time actually fits into the target type */
1671 if (G_UNLIKELY (sizeof (KERNEL_SPAN_SEC_TYPE) < 8 && span.tv_sec > G_MAXINT32))
1672 g_error ("%s: Can’t wait for more than %us", G_STRFUNC, G_MAXINT32);
1674 span_arg.tv_sec = span.tv_sec;
1675 span_arg.tv_nsec = span.tv_nsec;
1677 res = syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, &span_arg);
1678 success = (res < 0 && errno == ETIMEDOUT) ? FALSE : TRUE;
1679 g_mutex_lock (mutex);
1683 # undef KERNEL_SPAN_SEC_TYPE
1684 #endif /* defined(__NR_futex) */
1686 /* We can't end up here because of the checks above */
1687 g_assert_not_reached ();
1693 /* vim:set foldmethod=marker: */