1 /* GLIB - Library of useful routines for C programming
2 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
4 * gthread.c: posix thread system implementation
5 * Copyright 1998 Sebastian Wilhelmi; University of Karlsruhe
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 * Modified by the GLib Team and others 1997-2000. See the AUTHORS
23 * file for a list of people on the GLib Team. See the ChangeLog
24 * files for a list of changes. These files are distributed with
25 * GLib at ftp://ftp.gtk.org/pub/gtk/.
28 /* The GMutex, GCond and GPrivate implementations in this file are some
29 * of the lowest-level code in GLib. All other parts of GLib (messages,
30 * memory, slices, etc) assume that they can freely use these facilities
31 * without risking recursion.
33 * As such, these functions are NOT permitted to call any other part of
36 * The thread manipulation functions (create, exit, join, etc.) have
37 * more freedom -- they can do as they please.
45 #include "gmessages.h"
47 #include "gstrfuncs.h"
48 #include "gtestutils.h"
49 #include "gthreadprivate.h"
61 #ifdef HAVE_PTHREAD_SET_NAME_NP
62 #include <pthread_np.h>
71 #if defined(HAVE_SYS_SCHED_GETATTR)
72 #include <sys/syscall.h>
75 #if defined(HAVE_FUTEX) && \
76 (defined(HAVE_STDATOMIC_H) || defined(__ATOMIC_SEQ_CST))
77 #define USE_NATIVE_MUTEX
81 g_thread_abort (gint status,
82 const gchar *function)
84 fprintf (stderr, "GLib (gthread-posix.c): Unexpected error from C library during '%s': %s. Aborting.\n",
85 function, strerror (status));
91 #if !defined(USE_NATIVE_MUTEX)
93 static pthread_mutex_t *
94 g_mutex_impl_new (void)
96 pthread_mutexattr_t *pattr = NULL;
97 pthread_mutex_t *mutex;
99 #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
100 pthread_mutexattr_t attr;
103 mutex = malloc (sizeof (pthread_mutex_t));
104 if G_UNLIKELY (mutex == NULL)
105 g_thread_abort (errno, "malloc");
107 #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
108 pthread_mutexattr_init (&attr);
109 pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
113 if G_UNLIKELY ((status = pthread_mutex_init (mutex, pattr)) != 0)
114 g_thread_abort (status, "pthread_mutex_init");
116 #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
117 pthread_mutexattr_destroy (&attr);
124 g_mutex_impl_free (pthread_mutex_t *mutex)
126 pthread_mutex_destroy (mutex);
130 static inline pthread_mutex_t *
131 g_mutex_get_impl (GMutex *mutex)
133 pthread_mutex_t *impl = g_atomic_pointer_get (&mutex->p);
135 if G_UNLIKELY (impl == NULL)
137 impl = g_mutex_impl_new ();
138 if (!g_atomic_pointer_compare_and_exchange (&mutex->p, NULL, impl))
139 g_mutex_impl_free (impl);
149 * @mutex: an uninitialized #GMutex
151 * Initializes a #GMutex so that it can be used.
153 * This function is useful to initialize a mutex that has been
154 * allocated on the stack, or as part of a larger structure.
155 * It is not necessary to initialize a mutex that has been
156 * statically allocated.
158 * |[<!-- language="C" -->
166 * b = g_new (Blob, 1);
167 * g_mutex_init (&b->m);
170 * To undo the effect of g_mutex_init() when a mutex is no longer
171 * needed, use g_mutex_clear().
173 * Calling g_mutex_init() on an already initialized #GMutex leads
174 * to undefined behaviour.
179 g_mutex_init (GMutex *mutex)
181 mutex->p = g_mutex_impl_new ();
186 * @mutex: an initialized #GMutex
188 * Frees the resources allocated to a mutex with g_mutex_init().
190 * This function should not be used with a #GMutex that has been
191 * statically allocated.
193 * Calling g_mutex_clear() on a locked mutex leads to undefined
199 g_mutex_clear (GMutex *mutex)
201 g_mutex_impl_free (mutex->p);
208 * Locks @mutex. If @mutex is already locked by another thread, the
209 * current thread will block until @mutex is unlocked by the other
212 * #GMutex is neither guaranteed to be recursive nor to be
213 * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
214 * already been locked by the same thread results in undefined behaviour
215 * (including but not limited to deadlocks).
218 g_mutex_lock (GMutex *mutex)
222 if G_UNLIKELY ((status = pthread_mutex_lock (g_mutex_get_impl (mutex))) != 0)
223 g_thread_abort (status, "pthread_mutex_lock");
230 * Unlocks @mutex. If another thread is blocked in a g_mutex_lock()
231 * call for @mutex, it will become unblocked and can lock @mutex itself.
233 * Calling g_mutex_unlock() on a mutex that is not locked by the
234 * current thread leads to undefined behaviour.
237 g_mutex_unlock (GMutex *mutex)
241 if G_UNLIKELY ((status = pthread_mutex_unlock (g_mutex_get_impl (mutex))) != 0)
242 g_thread_abort (status, "pthread_mutex_unlock");
249 * Tries to lock @mutex. If @mutex is already locked by another thread,
250 * it immediately returns %FALSE. Otherwise it locks @mutex and returns
253 * #GMutex is neither guaranteed to be recursive nor to be
254 * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
255 * already been locked by the same thread results in undefined behaviour
256 * (including but not limited to deadlocks or arbitrary return values).
258 * Returns: %TRUE if @mutex could be locked
261 g_mutex_trylock (GMutex *mutex)
265 if G_LIKELY ((status = pthread_mutex_trylock (g_mutex_get_impl (mutex))) == 0)
268 if G_UNLIKELY (status != EBUSY)
269 g_thread_abort (status, "pthread_mutex_trylock");
274 #endif /* !defined(USE_NATIVE_MUTEX) */
278 static pthread_mutex_t *
279 g_rec_mutex_impl_new (void)
281 pthread_mutexattr_t attr;
282 pthread_mutex_t *mutex;
284 mutex = malloc (sizeof (pthread_mutex_t));
285 if G_UNLIKELY (mutex == NULL)
286 g_thread_abort (errno, "malloc");
288 pthread_mutexattr_init (&attr);
289 pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
290 pthread_mutex_init (mutex, &attr);
291 pthread_mutexattr_destroy (&attr);
297 g_rec_mutex_impl_free (pthread_mutex_t *mutex)
299 pthread_mutex_destroy (mutex);
303 static inline pthread_mutex_t *
304 g_rec_mutex_get_impl (GRecMutex *rec_mutex)
306 pthread_mutex_t *impl = g_atomic_pointer_get (&rec_mutex->p);
308 if G_UNLIKELY (impl == NULL)
310 impl = g_rec_mutex_impl_new ();
311 if (!g_atomic_pointer_compare_and_exchange (&rec_mutex->p, NULL, impl))
312 g_rec_mutex_impl_free (impl);
321 * @rec_mutex: an uninitialized #GRecMutex
323 * Initializes a #GRecMutex so that it can be used.
325 * This function is useful to initialize a recursive mutex
326 * that has been allocated on the stack, or as part of a larger
329 * It is not necessary to initialise a recursive mutex that has been
330 * statically allocated.
332 * |[<!-- language="C" -->
340 * b = g_new (Blob, 1);
341 * g_rec_mutex_init (&b->m);
344 * Calling g_rec_mutex_init() on an already initialized #GRecMutex
345 * leads to undefined behaviour.
347 * To undo the effect of g_rec_mutex_init() when a recursive mutex
348 * is no longer needed, use g_rec_mutex_clear().
353 g_rec_mutex_init (GRecMutex *rec_mutex)
355 rec_mutex->p = g_rec_mutex_impl_new ();
360 * @rec_mutex: an initialized #GRecMutex
362 * Frees the resources allocated to a recursive mutex with
363 * g_rec_mutex_init().
365 * This function should not be used with a #GRecMutex that has been
366 * statically allocated.
368 * Calling g_rec_mutex_clear() on a locked recursive mutex leads
369 * to undefined behaviour.
374 g_rec_mutex_clear (GRecMutex *rec_mutex)
376 g_rec_mutex_impl_free (rec_mutex->p);
381 * @rec_mutex: a #GRecMutex
383 * Locks @rec_mutex. If @rec_mutex is already locked by another
384 * thread, the current thread will block until @rec_mutex is
385 * unlocked by the other thread. If @rec_mutex is already locked
386 * by the current thread, the 'lock count' of @rec_mutex is increased.
387 * The mutex will only become available again when it is unlocked
388 * as many times as it has been locked.
393 g_rec_mutex_lock (GRecMutex *mutex)
395 pthread_mutex_lock (g_rec_mutex_get_impl (mutex));
399 * g_rec_mutex_unlock:
400 * @rec_mutex: a #GRecMutex
402 * Unlocks @rec_mutex. If another thread is blocked in a
403 * g_rec_mutex_lock() call for @rec_mutex, it will become unblocked
404 * and can lock @rec_mutex itself.
406 * Calling g_rec_mutex_unlock() on a recursive mutex that is not
407 * locked by the current thread leads to undefined behaviour.
412 g_rec_mutex_unlock (GRecMutex *rec_mutex)
414 pthread_mutex_unlock (rec_mutex->p);
418 * g_rec_mutex_trylock:
419 * @rec_mutex: a #GRecMutex
421 * Tries to lock @rec_mutex. If @rec_mutex is already locked
422 * by another thread, it immediately returns %FALSE. Otherwise
423 * it locks @rec_mutex and returns %TRUE.
425 * Returns: %TRUE if @rec_mutex could be locked
430 g_rec_mutex_trylock (GRecMutex *rec_mutex)
432 if (pthread_mutex_trylock (g_rec_mutex_get_impl (rec_mutex)) != 0)
440 static pthread_rwlock_t *
441 g_rw_lock_impl_new (void)
443 pthread_rwlock_t *rwlock;
446 rwlock = malloc (sizeof (pthread_rwlock_t));
447 if G_UNLIKELY (rwlock == NULL)
448 g_thread_abort (errno, "malloc");
450 if G_UNLIKELY ((status = pthread_rwlock_init (rwlock, NULL)) != 0)
451 g_thread_abort (status, "pthread_rwlock_init");
457 g_rw_lock_impl_free (pthread_rwlock_t *rwlock)
459 pthread_rwlock_destroy (rwlock);
463 static inline pthread_rwlock_t *
464 g_rw_lock_get_impl (GRWLock *lock)
466 pthread_rwlock_t *impl = g_atomic_pointer_get (&lock->p);
468 if G_UNLIKELY (impl == NULL)
470 impl = g_rw_lock_impl_new ();
471 if (!g_atomic_pointer_compare_and_exchange (&lock->p, NULL, impl))
472 g_rw_lock_impl_free (impl);
481 * @rw_lock: an uninitialized #GRWLock
483 * Initializes a #GRWLock so that it can be used.
485 * This function is useful to initialize a lock that has been
486 * allocated on the stack, or as part of a larger structure. It is not
487 * necessary to initialise a reader-writer lock that has been statically
490 * |[<!-- language="C" -->
498 * b = g_new (Blob, 1);
499 * g_rw_lock_init (&b->l);
502 * To undo the effect of g_rw_lock_init() when a lock is no longer
503 * needed, use g_rw_lock_clear().
505 * Calling g_rw_lock_init() on an already initialized #GRWLock leads
506 * to undefined behaviour.
511 g_rw_lock_init (GRWLock *rw_lock)
513 rw_lock->p = g_rw_lock_impl_new ();
518 * @rw_lock: an initialized #GRWLock
520 * Frees the resources allocated to a lock with g_rw_lock_init().
522 * This function should not be used with a #GRWLock that has been
523 * statically allocated.
525 * Calling g_rw_lock_clear() when any thread holds the lock
526 * leads to undefined behaviour.
531 g_rw_lock_clear (GRWLock *rw_lock)
533 g_rw_lock_impl_free (rw_lock->p);
537 * g_rw_lock_writer_lock:
538 * @rw_lock: a #GRWLock
540 * Obtain a write lock on @rw_lock. If any thread already holds
541 * a read or write lock on @rw_lock, the current thread will block
542 * until all other threads have dropped their locks on @rw_lock.
547 g_rw_lock_writer_lock (GRWLock *rw_lock)
549 int retval = pthread_rwlock_wrlock (g_rw_lock_get_impl (rw_lock));
552 g_critical ("Failed to get RW lock %p: %s", rw_lock, g_strerror (retval));
556 * g_rw_lock_writer_trylock:
557 * @rw_lock: a #GRWLock
559 * Tries to obtain a write lock on @rw_lock. If any other thread holds
560 * a read or write lock on @rw_lock, it immediately returns %FALSE.
561 * Otherwise it locks @rw_lock and returns %TRUE.
563 * Returns: %TRUE if @rw_lock could be locked
568 g_rw_lock_writer_trylock (GRWLock *rw_lock)
570 if (pthread_rwlock_trywrlock (g_rw_lock_get_impl (rw_lock)) != 0)
577 * g_rw_lock_writer_unlock:
578 * @rw_lock: a #GRWLock
580 * Release a write lock on @rw_lock.
582 * Calling g_rw_lock_writer_unlock() on a lock that is not held
583 * by the current thread leads to undefined behaviour.
588 g_rw_lock_writer_unlock (GRWLock *rw_lock)
590 pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
594 * g_rw_lock_reader_lock:
595 * @rw_lock: a #GRWLock
597 * Obtain a read lock on @rw_lock. If another thread currently holds
598 * the write lock on @rw_lock, the current thread will block. If another thread
599 * does not hold the write lock, but is waiting for it, it is implementation
600 * defined whether the reader or writer will block. Read locks can be taken
603 * It is implementation-defined how many threads are allowed to
604 * hold read locks on the same lock simultaneously. If the limit is hit,
605 * or if a deadlock is detected, a critical warning will be emitted.
610 g_rw_lock_reader_lock (GRWLock *rw_lock)
612 int retval = pthread_rwlock_rdlock (g_rw_lock_get_impl (rw_lock));
615 g_critical ("Failed to get RW lock %p: %s", rw_lock, g_strerror (retval));
619 * g_rw_lock_reader_trylock:
620 * @rw_lock: a #GRWLock
622 * Tries to obtain a read lock on @rw_lock and returns %TRUE if
623 * the read lock was successfully obtained. Otherwise it
626 * Returns: %TRUE if @rw_lock could be locked
631 g_rw_lock_reader_trylock (GRWLock *rw_lock)
633 if (pthread_rwlock_tryrdlock (g_rw_lock_get_impl (rw_lock)) != 0)
640 * g_rw_lock_reader_unlock:
641 * @rw_lock: a #GRWLock
643 * Release a read lock on @rw_lock.
645 * Calling g_rw_lock_reader_unlock() on a lock that is not held
646 * by the current thread leads to undefined behaviour.
651 g_rw_lock_reader_unlock (GRWLock *rw_lock)
653 pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
658 #if !defined(USE_NATIVE_MUTEX)
660 static pthread_cond_t *
661 g_cond_impl_new (void)
663 pthread_condattr_t attr;
664 pthread_cond_t *cond;
667 pthread_condattr_init (&attr);
669 #ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
670 #elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
671 if G_UNLIKELY ((status = pthread_condattr_setclock (&attr, CLOCK_MONOTONIC)) != 0)
672 g_thread_abort (status, "pthread_condattr_setclock");
674 #error Cannot support GCond on your platform.
677 cond = malloc (sizeof (pthread_cond_t));
678 if G_UNLIKELY (cond == NULL)
679 g_thread_abort (errno, "malloc");
681 if G_UNLIKELY ((status = pthread_cond_init (cond, &attr)) != 0)
682 g_thread_abort (status, "pthread_cond_init");
684 pthread_condattr_destroy (&attr);
690 g_cond_impl_free (pthread_cond_t *cond)
692 pthread_cond_destroy (cond);
696 static inline pthread_cond_t *
697 g_cond_get_impl (GCond *cond)
699 pthread_cond_t *impl = g_atomic_pointer_get (&cond->p);
701 if G_UNLIKELY (impl == NULL)
703 impl = g_cond_impl_new ();
704 if (!g_atomic_pointer_compare_and_exchange (&cond->p, NULL, impl))
705 g_cond_impl_free (impl);
714 * @cond: an uninitialized #GCond
716 * Initialises a #GCond so that it can be used.
718 * This function is useful to initialise a #GCond that has been
719 * allocated as part of a larger structure. It is not necessary to
720 * initialise a #GCond that has been statically allocated.
722 * To undo the effect of g_cond_init() when a #GCond is no longer
723 * needed, use g_cond_clear().
725 * Calling g_cond_init() on an already-initialised #GCond leads
726 * to undefined behaviour.
731 g_cond_init (GCond *cond)
733 cond->p = g_cond_impl_new ();
738 * @cond: an initialised #GCond
740 * Frees the resources allocated to a #GCond with g_cond_init().
742 * This function should not be used with a #GCond that has been
743 * statically allocated.
745 * Calling g_cond_clear() for a #GCond on which threads are
746 * blocking leads to undefined behaviour.
751 g_cond_clear (GCond *cond)
753 g_cond_impl_free (cond->p);
759 * @mutex: a #GMutex that is currently locked
761 * Atomically releases @mutex and waits until @cond is signalled.
762 * When this function returns, @mutex is locked again and owned by the
765 * When using condition variables, it is possible that a spurious wakeup
766 * may occur (ie: g_cond_wait() returns even though g_cond_signal() was
767 * not called). It's also possible that a stolen wakeup may occur.
768 * This is when g_cond_signal() is called, but another thread acquires
769 * @mutex before this thread and modifies the state of the program in
770 * such a way that when g_cond_wait() is able to return, the expected
771 * condition is no longer met.
773 * For this reason, g_cond_wait() must always be used in a loop. See
774 * the documentation for #GCond for a complete example.
777 g_cond_wait (GCond *cond,
782 if G_UNLIKELY ((status = pthread_cond_wait (g_cond_get_impl (cond), g_mutex_get_impl (mutex))) != 0)
783 g_thread_abort (status, "pthread_cond_wait");
790 * If threads are waiting for @cond, at least one of them is unblocked.
791 * If no threads are waiting for @cond, this function has no effect.
792 * It is good practice to hold the same lock as the waiting thread
793 * while calling this function, though not required.
796 g_cond_signal (GCond *cond)
800 if G_UNLIKELY ((status = pthread_cond_signal (g_cond_get_impl (cond))) != 0)
801 g_thread_abort (status, "pthread_cond_signal");
808 * If threads are waiting for @cond, all of them are unblocked.
809 * If no threads are waiting for @cond, this function has no effect.
810 * It is good practice to lock the same mutex as the waiting threads
811 * while calling this function, though not required.
814 g_cond_broadcast (GCond *cond)
818 if G_UNLIKELY ((status = pthread_cond_broadcast (g_cond_get_impl (cond))) != 0)
819 g_thread_abort (status, "pthread_cond_broadcast");
825 * @mutex: a #GMutex that is currently locked
826 * @end_time: the monotonic time to wait until
828 * Waits until either @cond is signalled or @end_time has passed.
830 * As with g_cond_wait() it is possible that a spurious or stolen wakeup
831 * could occur. For that reason, waiting on a condition variable should
832 * always be in a loop, based on an explicitly-checked predicate.
834 * %TRUE is returned if the condition variable was signalled (or in the
835 * case of a spurious wakeup). %FALSE is returned if @end_time has
838 * The following code shows how to correctly perform a timed wait on a
839 * condition variable (extending the example presented in the
840 * documentation for #GCond):
842 * |[<!-- language="C" -->
844 * pop_data_timed (void)
849 * g_mutex_lock (&data_mutex);
851 * end_time = g_get_monotonic_time () + 5 * G_TIME_SPAN_SECOND;
852 * while (!current_data)
853 * if (!g_cond_wait_until (&data_cond, &data_mutex, end_time))
855 * // timeout has passed.
856 * g_mutex_unlock (&data_mutex);
860 * // there is data for us
861 * data = current_data;
862 * current_data = NULL;
864 * g_mutex_unlock (&data_mutex);
870 * Notice that the end time is calculated once, before entering the
871 * loop and reused. This is the motivation behind the use of absolute
872 * time on this API -- if a relative time of 5 seconds were passed
873 * directly to the call and a spurious wakeup occurred, the program would
874 * have to start over waiting again (which would lead to a total wait
875 * time of more than 5 seconds).
877 * Returns: %TRUE on a signal, %FALSE on a timeout
881 g_cond_wait_until (GCond *cond,
888 #ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
889 /* end_time is given relative to the monotonic clock as returned by
890 * g_get_monotonic_time().
892 * Since this pthreads wants the relative time, convert it back again.
895 gint64 now = g_get_monotonic_time ();
901 relative = end_time - now;
903 ts.tv_sec = relative / 1000000;
904 ts.tv_nsec = (relative % 1000000) * 1000;
906 if ((status = pthread_cond_timedwait_relative_np (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
909 #elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
910 /* This is the exact check we used during init to set the clock to
911 * monotonic, so if we're in this branch, timedwait() will already be
912 * expecting a monotonic clock.
915 ts.tv_sec = end_time / 1000000;
916 ts.tv_nsec = (end_time % 1000000) * 1000;
918 if ((status = pthread_cond_timedwait (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
922 #error Cannot support GCond on your platform.
925 if G_UNLIKELY (status != ETIMEDOUT)
926 g_thread_abort (status, "pthread_cond_timedwait");
931 #endif /* defined(USE_NATIVE_MUTEX) */
938 * The #GPrivate struct is an opaque data structure to represent a
939 * thread-local data key. It is approximately equivalent to the
940 * pthread_setspecific()/pthread_getspecific() APIs on POSIX and to
941 * TlsSetValue()/TlsGetValue() on Windows.
943 * If you don't already know why you might want this functionality,
944 * then you probably don't need it.
946 * #GPrivate is a very limited resource (as far as 128 per program,
947 * shared between all libraries). It is also not possible to destroy a
948 * #GPrivate after it has been used. As such, it is only ever acceptable
949 * to use #GPrivate in static scope, and even then sparingly so.
951 * See G_PRIVATE_INIT() for a couple of examples.
953 * The #GPrivate structure should be considered opaque. It should only
954 * be accessed via the g_private_ functions.
959 * @notify: a #GDestroyNotify
961 * A macro to assist with the static initialisation of a #GPrivate.
963 * This macro is useful for the case that a #GDestroyNotify function
964 * should be associated with the key. This is needed when the key will be
965 * used to point at memory that should be deallocated when the thread
968 * Additionally, the #GDestroyNotify will also be called on the previous
969 * value stored in the key when g_private_replace() is used.
971 * If no #GDestroyNotify is needed, then use of this macro is not
972 * required -- if the #GPrivate is declared in static scope then it will
973 * be properly initialised by default (ie: to all zeros). See the
976 * |[<!-- language="C" -->
977 * static GPrivate name_key = G_PRIVATE_INIT (g_free);
979 * // return value should not be freed
981 * get_local_name (void)
983 * return g_private_get (&name_key);
987 * set_local_name (const gchar *name)
989 * g_private_replace (&name_key, g_strdup (name));
993 * static GPrivate count_key; // no free function
996 * get_local_count (void)
998 * return GPOINTER_TO_INT (g_private_get (&count_key));
1002 * set_local_count (gint count)
1004 * g_private_set (&count_key, GINT_TO_POINTER (count));
1011 static pthread_key_t *
1012 g_private_impl_new (GDestroyNotify notify)
1017 key = malloc (sizeof (pthread_key_t));
1018 if G_UNLIKELY (key == NULL)
1019 g_thread_abort (errno, "malloc");
1020 status = pthread_key_create (key, notify);
1021 if G_UNLIKELY (status != 0)
1022 g_thread_abort (status, "pthread_key_create");
1028 g_private_impl_free (pthread_key_t *key)
1032 status = pthread_key_delete (*key);
1033 if G_UNLIKELY (status != 0)
1034 g_thread_abort (status, "pthread_key_delete");
1038 static inline pthread_key_t *
1039 g_private_get_impl (GPrivate *key)
1041 pthread_key_t *impl = g_atomic_pointer_get (&key->p);
1043 if G_UNLIKELY (impl == NULL)
1045 impl = g_private_impl_new (key->notify);
1046 if (!g_atomic_pointer_compare_and_exchange (&key->p, NULL, impl))
1048 g_private_impl_free (impl);
1060 * Returns the current value of the thread local variable @key.
1062 * If the value has not yet been set in this thread, %NULL is returned.
1063 * Values are never copied between threads (when a new thread is
1064 * created, for example).
1066 * Returns: the thread-local value
1069 g_private_get (GPrivate *key)
1071 /* quote POSIX: No errors are returned from pthread_getspecific(). */
1072 return pthread_getspecific (*g_private_get_impl (key));
1078 * @value: the new value
1080 * Sets the thread local variable @key to have the value @value in the
1083 * This function differs from g_private_replace() in the following way:
1084 * the #GDestroyNotify for @key is not called on the old value.
1087 g_private_set (GPrivate *key,
1092 if G_UNLIKELY ((status = pthread_setspecific (*g_private_get_impl (key), value)) != 0)
1093 g_thread_abort (status, "pthread_setspecific");
1097 * g_private_replace:
1099 * @value: the new value
1101 * Sets the thread local variable @key to have the value @value in the
1104 * This function differs from g_private_set() in the following way: if
1105 * the previous value was non-%NULL then the #GDestroyNotify handler for
1106 * @key is run on it.
1111 g_private_replace (GPrivate *key,
1114 pthread_key_t *impl = g_private_get_impl (key);
1118 old = pthread_getspecific (*impl);
1120 if G_UNLIKELY ((status = pthread_setspecific (*impl, value)) != 0)
1121 g_thread_abort (status, "pthread_setspecific");
1123 if (old && key->notify)
/* Aborts via g_error() if a pthreads call returned a non-zero error
 * status.  posix_check_cmd() stringifies the call itself so the error
 * message names the exact expression that failed. */
#define posix_check_err(err, name) G_STMT_START{ \
  int error = (err); \
  if (error) \
    g_error ("file %s: line %d (%s): error '%s' during '%s'", \
             __FILE__, __LINE__, G_STRFUNC, \
             g_strerror (error), name); \
  }G_STMT_END

#define posix_check_cmd(cmd) posix_check_err (cmd, #cmd)
  pthread_t system_thread;  /* underlying pthreads handle for this thread */

  /* Trampoline actually run on the new thread; it is handed the
   * GThreadPosix pointer itself as its argument (see the
   * pthread_create calls in g_system_thread_new() below). */
  void *(*proxy) (void *);

  /* Must be statically allocated and valid forever */
  const GThreadSchedulerSettings *scheduler_settings;
1154 g_system_thread_free (GRealThread *thread)
1156 GThreadPosix *pt = (GThreadPosix *) thread;
1159 pthread_detach (pt->system_thread);
1161 g_mutex_clear (&pt->lock);
1163 g_slice_free (GThreadPosix, pt);
g_system_thread_get_scheduler_settings (GThreadSchedulerSettings *scheduler_settings)
  /* FIXME: Implement the same for macOS and the BSDs so it doesn't go through
   * the fallback code using an additional thread. */
#if defined(HAVE_SYS_SCHED_GETATTR)
  /* Reads the current thread's Linux scheduler attributes via the raw
   * sched_getattr(2) syscall (no glibc wrapper exists), growing the
   * buffer on E2BIG and retrying on EAGAIN. */
  /* FIXME: The struct definition does not seem to be possible to pull in
   * via any of the normal system headers and it's only declared in the
   * kernel headers. That's why we hardcode 56 here right now. */
  guint size = 56;      /* Size as of Linux 5.3.9 */

  tid = (pid_t) syscall (SYS_gettid);

  scheduler_settings->attr = g_malloc0 (size);

  res = syscall (SYS_sched_getattr, tid, scheduler_settings->attr, size, flags);
      if (errsv == EAGAIN)
      else if (errsv == E2BIG)
          /* Kernel reports a larger struct than we allocated: grow and retry. */
          g_assert (size < G_MAXINT);
          scheduler_settings->attr = g_realloc (scheduler_settings->attr, size);
          /* Needs to be zero-initialized */
          memset (scheduler_settings->attr, 0, size);
          g_debug ("Failed to get thread scheduler attributes: %s", g_strerror (errsv));
          g_free (scheduler_settings->attr);
  /* Try setting them on the current thread to see if any system policies are
   * in place that would disallow doing so */
  res = syscall (SYS_sched_setattr, tid, scheduler_settings->attr, flags);
      g_debug ("Failed to set thread scheduler attributes: %s", g_strerror (errsv));
      g_free (scheduler_settings->attr);
1234 #if defined(HAVE_SYS_SCHED_GETATTR)
linux_pthread_proxy (void *data)
  /* Thread entry trampoline used on Linux: applies any requested
   * scheduler settings to the new thread via sched_setattr(2) before
   * chaining to the real proxy function. */
  GThreadPosix *thread = data;
  static gboolean printed_scheduler_warning = FALSE;  /* (atomic) */

  /* Set scheduler settings first if requested */
  if (thread->scheduler_settings)
      tid = (pid_t) syscall (SYS_gettid);
      res = syscall (SYS_sched_setattr, tid, thread->scheduler_settings->attr, flags);
      /* Emit the failure as a g_critical() only once per process; later
       * failures are downgraded to g_debug() to avoid log spam. */
      if (res == -1 && g_atomic_int_compare_and_exchange (&printed_scheduler_warning, FALSE, TRUE))
        g_critical ("Failed to set scheduler settings: %s", g_strerror (errsv));
        g_debug ("Failed to set scheduler settings: %s", g_strerror (errsv));
        printed_scheduler_warning = TRUE;

  return thread->proxy (data);
g_system_thread_new (GThreadFunc proxy,
                     const GThreadSchedulerSettings *scheduler_settings,
  /* Creates a new system thread running @proxy.  On failure, sets
   * G_THREAD_ERROR_AGAIN on @error and returns without a thread. */
  GThreadPosix *thread;
  GRealThread *base_thread;
  pthread_attr_t attr;

  thread = g_slice_new0 (GThreadPosix);
  base_thread = (GRealThread*)thread;
  /* One reference for the caller, one for the thread itself. */
  base_thread->ref_count = 2;
  base_thread->ours = TRUE;
  base_thread->thread.joinable = TRUE;
  base_thread->thread.func = func;
  base_thread->thread.data = data;
  base_thread->name = g_strdup (name);
  thread->scheduler_settings = scheduler_settings;
  thread->proxy = proxy;

  posix_check_cmd (pthread_attr_init (&attr));

#ifdef HAVE_PTHREAD_ATTR_SETSTACKSIZE
#ifdef _SC_THREAD_STACK_MIN
      /* Never request less than the platform's minimum stack size. */
      long min_stack_size = sysconf (_SC_THREAD_STACK_MIN);
      if (min_stack_size >= 0)
        stack_size = MAX ((gulong) min_stack_size, stack_size);
#endif /* _SC_THREAD_STACK_MIN */
      /* No error check here, because some systems can't do it and
       * we simply don't want threads to fail because of that. */
      pthread_attr_setstacksize (&attr, stack_size);
#endif /* HAVE_PTHREAD_ATTR_SETSTACKSIZE */

#ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
  if (!scheduler_settings)
      /* While this is the default, better be explicit about it */
      pthread_attr_setinheritsched (&attr, PTHREAD_INHERIT_SCHED);
#endif /* HAVE_PTHREAD_ATTR_SETINHERITSCHED */

#if defined(HAVE_SYS_SCHED_GETATTR)
  /* Go through the Linux trampoline so scheduler settings can be
   * applied on the new thread itself. */
  ret = pthread_create (&thread->system_thread, &attr, linux_pthread_proxy, thread);
  ret = pthread_create (&thread->system_thread, &attr, (void* (*)(void*))proxy, thread);

  posix_check_cmd (pthread_attr_destroy (&attr));

      g_set_error (error, G_THREAD_ERROR, G_THREAD_ERROR_AGAIN,
                   "Error creating thread: %s", g_strerror (ret));
      g_slice_free (GThreadPosix, thread);

  posix_check_err (ret, "pthread_create");

  g_mutex_init (&thread->lock);

  return (GRealThread *) thread;
/**
 * g_thread_yield:
 *
 * Causes the calling thread to voluntarily relinquish the CPU, so
 * that other threads can run.
 *
 * This function is often used as a method to make busy wait less evil.
 */
void
g_thread_yield (void)
{
  sched_yield ();
}
1350 g_system_thread_wait (GRealThread *thread)
1352 GThreadPosix *pt = (GThreadPosix *) thread;
1354 g_mutex_lock (&pt->lock);
1358 posix_check_cmd (pthread_join (pt->system_thread, NULL));
1362 g_mutex_unlock (&pt->lock);
/* Terminates the calling thread without returning from its start
 * routine. */
void
g_system_thread_exit (void)
{
  pthread_exit (NULL);
}
1372 g_system_thread_set_name (const gchar *name)
1374 #if defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID)
1375 pthread_setname_np (name); /* on OS X and iOS */
1376 #elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID)
1377 pthread_setname_np (pthread_self (), name); /* on Linux and Solaris */
1378 #elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID_AND_ARG)
1379 pthread_setname_np (pthread_self (), "%s", (gchar *) name); /* on NetBSD */
1380 #elif defined(HAVE_PTHREAD_SET_NAME_NP)
1381 pthread_set_name_np (pthread_self (), name); /* on FreeBSD, DragonFlyBSD, OpenBSD */
/* {{{1 GMutex and GCond futex implementation */

#if defined(USE_NATIVE_MUTEX)

#include <linux/futex.h>
#include <sys/syscall.h>

/* Fall back to the shared futex ops on kernels too old to have the
 * (faster, non-cross-process) _PRIVATE variants. */
#ifndef FUTEX_WAIT_PRIVATE
#define FUTEX_WAIT_PRIVATE FUTEX_WAIT
#define FUTEX_WAKE_PRIVATE FUTEX_WAKE

/* We should expand the set of operations available in gatomic once we
 * have better C11 support in GCC in common distributions (ie: 4.9).
 *
 * Before then, let's define a couple of useful things for our own
 * purposes: acquire ordering on the lock-side operations, release
 * ordering on the unlock-side ones. */

#ifdef HAVE_STDATOMIC_H

#include <stdatomic.h>

#define exchange_acquire(ptr, new) \
  atomic_exchange_explicit((atomic_uint *) (ptr), (new), __ATOMIC_ACQUIRE)
#define compare_exchange_acquire(ptr, old, new) \
  atomic_compare_exchange_strong_explicit((atomic_uint *) (ptr), (old), (new), \
                                          __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)

#define exchange_release(ptr, new) \
  atomic_exchange_explicit((atomic_uint *) (ptr), (new), __ATOMIC_RELEASE)
#define store_release(ptr, new) \
  atomic_store_explicit((atomic_uint *) (ptr), (new), __ATOMIC_RELEASE)

/* Same operations via the GCC/Clang __atomic builtins when
 * <stdatomic.h> is not available. */
#define exchange_acquire(ptr, new) \
  __atomic_exchange_4((ptr), (new), __ATOMIC_ACQUIRE)
#define compare_exchange_acquire(ptr, old, new) \
  __atomic_compare_exchange_4((ptr), (old), (new), 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)

#define exchange_release(ptr, new) \
  __atomic_exchange_4((ptr), (new), __ATOMIC_RELEASE)
#define store_release(ptr, new) \
  __atomic_store_4((ptr), (new), __ATOMIC_RELEASE)
1433 /* Our strategy for the mutex is pretty simple:
1437 * 1: acquired by one thread only, no contention
1442 * As such, attempting to acquire the lock should involve an increment.
1443 * If we find that the previous value was 0 then we can return
1446 * On unlock, we always store 0 to indicate that the lock is available.
1447 * If the value there was 1 before then we didn't have contention and
1448 * can return immediately. If the value was something other than 1 then
1449 * we have the contended case and need to wake a waiter.
1451 * If it was not 0 then there is another thread holding it and we must
1452 * wait. We must always ensure that we mark a value >1 while we are
1453 * waiting in order to instruct the holder to do a wake operation on
1458 g_mutex_init (GMutex *mutex)
1464 g_mutex_clear (GMutex *mutex)
1466 if G_UNLIKELY (mutex->i[0] != 0)
1468 fprintf (stderr, "g_mutex_clear() called on uninitialised or locked mutex\n");
1473 static void __attribute__((noinline))
1474 g_mutex_lock_slowpath (GMutex *mutex)
1476 /* Set to 2 to indicate contention. If it was zero before then we
1477 * just acquired the lock.
1479 * Otherwise, sleep for as long as the 2 remains...
1481 while (exchange_acquire (&mutex->i[0], 2) != 0)
1482 syscall (__NR_futex, &mutex->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) 2, NULL);
1485 static void __attribute__((noinline))
1486 g_mutex_unlock_slowpath (GMutex *mutex,
1489 /* We seem to get better code for the uncontended case by splitting
1492 if G_UNLIKELY (prev == 0)
1494 fprintf (stderr, "Attempt to unlock mutex that was not locked\n");
1498 syscall (__NR_futex, &mutex->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL);
1502 g_mutex_lock (GMutex *mutex)
1504 /* 0 -> 1 and we're done. Anything else, and we need to wait... */
1505 if G_UNLIKELY (g_atomic_int_add (&mutex->i[0], 1) != 0)
1506 g_mutex_lock_slowpath (mutex);
1510 g_mutex_unlock (GMutex *mutex)
1514 prev = exchange_release (&mutex->i[0], 0);
1516 /* 1-> 0 and we're done. Anything else and we need to signal... */
1517 if G_UNLIKELY (prev != 1)
1518 g_mutex_unlock_slowpath (mutex, prev);
1522 g_mutex_trylock (GMutex *mutex)
1526 /* We don't want to touch the value at all unless we can move it from
1529 return compare_exchange_acquire (&mutex->i[0], &zero, 1);
1532 /* Condition variables are implemented in a rather simple way as well.
1533 * In many ways, futex() as an abstraction is even more ideally suited
1534 * to condition variables than it is to mutexes.
1536 * We store a generation counter. We sample it with the lock held and
1537 * unlock before sleeping on the futex.
1539 * Signalling simply involves increasing the counter and making the
1540 * appropriate futex call.
1542 * The only thing that is the slightest bit complicated is timed waits
1543 * because we must convert our absolute time to relative.
1547 g_cond_init (GCond *cond)
1553 g_cond_clear (GCond *cond)
1558 g_cond_wait (GCond *cond,
1561 guint sampled = (guint) g_atomic_int_get (&cond->i[0]);
1563 g_mutex_unlock (mutex);
1564 syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, NULL);
1565 g_mutex_lock (mutex);
1569 g_cond_signal (GCond *cond)
1571 g_atomic_int_inc (&cond->i[0]);
1573 syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL);
1577 g_cond_broadcast (GCond *cond)
1579 g_atomic_int_inc (&cond->i[0]);
1581 syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) INT_MAX, NULL);
1585 g_cond_wait_until (GCond *cond,
1589 struct timespec now;
1590 struct timespec span;
1598 clock_gettime (CLOCK_MONOTONIC, &now);
1599 span.tv_sec = (end_time / 1000000) - now.tv_sec;
1600 span.tv_nsec = ((end_time % 1000000) * 1000) - now.tv_nsec;
1601 if (span.tv_nsec < 0)
1603 span.tv_nsec += 1000000000;
1607 if (span.tv_sec < 0)
1610 sampled = cond->i[0];
1611 g_mutex_unlock (mutex);
1612 res = syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, &span);
1613 success = (res < 0 && errno == ETIMEDOUT) ? FALSE : TRUE;
1614 g_mutex_lock (mutex);
1622 /* vim:set foldmethod=marker: */