/* GLIB - Library of useful routines for C programming
 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
 *
 * gthread.c: posix thread system implementation
 * Copyright 1998 Sebastian Wilhelmi; University of Karlsruhe
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Modified by the GLib Team and others 1997-2000. See the AUTHORS
 * file for a list of people on the GLib Team. See the ChangeLog
 * files for a list of changes. These files are distributed with
 * GLib at ftp://ftp.gtk.org/pub/gtk/.
 */
/* The GMutex, GCond and GPrivate implementations in this file are some
 * of the lowest-level code in GLib. All other parts of GLib (messages,
 * memory, slices, etc) assume that they can freely use these facilities
 * without risking recursion.
 *
 * As such, these functions are NOT permitted to call any other part of
 * GLib.
 *
 * The thread manipulation functions (create, exit, join, etc.) have
 * more freedom -- they can do as they please.
 */
#include "config.h"

#include "gthread.h"
#include "gthreadprivate.h"
#include "gslice.h"
#include "gmessages.h"
#include "gstrfuncs.h"

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#include <unistd.h>

#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif
g_thread_abort (gint status,
                const gchar *function)
  fprintf (stderr, "GLib (gthread-posix.c): Unexpected error from C library during '%s': %s. Aborting.\n",
           function, strerror (status));
static pthread_mutex_t *
g_mutex_impl_new (void)
  pthread_mutexattr_t *pattr = NULL;
  pthread_mutex_t *mutex;

  mutex = malloc (sizeof (pthread_mutex_t));
  if G_UNLIKELY (mutex == NULL)
    g_thread_abort (errno, "malloc");

#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
  pthread_mutexattr_t attr;
  pthread_mutexattr_init (&attr);
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
  pattr = &attr;
#endif

  if G_UNLIKELY ((status = pthread_mutex_init (mutex, pattr)) != 0)
    g_thread_abort (status, "pthread_mutex_init");

#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
  pthread_mutexattr_destroy (&attr);
#endif
g_mutex_impl_free (pthread_mutex_t *mutex)
  pthread_mutex_destroy (mutex);

/* Lazily allocate the pthread mutex on first use; if another thread wins
 * the race to install it, free our copy and use the one that won. */
static pthread_mutex_t *
g_mutex_get_impl (GMutex *mutex)
  pthread_mutex_t *impl = g_atomic_pointer_get (&mutex->p);

  if G_UNLIKELY (impl == NULL)
      impl = g_mutex_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&mutex->p, NULL, impl))
        g_mutex_impl_free (impl);
 * @mutex: an uninitialized #GMutex
 *
 * Initializes a #GMutex so that it can be used.
 *
 * This function is useful to initialize a mutex that has been
 * allocated on the stack, or as part of a larger structure.
 * It is not necessary to initialize a mutex that has been
 * statically allocated.
 *
 *   b = g_new (Blob, 1);
 *   g_mutex_init (&b->m);
 *
 * To undo the effect of g_mutex_init() when a mutex is no longer
 * needed, use g_mutex_clear().
 *
 * Calling g_mutex_init() on an already initialized #GMutex leads
 * to undefined behaviour.
 */
g_mutex_init (GMutex *mutex)
  mutex->p = g_mutex_impl_new ();
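/* A minimal lifecycle sketch for g_mutex_init() and g_mutex_clear(). The
 * Blob type is purely illustrative (it extends the fragment shown in the
 * documentation above) and is not part of this file:
 *
 * |[
 * typedef struct
 * {
 *   GMutex m;
 *   gint   refcount;
 * } Blob;
 *
 * Blob *
 * blob_new (void)
 * {
 *   Blob *b = g_new (Blob, 1);
 *   g_mutex_init (&b->m);
 *   b->refcount = 1;
 *   return b;
 * }
 *
 * void
 * blob_free (Blob *b)
 * {
 *   g_mutex_clear (&b->m);
 *   g_free (b);
 * }
 * ]|
 */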
 * @mutex: an initialized #GMutex
 *
 * Frees the resources allocated to a mutex with g_mutex_init().
 *
 * This function should not be used with a #GMutex that has been
 * statically allocated.
 *
 * Calling g_mutex_clear() on a locked mutex leads to undefined
 * behaviour.
 */
g_mutex_clear (GMutex *mutex)
  g_mutex_impl_free (mutex->p);
 * Locks @mutex. If @mutex is already locked by another thread, the
 * current thread will block until @mutex is unlocked by the other
 * thread.
 *
 * #GMutex is neither guaranteed to be recursive nor to be
 * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
 * already been locked by the same thread results in undefined behaviour
 * (including but not limited to deadlocks).
 */
g_mutex_lock (GMutex *mutex)
  if G_UNLIKELY ((status = pthread_mutex_lock (g_mutex_get_impl (mutex))) != 0)
    g_thread_abort (status, "pthread_mutex_lock");
 * Unlocks @mutex. If another thread is blocked in a g_mutex_lock()
 * call for @mutex, it will become unblocked and can lock @mutex itself.
 *
 * Calling g_mutex_unlock() on a mutex that is not locked by the
 * current thread leads to undefined behaviour.
 */
g_mutex_unlock (GMutex *mutex)
  if G_UNLIKELY ((status = pthread_mutex_unlock (g_mutex_get_impl (mutex))) != 0)
    g_thread_abort (status, "pthread_mutex_unlock");
 * Tries to lock @mutex. If @mutex is already locked by another thread,
 * it immediately returns %FALSE. Otherwise it locks @mutex and returns
 * %TRUE.
 *
 * #GMutex is neither guaranteed to be recursive nor to be
 * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
 * already been locked by the same thread results in undefined behaviour
 * (including but not limited to deadlocks or arbitrary return values).
 *
 * Returns: %TRUE if @mutex could be locked
 */
g_mutex_trylock (GMutex *mutex)
  if G_LIKELY ((status = pthread_mutex_trylock (g_mutex_get_impl (mutex))) == 0)
    return TRUE;

  if G_UNLIKELY (status != EBUSY)
    g_thread_abort (status, "pthread_mutex_trylock");
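/* A usage sketch for g_mutex_lock(), g_mutex_trylock() and g_mutex_unlock(),
 * assuming an illustrative, statically allocated counter that is not part of
 * this file:
 *
 * |[
 * static GMutex counter_mutex;
 * static gint   counter;
 *
 * void
 * counter_increment (void)
 * {
 *   g_mutex_lock (&counter_mutex);
 *   counter++;
 *   g_mutex_unlock (&counter_mutex);
 * }
 *
 * gboolean
 * counter_try_increment (void)
 * {
 *   if (!g_mutex_trylock (&counter_mutex))
 *     return FALSE;   // somebody else holds the lock; do not block
 *
 *   counter++;
 *   g_mutex_unlock (&counter_mutex);
 *   return TRUE;
 * }
 * ]|
 */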
static pthread_mutex_t *
g_rec_mutex_impl_new (void)
  pthread_mutexattr_t attr;
  pthread_mutex_t *mutex;

  mutex = malloc (sizeof (pthread_mutex_t));
  if G_UNLIKELY (mutex == NULL)
    g_thread_abort (errno, "malloc");

  pthread_mutexattr_init (&attr);
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init (mutex, &attr);
  pthread_mutexattr_destroy (&attr);

g_rec_mutex_impl_free (pthread_mutex_t *mutex)
  pthread_mutex_destroy (mutex);

static pthread_mutex_t *
g_rec_mutex_get_impl (GRecMutex *rec_mutex)
  pthread_mutex_t *impl = g_atomic_pointer_get (&rec_mutex->p);

  if G_UNLIKELY (impl == NULL)
      impl = g_rec_mutex_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&rec_mutex->p, NULL, impl))
        g_rec_mutex_impl_free (impl);
 * @rec_mutex: an uninitialized #GRecMutex
 *
 * Initializes a #GRecMutex so that it can be used.
 *
 * This function is useful to initialize a recursive mutex
 * that has been allocated on the stack, or as part of a larger
 * structure.
 *
 * It is not necessary to initialise a recursive mutex that has been
 * statically allocated.
 *
 *   b = g_new (Blob, 1);
 *   g_rec_mutex_init (&b->m);
 *
 * Calling g_rec_mutex_init() on an already initialized #GRecMutex
 * leads to undefined behaviour.
 *
 * To undo the effect of g_rec_mutex_init() when a recursive mutex
 * is no longer needed, use g_rec_mutex_clear().
 */
g_rec_mutex_init (GRecMutex *rec_mutex)
  rec_mutex->p = g_rec_mutex_impl_new ();
 * @rec_mutex: an initialized #GRecMutex
 *
 * Frees the resources allocated to a recursive mutex with
 * g_rec_mutex_init().
 *
 * This function should not be used with a #GRecMutex that has been
 * statically allocated.
 *
 * Calling g_rec_mutex_clear() on a locked recursive mutex leads
 * to undefined behaviour.
 */
g_rec_mutex_clear (GRecMutex *rec_mutex)
  g_rec_mutex_impl_free (rec_mutex->p);
 * @rec_mutex: a #GRecMutex
 *
 * Locks @rec_mutex. If @rec_mutex is already locked by another
 * thread, the current thread will block until @rec_mutex is
 * unlocked by the other thread. If @rec_mutex is already locked
 * by the current thread, the 'lock count' of @rec_mutex is increased.
 * The mutex will only become available again when it is unlocked
 * as many times as it has been locked.
 */
g_rec_mutex_lock (GRecMutex *mutex)
  pthread_mutex_lock (g_rec_mutex_get_impl (mutex));
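/* A sketch of the 'lock count' behaviour described above: a function that
 * takes the recursive mutex may call a helper that takes it again, and the
 * mutex only becomes available once every lock has been matched by an
 * unlock. The cache_* names are illustrative only:
 *
 * |[
 * static GRecMutex cache_lock;
 *
 * static void
 * cache_remove (const gchar *key)
 * {
 *   g_rec_mutex_lock (&cache_lock);   // lock count: 1 (or 2 if called below)
 *   // ... remove the entry ...
 *   g_rec_mutex_unlock (&cache_lock);
 * }
 *
 * static void
 * cache_replace (const gchar *key, gpointer value)
 * {
 *   g_rec_mutex_lock (&cache_lock);   // lock count: 1
 *   cache_remove (key);               // relocks; count goes to 2, then back to 1
 *   // ... insert the new value ...
 *   g_rec_mutex_unlock (&cache_lock); // count: 0, mutex released
 * }
 * ]|
 */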
 * g_rec_mutex_unlock:
 * @rec_mutex: a #GRecMutex
 *
 * Unlocks @rec_mutex. If another thread is blocked in a
 * g_rec_mutex_lock() call for @rec_mutex, it will become unblocked
 * and can lock @rec_mutex itself.
 *
 * Calling g_rec_mutex_unlock() on a recursive mutex that is not
 * locked by the current thread leads to undefined behaviour.
 */
g_rec_mutex_unlock (GRecMutex *rec_mutex)
  pthread_mutex_unlock (rec_mutex->p);
 * g_rec_mutex_trylock:
 * @rec_mutex: a #GRecMutex
 *
 * Tries to lock @rec_mutex. If @rec_mutex is already locked
 * by another thread, it immediately returns %FALSE. Otherwise
 * it locks @rec_mutex and returns %TRUE.
 *
 * Returns: %TRUE if @rec_mutex could be locked
 */
g_rec_mutex_trylock (GRecMutex *rec_mutex)
  if (pthread_mutex_trylock (g_rec_mutex_get_impl (rec_mutex)) != 0)
    return FALSE;

  return TRUE;
static pthread_rwlock_t *
g_rw_lock_impl_new (void)
  pthread_rwlock_t *rwlock;

  rwlock = malloc (sizeof (pthread_rwlock_t));
  if G_UNLIKELY (rwlock == NULL)
    g_thread_abort (errno, "malloc");

  if G_UNLIKELY ((status = pthread_rwlock_init (rwlock, NULL)) != 0)
    g_thread_abort (status, "pthread_rwlock_init");

g_rw_lock_impl_free (pthread_rwlock_t *rwlock)
  pthread_rwlock_destroy (rwlock);

static pthread_rwlock_t *
g_rw_lock_get_impl (GRWLock *lock)
  pthread_rwlock_t *impl = g_atomic_pointer_get (&lock->p);

  if G_UNLIKELY (impl == NULL)
      impl = g_rw_lock_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&lock->p, NULL, impl))
        g_rw_lock_impl_free (impl);
 * @rw_lock: an uninitialized #GRWLock
 *
 * Initializes a #GRWLock so that it can be used.
 *
 * This function is useful to initialize a lock that has been
 * allocated on the stack, or as part of a larger structure. It is not
 * necessary to initialise a reader-writer lock that has been statically
 * allocated.
 *
 *   b = g_new (Blob, 1);
 *   g_rw_lock_init (&b->l);
 *
 * To undo the effect of g_rw_lock_init() when a lock is no longer
 * needed, use g_rw_lock_clear().
 *
 * Calling g_rw_lock_init() on an already initialized #GRWLock leads
 * to undefined behaviour.
 */
g_rw_lock_init (GRWLock *rw_lock)
  rw_lock->p = g_rw_lock_impl_new ();
 * @rw_lock: an initialized #GRWLock
 *
 * Frees the resources allocated to a lock with g_rw_lock_init().
 *
 * This function should not be used with a #GRWLock that has been
 * statically allocated.
 *
 * Calling g_rw_lock_clear() when any thread holds the lock
 * leads to undefined behaviour.
 */
g_rw_lock_clear (GRWLock *rw_lock)
  g_rw_lock_impl_free (rw_lock->p);
 * g_rw_lock_writer_lock:
 * @rw_lock: a #GRWLock
 *
 * Obtain a write lock on @rw_lock. If any thread already holds
 * a read or write lock on @rw_lock, the current thread will block
 * until all other threads have dropped their locks on @rw_lock.
 */
g_rw_lock_writer_lock (GRWLock *rw_lock)
  pthread_rwlock_wrlock (g_rw_lock_get_impl (rw_lock));
 * g_rw_lock_writer_trylock:
 * @rw_lock: a #GRWLock
 *
 * Tries to obtain a write lock on @rw_lock. If any other thread holds
 * a read or write lock on @rw_lock, it immediately returns %FALSE.
 * Otherwise it locks @rw_lock and returns %TRUE.
 *
 * Returns: %TRUE if @rw_lock could be locked
 */
g_rw_lock_writer_trylock (GRWLock *rw_lock)
  if (pthread_rwlock_trywrlock (g_rw_lock_get_impl (rw_lock)) != 0)
    return FALSE;

  return TRUE;
 * g_rw_lock_writer_unlock:
 * @rw_lock: a #GRWLock
 *
 * Release a write lock on @rw_lock.
 *
 * Calling g_rw_lock_writer_unlock() on a lock that is not held
 * by the current thread leads to undefined behaviour.
 */
g_rw_lock_writer_unlock (GRWLock *rw_lock)
  pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));

 * g_rw_lock_reader_lock:
 * @rw_lock: a #GRWLock
 *
 * Obtain a read lock on @rw_lock. If another thread currently holds
 * the write lock on @rw_lock or blocks waiting for it, the current
 * thread will block. Read locks can be taken recursively.
 *
 * It is implementation-defined how many threads are allowed to
 * hold read locks on the same lock simultaneously.
 */
g_rw_lock_reader_lock (GRWLock *rw_lock)
  pthread_rwlock_rdlock (g_rw_lock_get_impl (rw_lock));
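/* A sketch of the usual reader/writer split: many threads may look up
 * entries concurrently under the read lock, while updates take the write
 * lock exclusively. The GHashTable-based cache here is illustrative only
 * and is not part of this file:
 *
 * |[
 * static GRWLock     cache_lock;
 * static GHashTable *cache;
 *
 * gpointer
 * cache_lookup (const gchar *key)
 * {
 *   gpointer value;
 *
 *   g_rw_lock_reader_lock (&cache_lock);
 *   value = g_hash_table_lookup (cache, key);
 *   g_rw_lock_reader_unlock (&cache_lock);
 *
 *   return value;
 * }
 *
 * void
 * cache_insert (const gchar *key, gpointer value)
 * {
 *   g_rw_lock_writer_lock (&cache_lock);
 *   g_hash_table_insert (cache, g_strdup (key), value);
 *   g_rw_lock_writer_unlock (&cache_lock);
 * }
 * ]|
 */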
 * g_rw_lock_reader_trylock:
 * @rw_lock: a #GRWLock
 *
 * Tries to obtain a read lock on @rw_lock and returns %TRUE if
 * the read lock was successfully obtained. Otherwise it
 * returns %FALSE.
 *
 * Returns: %TRUE if @rw_lock could be locked
 */
g_rw_lock_reader_trylock (GRWLock *rw_lock)
  if (pthread_rwlock_tryrdlock (g_rw_lock_get_impl (rw_lock)) != 0)
    return FALSE;

  return TRUE;
 * g_rw_lock_reader_unlock:
 * @rw_lock: a #GRWLock
 *
 * Release a read lock on @rw_lock.
 *
 * Calling g_rw_lock_reader_unlock() on a lock that is not held
 * by the current thread leads to undefined behaviour.
 */
g_rw_lock_reader_unlock (GRWLock *rw_lock)
  pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
static pthread_cond_t *
g_cond_impl_new (void)
  pthread_condattr_t attr;
  pthread_cond_t *cond;

  pthread_condattr_init (&attr);
#if defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
  pthread_condattr_setclock (&attr, CLOCK_MONOTONIC);
#endif
  cond = malloc (sizeof (pthread_cond_t));
  if G_UNLIKELY (cond == NULL)
    g_thread_abort (errno, "malloc");

  if G_UNLIKELY ((status = pthread_cond_init (cond, &attr)) != 0)
    g_thread_abort (status, "pthread_cond_init");

  pthread_condattr_destroy (&attr);

g_cond_impl_free (pthread_cond_t *cond)
  pthread_cond_destroy (cond);

static pthread_cond_t *
g_cond_get_impl (GCond *cond)
  pthread_cond_t *impl = g_atomic_pointer_get (&cond->p);

  if G_UNLIKELY (impl == NULL)
      impl = g_cond_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&cond->p, NULL, impl))
        g_cond_impl_free (impl);
 * @cond: an uninitialized #GCond
 *
 * Initialises a #GCond so that it can be used.
 *
 * This function is useful to initialise a #GCond that has been
 * allocated as part of a larger structure. It is not necessary to
 * initialise a #GCond that has been statically allocated.
 *
 * To undo the effect of g_cond_init() when a #GCond is no longer
 * needed, use g_cond_clear().
 *
 * Calling g_cond_init() on an already-initialised #GCond leads
 * to undefined behaviour.
 */
g_cond_init (GCond *cond)
  cond->p = g_cond_impl_new ();

 * @cond: an initialised #GCond
 *
 * Frees the resources allocated to a #GCond with g_cond_init().
 *
 * This function should not be used with a #GCond that has been
 * statically allocated.
 *
 * Calling g_cond_clear() for a #GCond on which threads are
 * blocking leads to undefined behaviour.
 */
g_cond_clear (GCond *cond)
  g_cond_impl_free (cond->p);
 * @mutex: a #GMutex that is currently locked
 *
 * Atomically releases @mutex and waits until @cond is signalled.
 * When this function returns, @mutex is locked again and owned by the
 * calling thread.
 *
 * When using condition variables, it is possible that a spurious wakeup
 * may occur (ie: g_cond_wait() returns even though g_cond_signal() was
 * not called). It's also possible that a stolen wakeup may occur.
 * This is when g_cond_signal() is called, but another thread acquires
 * @mutex before this thread and modifies the state of the program in
 * such a way that when g_cond_wait() is able to return, the expected
 * condition is no longer met.
 *
 * For this reason, g_cond_wait() must always be used in a loop. See
 * the documentation for #GCond for a complete example.
 */
g_cond_wait (GCond  *cond,
             GMutex *mutex)
  if G_UNLIKELY ((status = pthread_cond_wait (g_cond_get_impl (cond), g_mutex_get_impl (mutex))) != 0)
    g_thread_abort (status, "pthread_cond_wait");
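/* The loop discussed above, sketched as the usual producer/consumer idiom.
 * The data_mutex, data_cond and current_data names are illustrative only
 * (they follow the example referenced in the #GCond documentation):
 *
 * |[
 * static GMutex   data_mutex;
 * static GCond    data_cond;
 * static gpointer current_data;
 *
 * void
 * push_data (gpointer data)
 * {
 *   g_mutex_lock (&data_mutex);
 *   current_data = data;
 *   g_cond_signal (&data_cond);
 *   g_mutex_unlock (&data_mutex);
 * }
 *
 * gpointer
 * pop_data (void)
 * {
 *   gpointer data;
 *
 *   g_mutex_lock (&data_mutex);
 *   while (!current_data)              // loop: wakeups may be spurious or stolen
 *     g_cond_wait (&data_cond, &data_mutex);
 *   data = current_data;
 *   current_data = NULL;
 *   g_mutex_unlock (&data_mutex);
 *
 *   return data;
 * }
 * ]|
 */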
 * If threads are waiting for @cond, at least one of them is unblocked.
 * If no threads are waiting for @cond, this function has no effect.
 * It is good practice to hold the same lock as the waiting thread
 * while calling this function, though not required.
 */
g_cond_signal (GCond *cond)
  if G_UNLIKELY ((status = pthread_cond_signal (g_cond_get_impl (cond))) != 0)
    g_thread_abort (status, "pthread_cond_signal");

 * If threads are waiting for @cond, all of them are unblocked.
 * If no threads are waiting for @cond, this function has no effect.
 * It is good practice to lock the same mutex as the waiting threads
 * while calling this function, though not required.
 */
g_cond_broadcast (GCond *cond)
  if G_UNLIKELY ((status = pthread_cond_broadcast (g_cond_get_impl (cond))) != 0)
    g_thread_abort (status, "pthread_cond_broadcast");
 * @mutex: a #GMutex that is currently locked
 * @end_time: the monotonic time to wait until
 *
 * Waits until either @cond is signalled or @end_time has passed.
 *
 * As with g_cond_wait() it is possible that a spurious or stolen wakeup
 * could occur. For that reason, waiting on a condition variable should
 * always be in a loop, based on an explicitly-checked predicate.
 *
 * %TRUE is returned if the condition variable was signalled (or in the
 * case of a spurious wakeup). %FALSE is returned if @end_time has
 * passed.
 *
 * The following code shows how to correctly perform a timed wait on a
 * condition variable (extending the example presented in the
 * documentation for #GCond):
 *
 *   pop_data_timed (void)
 *     g_mutex_lock (&data_mutex);
 *
 *     end_time = g_get_monotonic_time () + 5 * G_TIME_SPAN_SECOND;
 *     while (!current_data)
 *       if (!g_cond_wait_until (&data_cond, &data_mutex, end_time))
 *         // timeout has passed.
 *         g_mutex_unlock (&data_mutex);
 *
 *     // there is data for us
 *     data = current_data;
 *     current_data = NULL;
 *
 *     g_mutex_unlock (&data_mutex);
 *
 * Notice that the end time is calculated once, before entering the
 * loop, and reused. This is the motivation behind the use of absolute
 * time on this API -- if a relative time of 5 seconds were passed
 * directly to the call and a spurious wakeup occurred, the program would
 * have to start over waiting again (which would lead to a total wait
 * time of more than 5 seconds).
 *
 * Returns: %TRUE on a signal, %FALSE on a timeout
 */
g_cond_wait_until (GCond  *cond,
                   GMutex *mutex,
                   gint64  end_time)
  /* end_time is in monotonic microseconds; split it into whole seconds
   * and the nanosecond remainder that struct timespec expects. */
  ts.tv_sec = end_time / 1000000;
  ts.tv_nsec = (end_time % 1000000) * 1000;

#if defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC)
  if ((status = pthread_cond_timedwait_monotonic (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
    return TRUE;
#elif defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC_NP)
  if ((status = pthread_cond_timedwait_monotonic_np (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
    return TRUE;
#else
  /* Pray that the cond is actually using the monotonic clock */
  if ((status = pthread_cond_timedwait (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
    return TRUE;
#endif

  if G_UNLIKELY (status != ETIMEDOUT)
    g_thread_abort (status, "pthread_cond_timedwait");
 * The #GPrivate struct is an opaque data structure to represent a
 * thread-local data key. It is approximately equivalent to the
 * pthread_setspecific()/pthread_getspecific() APIs on POSIX and to
 * TlsSetValue()/TlsGetValue() on Windows.
 *
 * If you don't already know why you might want this functionality,
 * then you probably don't need it.
 *
 * #GPrivate is a very limited resource (as few as 128 per program,
 * shared between all libraries). It is also not possible to destroy a
 * #GPrivate after it has been used. As such, it is only ever acceptable
 * to use #GPrivate in static scope, and even then sparingly so.
 *
 * See G_PRIVATE_INIT() for a couple of examples.
 *
 * The #GPrivate structure should be considered opaque. It should only
 * be accessed via the <function>g_private_</function> functions.
 */
 * @notify: a #GDestroyNotify
 *
 * A macro to assist with the static initialisation of a #GPrivate.
 *
 * This macro is useful for the case that a #GDestroyNotify function
 * should be associated with the key. This is needed when the key will be
 * used to point at memory that should be deallocated when the thread
 * exits.
 *
 * Additionally, the #GDestroyNotify will also be called on the previous
 * value stored in the key when g_private_replace() is used.
 *
 * If no #GDestroyNotify is needed, then use of this macro is not
 * required -- if the #GPrivate is declared in static scope then it will
 * be properly initialised by default (ie: to all zeros). See the
 * examples below.
 *
 *   static GPrivate name_key = G_PRIVATE_INIT (g_free);
 *
 *   // return value should not be freed
 *   get_local_name (void)
 *     return g_private_get (&name_key);
 *
 *   set_local_name (const gchar *name)
 *     g_private_replace (&name_key, g_strdup (name));
 *
 *   static GPrivate count_key;   // no free function
 *
 *   get_local_count (void)
 *     return GPOINTER_TO_INT (g_private_get (&count_key));
 *
 *   set_local_count (gint count)
 *     g_private_set (&count_key, GINT_TO_POINTER (count));
 */
static pthread_key_t *
g_private_impl_new (GDestroyNotify notify)
  key = malloc (sizeof (pthread_key_t));
  if G_UNLIKELY (key == NULL)
    g_thread_abort (errno, "malloc");
  status = pthread_key_create (key, notify);
  if G_UNLIKELY (status != 0)
    g_thread_abort (status, "pthread_key_create");

g_private_impl_free (pthread_key_t *key)
  status = pthread_key_delete (*key);
  if G_UNLIKELY (status != 0)
    g_thread_abort (status, "pthread_key_delete");

static pthread_key_t *
g_private_get_impl (GPrivate *key)
  pthread_key_t *impl = g_atomic_pointer_get (&key->p);

  if G_UNLIKELY (impl == NULL)
      impl = g_private_impl_new (key->notify);
      if (!g_atomic_pointer_compare_and_exchange (&key->p, NULL, impl))
          g_private_impl_free (impl);
 * Returns the current value of the thread local variable @key.
 *
 * If the value has not yet been set in this thread, %NULL is returned.
 * Values are never copied between threads (when a new thread is
 * created, for example).
 *
 * Returns: the thread-local value
 */
g_private_get (GPrivate *key)
  /* quote POSIX: No errors are returned from pthread_getspecific(). */
  return pthread_getspecific (*g_private_get_impl (key));
 * @value: the new value
 *
 * Sets the thread local variable @key to have the value @value in the
 * current thread.
 *
 * This function differs from g_private_replace() in the following way:
 * the #GDestroyNotify for @key is not called on the old value.
 */
g_private_set (GPrivate *key,
               gpointer  value)
  if G_UNLIKELY ((status = pthread_setspecific (*g_private_get_impl (key), value)) != 0)
    g_thread_abort (status, "pthread_setspecific");
 * g_private_replace:
 * @value: the new value
 *
 * Sets the thread local variable @key to have the value @value in the
 * current thread.
 *
 * This function differs from g_private_set() in the following way: if
 * the previous value was non-%NULL then the #GDestroyNotify handler for
 * @key is run on it.
 */
g_private_replace (GPrivate *key,
                   gpointer  value)
  pthread_key_t *impl = g_private_get_impl (key);

  old = pthread_getspecific (*impl);
  if (old && key->notify)
    key->notify (old);

  if G_UNLIKELY ((status = pthread_setspecific (*impl, value)) != 0)
    g_thread_abort (status, "pthread_setspecific");
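/* A small sketch of the difference described above, assuming a key that owns
 * heap-allocated strings. With g_private_set() the previous string would be
 * leaked; g_private_replace() frees it via the #GDestroyNotify:
 *
 * |[
 * static GPrivate name_key = G_PRIVATE_INIT (g_free);
 *
 * void
 * set_local_name (const gchar *name)
 * {
 *   // g_private_set (&name_key, g_strdup (name)) would leak the old value
 *   g_private_replace (&name_key, g_strdup (name));
 * }
 * ]|
 */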
#define posix_check_err(err, name) G_STMT_START{                        \
  int error = (err);                                                    \
  if (error)                                                            \
    g_error ("file %s: line %d (%s): error '%s' during '%s'",           \
             __FILE__, __LINE__, G_STRFUNC,                             \
             g_strerror (error), name);                                 \
  }G_STMT_END

#define posix_check_cmd(cmd) posix_check_err (cmd, #cmd)

  pthread_t system_thread;
g_system_thread_free (GRealThread *thread)
  GThreadPosix *pt = (GThreadPosix *) thread;

  pthread_detach (pt->system_thread);

  g_mutex_clear (&pt->lock);

  g_slice_free (GThreadPosix, pt);

g_system_thread_new (GThreadFunc thread_func,
  GThreadPosix *thread;
  pthread_attr_t attr;

  thread = g_slice_new0 (GThreadPosix);

  posix_check_cmd (pthread_attr_init (&attr));

#ifdef HAVE_PTHREAD_ATTR_SETSTACKSIZE
  if (stack_size)
#ifdef _SC_THREAD_STACK_MIN
      stack_size = MAX (sysconf (_SC_THREAD_STACK_MIN), stack_size);
#endif /* _SC_THREAD_STACK_MIN */
      /* No error check here, because some systems can't do it and
       * we simply don't want threads to fail because of that. */
      pthread_attr_setstacksize (&attr, stack_size);
#endif /* HAVE_PTHREAD_ATTR_SETSTACKSIZE */

  ret = pthread_create (&thread->system_thread, &attr, (void* (*)(void*))thread_func, thread);

  posix_check_cmd (pthread_attr_destroy (&attr));

  if (ret == EAGAIN)
      g_set_error (error, G_THREAD_ERROR, G_THREAD_ERROR_AGAIN,
                   "Error creating thread: %s", g_strerror (ret));
      g_slice_free (GThreadPosix, thread);
      return NULL;

  posix_check_err (ret, "pthread_create");

  g_mutex_init (&thread->lock);

  return (GRealThread *) thread;
 * Causes the calling thread to voluntarily relinquish the CPU, so
 * that other threads can run.
 *
 * This function is often used as a method to make busy wait less evil.
 */
g_thread_yield (void)

g_system_thread_wait (GRealThread *thread)
  GThreadPosix *pt = (GThreadPosix *) thread;

  g_mutex_lock (&pt->lock);

  posix_check_cmd (pthread_join (pt->system_thread, NULL));

  g_mutex_unlock (&pt->lock);

g_system_thread_exit (void)
  pthread_exit (NULL);
g_system_thread_set_name (const gchar *name)
#ifdef HAVE_SYS_PRCTL_H
  prctl (PR_SET_NAME, name, 0, 0, 0, 0);
#endif

/* vim:set foldmethod=marker: */