1 /* GLIB - Library of useful routines for C programming
2 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
4 * gthread.c: posix thread system implementation
5 * Copyright 1998 Sebastian Wilhelmi; University of Karlsruhe
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the
19 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20 * Boston, MA 02111-1307, USA.
24 * Modified by the GLib Team and others 1997-2000. See the AUTHORS
25 * file for a list of people on the GLib Team. See the ChangeLog
26 * files for a list of changes. These files are distributed with
27 * GLib at ftp://ftp.gtk.org/pub/gtk/.
30 /* The GMutex, GCond and GPrivate implementations in this file are some
31 * of the lowest-level code in GLib. All other parts of GLib (messages,
32 * memory, slices, etc) assume that they can freely use these facilities
33 * without risking recursion.
35 * As such, these functions are NOT permitted to call any other part of
38 * The thread manipulation functions (create, exit, join, etc.) have
39 * more freedom -- they can do as they please.
46 #include "gthreadprivate.h"
48 #include "gmessages.h"
49 #include "gstrfuncs.h"
57 #ifdef HAVE_SYS_TIME_H
58 # include <sys/time.h>
66 #ifdef HAVE_SYS_PRCTL_H
67 #include <sys/prctl.h>
/* Report an unexpected error from a C-library/pthread call and abort.
 * @status: errno-style error code returned by the failing call
 * @function: name of the failing function, included in the message
 * NOTE(review): the return-type line and the final abort() call are not
 * visible in this excerpt -- confirm against the full file. */
71 g_thread_abort (gint status,
72 const gchar *function)
74 fprintf (stderr, "GLib (gthread-posix.c): Unexpected error from C library during '%s': %s. Aborting.\n",
75 function, strerror (status));
/* Allocate and initialise the pthread mutex backing a GMutex.
 * Uses raw malloc() rather than g_slice: per the note at the top of this
 * file, this lowest-level code must not call back into other parts of
 * GLib (gslice itself depends on these mutexes). */
81 static pthread_mutex_t *
82 g_mutex_impl_new (void)
84 pthread_mutexattr_t *pattr = NULL;
85 pthread_mutex_t *mutex;
88 mutex = malloc (sizeof (pthread_mutex_t));
89 if G_UNLIKELY (mutex == NULL)
90 g_thread_abort (errno, "malloc");
/* Prefer glibc's adaptive (spin-then-sleep) mutex type when available. */
92 #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
94 pthread_mutexattr_t attr;
95 pthread_mutexattr_init (&attr);
96 pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
101 if G_UNLIKELY ((status = pthread_mutex_init (mutex, pattr)) != 0)
102 g_thread_abort (status, "pthread_mutex_init");
/* NOTE(review): this cleanup is guarded by PTHREAD_ADAPTIVE_MUTEX_NP while
 * the initialisation above is guarded by
 * PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP.  If only the former is defined,
 * `attr` is destroyed without ever being declared/initialised in scope.
 * The two guards should almost certainly name the same macro -- confirm
 * against the full file and fix there. */
104 #ifdef PTHREAD_ADAPTIVE_MUTEX_NP
105 pthread_mutexattr_destroy (&attr);
/* Destroy and release a pthread mutex created by g_mutex_impl_new(). */
112 g_mutex_impl_free (pthread_mutex_t *mutex)
114 pthread_mutex_destroy (mutex);
/* Return the pthread mutex behind @mutex, lazily creating it on first use.
 * The compare-and-exchange makes concurrent first use race-free: a thread
 * that loses the race frees its freshly created copy and re-reads the
 * winner's pointer. */
118 static pthread_mutex_t *
119 g_mutex_get_impl (GMutex *mutex)
121 pthread_mutex_t *impl = g_atomic_pointer_get (&mutex->p);
123 if G_UNLIKELY (impl == NULL)
125 impl = g_mutex_impl_new ();
126 if (!g_atomic_pointer_compare_and_exchange (&mutex->p, NULL, impl))
127 g_mutex_impl_free (impl);
137 * @mutex: an uninitialized #GMutex
139 * Initializes a #GMutex so that it can be used.
141 * This function is useful to initialize a mutex that has been
142 * allocated on the stack, or as part of a larger structure.
143 * It is not necessary to initialize a mutex that has been
144 * statically allocated.
154 * b = g_new (Blob, 1);
155 * g_mutex_init (&b->m);
158 * To undo the effect of g_mutex_init() when a mutex is no longer
159 * needed, use g_mutex_clear().
161 * Calling g_mutex_init() on an already initialized #GMutex leads
162 * to undefined behaviour.
/* Eagerly creates the implementation; the lazy path in
 * g_mutex_get_impl() covers statically allocated (all-zero) mutexes. */
167 g_mutex_init (GMutex *mutex)
169 mutex->p = g_mutex_impl_new ();
174 * @mutex: an initialized #GMutex
176 * Frees the resources allocated to a mutex with g_mutex_init().
178 * This function should not be used with a #GMutex that has been
179 * statically allocated.
181 * Calling g_mutex_clear() on a locked mutex leads to undefined
187 g_mutex_clear (GMutex *mutex)
189 g_mutex_impl_free (mutex->p);
196 * Locks @mutex. If @mutex is already locked by another thread, the
197 * current thread will block until @mutex is unlocked by the other
200 * <note>#GMutex is neither guaranteed to be recursive nor to be
201 * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
202 * already been locked by the same thread results in undefined behaviour
203 * (including but not limited to deadlocks).</note>
/* Any pthread error here is fatal: g_thread_abort() is called. */
206 g_mutex_lock (GMutex *mutex)
210 if G_UNLIKELY ((status = pthread_mutex_lock (g_mutex_get_impl (mutex))) != 0)
211 g_thread_abort (status, "pthread_mutex_lock");
218 * Unlocks @mutex. If another thread is blocked in a g_mutex_lock()
219 * call for @mutex, it will become unblocked and can lock @mutex itself.
221 * Calling g_mutex_unlock() on a mutex that is not locked by the
222 * current thread leads to undefined behaviour.
225 g_mutex_unlock (GMutex *mutex)
229 if G_UNLIKELY ((status = pthread_mutex_unlock (g_mutex_get_impl (mutex))) != 0)
230 g_thread_abort (status, "pthread_mutex_unlock");
237 * Tries to lock @mutex. If @mutex is already locked by another thread,
238 * it immediately returns %FALSE. Otherwise it locks @mutex and returns
241 * <note>#GMutex is neither guaranteed to be recursive nor to be
242 * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
243 * already been locked by the same thread results in undefined behaviour
244 * (including but not limited to deadlocks or arbitrary return values).
247 * Returns: %TRUE if @mutex could be locked
/* EBUSY (already locked) is the only tolerated failure; any other
 * pthread error aborts the process. */
250 g_mutex_trylock (GMutex *mutex)
254 if G_LIKELY ((status = pthread_mutex_trylock (g_mutex_get_impl (mutex))) == 0)
257 if G_UNLIKELY (status != EBUSY)
258 g_thread_abort (status, "pthread_mutex_trylock");
/* Allocate and initialise a PTHREAD_MUTEX_RECURSIVE pthread mutex for a
 * GRecMutex.  Unlike the plain GMutex path this uses g_slice_new().
 * NOTE(review): the pthread_mutexattr_init/settype/mutex_init return
 * values are not checked here, unlike the plain-mutex path which aborts
 * on error -- confirm this asymmetry is intentional. */
265 static pthread_mutex_t *
266 g_rec_mutex_impl_new (void)
268 pthread_mutexattr_t attr;
269 pthread_mutex_t *mutex;
271 mutex = g_slice_new (pthread_mutex_t);
272 pthread_mutexattr_init (&attr);
273 pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
274 pthread_mutex_init (mutex, &attr);
275 pthread_mutexattr_destroy (&attr);
/* Destroy and release a recursive mutex created by g_rec_mutex_impl_new(). */
281 g_rec_mutex_impl_free (pthread_mutex_t *mutex)
283 pthread_mutex_destroy (mutex);
284 g_slice_free (pthread_mutex_t, mutex);
/* Lazily create the backing recursive mutex, race-free via CAS; the
 * losing thread of a concurrent first use frees its copy. */
287 static pthread_mutex_t *
288 g_rec_mutex_get_impl (GRecMutex *rec_mutex)
290 pthread_mutex_t *impl = g_atomic_pointer_get (&rec_mutex->p);
292 if G_UNLIKELY (impl == NULL)
294 impl = g_rec_mutex_impl_new ();
295 if (!g_atomic_pointer_compare_and_exchange (&rec_mutex->p, NULL, impl))
296 g_rec_mutex_impl_free (impl);
305 * @rec_mutex: an uninitialized #GRecMutex
307 * Initializes a #GRecMutex so that it can be used.
309 * This function is useful to initialize a recursive mutex
310 * that has been allocated on the stack, or as part of a larger
313 * It is not necessary to initialise a recursive mutex that has been
314 * statically allocated.
324 * b = g_new (Blob, 1);
325 * g_rec_mutex_init (&b->m);
328 * Calling g_rec_mutex_init() on an already initialized #GRecMutex
329 * leads to undefined behaviour.
331 * To undo the effect of g_rec_mutex_init() when a recursive mutex
332 * is no longer needed, use g_rec_mutex_clear().
337 g_rec_mutex_init (GRecMutex *rec_mutex)
339 rec_mutex->p = g_rec_mutex_impl_new ();
344 * @rec_mutex: an initialized #GRecMutex
346 * Frees the resources allocated to a recursive mutex with
347 * g_rec_mutex_init().
349 * This function should not be used with a #GRecMutex that has been
350 * statically allocated.
352 * Calling g_rec_mutex_clear() on a locked recursive mutex leads
353 * to undefined behaviour.
358 g_rec_mutex_clear (GRecMutex *rec_mutex)
360 g_rec_mutex_impl_free (rec_mutex->p);
365 * @rec_mutex: a #GRecMutex
367 * Locks @rec_mutex. If @rec_mutex is already locked by another
368 * thread, the current thread will block until @rec_mutex is
369 * unlocked by the other thread. If @rec_mutex is already locked
370 * by the current thread, the 'lock count' of @rec_mutex is increased.
371 * The mutex will only become available again when it is unlocked
372 * as many times as it has been locked.
377 g_rec_mutex_lock (GRecMutex *mutex)
379 pthread_mutex_lock (g_rec_mutex_get_impl (mutex));
383 * g_rec_mutex_unlock:
384 * @rec_mutex: a #GRecMutex
386 * Unlocks @rec_mutex. If another thread is blocked in a
387 * g_rec_mutex_lock() call for @rec_mutex, it will become unblocked
388 * and can lock @rec_mutex itself.
390 * Calling g_rec_mutex_unlock() on a recursive mutex that is not
391 * locked by the current thread leads to undefined behaviour.
/* Reads rec_mutex->p directly (no lazy-init): unlocking an un-locked
 * mutex is undefined anyway, so the impl must already exist here. */
396 g_rec_mutex_unlock (GRecMutex *rec_mutex)
398 pthread_mutex_unlock (rec_mutex->p);
402 * g_rec_mutex_trylock:
403 * @rec_mutex: a #GRecMutex
405 * Tries to lock @rec_mutex. If @rec_mutex is already locked
406 * by another thread, it immediately returns %FALSE. Otherwise
407 * it locks @rec_mutex and returns %TRUE.
409 * Returns: %TRUE if @rec_mutex could be locked
414 g_rec_mutex_trylock (GRecMutex *rec_mutex)
416 if (pthread_mutex_trylock (g_rec_mutex_get_impl (rec_mutex)) != 0)
/* Allocate and initialise the pthread rwlock backing a GRWLock.
 * malloc() is used (not g_slice) for the same no-recursion reason as
 * g_mutex_impl_new(); any failure aborts via g_thread_abort(). */
424 static pthread_rwlock_t *
425 g_rw_lock_impl_new (void)
427 pthread_rwlock_t *rwlock;
430 rwlock = malloc (sizeof (pthread_rwlock_t));
431 if G_UNLIKELY (rwlock == NULL)
432 g_thread_abort (errno, "malloc");
434 if G_UNLIKELY ((status = pthread_rwlock_init (rwlock, NULL)) != 0)
435 g_thread_abort (status, "pthread_rwlock_init");
/* Destroy and release an rwlock created by g_rw_lock_impl_new(). */
441 g_rw_lock_impl_free (pthread_rwlock_t *rwlock)
443 pthread_rwlock_destroy (rwlock);
/* Lazily create the backing rwlock on first use, race-free via CAS. */
447 static pthread_rwlock_t *
448 g_rw_lock_get_impl (GRWLock *lock)
450 pthread_rwlock_t *impl = g_atomic_pointer_get (&lock->p);
452 if G_UNLIKELY (impl == NULL)
454 impl = g_rw_lock_impl_new ();
455 if (!g_atomic_pointer_compare_and_exchange (&lock->p, NULL, impl))
456 g_rw_lock_impl_free (impl);
465 * @rw_lock: an uninitialized #GRWLock
467 * Initializes a #GRWLock so that it can be used.
469 * This function is useful to initialize a lock that has been
470 * allocated on the stack, or as part of a larger structure. It is not
471 * necessary to initialise a reader-writer lock that has been statically
482 * b = g_new (Blob, 1);
483 * g_rw_lock_init (&b->l);
486 * To undo the effect of g_rw_lock_init() when a lock is no longer
487 * needed, use g_rw_lock_clear().
489 * Calling g_rw_lock_init() on an already initialized #GRWLock leads
490 * to undefined behaviour.
495 g_rw_lock_init (GRWLock *rw_lock)
497 rw_lock->p = g_rw_lock_impl_new ();
502 * @rw_lock: an initialized #GRWLock
504 * Frees the resources allocated to a lock with g_rw_lock_init().
506 * This function should not be used with a #GRWLock that has been
507 * statically allocated.
509 * Calling g_rw_lock_clear() when any thread holds the lock
510 * leads to undefined behaviour.
515 g_rw_lock_clear (GRWLock *rw_lock)
517 g_rw_lock_impl_free (rw_lock->p);
521 * g_rw_lock_writer_lock:
522 * @rw_lock: a #GRWLock
524 * Obtain a write lock on @rw_lock. If any thread already holds
525 * a read or write lock on @rw_lock, the current thread will block
526 * until all other threads have dropped their locks on @rw_lock.
/* Thin wrappers: each fetches the lazily-created impl, then delegates
 * to the corresponding pthread_rwlock_* call. */
531 g_rw_lock_writer_lock (GRWLock *rw_lock)
533 pthread_rwlock_wrlock (g_rw_lock_get_impl (rw_lock));
537 * g_rw_lock_writer_trylock:
538 * @rw_lock: a #GRWLock
540 * Tries to obtain a write lock on @rw_lock. If any other thread holds
541 * a read or write lock on @rw_lock, it immediately returns %FALSE.
542 * Otherwise it locks @rw_lock and returns %TRUE.
544 * Returns: %TRUE if @rw_lock could be locked
549 g_rw_lock_writer_trylock (GRWLock *rw_lock)
551 if (pthread_rwlock_trywrlock (g_rw_lock_get_impl (rw_lock)) != 0)
558 * g_rw_lock_writer_unlock:
559 * @rw_lock: a #GRWLock
561 * Release a write lock on @rw_lock.
563 * Calling g_rw_lock_writer_unlock() on a lock that is not held
564 * by the current thread leads to undefined behaviour.
569 g_rw_lock_writer_unlock (GRWLock *rw_lock)
571 pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
575 * g_rw_lock_reader_lock:
576 * @rw_lock: a #GRWLock
578 * Obtain a read lock on @rw_lock. If another thread currently holds
579 * the write lock on @rw_lock or blocks waiting for it, the current
580 * thread will block. Read locks can be taken recursively.
582 * It is implementation-defined how many threads are allowed to
583 * hold read locks on the same lock simultaneously.
588 g_rw_lock_reader_lock (GRWLock *rw_lock)
590 pthread_rwlock_rdlock (g_rw_lock_get_impl (rw_lock));
594 * g_rw_lock_reader_trylock:
595 * @rw_lock: a #GRWLock
597 * Tries to obtain a read lock on @rw_lock and returns %TRUE if
598 * the read lock was successfully obtained. Otherwise it
601 * Returns: %TRUE if @rw_lock could be locked
606 g_rw_lock_reader_trylock (GRWLock *rw_lock)
608 if (pthread_rwlock_tryrdlock (g_rw_lock_get_impl (rw_lock)) != 0)
615 * g_rw_lock_reader_unlock:
616 * @rw_lock: a #GRWLock
618 * Release a read lock on @rw_lock.
620 * Calling g_rw_lock_reader_unlock() on a lock that is not held
621 * by the current thread leads to undefined behaviour.
626 g_rw_lock_reader_unlock (GRWLock *rw_lock)
628 pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
/* Allocate and initialise the pthread condition variable backing a GCond.
 * Where supported, the condvar is bound to CLOCK_MONOTONIC so that
 * g_cond_wait_until() can take a monotonic deadline. */
633 static pthread_cond_t *
634 g_cond_impl_new (void)
636 pthread_condattr_t attr;
637 pthread_cond_t *cond;
640 pthread_condattr_init (&attr);
641 #if defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
642 pthread_condattr_setclock (&attr, CLOCK_MONOTONIC);
645 cond = malloc (sizeof (pthread_cond_t));
646 if G_UNLIKELY (cond == NULL)
647 g_thread_abort (errno, "malloc");
649 if G_UNLIKELY ((status = pthread_cond_init (cond, &attr)) != 0)
650 g_thread_abort (status, "pthread_cond_init");
652 pthread_condattr_destroy (&attr);
/* Destroy and release a condvar created by g_cond_impl_new(). */
658 g_cond_impl_free (pthread_cond_t *cond)
660 pthread_cond_destroy (cond);
/* Lazily create the backing condvar on first use, race-free via CAS. */
664 static pthread_cond_t *
665 g_cond_get_impl (GCond *cond)
667 pthread_cond_t *impl = g_atomic_pointer_get (&cond->p);
669 if G_UNLIKELY (impl == NULL)
671 impl = g_cond_impl_new ();
672 if (!g_atomic_pointer_compare_and_exchange (&cond->p, NULL, impl))
673 g_cond_impl_free (impl);
682 * @cond: an uninitialized #GCond
684 * Initialises a #GCond so that it can be used.
686 * This function is useful to initialise a #GCond that has been
687 * allocated as part of a larger structure. It is not necessary to
688 * initialise a #GCond that has been statically allocated.
690 * To undo the effect of g_cond_init() when a #GCond is no longer
691 * needed, use g_cond_clear().
693 * Calling g_cond_init() on an already-initialised #GCond leads
694 * to undefined behaviour.
699 g_cond_init (GCond *cond)
701 cond->p = g_cond_impl_new ();
706 * @cond: an initialised #GCond
708 * Frees the resources allocated to a #GCond with g_cond_init().
710 * This function should not be used with a #GCond that has been
711 * statically allocated.
713 * Calling g_cond_clear() for a #GCond on which threads are
714 * blocking leads to undefined behaviour.
719 g_cond_clear (GCond *cond)
721 g_cond_impl_free (cond->p);
727 * @mutex: a #GMutex that is currently locked
729 * Atomically releases @mutex and waits until @cond is signalled.
731 * When using condition variables, it is possible that a spurious wakeup
732 * may occur (ie: g_cond_wait() returns even though g_cond_signal() was
733 * not called). It's also possible that a stolen wakeup may occur.
734 * This is when g_cond_signal() is called, but another thread acquires
735 * @mutex before this thread and modifies the state of the program in
736 * such a way that when g_cond_wait() is able to return, the expected
737 * condition is no longer met.
739 * For this reason, g_cond_wait() must always be used in a loop. See
740 * the documentation for #GCond for a complete example.
/* Any pthread error aborts; both impls are lazily created if needed. */
743 g_cond_wait (GCond *cond,
748 if G_UNLIKELY ((status = pthread_cond_wait (g_cond_get_impl (cond), g_mutex_get_impl (mutex))) != 0)
749 g_thread_abort (status, "pthread_cond_wait");
756 * If threads are waiting for @cond, at least one of them is unblocked.
757 * If no threads are waiting for @cond, this function has no effect.
758 * It is good practice to hold the same lock as the waiting thread
759 * while calling this function, though not required.
762 g_cond_signal (GCond *cond)
766 if G_UNLIKELY ((status = pthread_cond_signal (g_cond_get_impl (cond))) != 0)
767 g_thread_abort (status, "pthread_cond_signal");
774 * If threads are waiting for @cond, all of them are unblocked.
775 * If no threads are waiting for @cond, this function has no effect.
776 * It is good practice to lock the same mutex as the waiting threads
777 * while calling this function, though not required.
780 g_cond_broadcast (GCond *cond)
784 if G_UNLIKELY ((status = pthread_cond_broadcast (g_cond_get_impl (cond))) != 0)
785 g_thread_abort (status, "pthread_cond_broadcast");
791 * @mutex: a #GMutex that is currently locked
792 * @end_time: the monotonic time to wait until
794 * Waits until either @cond is signalled or @end_time has passed.
796 * As with g_cond_wait() it is possible that a spurious or stolen wakeup
797 * could occur. For that reason, waiting on a condition variable should
798 * always be in a loop, based on an explicitly-checked predicate.
800 * %TRUE is returned if the condition variable was signalled (or in the
801 * case of a spurious wakeup). %FALSE is returned if @end_time has
804 * The following code shows how to correctly perform a timed wait on a
805 * condition variable (extended the example presented in the
806 * documentation for #GCond):
810 * pop_data_timed (void)
815 * g_mutex_lock (&data_mutex);
817 * end_time = g_get_monotonic_time () + 5 * G_TIME_SPAN_SECOND;
818 * while (!current_data)
819 * if (!g_cond_wait_until (&data_cond, &data_mutex, end_time))
821 * // timeout has passed.
822 * g_mutex_unlock (&data_mutex);
826 * // there is data for us
827 * data = current_data;
828 * current_data = NULL;
830 * g_mutex_unlock (&data_mutex);
836 * Notice that the end time is calculated once, before entering the
837 * loop and reused. This is the motivation behind the use of absolute
838 * time on this API -- if a relative time of 5 seconds were passed
839 * directly to the call and a spurious wakeup occurred, the program would
840 * have to start over waiting again (which would lead to a total wait
841 * time of more than 5 seconds).
843 * Returns: %TRUE on a signal, %FALSE on a timeout
/* @end_time is in microseconds; convert to a struct timespec deadline.
 * The condvar was bound to CLOCK_MONOTONIC in g_cond_impl_new() (when
 * available), so the deadline is interpreted on that clock.
 * ETIMEDOUT maps to a %FALSE return; any other error aborts.
 * NOTE(review): the struct timespec declaration and the return
 * statements are on lines not visible in this excerpt. */
847 g_cond_wait_until (GCond *cond,
854 ts.tv_sec = end_time / 1000000;
855 ts.tv_nsec = (end_time % 1000000) * 1000;
857 if ((status = pthread_cond_timedwait (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
860 if G_UNLIKELY (status != ETIMEDOUT)
861 g_thread_abort (status, "pthread_cond_timedwait");
871 * The #GPrivate struct is an opaque data structure to represent a
872 * thread-local data key. It is approximately equivalent to the
873 * pthread_setspecific()/pthread_getspecific() APIs on POSIX and to
874 * TlsSetValue()/TlsGetValue() on Windows.
876 * If you don't already know why you might want this functionality,
877 * then you probably don't need it.
879 * #GPrivate is a very limited resource (as far as 128 per program,
880 * shared between all libraries). It is also not possible to destroy a
881 * #GPrivate after it has been used. As such, it is only ever acceptable
882 * to use #GPrivate in static scope, and even then sparingly so.
884 * See G_PRIVATE_INIT() for a couple of examples.
886 * The #GPrivate structure should be considered opaque. It should only
887 * be accessed via the <function>g_private_</function> functions.
892 * @notify: a #GDestroyNotify
894 * A macro to assist with the static initialisation of a #GPrivate.
896 * This macro is useful for the case that a #GDestroyNotify function
897 * should be associated with the key. This is needed when the key will be
898 * used to point at memory that should be deallocated when the thread
901 * Additionally, the #GDestroyNotify will also be called on the previous
902 * value stored in the key when g_private_replace() is used.
904 * If no #GDestroyNotify is needed, then use of this macro is not
905 * required -- if the #GPrivate is declared in static scope then it will
906 * be properly initialised by default (ie: to all zeros). See the
910 * static GPrivate name_key = G_PRIVATE_INIT (g_free);
912 * // return value should not be freed
914 * get_local_name (void)
916 * return g_private_get (&name_key);
920 * set_local_name (const gchar *name)
922 * g_private_replace (&name_key, g_strdup (name));
926 * static GPrivate count_key; // no free function
929 * get_local_count (void)
931 * return GPOINTER_TO_INT (g_private_get (&count_key));
935 * set_local_count (gint count)
937 * g_private_set (&count_key, GINT_TO_POINTER (count));
/* Allocate a pthread TLS key for a GPrivate, registering @notify as the
 * per-thread destructor.  Aborts on allocation or key-creation failure. */
944 static pthread_key_t *
945 g_private_impl_new (GDestroyNotify notify)
950 key = malloc (sizeof (pthread_key_t));
951 if G_UNLIKELY (key == NULL)
952 g_thread_abort (errno, "malloc");
953 status = pthread_key_create (key, notify);
954 if G_UNLIKELY (status != 0)
955 g_thread_abort (status, "pthread_key_create");
/* Delete a TLS key created by g_private_impl_new().  Only reached when a
 * lazy-init race is lost; GPrivate keys are otherwise never destroyed. */
961 g_private_impl_free (pthread_key_t *key)
965 status = pthread_key_delete (*key);
966 if G_UNLIKELY (status != 0)
967 g_thread_abort (status, "pthread_key_delete");
/* Lazily create the backing TLS key on first use, race-free via CAS;
 * the loser of a concurrent first use deletes its own key. */
971 static pthread_key_t *
972 g_private_get_impl (GPrivate *key)
974 pthread_key_t *impl = g_atomic_pointer_get (&key->p);
976 if G_UNLIKELY (impl == NULL)
978 impl = g_private_impl_new (key->notify);
979 if (!g_atomic_pointer_compare_and_exchange (&key->p, NULL, impl))
981 g_private_impl_free (impl);
993 * Returns the current value of the thread local variable @key.
995 * If the value has not yet been set in this thread, %NULL is returned.
996 * Values are never copied between threads (when a new thread is
997 * created, for example).
999 * Returns: the thread-local value
1002 g_private_get (GPrivate *key)
1004 /* quote POSIX: No errors are returned from pthread_getspecific(). */
1005 return pthread_getspecific (*g_private_get_impl (key));
1011 * @value: the new value
1013 * Sets the thread local variable @key to have the value @value in the
1016 * This function differs from g_private_replace() in the following way:
1017 * the #GDestroyNotify for @key is not called on the old value.
1020 g_private_set (GPrivate *key,
1025 if G_UNLIKELY ((status = pthread_setspecific (*g_private_get_impl (key), value)) != 0)
1026 g_thread_abort (status, "pthread_setspecific");
1030 * g_private_replace:
1032 * @value: the new value
1034 * Sets the thread local variable @key to have the value @value in the
1037 * This function differs from g_private_set() in the following way: if
1038 * the previous value was non-%NULL then the #GDestroyNotify handler for
1039 * @key is run on it.
/* Fetches the old value, invokes key->notify on it when both are
 * non-NULL, then installs @value.
 * NOTE(review): the notify call itself and the final brace are on lines
 * not visible in this excerpt -- confirm ordering (notify before or
 * after setspecific) against the full file. */
1044 g_private_replace (GPrivate *key,
1047 pthread_key_t *impl = g_private_get_impl (key);
1051 old = pthread_getspecific (*impl);
1052 if (old && key->notify)
1055 if G_UNLIKELY ((status = pthread_setspecific (*impl, value)) != 0)
1056 g_thread_abort (status, "pthread_setspecific");
/* posix_check_err(err, name): if @err is non-zero, log a fatal g_error()
 * with file/line/function context and the stringified error.
 * posix_check_cmd(cmd): run @cmd and check its return value, using the
 * command text itself as the error label. */
1061 #define posix_check_err(err, name) G_STMT_START{ \
1062 int error = (err); \
1064 g_error ("file %s: line %d (%s): error '%s' during '%s'", \
1065 __FILE__, __LINE__, G_STRFUNC, \
1066 g_strerror (error), name); \
1069 #define posix_check_cmd(cmd) posix_check_err (cmd, #cmd)
/* Member of the GThreadPosix struct (its declaration opens on a line not
 * visible in this excerpt): the underlying pthread handle. */
1075 pthread_t system_thread;
/* Release all resources of a GThreadPosix: detach the pthread, clear the
 * embedded lock, and free the struct.
 * NOTE(review): in the visible lines the detach appears unconditional;
 * the omitted line(s) before it likely guard against already-joined
 * threads -- confirm against the full file. */
1081 g_system_thread_free (GRealThread *thread)
1083 GThreadPosix *pt = (GThreadPosix *) thread;
1086 pthread_detach (pt->system_thread);
1088 g_mutex_clear (&pt->lock);
1090 g_slice_free (GThreadPosix, pt);
/* Create a new system thread running @thread_func.
 * On pthread_create failure with EAGAIN, sets @error
 * (G_THREAD_ERROR_AGAIN) and frees the partially built GThreadPosix;
 * other pthread_create errors are fatal via posix_check_err. */
1094 g_system_thread_new (GThreadFunc thread_func,
1098 GThreadPosix *thread;
1099 pthread_attr_t attr;
1102 thread = g_slice_new0 (GThreadPosix);
1104 posix_check_cmd (pthread_attr_init (&attr));
1106 #ifdef HAVE_PTHREAD_ATTR_SETSTACKSIZE
/* Clamp the requested stack size up to the platform minimum, if known. */
1109 #ifdef _SC_THREAD_STACK_MIN
1110 stack_size = MAX (sysconf (_SC_THREAD_STACK_MIN), stack_size);
1111 #endif /* _SC_THREAD_STACK_MIN */
1112 /* No error check here, because some systems can't do it and
1113 * we simply don't want threads to fail because of that. */
1114 pthread_attr_setstacksize (&attr, stack_size);
1116 #endif /* HAVE_PTHREAD_ATTR_SETSTACKSIZE */
1118 ret = pthread_create (&thread->system_thread, &attr, (void* (*)(void*))thread_func, thread);
/* The attr object is no longer needed once the thread is created. */
1120 posix_check_cmd (pthread_attr_destroy (&attr));
1124 g_set_error (error, G_THREAD_ERROR, G_THREAD_ERROR_AGAIN,
1125 "Error creating thread: %s", g_strerror (ret));
1126 g_slice_free (GThreadPosix, thread);
1130 posix_check_err (ret, "pthread_create");
1132 g_mutex_init (&thread->lock);
1134 return (GRealThread *) thread;
1140 * Causes the calling thread to voluntarily relinquish the CPU, so
1141 * that other threads can run.
1143 * This function is often used as a method to make busy wait less evil.
/* NOTE(review): the function body (presumably a sched_yield() or
 * equivalent call) is on lines not visible in this excerpt. */
1146 g_thread_yield (void)
/* Join @thread, serialised through pt->lock so that concurrent waiters
 * do not call pthread_join() on the same handle twice. */
1152 g_system_thread_wait (GRealThread *thread)
1154 GThreadPosix *pt = (GThreadPosix *) thread;
1156 g_mutex_lock (&pt->lock);
1160 posix_check_cmd (pthread_join (pt->system_thread, NULL));
1164 g_mutex_unlock (&pt->lock);
/* Terminate the calling thread with a NULL exit value. */
1168 g_system_thread_exit (void)
1170 pthread_exit (NULL);
/* Set the current thread's name where the platform supports it
 * (Linux prctl; PR_SET_NAME truncates names to 16 bytes incl. NUL). */
1174 g_system_thread_set_name (const gchar *name)
1176 #ifdef HAVE_SYS_PRCTL_H
1178 prctl (PR_SET_NAME, name, 0, 0, 0, 0);
1184 /* vim:set foldmethod=marker: */