X-Git-Url: http://review.tizen.org/git/?a=blobdiff_plain;f=glib%2Fgthread-posix.c;h=f4703f5e1a695d348172c847b6f9d2f9665542a2;hb=49a5d0f6f2aed99cd78f25655f137f4448e47d92;hp=237b8cb3552b45df8c30ef8959c75a5add1161fb;hpb=90679997ec7439ae520c97eb37b5ae36e0da6bba;p=platform%2Fupstream%2Fglib.git diff --git a/glib/gthread-posix.c b/glib/gthread-posix.c index 237b8cb..f4703f5 100644 --- a/glib/gthread-posix.c +++ b/glib/gthread-posix.c @@ -15,9 +15,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. + * License along with this library; if not, see . */ /* @@ -42,66 +40,223 @@ #include "config.h" #include "gthread.h" + #include "gthreadprivate.h" +#include "gslice.h" +#include "gmessages.h" +#include "gstrfuncs.h" +#include "gmain.h" -#include #include +#include #include #include -#include +#include + +#include +#include + +#ifdef HAVE_SCHED_H +#include +#endif +#ifdef HAVE_SYS_PRCTL_H +#include +#endif +#ifdef G_OS_WIN32 +#include +#endif + +/* clang defines __ATOMIC_SEQ_CST but doesn't support the GCC extension */ +#if defined(HAVE_FUTEX) && defined(__ATOMIC_SEQ_CST) && !defined(__clang__) +#define USE_NATIVE_MUTEX +#endif static void g_thread_abort (gint status, const gchar *function) { fprintf (stderr, "GLib (gthread-posix.c): Unexpected error from C library during '%s': %s. Aborting.\n", - strerror (status), function); + function, strerror (status)); abort (); } /* {{{1 GMutex */ -void -g_mutex_init (GMutex *mutex) + +#if !defined(USE_NATIVE_MUTEX) + +static pthread_mutex_t * +g_mutex_impl_new (void) { + pthread_mutexattr_t *pattr = NULL; + pthread_mutex_t *mutex; gint status; +#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP + pthread_mutexattr_t attr; +#endif + + mutex = malloc (sizeof (pthread_mutex_t)); + if G_UNLIKELY (mutex == NULL) + g_thread_abort (errno, "malloc"); - if G_UNLIKELY ((status = pthread_mutex_init (&mutex->impl, NULL)) != 0) +#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP + pthread_mutexattr_init (&attr); + pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ADAPTIVE_NP); + pattr = &attr; +#endif + + if G_UNLIKELY ((status = pthread_mutex_init (mutex, pattr)) != 0) g_thread_abort (status, "pthread_mutex_init"); + +#ifdef PTHREAD_ADAPTIVE_MUTEX_NP + pthread_mutexattr_destroy (&attr); +#endif + + return mutex; +} + +static void +g_mutex_impl_free (pthread_mutex_t *mutex) +{ + pthread_mutex_destroy (mutex); + free (mutex); } +static inline pthread_mutex_t * +g_mutex_get_impl (GMutex *mutex) +{ + pthread_mutex_t *impl = g_atomic_pointer_get (&mutex->p); + + if G_UNLIKELY (impl == NULL) + { + impl = g_mutex_impl_new (); + if (!g_atomic_pointer_compare_and_exchange (&mutex->p, NULL, impl)) + g_mutex_impl_free (impl); + impl = mutex->p; + } + + return impl; +} + + +/** + * g_mutex_init: + * @mutex: an uninitialized #GMutex + * + * Initializes a #GMutex so that it can be used. + * + * This function is useful to initialize a mutex that has been + * allocated on the stack, or as part of a larger structure. + * It is not necessary to initialize a mutex that has been + * statically allocated. + * + * |[ + * typedef struct { + * GMutex m; + * ... + * } Blob; + * + * Blob *b; + * + * b = g_new (Blob, 1); + * g_mutex_init (&b->m); + * ]| + * + * To undo the effect of g_mutex_init() when a mutex is no longer + * needed, use g_mutex_clear(). 
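+ *
+ * For instance, a teardown for the Blob example above might look like
+ * the following sketch (free_blob() is a hypothetical helper, not part
+ * of this API):
+ *
+ * |[
+ * void
+ * free_blob (Blob *b)
+ * {
+ *   g_mutex_clear (&b->m);
+ *   g_free (b);
+ * }
+ * ]|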
+ * + * Calling g_mutex_init() on an already initialized #GMutex leads + * to undefined behaviour. + * + * Since: 2.32 + */ void -g_mutex_clear (GMutex *mutex) +g_mutex_init (GMutex *mutex) { - gint status; + mutex->p = g_mutex_impl_new (); +} - if G_UNLIKELY ((status = pthread_mutex_destroy (&mutex->impl)) != 0) - g_thread_abort (status, "pthread_mutex_destroy"); +/** + * g_mutex_clear: + * @mutex: an initialized #GMutex + * + * Frees the resources allocated to a mutex with g_mutex_init(). + * + * This function should not be used with a #GMutex that has been + * statically allocated. + * + * Calling g_mutex_clear() on a locked mutex leads to undefined + * behaviour. + * + * Sine: 2.32 + */ +void +g_mutex_clear (GMutex *mutex) +{ + g_mutex_impl_free (mutex->p); } +/** + * g_mutex_lock: + * @mutex: a #GMutex + * + * Locks @mutex. If @mutex is already locked by another thread, the + * current thread will block until @mutex is unlocked by the other + * thread. + * + * #GMutex is neither guaranteed to be recursive nor to be + * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has + * already been locked by the same thread results in undefined behaviour + * (including but not limited to deadlocks). + */ void g_mutex_lock (GMutex *mutex) { gint status; - if G_UNLIKELY ((status = pthread_mutex_lock (&mutex->impl)) != 0) + if G_UNLIKELY ((status = pthread_mutex_lock (g_mutex_get_impl (mutex))) != 0) g_thread_abort (status, "pthread_mutex_lock"); } +/** + * g_mutex_unlock: + * @mutex: a #GMutex + * + * Unlocks @mutex. If another thread is blocked in a g_mutex_lock() + * call for @mutex, it will become unblocked and can lock @mutex itself. + * + * Calling g_mutex_unlock() on a mutex that is not locked by the + * current thread leads to undefined behaviour. + */ void g_mutex_unlock (GMutex *mutex) { gint status; - if G_UNLIKELY ((status = pthread_mutex_unlock (&mutex->impl)) != 0) - g_thread_abort (status, "pthread_mutex_lock"); + if G_UNLIKELY ((status = pthread_mutex_unlock (g_mutex_get_impl (mutex))) != 0) + g_thread_abort (status, "pthread_mutex_unlock"); } +/** + * g_mutex_trylock: + * @mutex: a #GMutex + * + * Tries to lock @mutex. If @mutex is already locked by another thread, + * it immediately returns %FALSE. Otherwise it locks @mutex and returns + * %TRUE. + * + * #GMutex is neither guaranteed to be recursive nor to be + * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has + * already been locked by the same thread results in undefined behaviour + * (including but not limited to deadlocks or arbitrary return values). 
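+ *
+ * As an illustration, a non-blocking attempt might be sketched as
+ * follows (my_mutex is assumed to be an initialised #GMutex and
+ * do_locked_work() is hypothetical):
+ *
+ * |[
+ * if (g_mutex_trylock (&my_mutex))
+ *   {
+ *     // we own the lock now and must release it ourselves
+ *     do_locked_work ();
+ *     g_mutex_unlock (&my_mutex);
+ *   }
+ * else
+ *   {
+ *     // somebody else holds the lock; do something else instead
+ *   }
+ * ]|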
+ + * Returns: %TRUE if @mutex could be locked + */ gboolean g_mutex_trylock (GMutex *mutex) { gint status; - if G_LIKELY ((status = pthread_mutex_trylock (&mutex->impl)) == 0) + if G_LIKELY ((status = pthread_mutex_trylock (g_mutex_get_impl (mutex))) == 0) return TRUE; if G_UNLIKELY (status != EBUSY) @@ -110,93 +265,647 @@ g_mutex_trylock (GMutex *mutex) return FALSE; } -/* {{{1 GCond */ +#endif /* !defined(USE_NATIVE_MUTEX) */ + +/* {{{1 GRecMutex */ + +static pthread_mutex_t * +g_rec_mutex_impl_new (void) +{ + pthread_mutexattr_t attr; + pthread_mutex_t *mutex; + mutex = malloc (sizeof (pthread_mutex_t)); + if G_UNLIKELY (mutex == NULL) + g_thread_abort (errno, "malloc"); + + pthread_mutexattr_init (&attr); + pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE); + pthread_mutex_init (mutex, &attr); + pthread_mutexattr_destroy (&attr); + + return mutex; +} + +static void +g_rec_mutex_impl_free (pthread_mutex_t *mutex) +{ + pthread_mutex_destroy (mutex); + free (mutex); +} + +static inline pthread_mutex_t * +g_rec_mutex_get_impl (GRecMutex *rec_mutex) +{ + pthread_mutex_t *impl = g_atomic_pointer_get (&rec_mutex->p); + + if G_UNLIKELY (impl == NULL) + { + impl = g_rec_mutex_impl_new (); + if (!g_atomic_pointer_compare_and_exchange (&rec_mutex->p, NULL, impl)) + g_rec_mutex_impl_free (impl); + impl = rec_mutex->p; + } + + return impl; +} + +/** + * g_rec_mutex_init: + * @rec_mutex: an uninitialized #GRecMutex + * + * Initializes a #GRecMutex so that it can be used. + * + * This function is useful to initialize a recursive mutex + * that has been allocated on the stack, or as part of a larger + * structure. + * + * It is not necessary to initialise a recursive mutex that has been + * statically allocated. + * + * |[ + * typedef struct { + * GRecMutex m; + * ... + * } Blob; + * + * Blob *b; + * + * b = g_new (Blob, 1); + * g_rec_mutex_init (&b->m); + * ]| + * + * Calling g_rec_mutex_init() on an already initialized #GRecMutex + * leads to undefined behaviour. + * + * To undo the effect of g_rec_mutex_init() when a recursive mutex + * is no longer needed, use g_rec_mutex_clear(). + * + * Since: 2.32 + */ void -g_cond_init (GCond *cond) +g_rec_mutex_init (GRecMutex *rec_mutex) +{ + rec_mutex->p = g_rec_mutex_impl_new (); +} + +/** + * g_rec_mutex_clear: + * @rec_mutex: an initialized #GRecMutex + * + * Frees the resources allocated to a recursive mutex with + * g_rec_mutex_init(). + * + * This function should not be used with a #GRecMutex that has been + * statically allocated. + * + * Calling g_rec_mutex_clear() on a locked recursive mutex leads + * to undefined behaviour. + * + * Sine: 2.32 + */ +void +g_rec_mutex_clear (GRecMutex *rec_mutex) +{ + g_rec_mutex_impl_free (rec_mutex->p); +} + +/** + * g_rec_mutex_lock: + * @rec_mutex: a #GRecMutex + * + * Locks @rec_mutex. If @rec_mutex is already locked by another + * thread, the current thread will block until @rec_mutex is + * unlocked by the other thread. If @rec_mutex is already locked + * by the current thread, the 'lock count' of @rec_mutex is increased. + * The mutex will only become available again when it is unlocked + * as many times as it has been locked. + * + * Since: 2.32 + */ +void +g_rec_mutex_lock (GRecMutex *mutex) +{ + pthread_mutex_lock (g_rec_mutex_get_impl (mutex)); +} + +/** + * g_rec_mutex_unlock: + * @rec_mutex: a #GRecMutex + * + * Unlocks @rec_mutex. If another thread is blocked in a + * g_rec_mutex_lock() call for @rec_mutex, it will become unblocked + * and can lock @rec_mutex itself. 
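+ *
+ * Because the lock counts recursive acquisitions, every
+ * g_rec_mutex_lock() call must be balanced by exactly one
+ * g_rec_mutex_unlock() call, as in this sketch (Node and walk_node()
+ * are hypothetical):
+ *
+ * |[
+ * static GRecMutex tree_lock;
+ *
+ * void
+ * walk_node (Node *node)
+ * {
+ *   g_rec_mutex_lock (&tree_lock);
+ *
+ *   if (node->left)
+ *     walk_node (node->left);    // re-enters the lock held by this thread
+ *   if (node->right)
+ *     walk_node (node->right);
+ *
+ *   g_rec_mutex_unlock (&tree_lock);   // one unlock for each lock
+ * }
+ * ]|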
+ * + * Calling g_rec_mutex_unlock() on a recursive mutex that is not + * locked by the current thread leads to undefined behaviour. + * + * Since: 2.32 + */ +void +g_rec_mutex_unlock (GRecMutex *rec_mutex) { + pthread_mutex_unlock (rec_mutex->p); +} + +/** + * g_rec_mutex_trylock: + * @rec_mutex: a #GRecMutex + * + * Tries to lock @rec_mutex. If @rec_mutex is already locked + * by another thread, it immediately returns %FALSE. Otherwise + * it locks @rec_mutex and returns %TRUE. + * + * Returns: %TRUE if @rec_mutex could be locked + * + * Since: 2.32 + */ +gboolean +g_rec_mutex_trylock (GRecMutex *rec_mutex) +{ + if (pthread_mutex_trylock (g_rec_mutex_get_impl (rec_mutex)) != 0) + return FALSE; + + return TRUE; +} + +/* {{{1 GRWLock */ + +static pthread_rwlock_t * +g_rw_lock_impl_new (void) +{ + pthread_rwlock_t *rwlock; gint status; - if G_UNLIKELY ((status = pthread_cond_init (&cond->impl, NULL)) != 0) + rwlock = malloc (sizeof (pthread_rwlock_t)); + if G_UNLIKELY (rwlock == NULL) + g_thread_abort (errno, "malloc"); + + if G_UNLIKELY ((status = pthread_rwlock_init (rwlock, NULL)) != 0) + g_thread_abort (status, "pthread_rwlock_init"); + + return rwlock; +} + +static void +g_rw_lock_impl_free (pthread_rwlock_t *rwlock) +{ + pthread_rwlock_destroy (rwlock); + free (rwlock); +} + +static inline pthread_rwlock_t * +g_rw_lock_get_impl (GRWLock *lock) +{ + pthread_rwlock_t *impl = g_atomic_pointer_get (&lock->p); + + if G_UNLIKELY (impl == NULL) + { + impl = g_rw_lock_impl_new (); + if (!g_atomic_pointer_compare_and_exchange (&lock->p, NULL, impl)) + g_rw_lock_impl_free (impl); + impl = lock->p; + } + + return impl; +} + +/** + * g_rw_lock_init: + * @rw_lock: an uninitialized #GRWLock + * + * Initializes a #GRWLock so that it can be used. + * + * This function is useful to initialize a lock that has been + * allocated on the stack, or as part of a larger structure. It is not + * necessary to initialise a reader-writer lock that has been statically + * allocated. + * + * |[ + * typedef struct { + * GRWLock l; + * ... + * } Blob; + * + * Blob *b; + * + * b = g_new (Blob, 1); + * g_rw_lock_init (&b->l); + * ]| + * + * To undo the effect of g_rw_lock_init() when a lock is no longer + * needed, use g_rw_lock_clear(). + * + * Calling g_rw_lock_init() on an already initialized #GRWLock leads + * to undefined behaviour. + * + * Since: 2.32 + */ +void +g_rw_lock_init (GRWLock *rw_lock) +{ + rw_lock->p = g_rw_lock_impl_new (); +} + +/** + * g_rw_lock_clear: + * @rw_lock: an initialized #GRWLock + * + * Frees the resources allocated to a lock with g_rw_lock_init(). + * + * This function should not be used with a #GRWLock that has been + * statically allocated. + * + * Calling g_rw_lock_clear() when any thread holds the lock + * leads to undefined behaviour. + * + * Sine: 2.32 + */ +void +g_rw_lock_clear (GRWLock *rw_lock) +{ + g_rw_lock_impl_free (rw_lock->p); +} + +/** + * g_rw_lock_writer_lock: + * @rw_lock: a #GRWLock + * + * Obtain a write lock on @rw_lock. If any thread already holds + * a read or write lock on @rw_lock, the current thread will block + * until all other threads have dropped their locks on @rw_lock. + * + * Since: 2.32 + */ +void +g_rw_lock_writer_lock (GRWLock *rw_lock) +{ + pthread_rwlock_wrlock (g_rw_lock_get_impl (rw_lock)); +} + +/** + * g_rw_lock_writer_trylock: + * @rw_lock: a #GRWLock + * + * Tries to obtain a write lock on @rw_lock. If any other thread holds + * a read or write lock on @rw_lock, it immediately returns %FALSE. 
+ * Otherwise it locks @rw_lock and returns %TRUE. + * + * Returns: %TRUE if @rw_lock could be locked + * + * Since: 2.32 + */ +gboolean +g_rw_lock_writer_trylock (GRWLock *rw_lock) +{ + if (pthread_rwlock_trywrlock (g_rw_lock_get_impl (rw_lock)) != 0) + return FALSE; + + return TRUE; +} + +/** + * g_rw_lock_writer_unlock: + * @rw_lock: a #GRWLock + * + * Release a write lock on @rw_lock. + * + * Calling g_rw_lock_writer_unlock() on a lock that is not held + * by the current thread leads to undefined behaviour. + * + * Since: 2.32 + */ +void +g_rw_lock_writer_unlock (GRWLock *rw_lock) +{ + pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock)); +} + +/** + * g_rw_lock_reader_lock: + * @rw_lock: a #GRWLock + * + * Obtain a read lock on @rw_lock. If another thread currently holds + * the write lock on @rw_lock or blocks waiting for it, the current + * thread will block. Read locks can be taken recursively. + * + * It is implementation-defined how many threads are allowed to + * hold read locks on the same lock simultaneously. + * + * Since: 2.32 + */ +void +g_rw_lock_reader_lock (GRWLock *rw_lock) +{ + pthread_rwlock_rdlock (g_rw_lock_get_impl (rw_lock)); +} + +/** + * g_rw_lock_reader_trylock: + * @rw_lock: a #GRWLock + * + * Tries to obtain a read lock on @rw_lock and returns %TRUE if + * the read lock was successfully obtained. Otherwise it + * returns %FALSE. + * + * Returns: %TRUE if @rw_lock could be locked + * + * Since: 2.32 + */ +gboolean +g_rw_lock_reader_trylock (GRWLock *rw_lock) +{ + if (pthread_rwlock_tryrdlock (g_rw_lock_get_impl (rw_lock)) != 0) + return FALSE; + + return TRUE; +} + +/** + * g_rw_lock_reader_unlock: + * @rw_lock: a #GRWLock + * + * Release a read lock on @rw_lock. + * + * Calling g_rw_lock_reader_unlock() on a lock that is not held + * by the current thread leads to undefined behaviour. + * + * Since: 2.32 + */ +void +g_rw_lock_reader_unlock (GRWLock *rw_lock) +{ + pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock)); +} + +/* {{{1 GCond */ + +#if !defined(USE_NATIVE_MUTEX) + +static pthread_cond_t * +g_cond_impl_new (void) +{ + pthread_condattr_t attr; + pthread_cond_t *cond; + gint status; + + pthread_condattr_init (&attr); + +#ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP +#elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC) + if G_UNLIKELY ((status = pthread_condattr_setclock (&attr, CLOCK_MONOTONIC)) != 0) + g_thread_abort (status, "pthread_condattr_setclock"); +#else +#error Cannot support GCond on your platform. +#endif + + cond = malloc (sizeof (pthread_cond_t)); + if G_UNLIKELY (cond == NULL) + g_thread_abort (errno, "malloc"); + + if G_UNLIKELY ((status = pthread_cond_init (cond, &attr)) != 0) g_thread_abort (status, "pthread_cond_init"); + + pthread_condattr_destroy (&attr); + + return cond; +} + +static void +g_cond_impl_free (pthread_cond_t *cond) +{ + pthread_cond_destroy (cond); + free (cond); +} + +static inline pthread_cond_t * +g_cond_get_impl (GCond *cond) +{ + pthread_cond_t *impl = g_atomic_pointer_get (&cond->p); + + if G_UNLIKELY (impl == NULL) + { + impl = g_cond_impl_new (); + if (!g_atomic_pointer_compare_and_exchange (&cond->p, NULL, impl)) + g_cond_impl_free (impl); + impl = cond->p; + } + + return impl; +} + +/** + * g_cond_init: + * @cond: an uninitialized #GCond + * + * Initialises a #GCond so that it can be used. + * + * This function is useful to initialise a #GCond that has been + * allocated as part of a larger structure. It is not necessary to + * initialise a #GCond that has been statically allocated. 
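+ *
+ * A #GCond allocated as part of a larger structure can be initialised
+ * analogously to the example given for g_mutex_init(); for instance
+ * (Queue is a hypothetical type):
+ *
+ * |[
+ * typedef struct {
+ *   GMutex lock;
+ *   GCond cond;
+ *   ...
+ * } Queue;
+ *
+ * Queue *q;
+ *
+ * q = g_new (Queue, 1);
+ * g_mutex_init (&q->lock);
+ * g_cond_init (&q->cond);
+ * ]|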
+ * + * To undo the effect of g_cond_init() when a #GCond is no longer + * needed, use g_cond_clear(). + * + * Calling g_cond_init() on an already-initialised #GCond leads + * to undefined behaviour. + * + * Since: 2.32 + */ +void +g_cond_init (GCond *cond) +{ + cond->p = g_cond_impl_new (); } +/** + * g_cond_clear: + * @cond: an initialised #GCond + * + * Frees the resources allocated to a #GCond with g_cond_init(). + * + * This function should not be used with a #GCond that has been + * statically allocated. + * + * Calling g_cond_clear() for a #GCond on which threads are + * blocking leads to undefined behaviour. + * + * Since: 2.32 + */ void g_cond_clear (GCond *cond) { - gint status; - - if G_UNLIKELY ((status = pthread_cond_destroy (&cond->impl)) != 0) - g_thread_abort (status, "pthread_cond_destroy"); + g_cond_impl_free (cond->p); } +/** + * g_cond_wait: + * @cond: a #GCond + * @mutex: a #GMutex that is currently locked + * + * Atomically releases @mutex and waits until @cond is signalled. + * When this function returns, @mutex is locked again and owned by the + * calling thread. + * + * When using condition variables, it is possible that a spurious wakeup + * may occur (ie: g_cond_wait() returns even though g_cond_signal() was + * not called). It's also possible that a stolen wakeup may occur. + * This is when g_cond_signal() is called, but another thread acquires + * @mutex before this thread and modifies the state of the program in + * such a way that when g_cond_wait() is able to return, the expected + * condition is no longer met. + * + * For this reason, g_cond_wait() must always be used in a loop. See + * the documentation for #GCond for a complete example. + **/ void g_cond_wait (GCond *cond, GMutex *mutex) { gint status; - if G_UNLIKELY ((status = pthread_cond_wait (&cond->impl, &mutex->impl)) != 0) + if G_UNLIKELY ((status = pthread_cond_wait (g_cond_get_impl (cond), g_mutex_get_impl (mutex))) != 0) g_thread_abort (status, "pthread_cond_wait"); } +/** + * g_cond_signal: + * @cond: a #GCond + * + * If threads are waiting for @cond, at least one of them is unblocked. + * If no threads are waiting for @cond, this function has no effect. + * It is good practice to hold the same lock as the waiting thread + * while calling this function, though not required. + */ void g_cond_signal (GCond *cond) { gint status; - if G_UNLIKELY ((status = pthread_cond_signal (&cond->impl)) != 0) + if G_UNLIKELY ((status = pthread_cond_signal (g_cond_get_impl (cond))) != 0) g_thread_abort (status, "pthread_cond_signal"); } +/** + * g_cond_broadcast: + * @cond: a #GCond + * + * If threads are waiting for @cond, all of them are unblocked. + * If no threads are waiting for @cond, this function has no effect. + * It is good practice to lock the same mutex as the waiting threads + * while calling this function, though not required. + */ void g_cond_broadcast (GCond *cond) { gint status; - if G_UNLIKELY ((status = pthread_cond_broadcast (&cond->impl)) != 0) + if G_UNLIKELY ((status = pthread_cond_broadcast (g_cond_get_impl (cond))) != 0) g_thread_abort (status, "pthread_cond_broadcast"); } +/** + * g_cond_wait_until: + * @cond: a #GCond + * @mutex: a #GMutex that is currently locked + * @end_time: the monotonic time to wait until + * + * Waits until either @cond is signalled or @end_time has passed. + * + * As with g_cond_wait() it is possible that a spurious or stolen wakeup + * could occur. 
For that reason, waiting on a condition variable should + * always be in a loop, based on an explicitly-checked predicate. + * + * %TRUE is returned if the condition variable was signalled (or in the + * case of a spurious wakeup). %FALSE is returned if @end_time has + * passed. + * + * The following code shows how to correctly perform a timed wait on a + * condition variable (extending the example presented in the + * documentation for #GCond): + * + * |[ + * gpointer + * pop_data_timed (void) + * { + * gint64 end_time; + * gpointer data; + * + * g_mutex_lock (&data_mutex); + * + * end_time = g_get_monotonic_time () + 5 * G_TIME_SPAN_SECOND; + * while (!current_data) + * if (!g_cond_wait_until (&data_cond, &data_mutex, end_time)) + * { + * // timeout has passed. + * g_mutex_unlock (&data_mutex); + * return NULL; + * } + * + * // there is data for us + * data = current_data; + * current_data = NULL; + * + * g_mutex_unlock (&data_mutex); + * + * return data; + * } + * ]| + * + * Notice that the end time is calculated once, before entering the + * loop and reused. This is the motivation behind the use of absolute + * time on this API -- if a relative time of 5 seconds were passed + * directly to the call and a spurious wakeup occurred, the program would + * have to start over waiting again (which would lead to a total wait + * time of more than 5 seconds). + * + * Returns: %TRUE on a signal, %FALSE on a timeout + * Since: 2.32 + **/ gboolean -g_cond_timed_wait (GCond *cond, - GMutex *mutex, - GTimeVal *abs_time) +g_cond_wait_until (GCond *cond, + GMutex *mutex, + gint64 end_time) { - struct timespec end_time; + struct timespec ts; gint status; - if (abs_time == NULL) - { - g_cond_wait (cond, mutex); - return TRUE; - } - - end_time.tv_sec = abs_time->tv_sec; - end_time.tv_nsec = abs_time->tv_usec * 1000; - - if ((status = pthread_cond_timedwait (&cond->impl, &mutex->impl, &end_time)) == 0) - return TRUE; +#ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP + /* end_time is given relative to the monotonic clock as returned by + * g_get_monotonic_time(). + * + * Since this pthreads wants the relative time, convert it back again. + */ + { + gint64 now = g_get_monotonic_time (); + gint64 relative; - if G_UNLIKELY (status != ETIMEDOUT) - g_thread_abort (status, "pthread_cond_timedwait"); + if (end_time <= now) + return FALSE; - return FALSE; -} + relative = end_time - now; -gboolean -g_cond_timedwait (GCond *cond, - GMutex *mutex, - gint64 abs_time) -{ - struct timespec end_time; - gint status; + ts.tv_sec = relative / 1000000; + ts.tv_nsec = (relative % 1000000) * 1000; - end_time.tv_sec = abs_time / 1000000; - end_time.tv_nsec = (abs_time % 1000000) * 1000; + if ((status = pthread_cond_timedwait_relative_np (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0) + return TRUE; + } +#elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC) + /* This is the exact check we used during init to set the clock to + * monotonic, so if we're in this branch, timedwait() will already be + * expecting a monotonic clock. + */ + { + ts.tv_sec = end_time / 1000000; + ts.tv_nsec = (end_time % 1000000) * 1000; - if ((status = pthread_cond_timedwait (&cond->impl, &mutex->impl, &end_time)) == 0) - return TRUE; + if ((status = pthread_cond_timedwait (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0) + return TRUE; + } +#else +#error Cannot support GCond on your platform. 
+#endif if G_UNLIKELY (status != ETIMEDOUT) g_thread_abort (status, "pthread_cond_timedwait"); @@ -204,73 +913,202 @@ g_cond_timedwait (GCond *cond, return FALSE; } +#endif /* defined(USE_NATIVE_MUTEX) */ + /* {{{1 GPrivate */ -GPrivate * -g_private_new (GDestroyNotify notify) +/** + * GPrivate: + * + * The #GPrivate struct is an opaque data structure to represent a + * thread-local data key. It is approximately equivalent to the + * pthread_setspecific()/pthread_getspecific() APIs on POSIX and to + * TlsSetValue()/TlsGetValue() on Windows. + * + * If you don't already know why you might want this functionality, + * then you probably don't need it. + * + * #GPrivate is a very limited resource (as far as 128 per program, + * shared between all libraries). It is also not possible to destroy a + * #GPrivate after it has been used. As such, it is only ever acceptable + * to use #GPrivate in static scope, and even then sparingly so. + * + * See G_PRIVATE_INIT() for a couple of examples. + * + * The #GPrivate structure should be considered opaque. It should only + * be accessed via the g_private_ functions. + */ + +/** + * G_PRIVATE_INIT: + * @notify: a #GDestroyNotify + * + * A macro to assist with the static initialisation of a #GPrivate. + * + * This macro is useful for the case that a #GDestroyNotify function + * should be associated the key. This is needed when the key will be + * used to point at memory that should be deallocated when the thread + * exits. + * + * Additionally, the #GDestroyNotify will also be called on the previous + * value stored in the key when g_private_replace() is used. + * + * If no #GDestroyNotify is needed, then use of this macro is not + * required -- if the #GPrivate is declared in static scope then it will + * be properly initialised by default (ie: to all zeros). See the + * examples below. 
+ * + * |[ + * static GPrivate name_key = G_PRIVATE_INIT (g_free); + * + * // return value should not be freed + * const gchar * + * get_local_name (void) + * { + * return g_private_get (&name_key); + * } + * + * void + * set_local_name (const gchar *name) + * { + * g_private_replace (&name_key, g_strdup (name)); + * } + * + * + * static GPrivate count_key; // no free function + * + * gint + * get_local_count (void) + * { + * return GPOINTER_TO_INT (g_private_get (&count_key)); + * } + * + * void + * set_local_count (gint count) + * { + * g_private_set (&count_key, GINT_TO_POINTER (count)); + * } + * ]| + * + * Since: 2.32 + **/ + +static pthread_key_t * +g_private_impl_new (GDestroyNotify notify) { - GPrivate *key; + pthread_key_t *key; + gint status; - key = malloc (sizeof (GPrivate)); + key = malloc (sizeof (pthread_key_t)); if G_UNLIKELY (key == NULL) g_thread_abort (errno, "malloc"); - g_private_init (key, notify); + status = pthread_key_create (key, notify); + if G_UNLIKELY (status != 0) + g_thread_abort (status, "pthread_key_create"); return key; } -void -g_private_init (GPrivate *key, - GDestroyNotify notify) +static void +g_private_impl_free (pthread_key_t *key) { - pthread_key_create (&key->key, notify); - key->ready = TRUE; + gint status; + + status = pthread_key_delete (*key); + if G_UNLIKELY (status != 0) + g_thread_abort (status, "pthread_key_delete"); + free (key); +} + +static inline pthread_key_t * +g_private_get_impl (GPrivate *key) +{ + pthread_key_t *impl = g_atomic_pointer_get (&key->p); + + if G_UNLIKELY (impl == NULL) + { + impl = g_private_impl_new (key->notify); + if (!g_atomic_pointer_compare_and_exchange (&key->p, NULL, impl)) + { + g_private_impl_free (impl); + impl = key->p; + } + } + + return impl; } +/** + * g_private_get: + * @key: a #GPrivate + * + * Returns the current value of the thread local variable @key. + * + * If the value has not yet been set in this thread, %NULL is returned. + * Values are never copied between threads (when a new thread is + * created, for example). + * + * Returns: the thread-local value + */ gpointer g_private_get (GPrivate *key) { - if (!key->ready) - return key->single_value; - /* quote POSIX: No errors are returned from pthread_getspecific(). */ - return pthread_getspecific (key->key); + return pthread_getspecific (*g_private_get_impl (key)); } +/** + * g_private_set: + * @key: a #GPrivate + * @value: the new value + * + * Sets the thread local variable @key to have the value @value in the + * current thread. + * + * This function differs from g_private_replace() in the following way: + * the #GDestroyNotify for @key is not called on the old value. + */ void g_private_set (GPrivate *key, gpointer value) { gint status; - if (!key->ready) - { - key->single_value = value; - return; - } - - if G_UNLIKELY ((status = pthread_setspecific (key->key, value)) != 0) + if G_UNLIKELY ((status = pthread_setspecific (*g_private_get_impl (key), value)) != 0) g_thread_abort (status, "pthread_setspecific"); } -/* {{{1 GThread */ +/** + * g_private_replace: + * @key: a #GPrivate + * @value: the new value + * + * Sets the thread local variable @key to have the value @value in the + * current thread. + * + * This function differs from g_private_set() in the following way: if + * the previous value was non-%NULL then the #GDestroyNotify handler for + * @key is run on it. 
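+ *
+ * For example, with the name_key from the G_PRIVATE_INIT() example
+ * above, a sketch of updating the value might look like this
+ * (new_name is a hypothetical string variable):
+ *
+ * |[
+ * // the previous name, if any, is handed to g_free() for us
+ * g_private_replace (&name_key, g_strdup (new_name));
+ * ]|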
+ * + * Since: 2.32 + **/ +void +g_private_replace (GPrivate *key, + gpointer value) +{ + pthread_key_t *impl = g_private_get_impl (key); + gpointer old; + gint status; -#include "glib.h" -#include "gthreadprivate.h" + old = pthread_getspecific (*impl); + if (old && key->notify) + key->notify (old); -#include -#include -#include -#ifdef HAVE_SYS_TIME_H -# include -#endif -#ifdef HAVE_UNISTD_H -# include -#endif + if G_UNLIKELY ((status = pthread_setspecific (*impl, value)) != 0) + g_thread_abort (status, "pthread_setspecific"); +} -#ifdef HAVE_SCHED_H -#include -#endif +/* {{{1 GThread */ #define posix_check_err(err, name) G_STMT_START{ \ int error = (err); \ @@ -282,228 +1120,327 @@ g_private_set (GPrivate *key, #define posix_check_cmd(cmd) posix_check_err (cmd, #cmd) -#ifdef G_ENABLE_DEBUG -static gboolean posix_check_cmd_prio_warned = FALSE; -# define posix_check_cmd_prio(cmd) G_STMT_START{ \ - int err = (cmd); \ - if (err == EPERM) \ - { \ - if (!posix_check_cmd_prio_warned) \ - { \ - posix_check_cmd_prio_warned = TRUE; \ - g_warning ("Priorities can only be changed " \ - "(resp. increased) by root."); \ - } \ - } \ - else \ - posix_check_err (err, #cmd); \ - }G_STMT_END -#else /* G_ENABLE_DEBUG */ -# define posix_check_cmd_prio(cmd) G_STMT_START{ \ - int err = (cmd); \ - if (err != EPERM) \ - posix_check_err (err, #cmd); \ - }G_STMT_END -#endif /* G_ENABLE_DEBUG */ - -#if defined (POSIX_MIN_PRIORITY) && defined (POSIX_MAX_PRIORITY) -# define HAVE_PRIORITIES 1 -static gint priority_normal_value; -# ifdef __FreeBSD__ - /* FreeBSD threads use different priority values from the POSIX_ - * defines so we just set them here. The corresponding macros - * PTHREAD_MIN_PRIORITY and PTHREAD_MAX_PRIORITY are implied to be - * exported by the docs, but they aren't. 
- */ -# define PRIORITY_LOW_VALUE 0 -# define PRIORITY_URGENT_VALUE 31 -# else /* !__FreeBSD__ */ -# define PRIORITY_LOW_VALUE POSIX_MIN_PRIORITY -# define PRIORITY_URGENT_VALUE POSIX_MAX_PRIORITY -# endif /* !__FreeBSD__ */ -# define PRIORITY_NORMAL_VALUE priority_normal_value - -# define PRIORITY_HIGH_VALUE \ - ((PRIORITY_NORMAL_VALUE + PRIORITY_URGENT_VALUE * 2) / 3) - -static gint -g_thread_priority_map (GThreadPriority priority) -{ - switch (priority) - { - case G_THREAD_PRIORITY_LOW: - return PRIORITY_LOW_VALUE; - - case G_THREAD_PRIORITY_NORMAL: - return PRIORITY_NORMAL_VALUE; - - case G_THREAD_PRIORITY_HIGH: - return PRIORITY_HIGH_VALUE; - - case G_THREAD_PRIORITY_URGENT: - return PRIORITY_URGENT_VALUE; +typedef struct +{ + GRealThread thread; - default: - g_assert_not_reached (); - } -} + pthread_t system_thread; + gboolean joined; + GMutex lock; +} GThreadPosix; -#endif /* POSIX_MIN_PRIORITY && POSIX_MAX_PRIORITY */ +void +g_system_thread_free (GRealThread *thread) +{ + GThreadPosix *pt = (GThreadPosix *) thread; -static gulong g_thread_min_stack_size = 0; + if (!pt->joined) + pthread_detach (pt->system_thread); -#define G_MUTEX_SIZE (sizeof (pthread_mutex_t)) + g_mutex_clear (&pt->lock); -void -_g_thread_impl_init(void) -{ -#ifdef _SC_THREAD_STACK_MIN - g_thread_min_stack_size = MAX (sysconf (_SC_THREAD_STACK_MIN), 0); -#endif /* _SC_THREAD_STACK_MIN */ -#ifdef HAVE_PRIORITIES - { - struct sched_param sched; - int policy; - posix_check_cmd (pthread_getschedparam (pthread_self(), &policy, &sched)); - priority_normal_value = sched.sched_priority; - } -#endif /* HAVE_PRIORITIES */ + g_slice_free (GThreadPosix, pt); } -static void -g_thread_create_posix_impl (GThreadFunc thread_func, - gpointer arg, - gulong stack_size, - gboolean joinable, - gboolean bound, - GThreadPriority priority, - gpointer thread, - GError **error) +GRealThread * +g_system_thread_new (GThreadFunc thread_func, + gulong stack_size, + GError **error) { + GThreadPosix *thread; pthread_attr_t attr; gint ret; - g_return_if_fail (thread_func); - g_return_if_fail (priority >= G_THREAD_PRIORITY_LOW); - g_return_if_fail (priority <= G_THREAD_PRIORITY_URGENT); + thread = g_slice_new0 (GThreadPosix); posix_check_cmd (pthread_attr_init (&attr)); #ifdef HAVE_PTHREAD_ATTR_SETSTACKSIZE if (stack_size) { - stack_size = MAX (g_thread_min_stack_size, stack_size); +#ifdef _SC_THREAD_STACK_MIN + stack_size = MAX (sysconf (_SC_THREAD_STACK_MIN), stack_size); +#endif /* _SC_THREAD_STACK_MIN */ /* No error check here, because some systems can't do it and * we simply don't want threads to fail because of that. */ pthread_attr_setstacksize (&attr, stack_size); } #endif /* HAVE_PTHREAD_ATTR_SETSTACKSIZE */ -#ifdef PTHREAD_SCOPE_SYSTEM - if (bound) - /* No error check here, because some systems can't do it and we - * simply don't want threads to fail because of that. */ - pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM); -#endif /* PTHREAD_SCOPE_SYSTEM */ - - posix_check_cmd (pthread_attr_setdetachstate (&attr, - joinable ? 
PTHREAD_CREATE_JOINABLE : PTHREAD_CREATE_DETACHED)); - -#ifdef HAVE_PRIORITIES - { - struct sched_param sched; - posix_check_cmd (pthread_attr_getschedparam (&attr, &sched)); - sched.sched_priority = g_thread_priority_map (priority); - posix_check_cmd_prio (pthread_attr_setschedparam (&attr, &sched)); - } -#endif /* HAVE_PRIORITIES */ - ret = pthread_create (thread, &attr, (void* (*)(void*))thread_func, arg); + ret = pthread_create (&thread->system_thread, &attr, (void* (*)(void*))thread_func, thread); posix_check_cmd (pthread_attr_destroy (&attr)); if (ret == EAGAIN) { g_set_error (error, G_THREAD_ERROR, G_THREAD_ERROR_AGAIN, - "Error creating thread: %s", g_strerror (ret)); - return; + "Error creating thread: %s", g_strerror (ret)); + g_slice_free (GThreadPosix, thread); + return NULL; } posix_check_err (ret, "pthread_create"); + + g_mutex_init (&thread->lock); + + return (GRealThread *) thread; } -static void -g_thread_yield_posix_impl (void) +/** + * g_thread_yield: + * + * Causes the calling thread to voluntarily relinquish the CPU, so + * that other threads can run. + * + * This function is often used as a method to make busy wait less evil. + */ +void +g_thread_yield (void) { - POSIX_YIELD_FUNC; + sched_yield (); } -static void -g_thread_join_posix_impl (gpointer thread) +void +g_system_thread_wait (GRealThread *thread) { - gpointer ignore; - posix_check_cmd (pthread_join (*(pthread_t*)thread, &ignore)); + GThreadPosix *pt = (GThreadPosix *) thread; + + g_mutex_lock (&pt->lock); + + if (!pt->joined) + { + posix_check_cmd (pthread_join (pt->system_thread, NULL)); + pt->joined = TRUE; + } + + g_mutex_unlock (&pt->lock); } -static void -g_thread_exit_posix_impl (void) +void +g_system_thread_exit (void) { pthread_exit (NULL); } -static void -g_thread_set_priority_posix_impl (gpointer thread, GThreadPriority priority) +void +g_system_thread_set_name (const gchar *name) { - g_return_if_fail (priority >= G_THREAD_PRIORITY_LOW); - g_return_if_fail (priority <= G_THREAD_PRIORITY_URGENT); -#ifdef HAVE_PRIORITIES - { - struct sched_param sched; - int policy; - posix_check_cmd (pthread_getschedparam (*(pthread_t*)thread, &policy, - &sched)); - sched.sched_priority = g_thread_priority_map (priority); - posix_check_cmd_prio (pthread_setschedparam (*(pthread_t*)thread, policy, - &sched)); - } -#endif /* HAVE_PRIORITIES */ +#ifdef HAVE_SYS_PRCTL_H +#ifdef PR_SET_NAME + prctl (PR_SET_NAME, name, 0, 0, 0, 0); +#endif +#endif } -static void -g_thread_self_posix_impl (gpointer thread) -{ - *(pthread_t*)thread = pthread_self(); -} - -static gboolean -g_thread_equal_posix_impl (gpointer thread1, gpointer thread2) -{ - return (pthread_equal (*(pthread_t*)thread1, *(pthread_t*)thread2) != 0); -} - -/* {{{1 Epilogue */ -GThreadFunctions g_thread_functions_for_glib_use = -{ - g_mutex_new, - g_mutex_lock, - g_mutex_trylock, - g_mutex_unlock, - g_mutex_free, - g_cond_new, - g_cond_signal, - g_cond_broadcast, - g_cond_wait, - g_cond_timed_wait, - g_cond_free, - g_private_new, - g_private_get, - g_private_set, - g_thread_create_posix_impl, - g_thread_yield_posix_impl, - g_thread_join_posix_impl, - g_thread_exit_posix_impl, - g_thread_set_priority_posix_impl, - g_thread_self_posix_impl, - g_thread_equal_posix_impl -}; +/* {{{1 GMutex and GCond futex implementation */ + +#if defined(USE_NATIVE_MUTEX) + +#include +#include + +/* We should expand the set of operations available in gatomic once we + * have better C11 support in GCC in common distributions (ie: 4.9). 
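+ *
+ * (Roughly speaking, exchange_acquire() below stands in for what C11
+ * would spell atomic_exchange_explicit (ptr, new, memory_order_acquire)
+ * and store_release() for atomic_store_explicit (ptr, new,
+ * memory_order_release); the GCC __atomic_*_4 builtins used here simply
+ * name the operand size explicitly.)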
+ * + * Before then, let's define a couple of useful things for our own + * purposes... + */ + +#define exchange_acquire(ptr, new) \ + __atomic_exchange_4((ptr), (new), __ATOMIC_ACQUIRE) +#define compare_exchange_acquire(ptr, old, new) \ + __atomic_compare_exchange_4((ptr), (old), (new), 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + +#define exchange_release(ptr, new) \ + __atomic_exchange_4((ptr), (new), __ATOMIC_RELEASE) +#define store_release(ptr, new) \ + __atomic_store_4((ptr), (new), __ATOMIC_RELEASE) + +/* Our strategy for the mutex is pretty simple: + * + * 0: not in use + * + * 1: acquired by one thread only, no contention + * + * > 1: contended + * + * + * As such, attempting to acquire the lock should involve an increment. + * If we find that the previous value was 0 then we can return + * immediately. + * + * On unlock, we always store 0 to indicate that the lock is available. + * If the value there was 1 before then we didn't have contention and + * can return immediately. If the value was something other than 1 then + * we have the contended case and need to wake a waiter. + * + * If it was not 0 then there is another thread holding it and we must + * wait. We must always ensure that we mark a value >1 while we are + * waiting in order to instruct the holder to do a wake operation on + * unlock. + */ + +void +g_mutex_init (GMutex *mutex) +{ + mutex->i[0] = 0; +} + +void +g_mutex_clear (GMutex *mutex) +{ + if G_UNLIKELY (mutex->i[0] != 0) + { + fprintf (stderr, "g_mutex_clear() called on uninitialised or locked mutex\n"); + abort (); + } +} + +static void __attribute__((noinline)) +g_mutex_lock_slowpath (GMutex *mutex) +{ + /* Set to 2 to indicate contention. If it was zero before then we + * just acquired the lock. + * + * Otherwise, sleep for as long as the 2 remains... + */ + while (exchange_acquire (&mutex->i[0], 2) != 0) + syscall (__NR_futex, &mutex->i[0], (gsize) FUTEX_WAIT, (gsize) 2, NULL); +} + +static void __attribute__((noinline)) +g_mutex_unlock_slowpath (GMutex *mutex, + guint prev) +{ + /* We seem to get better code for the uncontended case by splitting + * this out... + */ + if G_UNLIKELY (prev == 0) + { + fprintf (stderr, "Attempt to unlock mutex that was not locked\n"); + abort (); + } + + syscall (__NR_futex, &mutex->i[0], (gsize) FUTEX_WAKE, (gsize) 1, NULL); +} + +void +g_mutex_lock (GMutex *mutex) +{ + /* 0 -> 1 and we're done. Anything else, and we need to wait... */ + if G_UNLIKELY (g_atomic_int_add (&mutex->i[0], 1) != 0) + g_mutex_lock_slowpath (mutex); +} + +void +g_mutex_unlock (GMutex *mutex) +{ + guint prev; + + prev = exchange_release (&mutex->i[0], 0); + + /* 1-> 0 and we're done. Anything else and we need to signal... */ + if G_UNLIKELY (prev != 1) + g_mutex_unlock_slowpath (mutex, prev); +} + +gboolean +g_mutex_trylock (GMutex *mutex) +{ + guint zero = 0; + + /* We don't want to touch the value at all unless we can move it from + * exactly 0 to 1. + */ + return compare_exchange_acquire (&mutex->i[0], &zero, 1); +} + +/* Condition variables are implemented in a rather simple way as well. + * In many ways, futex() as an abstraction is even more ideally suited + * to condition variables than it is to mutexes. + * + * We store a generation counter. We sample it with the lock held and + * unlock before sleeping on the futex. + * + * Signalling simply involves increasing the counter and making the + * appropriate futex call. 
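+ *
+ * (It is the generation counter that guards against a lost wakeup: the
+ * waiter samples it while still holding the mutex, and FUTEX_WAIT only
+ * puts the thread to sleep if the value in memory still equals that
+ * sample.  If a signal slips in between the unlock and the futex call,
+ * the counter has already changed and the syscall returns immediately.)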
+ * + * The only thing that is the slightest bit complicated is timed waits + * because we must convert our absolute time to relative. + */ + +void +g_cond_init (GCond *cond) +{ + cond->i[0] = 0; +} + +void +g_cond_clear (GCond *cond) +{ +} + +void +g_cond_wait (GCond *cond, + GMutex *mutex) +{ + guint sampled = g_atomic_int_get (&cond->i[0]); + + g_mutex_unlock (mutex); + syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAIT, (gsize) sampled, NULL); + g_mutex_lock (mutex); +} + +void +g_cond_signal (GCond *cond) +{ + g_atomic_int_inc (&cond->i[0]); + + syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAKE, (gsize) 1, NULL); +} + +void +g_cond_broadcast (GCond *cond) +{ + g_atomic_int_inc (&cond->i[0]); + + syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAKE, (gsize) INT_MAX, NULL); +} + +gboolean +g_cond_wait_until (GCond *cond, + GMutex *mutex, + gint64 end_time) +{ + struct timespec now; + struct timespec span; + guint sampled; + int res; + + if (end_time < 0) + return FALSE; + + clock_gettime (CLOCK_MONOTONIC, &now); + span.tv_sec = (end_time / 1000000) - now.tv_sec; + span.tv_nsec = ((end_time % 1000000) * 1000) - now.tv_nsec; + if (span.tv_nsec < 0) + { + span.tv_nsec += 1000000000; + span.tv_sec--; + } + + if (span.tv_sec < 0) + return FALSE; + + sampled = cond->i[0]; + g_mutex_unlock (mutex); + res = syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAIT, (gsize) sampled, &span); + g_mutex_lock (mutex); + + return (res < 0 && errno == ETIMEDOUT) ? FALSE : TRUE; +} + +#endif + /* {{{1 Epilogue */ /* vim:set foldmethod=marker: */