 * Copyright © 2008 Ryan Lortie
 * Copyright © 2010 Codethink Limited
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the licence, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Author: Ryan Lortie <desrt@desrt.ca>
#include <glib/gmessages.h>
#include <glib/gatomic.h>
#include <glib/gslist.h>
#include <glib/gthread.h>

#include "gthreadprivate.h"

#ifdef G_BIT_LOCK_FORCE_FUTEX_EMULATION

static GSList *g_futex_address_list = NULL;
static GMutex *g_futex_mutex = NULL;

_g_futex_thread_init (void) {
  g_futex_mutex = g_mutex_new ();
 * We have headers for futex(2) on the build machine. This does not
 * imply that every system that ever runs the resulting glib will have
 * kernel support for futex, but you'd have to have a pretty old
 * kernel in order for that not to be the case.
 *
 * If anyone actually gets bit by this, please file a bug. :)

#include <linux/futex.h>
#include <sys/syscall.h>
 * @address: a pointer to an integer
 * @value: the value that should be at @address
 *
 * Atomically checks that the value stored at @address is equal to
 * @value and then blocks. If the value stored at @address is not
 * equal to @value then this function returns immediately.
 *
 * To unblock, call g_futex_wake() on @address.
 *
 * This call may spuriously unblock (for example, in response to the
 * process receiving a signal) but this is not guaranteed. Unlike the
 * Linux system call of a similar name, there is no guarantee that a
 * waiting process will unblock due to a g_futex_wake() call in a
 * separate process.

g_futex_wait (const volatile gint *address,
  syscall (__NR_futex, address, (gsize) FUTEX_WAIT, (gsize) value, NULL);
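  /* The trailing NULL is the timeout argument: wait indefinitely.  If the
   * value at @address has already changed by the time the kernel checks,
   * FUTEX_WAIT returns immediately.  The return value is deliberately
   * ignored; the callers below re-check their condition and retry. */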
 * @address: a pointer to an integer
 *
 * Nominally, wakes one thread that is blocked in g_futex_wait() on
 * @address (if any thread is currently waiting).
 *
 * As mentioned in the documentation for g_futex_wait(), spurious
 * wakeups may occur. As such, this call may result in more than one
 * thread being woken up.

g_futex_wake (const volatile gint *address)
  syscall (__NR_futex, address, (gsize) FUTEX_WAKE, (gsize) 1, NULL);
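/* Illustrative sketch only (hypothetical names, not part of this file): the
 * wait/wake pair is used further down roughly like this, with the waiter
 * always re-checking its condition because wakeups may be spurious:
 *
 *   while (g_atomic_int_get (&flag) == busy_value)
 *     g_futex_wait (&flag, busy_value);   // sleep while the value is unchanged
 *
 *   // ...and on the releasing side:
 *   g_atomic_int_set (&flag, 0);
 *   g_futex_wake (&flag);                 // wake one waiter, if any
 */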
/* emulate futex(2) */
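/* The emulation keeps a global list of WaitAddress records, one per address
 * that currently has waiters.  Each record holds a GCond and a reference
 * count; all waiters on the same address share the record and block on its
 * condition variable under g_futex_mutex, and the record is removed from the
 * list and freed once the last waiter drops its reference. */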
  const volatile gint *address;

g_futex_find_address (const volatile gint *address)
  for (node = g_futex_address_list; node; node = node->next)
      WaitAddress *waiter = node->data;
      if (waiter->address == address)

g_futex_wait (const volatile gint *address,
  g_mutex_lock (g_futex_mutex);
  if G_LIKELY (g_atomic_int_get (address) == value)
      if ((waiter = g_futex_find_address (address)) == NULL)
          waiter = g_slice_new (WaitAddress);
          waiter->address = address;
          waiter->wait_queue = g_cond_new ();
          waiter->ref_count = 0;
          g_futex_address_list =
            g_slist_prepend (g_futex_address_list, waiter);

        g_cond_wait (waiter->wait_queue, g_futex_mutex);

      if (!--waiter->ref_count)
          g_futex_address_list =
            g_slist_remove (g_futex_address_list, waiter);
          g_cond_free (waiter->wait_queue);
          g_slice_free (WaitAddress, waiter);

  g_mutex_unlock (g_futex_mutex);

g_futex_wake (const volatile gint *address)
  /* need to lock here for two reasons:
   *   1) need to acquire/release lock to ensure waiter is not in
   *      the process of registering a wait
   *   2) need to -stay- locked until the end to ensure a wake()
   *      in another thread doesn't cause 'waiter' to stop existing
   */
  g_mutex_lock (g_futex_mutex);
  if ((waiter = g_futex_find_address (address)))
    g_cond_signal (waiter->wait_queue);
  g_mutex_unlock (g_futex_mutex);
#define CONTENTION_CLASSES 11
static volatile gint g_bit_lock_contended[CONTENTION_CLASSES];
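/* Each lock address hashes (by its numeric value) into one of these
 * counters.  g_bit_lock() increments the counter for its class before
 * sleeping in g_futex_wait(); g_bit_unlock() only issues a futex wake when
 * the counter for its class is non-zero, so the common uncontended unlock
 * avoids the syscall entirely. */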
#if (defined (i386) || defined (__amd64__))
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
#define USE_ASM_GOTO 1
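/* GCC 4.5 introduced "asm goto", which lets the inline "lock bts" fast paths
 * below branch straight to their contended slow path when the bit was
 * already set, instead of testing a result flag afterwards. */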
 * @address: a pointer to an integer
 * @lock_bit: a bit value between 0 and 31
 *
 * Sets the indicated @lock_bit in @address. If the bit is already
 * set, this call will block until g_bit_unlock() unsets the
 * corresponding bit.
 *
 * Attempting to lock on two different bits within the same integer is
 * not supported and will very probably cause deadlocks.
 *
 * The value of the bit that is set is (1u << @lock_bit). If @lock_bit
 * is not between 0 and 31 then the result is undefined.
 *
 * This function accesses @address atomically. All other accesses to
 * @address must be atomic in order for this function to work
 * reliably.
g_bit_lock (volatile gint *address,
  asm volatile goto ("lock bts %1, (%0)\n"
                     : "r" (address), "r" (lock_bit)

    guint mask = 1u << lock_bit;
    v = g_atomic_int_get (address);
        guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended);

        g_atomic_int_add (&g_bit_lock_contended[class], +1);
        g_futex_wait (address, v);
        g_atomic_int_add (&g_bit_lock_contended[class], -1);

    guint mask = 1u << lock_bit;
    v = g_atomic_int_or (address, mask);
        guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended);

        g_atomic_int_add (&g_bit_lock_contended[class], +1);
        g_futex_wait (address, v);
        g_atomic_int_add (&g_bit_lock_contended[class], -1);
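/* Illustrative sketch only (hypothetical names, not part of this file): a
 * caller typically dedicates one bit of an integer as the lock guarding the
 * remaining bits, e.g.:
 *
 *   static volatile gint object_flags;
 *   #define OBJECT_LOCK_BIT 0
 *
 *   g_bit_lock (&object_flags, OBJECT_LOCK_BIT);
 *   // ... inspect or update the other bits of object_flags (atomically) ...
 *   g_bit_unlock (&object_flags, OBJECT_LOCK_BIT);
 */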
 * @address: a pointer to an integer
 * @lock_bit: a bit value between 0 and 31
 * @returns: %TRUE if the lock was acquired
 *
 * Sets the indicated @lock_bit in @address, returning %TRUE if
 * successful. If the bit is already set, returns %FALSE immediately.
 *
 * Attempting to lock on two different bits within the same integer is
 * not supported.
 *
 * The value of the bit that is set is (1u << @lock_bit). If @lock_bit
 * is not between 0 and 31 then the result is undefined.
 *
 * This function accesses @address atomically. All other accesses to
 * @address must be atomic in order for this function to work
 * reliably.
g_bit_trylock (volatile gint *address,
  asm volatile ("lock bts %2, (%1)\n"
                : "r" (address), "r" (lock_bit)

    guint mask = 1u << lock_bit;
    v = g_atomic_int_or (address, mask);
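/* Illustrative sketch only (hypothetical names): the usual try-then-back-off
 * pattern, which never blocks:
 *
 *   if (g_bit_trylock (&object_flags, OBJECT_LOCK_BIT))
 *     {
 *       // ... brief critical section ...
 *       g_bit_unlock (&object_flags, OBJECT_LOCK_BIT);
 *     }
 *   else
 *     {
 *       // the bit was already set; do something else and retry later
 *     }
 */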
 * @address: a pointer to an integer
 * @lock_bit: a bit value between 0 and 31
 *
 * Clears the indicated @lock_bit in @address. If another thread is
 * currently blocked in g_bit_lock() on this same bit then it will be
 * woken up.
 *
 * This function accesses @address atomically. All other accesses to
 * @address must be atomic in order for this function to work
 * reliably.
g_bit_unlock (volatile gint *address,
  asm volatile ("lock btr %1, (%0)"
                : "r" (address), "r" (lock_bit)

    guint mask = 1u << lock_bit;
    g_atomic_int_and (address, ~mask);

    guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended);

    if (g_atomic_int_get (&g_bit_lock_contended[class]))
      g_futex_wake (address);
/* We emulate pointer-sized futex(2) because the kernel API only
 * supports integers.
 *
 * We assume that the 'interesting' part is always the lower order bits.
 * This assumption holds because pointer bitlocks are restricted to
 * using the low order bits of the pointer as the lock.
 *
 * On 32 bits, there is nothing to do since the pointer size is equal to
 * the integer size. On little endian the lower-order bits don't move,
 * so do nothing. Only on 64bit big endian do we need to do a bit of
 * pointer arithmetic: the low order bits are shifted by 4 bytes. We
 * have a helper function that always does the right thing here.
 *
 * Since we always consider the low-order bits of the integer value, a
 * simple cast from (gsize) to (guint) always takes care of that.
 *
 * After that, pointer-sized futex becomes as simple as:
 *
 *   g_futex_wait (g_futex_int_address (address), (guint) value);
 *
 * and
 *
 *   g_futex_wake (g_futex_int_address (int_address));
 */
static const volatile gint *
g_futex_int_address (const volatile void *address)
  const volatile gint *int_address = address;

#if G_BYTE_ORDER == G_BIG_ENDIAN && GLIB_SIZEOF_VOID_P == 8
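  /* On 64-bit big-endian the low-order 32 bits of the pointer-sized value
   * sit in the second gint, so step past the high-order half (this matches
   * the "shifted by 4 bytes" note in the comment above). */
  int_address++;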
 * g_pointer_bit_lock:
 * @address: a pointer to a #gpointer-sized value
 * @lock_bit: a bit value between 0 and 31
 *
 * This is equivalent to g_bit_lock, but working on pointers (or other
 * pointer-sized values).
 *
 * For portability reasons, you may only lock on the bottom 32 bits of
 * the pointer.
(g_pointer_bit_lock) (volatile void *address,
  g_return_if_fail (lock_bit < 32);

  asm volatile goto ("lock bts %1, (%0)\n"
                     : "r" (address), "r" ((gsize) lock_bit)

    volatile gsize *pointer_address = address;
    gsize mask = 1u << lock_bit;

    v = (gsize) g_atomic_pointer_get (pointer_address);
        guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended);

        g_atomic_int_add (&g_bit_lock_contended[class], +1);
        g_futex_wait (g_futex_int_address (address), v);
        g_atomic_int_add (&g_bit_lock_contended[class], -1);

    volatile gsize *pointer_address = address;
    gsize mask = 1u << lock_bit;

    v = g_atomic_pointer_or (pointer_address, mask);
        guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended);

        g_atomic_int_add (&g_bit_lock_contended[class], +1);
        g_futex_wait (g_futex_int_address (address), (guint) v);
        g_atomic_int_add (&g_bit_lock_contended[class], -1);
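/* Note: the futex (real or emulated) only watches a 32-bit value, which is
 * why the contended paths above wait on g_futex_int_address (address) with
 * the value reduced to a guint, and why pointer bit locks are restricted to
 * the bottom 32 bits of the pointer. */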
 * g_pointer_bit_trylock:
 * @address: a pointer to a #gpointer-sized value
 * @lock_bit: a bit value between 0 and 31
 * @returns: %TRUE if the lock was acquired
 *
 * This is equivalent to g_bit_trylock, but working on pointers (or
 * other pointer-sized values).
 *
 * For portability reasons, you may only lock on the bottom 32 bits of
 * the pointer.
(g_pointer_bit_trylock) (volatile void *address,
  g_return_val_if_fail (lock_bit < 32, FALSE);

  asm volatile ("lock bts %2, (%1)\n"
                : "r" (address), "r" ((gsize) lock_bit)

    volatile gsize *pointer_address = address;
    gsize mask = 1u << lock_bit;

    g_return_val_if_fail (lock_bit < 32, FALSE);

    v = g_atomic_pointer_or (pointer_address, mask);
 * g_pointer_bit_unlock:
 * @address: a pointer to a #gpointer-sized value
 * @lock_bit: a bit value between 0 and 31
 *
 * This is equivalent to g_bit_unlock, but working on pointers (or other
 * pointer-sized values).
 *
 * For portability reasons, you may only lock on the bottom 32 bits of
 * the pointer.
(g_pointer_bit_unlock) (volatile void *address,
  g_return_if_fail (lock_bit < 32);

  asm volatile ("lock btr %1, (%0)"
                : "r" (address), "r" ((gsize) lock_bit)

    volatile gsize *pointer_address = address;
    gsize mask = 1u << lock_bit;

    g_atomic_pointer_and (pointer_address, ~mask);

    guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended);
    if (g_atomic_int_get (&g_bit_lock_contended[class]))
      g_futex_wake (g_futex_int_address (address));
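/* Illustrative sketch only (hypothetical names): pointer bit locks are
 * typically used to guard a tagged pointer, with a low bit acting as the
 * lock while the remaining bits hold the pointer value:
 *
 *   static volatile gpointer tagged_ptr;
 *
 *   g_pointer_bit_lock (&tagged_ptr, 0);
 *   // ... read or replace the pointer stored in tagged_ptr (atomically) ...
 *   g_pointer_bit_unlock (&tagged_ptr, 0);
 */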