/*
 * Copyright © 2008 Ryan Lortie
 * Copyright © 2010 Codethink Limited
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Ryan Lortie <desrt@desrt.ca>
 */
27 #include <glib/gmacros.h>
28 #include <glib/gmessages.h>
29 #include <glib/gatomic.h>
30 #include <glib/gslist.h>
31 #include <glib/gthread.h>
32 #include <glib/gslice.h>
34 #include "gthreadprivate.h"
36 #ifdef G_BIT_LOCK_FORCE_FUTEX_EMULATION
41 static GMutex g_futex_mutex;
42 static GSList *g_futex_address_list = NULL;
/*
 * We have headers for futex(2) on the build machine.  This does not
 * imply that every system that ever runs the resulting glib will have
 * kernel support for futex, but you'd have to have a pretty old
 * kernel in order for that not to be the case.
 *
 * If anyone actually gets bit by this, please file a bug. :)
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Fall back to the non-PRIVATE futex ops on kernels/headers too old to
 * have the process-private variants (the PRIVATE ops merely skip the
 * cross-process hashing and are otherwise equivalent here). */
#ifndef FUTEX_WAIT_PRIVATE
#define FUTEX_WAIT_PRIVATE FUTEX_WAIT
#define FUTEX_WAKE_PRIVATE FUTEX_WAKE
#endif
65 * @address: a pointer to an integer
66 * @value: the value that should be at @address
68 * Atomically checks that the value stored at @address is equal to
69 * @value and then blocks. If the value stored at @address is not
70 * equal to @value then this function returns immediately.
72 * To unblock, call g_futex_wake() on @address.
74 * This call may spuriously unblock (for example, in response to the
75 * process receiving a signal) but this is not guaranteed. Unlike the
76 * Linux system call of a similar name, there is no guarantee that a
77 * waiting process will unblock due to a g_futex_wake() call in a
81 g_futex_wait (const gint *address,
84 syscall (__NR_futex, address, (gsize) FUTEX_WAIT_PRIVATE, (gsize) value, NULL);
89 * @address: a pointer to an integer
91 * Nominally, wakes one thread that is blocked in g_futex_wait() on
92 * @address (if any thread is currently waiting).
94 * As mentioned in the documentation for g_futex_wait(), spurious
95 * wakeups may occur. As such, this call may result in more than one
96 * thread being woken up.
99 g_futex_wake (const gint *address)
101 syscall (__NR_futex, address, (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL);
106 /* emulate futex(2) */
115 g_futex_find_address (const gint *address)
119 for (node = g_futex_address_list; node; node = node->next)
121 WaitAddress *waiter = node->data;
123 if (waiter->address == address)
131 g_futex_wait (const gint *address,
134 g_mutex_lock (&g_futex_mutex);
135 if G_LIKELY (g_atomic_int_get (address) == value)
139 if ((waiter = g_futex_find_address (address)) == NULL)
141 waiter = g_slice_new (WaitAddress);
142 waiter->address = address;
143 g_cond_init (&waiter->wait_queue);
144 waiter->ref_count = 0;
145 g_futex_address_list =
146 g_slist_prepend (g_futex_address_list, waiter);
150 g_cond_wait (&waiter->wait_queue, &g_futex_mutex);
152 if (!--waiter->ref_count)
154 g_futex_address_list =
155 g_slist_remove (g_futex_address_list, waiter);
156 g_cond_clear (&waiter->wait_queue);
157 g_slice_free (WaitAddress, waiter);
160 g_mutex_unlock (&g_futex_mutex);
164 g_futex_wake (const gint *address)
168 /* need to lock here for two reasons:
169 * 1) need to acquire/release lock to ensure waiter is not in
170 * the process of registering a wait
171 * 2) need to -stay- locked until the end to ensure a wake()
172 * in another thread doesn't cause 'waiter' to stop existing
174 g_mutex_lock (&g_futex_mutex);
175 if ((waiter = g_futex_find_address (address)))
176 g_cond_signal (&waiter->wait_queue);
177 g_mutex_unlock (&g_futex_mutex);
181 #define CONTENTION_CLASSES 11
182 static gint g_bit_lock_contended[CONTENTION_CLASSES]; /* (atomic) */
184 #if (defined (i386) || defined (__amd64__))
185 #if G_GNUC_CHECK_VERSION(4, 5)
186 #define USE_ASM_GOTO 1
192 * @address: a pointer to an integer
193 * @lock_bit: a bit value between 0 and 31
195 * Sets the indicated @lock_bit in @address. If the bit is already
196 * set, this call will block until g_bit_unlock() unsets the
199 * Attempting to lock on two different bits within the same integer is
200 * not supported and will very probably cause deadlocks.
202 * The value of the bit that is set is (1u << @bit). If @bit is not
203 * between 0 and 31 then the result is undefined.
205 * This function accesses @address atomically. All other accesses to
206 * @address must be atomic in order for this function to work
207 * reliably. While @address has a `volatile` qualifier, this is a historical
208 * artifact and the argument passed to it should not be `volatile`.
213 g_bit_lock (volatile gint *address,
216 gint *address_nonvolatile = (gint *) address;
220 __asm__ volatile goto ("lock bts %1, (%0)\n"
223 : "r" (address), "r" (lock_bit)
230 guint mask = 1u << lock_bit;
233 v = (guint) g_atomic_int_get (address_nonvolatile);
236 guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended);
238 g_atomic_int_add (&g_bit_lock_contended[class], +1);
239 g_futex_wait (address_nonvolatile, v);
240 g_atomic_int_add (&g_bit_lock_contended[class], -1);
245 guint mask = 1u << lock_bit;
249 v = g_atomic_int_or (address_nonvolatile, mask);
253 guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended);
255 g_atomic_int_add (&g_bit_lock_contended[class], +1);
256 g_futex_wait (address_nonvolatile, v);
257 g_atomic_int_add (&g_bit_lock_contended[class], -1);
266 * @address: a pointer to an integer
267 * @lock_bit: a bit value between 0 and 31
269 * Sets the indicated @lock_bit in @address, returning %TRUE if
270 * successful. If the bit is already set, returns %FALSE immediately.
272 * Attempting to lock on two different bits within the same integer is
275 * The value of the bit that is set is (1u << @bit). If @bit is not
276 * between 0 and 31 then the result is undefined.
278 * This function accesses @address atomically. All other accesses to
279 * @address must be atomic in order for this function to work
280 * reliably. While @address has a `volatile` qualifier, this is a historical
281 * artifact and the argument passed to it should not be `volatile`.
283 * Returns: %TRUE if the lock was acquired
288 g_bit_trylock (volatile gint *address,
294 __asm__ volatile ("lock bts %2, (%1)\n"
298 : "r" (address), "r" (lock_bit)
303 gint *address_nonvolatile = (gint *) address;
304 guint mask = 1u << lock_bit;
307 v = g_atomic_int_or (address_nonvolatile, mask);
315 * @address: a pointer to an integer
316 * @lock_bit: a bit value between 0 and 31
318 * Clears the indicated @lock_bit in @address. If another thread is
319 * currently blocked in g_bit_lock() on this same bit then it will be
322 * This function accesses @address atomically. All other accesses to
323 * @address must be atomic in order for this function to work
324 * reliably. While @address has a `volatile` qualifier, this is a historical
325 * artifact and the argument passed to it should not be `volatile`.
330 g_bit_unlock (volatile gint *address,
333 gint *address_nonvolatile = (gint *) address;
336 __asm__ volatile ("lock btr %1, (%0)"
338 : "r" (address), "r" (lock_bit)
341 guint mask = 1u << lock_bit;
343 g_atomic_int_and (address_nonvolatile, ~mask);
347 guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended);
349 if (g_atomic_int_get (&g_bit_lock_contended[class]))
350 g_futex_wake (address_nonvolatile);
355 /* We emulate pointer-sized futex(2) because the kernel API only
358 * We assume that the 'interesting' part is always the lower order bits.
359 * This assumption holds because pointer bitlocks are restricted to
360 * using the low order bits of the pointer as the lock.
362 * On 32 bits, there is nothing to do since the pointer size is equal to
363 * the integer size. On little endian the lower-order bits don't move,
364 * so do nothing. Only on 64bit big endian do we need to do a bit of
365 * pointer arithmetic: the low order bits are shifted by 4 bytes. We
366 * have a helper function that always does the right thing here.
368 * Since we always consider the low-order bits of the integer value, a
369 * simple cast from (gsize) to (guint) always takes care of that.
371 * After that, pointer-sized futex becomes as simple as:
373 * g_futex_wait (g_futex_int_address (address), (guint) value);
377 * g_futex_wake (g_futex_int_address (int_address));
380 g_futex_int_address (const void *address)
382 const gint *int_address = address;
384 /* this implementation makes these (reasonable) assumptions: */
385 G_STATIC_ASSERT (G_BYTE_ORDER == G_LITTLE_ENDIAN ||
386 (G_BYTE_ORDER == G_BIG_ENDIAN &&
388 (sizeof (gpointer) == 4 || sizeof (gpointer) == 8)));
390 #if G_BYTE_ORDER == G_BIG_ENDIAN && GLIB_SIZEOF_VOID_P == 8
398 * g_pointer_bit_lock:
399 * @address: (not nullable): a pointer to a #gpointer-sized value
400 * @lock_bit: a bit value between 0 and 31
402 * This is equivalent to g_bit_lock, but working on pointers (or other
403 * pointer-sized values).
405 * For portability reasons, you may only lock on the bottom 32 bits of
408 * While @address has a `volatile` qualifier, this is a historical
409 * artifact and the argument passed to it should not be `volatile`.
414 (g_pointer_bit_lock) (volatile void *address,
417 void *address_nonvolatile = (void *) address;
419 g_return_if_fail (lock_bit < 32);
424 __asm__ volatile goto ("lock bts %1, (%0)\n"
427 : "r" (address), "r" ((gsize) lock_bit)
434 gsize *pointer_address = address_nonvolatile;
435 gsize mask = 1u << lock_bit;
438 v = (gsize) g_atomic_pointer_get (pointer_address);
441 guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended);
443 g_atomic_int_add (&g_bit_lock_contended[class], +1);
444 g_futex_wait (g_futex_int_address (address_nonvolatile), v);
445 g_atomic_int_add (&g_bit_lock_contended[class], -1);
450 gsize *pointer_address = address_nonvolatile;
451 gsize mask = 1u << lock_bit;
455 v = g_atomic_pointer_or (pointer_address, mask);
459 guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended);
461 g_atomic_int_add (&g_bit_lock_contended[class], +1);
462 g_futex_wait (g_futex_int_address (address_nonvolatile), (guint) v);
463 g_atomic_int_add (&g_bit_lock_contended[class], -1);
472 * g_pointer_bit_trylock:
473 * @address: (not nullable): a pointer to a #gpointer-sized value
474 * @lock_bit: a bit value between 0 and 31
476 * This is equivalent to g_bit_trylock(), but working on pointers (or
477 * other pointer-sized values).
479 * For portability reasons, you may only lock on the bottom 32 bits of
482 * While @address has a `volatile` qualifier, this is a historical
483 * artifact and the argument passed to it should not be `volatile`.
485 * Returns: %TRUE if the lock was acquired
490 (g_pointer_bit_trylock) (volatile void *address,
493 g_return_val_if_fail (lock_bit < 32, FALSE);
499 __asm__ volatile ("lock bts %2, (%1)\n"
503 : "r" (address), "r" ((gsize) lock_bit)
508 void *address_nonvolatile = (void *) address;
509 gsize *pointer_address = address_nonvolatile;
510 gsize mask = 1u << lock_bit;
513 g_return_val_if_fail (lock_bit < 32, FALSE);
515 v = g_atomic_pointer_or (pointer_address, mask);
523 * g_pointer_bit_unlock:
524 * @address: (not nullable): a pointer to a #gpointer-sized value
525 * @lock_bit: a bit value between 0 and 31
527 * This is equivalent to g_bit_unlock, but working on pointers (or other
528 * pointer-sized values).
530 * For portability reasons, you may only lock on the bottom 32 bits of
533 * While @address has a `volatile` qualifier, this is a historical
534 * artifact and the argument passed to it should not be `volatile`.
539 (g_pointer_bit_unlock) (volatile void *address,
542 void *address_nonvolatile = (void *) address;
544 g_return_if_fail (lock_bit < 32);
548 __asm__ volatile ("lock btr %1, (%0)"
550 : "r" (address), "r" ((gsize) lock_bit)
553 gsize *pointer_address = address_nonvolatile;
554 gsize mask = 1u << lock_bit;
556 g_atomic_pointer_and (pointer_address, ~mask);
560 guint class = ((gsize) address_nonvolatile) % G_N_ELEMENTS (g_bit_lock_contended);
561 if (g_atomic_int_get (&g_bit_lock_contended[class]))
562 g_futex_wake (g_futex_int_address (address_nonvolatile));