/*
 * Copyright © 2011 Ryan Lortie
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * licence, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Ryan Lortie <desrt@desrt.ca>
 */

#include "config.h"

#include "gatomic.h"
/**
 * SECTION:atomic_operations
 * @title: Atomic Operations
 * @short_description: basic atomic integer and pointer operations
 *
 * The following is a collection of compiler macros to provide atomic
 * access to integer and pointer-sized values.
 *
 * The macros that have 'int' in the name will operate on pointers to
 * #gint and #guint.  The macros with 'pointer' in the name will operate
 * on pointers to any pointer-sized value, including #gsize.  There is
 * no support for 64bit operations on platforms with 32bit pointers
 * because it is not generally possible to perform these operations
 * atomically.
 *
 * The get, set and exchange operations for integers and pointers
 * nominally operate on #gint and #gpointer, respectively.  Of the
 * arithmetic operations, the 'add' operation operates on (and returns)
 * signed integer values (#gint and #gssize) and the 'and', 'or', and
 * 'xor' operations operate on (and return) unsigned integer values
 * (#guint and #gsize).
 *
 * All of the operations act as a full compiler and (where appropriate)
 * hardware memory barrier.  Acquire and release or producer and
 * consumer barrier semantics are not available through this API.
 *
 * It is very important that all accesses to a particular integer or
 * pointer be performed using only this API and that different sizes of
 * operation are not mixed or used on overlapping memory regions.
 * Never read or assign directly from or to a value -- always use this
 * API.
 *
 * For simple reference counting purposes you should use
 * g_atomic_int_inc() and g_atomic_int_dec_and_test().  Other uses that
 * fall outside of simple reference counting patterns are prone to
 * subtle bugs and occasionally undefined behaviour.  It is also worth
 * noting that since all of these operations require global
 * synchronisation of the entire machine, they can be quite slow.  In
 * the case of performing multiple atomic operations it can often be
 * faster to simply acquire a mutex lock around the critical area,
 * perform the operations normally and then release the lock.
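 *
 * The reference counting pattern looks like this minimal sketch
 * (the MyObject type, its ref_count field and my_object_free() are
 * hypothetical, for illustration only):
 *
 * |[
 * static void
 * my_object_ref (MyObject *object)
 * {
 *   g_atomic_int_inc (&object->ref_count);
 * }
 *
 * static void
 * my_object_unref (MyObject *object)
 * {
 *   if (g_atomic_int_dec_and_test (&object->ref_count))
 *     my_object_free (object);
 * }
 * ]|
 **/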
/**
 * G_ATOMIC_LOCK_FREE:
 *
 * This macro is defined if the atomic operations of GLib are
 * implemented using real hardware atomic operations.  This means that
 * the GLib atomic API can be used between processes and safely mixed
 * with other (hardware) atomic APIs.
 *
 * If this macro is not defined, the atomic operations may be
 * emulated using a mutex.  In that case, the GLib atomic operations are
 * only atomic relative to themselves and within a single process.
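 *
 * Code that requires true hardware atomics (for example, to share a
 * counter between processes) can check at compile time.  A minimal
 * sketch, where shared_counter is a hypothetical pointer into shared
 * memory:
 *
 * |[
 * #ifdef G_ATOMIC_LOCK_FREE
 *   g_atomic_int_inc (shared_counter);
 * #else
 * #error "hardware atomic operations are required here"
 * #endif
 * ]|
 **/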
/* This file is the lowest-level part of GLib.
 *
 * Other low-level parts of GLib (threads, slice allocator, g_malloc,
 * messages, etc) call into these functions and macros to get work done.
 *
 * As such, these functions cannot call back into any part of GLib
 * without risking recursion.
 */
#ifdef G_ATOMIC_LOCK_FREE

/* if G_ATOMIC_LOCK_FREE was defined by ./configure then we MUST
 * implement the atomic operations in a lock-free manner.
 */

#if defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)

/* The parentheses around each function name below prevent expansion of
 * the same-named macro from gatomic.h, so these out-of-line versions
 * simply wrap the corresponding macro implementation.
 */
/**
 * g_atomic_int_get:
 * @atomic: a pointer to a #gint or #guint
 *
 * Gets the current value of @atomic.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (before the get).
 *
 * Returns: the value of the integer
 **/
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  return g_atomic_int_get (atomic);
}

/**
 * g_atomic_int_set:
 * @atomic: a pointer to a #gint or #guint
 * @newval: a new value to store
 *
 * Sets the value of @atomic to @newval.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (after the set).
 **/
void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  g_atomic_int_set (atomic, newval);
}

/**
 * g_atomic_int_inc:
 * @atomic: a pointer to a #gint or #guint
 *
 * Increments the value of @atomic by 1.
 *
 * Think of this operation as an atomic version of `{ *atomic += 1; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 **/
void
(g_atomic_int_inc) (volatile gint *atomic)
{
  g_atomic_int_inc (atomic);
}
/**
 * g_atomic_int_dec_and_test:
 * @atomic: a pointer to a #gint or #guint
 *
 * Decrements the value of @atomic by 1.
 *
 * Think of this operation as an atomic version of
 * `{ *atomic -= 1; return (*atomic == 0); }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the resultant value is zero
 **/
gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  return g_atomic_int_dec_and_test (atomic);
}
/**
 * g_atomic_int_compare_and_exchange:
 * @atomic: a pointer to a #gint or #guint
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * `{ if (*atomic == oldval) { *atomic = newval; return TRUE; } else return FALSE; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
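 *
 * A common pattern is to retry the exchange in a loop, recomputing the
 * desired value from the freshly observed one each time.  A minimal
 * sketch (the atomic_store_max() helper is hypothetical, not part of
 * GLib):
 *
 * |[
 * static void
 * atomic_store_max (volatile gint *atomic,
 *                   gint           val)
 * {
 *   gint old;
 *
 *   do
 *     old = g_atomic_int_get (atomic);
 *   while (old < val &&
 *          !g_atomic_int_compare_and_exchange (atomic, old, val));
 * }
 * ]|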
 *
 * Returns: %TRUE if the exchange took place
 **/
gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return g_atomic_int_compare_and_exchange (atomic, oldval, newval);
}
/**
 * g_atomic_int_add:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to add
 *
 * Atomically adds @val to the value of @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic += val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Before version 2.30, this function did not return a value
 * (but g_atomic_int_exchange_and_add() did, and had the same meaning).
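 *
 * Because the returned value is the value before the add, the
 * operation can hand out unique values.  A minimal sketch (the
 * next_id counter and allocate_id() helper are hypothetical):
 *
 * |[
 * static volatile gint next_id = 0;
 *
 * static guint
 * allocate_id (void)
 * {
 *   return (guint) g_atomic_int_add (&next_id, 1);
 * }
 * ]|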
 *
 * Returns: the value of @atomic before the add, signed
 **/
gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  return g_atomic_int_add (atomic, val);
}
/**
 * g_atomic_int_and:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'and'
 *
 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic &= val; return tmp; }`.
 *
 * Returns: the value of @atomic before the operation, unsigned
 **/
guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  return g_atomic_int_and (atomic, val);
}

/**
 * g_atomic_int_or:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'or'
 *
 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic |= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 **/
guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  return g_atomic_int_or (atomic, val);
}

/**
 * g_atomic_int_xor:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'xor'
 *
 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic ^= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 **/
guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  return g_atomic_int_xor (atomic, val);
}
/**
 * g_atomic_pointer_get:
 * @atomic: a pointer to a #gpointer-sized value
 *
 * Gets the current value of @atomic.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (before the get).
 *
 * Returns: the value of the pointer
 **/
gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  return g_atomic_pointer_get ((const volatile gpointer *) atomic);
}

/**
 * g_atomic_pointer_set:
 * @atomic: a pointer to a #gpointer-sized value
 * @newval: a new value to store
 *
 * Sets the value of @atomic to @newval.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (after the set).
 **/
void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  g_atomic_pointer_set ((volatile gpointer *) atomic, newval);
}
/**
 * g_atomic_pointer_compare_and_exchange:
 * @atomic: a pointer to a #gpointer-sized value
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * `{ if (*atomic == oldval) { *atomic = newval; return TRUE; } else return FALSE; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
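 *
 * For example, @atomic can serve as the head of a lock-free
 * singly-linked list.  A minimal push sketch (the Node type and its
 * next field are hypothetical, not part of GLib):
 *
 * |[
 * static void
 * push_node (volatile void *head,
 *            Node          *node)
 * {
 *   do
 *     node->next = g_atomic_pointer_get (head);
 *   while (!g_atomic_pointer_compare_and_exchange (head, node->next, node));
 * }
 * ]|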
 *
 * Returns: %TRUE if the exchange took place
 **/
gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  return g_atomic_pointer_compare_and_exchange ((volatile gpointer *) atomic,
                                                oldval, newval);
}
/**
 * g_atomic_pointer_add:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to add
 *
 * Atomically adds @val to the value of @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic += val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the add, signed
 **/
gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  return g_atomic_pointer_add ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_and:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to 'and'
 *
 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic &= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 **/
gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  return g_atomic_pointer_and ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_or:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to 'or'
 *
 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic |= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 **/
gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  return g_atomic_pointer_or ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_xor:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to 'xor'
 *
 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic ^= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 **/
gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  return g_atomic_pointer_xor ((volatile gpointer *) atomic, val);
}
#elif defined (G_PLATFORM_WIN32)

#include <windows.h>

#if !defined(_M_AMD64) && !defined (_M_IA64) && !defined(_M_X64) && !(defined _MSC_VER && _MSC_VER <= 1200)
#define InterlockedAnd _InterlockedAnd
#define InterlockedOr _InterlockedOr
#define InterlockedXor _InterlockedXor
#endif
#if !defined (_MSC_VER) || _MSC_VER <= 1200
#include "gmessages.h"
/* Inlined versions for older compiler: emulate the missing
 * Interlocked{And,Or,Xor} with a compare-and-exchange retry loop. */
static guint
_gInterlockedAnd (volatile guint *atomic,
                  guint           val)
{
  guint i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i & val, i);
  } while (i != j);

  return j;
}
#define InterlockedAnd(a,b) _gInterlockedAnd(a,b)

static guint
_gInterlockedOr (volatile guint *atomic,
                 guint           val)
{
  guint i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i | val, i);
  } while (i != j);

  return j;
}
#define InterlockedOr(a,b) _gInterlockedOr(a,b)

static guint
_gInterlockedXor (volatile guint *atomic,
                  guint           val)
{
  guint i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i ^ val, i);
  } while (i != j);

  return j;
}
#define InterlockedXor(a,b) _gInterlockedXor(a,b)
#endif
/*
 * http://msdn.microsoft.com/en-us/library/ms684122(v=vs.85).aspx
 */
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  MemoryBarrier ();
  return *atomic;
}

void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  *atomic = newval;
  MemoryBarrier ();
}

void
(g_atomic_int_inc) (volatile gint *atomic)
{
  InterlockedIncrement (atomic);
}

gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  return InterlockedDecrement (atomic) == 0;
}

gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
}

gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  return InterlockedExchangeAdd (atomic, val);
}
guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  return InterlockedAnd (atomic, val);
}

guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  return InterlockedOr (atomic, val);
}

guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  return InterlockedXor (atomic, val);
}
gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  const volatile gpointer *ptr = atomic;

  MemoryBarrier ();
  return *ptr;
}

void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  volatile gpointer *ptr = atomic;

  *ptr = newval;
  MemoryBarrier ();
}

gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
}
gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedExchangeAdd64 (atomic, val);
#else
  return InterlockedExchangeAdd (atomic, val);
#endif
}

gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedAnd64 (atomic, val);
#else
  return InterlockedAnd (atomic, val);
#endif
}

gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedOr64 (atomic, val);
#else
  return InterlockedOr (atomic, val);
#endif
}

gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedXor64 (atomic, val);
#else
  return InterlockedXor (atomic, val);
#endif
}
#else

/* This error occurs when ./configure decided that we should be capable
 * of lock-free atomics but we find at compile-time that we are not.
 */
#error G_ATOMIC_LOCK_FREE defined, but incapable of lock-free atomics.

#endif /* defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) */
#else /* G_ATOMIC_LOCK_FREE */
/* We are not permitted to call into any GLib functions from here, so we
 * cannot use GMutex.
 *
 * Fortunately, we already take care of the Windows case above, and all
 * non-Windows platforms on which glib runs have pthreads.  Use those.
 */
#include <pthread.h>

static pthread_mutex_t g_atomic_lock = PTHREAD_MUTEX_INITIALIZER;
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  gint value;

  pthread_mutex_lock (&g_atomic_lock);
  value = *atomic;
  pthread_mutex_unlock (&g_atomic_lock);

  return value;
}

void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  pthread_mutex_lock (&g_atomic_lock);
  *atomic = newval;
  pthread_mutex_unlock (&g_atomic_lock);
}

void
(g_atomic_int_inc) (volatile gint *atomic)
{
  pthread_mutex_lock (&g_atomic_lock);
  (*atomic)++;
  pthread_mutex_unlock (&g_atomic_lock);
}

gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  gboolean is_zero;

  pthread_mutex_lock (&g_atomic_lock);
  is_zero = --(*atomic) == 0;
  pthread_mutex_unlock (&g_atomic_lock);

  return is_zero;
}
gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  gboolean success;

  pthread_mutex_lock (&g_atomic_lock);

  if ((success = (*atomic == oldval)))
    *atomic = newval;

  pthread_mutex_unlock (&g_atomic_lock);

  return success;
}

gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  gint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval + val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval & val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval | val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval ^ val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}
gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  const volatile gpointer *ptr = atomic;
  gpointer value;

  pthread_mutex_lock (&g_atomic_lock);
  value = *ptr;
  pthread_mutex_unlock (&g_atomic_lock);

  return value;
}

void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  volatile gpointer *ptr = atomic;

  pthread_mutex_lock (&g_atomic_lock);
  *ptr = newval;
  pthread_mutex_unlock (&g_atomic_lock);
}

gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  volatile gpointer *ptr = atomic;
  gboolean success;

  pthread_mutex_lock (&g_atomic_lock);

  if ((success = (*ptr == oldval)))
    *ptr = newval;

  pthread_mutex_unlock (&g_atomic_lock);

  return success;
}
gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  volatile gssize *ptr = atomic;
  gssize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval + val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval & val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval | val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval ^ val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}
#endif /* G_ATOMIC_LOCK_FREE */

/**
 * g_atomic_int_exchange_and_add:
 * @atomic: a pointer to a #gint
 * @val: the value to add
 *
 * This function existed before g_atomic_int_add() returned the prior
 * value of the integer (which it now does).  It is retained only for
 * compatibility reasons.  Don't use this function in new code.
 *
 * Returns: the value of @atomic before the add, signed
 * Deprecated: 2.30: Use g_atomic_int_add() instead.
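 *
 * Migration is mechanical; a minimal sketch (the counter variable is
 * hypothetical):
 *
 * |[
 * old = g_atomic_int_exchange_and_add (&counter, 1);
 * // equivalent, in new code:
 * old = g_atomic_int_add (&counter, 1);
 * ]|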
 **/
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  return (g_atomic_int_add) (atomic, val);
}