/*
 * Copyright © 2011 Ryan Lortie
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * licence, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
 * USA.
 *
 * Author: Ryan Lortie <desrt@desrt.ca>
 */

#include "config.h"

#include "gatomic.h"
/**
 * SECTION:atomic_operations
 * @title: Atomic Operations
 * @short_description: basic atomic integer and pointer operations
 *
 * The following is a collection of compiler macros to provide atomic
 * access to integer and pointer-sized values.
 *
 * The macros that have 'int' in the name will operate on pointers to
 * #gint and #guint. The macros with 'pointer' in the name will operate
 * on pointers to any pointer-sized value, including #gsize. There is
 * no support for 64-bit operations on platforms with 32-bit pointers
 * because it is not generally possible to perform these operations
 * atomically.
 *
 * The get, set and exchange operations for integers and pointers
 * nominally operate on #gint and #gpointer, respectively. Of the
 * arithmetic operations, the 'add' operation operates on (and returns)
 * signed integer values (#gint and #gssize) and the 'and', 'or', and
 * 'xor' operations operate on (and return) unsigned integer values
 * (#guint and #gsize).
 *
 * All of the operations act as a full compiler and (where appropriate)
 * hardware memory barrier. Acquire and release or producer and
 * consumer barrier semantics are not available through this API.
 *
 * It is very important that all accesses to a particular integer or
 * pointer be performed using only this API and that different sizes of
 * operation are not mixed or used on overlapping memory regions. Never
 * read or assign directly from or to a value -- always use this API.
 *
 * For simple reference counting purposes you should use
 * g_atomic_int_inc() and g_atomic_int_dec_and_test(). Other uses that
 * fall outside of simple reference counting patterns are prone to
 * subtle bugs and occasionally undefined behaviour. It is also worth
 * noting that since all of these operations require global
 * synchronisation of the entire machine, they can be quite slow. In
 * the case of performing multiple atomic operations it can often be
 * faster to simply acquire a mutex lock around the critical area,
 * perform the operations normally and then release the lock.
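 *
 * As a rough sketch of the intended reference counting pattern (the
 * MyObject type and my_object_free() here are hypothetical, not GLib
 * API):
 *
 * |[
 * typedef struct { gint ref_count; } MyObject;
 *
 * static void
 * my_object_ref (MyObject *object)
 * {
 *   g_atomic_int_inc (&object->ref_count);
 * }
 *
 * static void
 * my_object_unref (MyObject *object)
 * {
 *   if (g_atomic_int_dec_and_test (&object->ref_count))
 *     my_object_free (object);
 * }
 * ]|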
 **/

/**
 * G_ATOMIC_LOCK_FREE:
 *
 * This macro is defined if the atomic operations of GLib are
 * implemented using real hardware atomic operations. This means that
 * the GLib atomic API can be used between processes and safely mixed
 * with other (hardware) atomic APIs.
 *
 * If this macro is not defined, the atomic operations may be
 * emulated using a mutex. In that case, the GLib atomic operations are
 * only atomic relative to themselves and within a single process.
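 *
 * Code that genuinely requires hardware atomics (for example, a
 * counter shared between processes) can test for the macro at compile
 * time. A minimal sketch of such a check:
 *
 * |[
 * #ifndef G_ATOMIC_LOCK_FREE
 * #error "This program requires hardware atomic operations"
 * #endif
 * ]|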
 **/

/* NOTE CAREFULLY:
 *
 * This file is the lowest-level part of GLib.
 *
 * Other low-level parts of GLib (threads, slice allocator, g_malloc,
 * messages, etc) call into these functions and macros to get work done.
 *
 * As such, these functions can not call back into any part of GLib
 * without risking recursion.
 */
#ifdef G_ATOMIC_LOCK_FREE

/* if G_ATOMIC_LOCK_FREE was defined by ./configure then we MUST
 * implement the atomic operations in a lock-free manner.
 */

#if defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
/**
 * g_atomic_int_get:
 * @atomic: a pointer to a #gint or #guint
 *
 * Gets the current value of @atomic.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (before the get).
 *
 * Returns: the value of the integer
 **/
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  return g_atomic_int_get (atomic);
}
/**
 * g_atomic_int_set:
 * @atomic: a pointer to a #gint or #guint
 * @newval: a new value to store
 *
 * Sets the value of @atomic to @newval.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (after the set).
 **/
void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  g_atomic_int_set (atomic, newval);
}
/**
 * g_atomic_int_inc:
 * @atomic: a pointer to a #gint or #guint
 *
 * Increments the value of @atomic by 1.
 *
 * Think of this operation as an atomic version of
 * <literal>{ *@atomic += 1; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 **/
void
(g_atomic_int_inc) (volatile gint *atomic)
{
  g_atomic_int_inc (atomic);
}
/**
 * g_atomic_int_dec_and_test:
 * @atomic: a pointer to a #gint or #guint
 *
 * Decrements the value of @atomic by 1.
 *
 * Think of this operation as an atomic version of
 * <literal>{ *@atomic -= 1; return (*@atomic == 0); }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the resultant value is zero
 **/
gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  return g_atomic_int_dec_and_test (atomic);
}
/**
 * g_atomic_int_compare_and_exchange:
 * @atomic: a pointer to a #gint or #guint
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * <literal>{ if (*@atomic == @oldval) { *@atomic = @newval; return TRUE; } else return FALSE; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
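 *
 * A typical use is a retry loop that recomputes the new value from the
 * old one until the exchange succeeds. As an illustrative sketch (not
 * a GLib function), atomically storing the maximum of the current and
 * a new value might look like:
 *
 * |[
 * static void
 * atomic_max (volatile gint *atomic,
 *             gint           val)
 * {
 *   gint old;
 *
 *   do
 *     old = g_atomic_int_get (atomic);
 *   while (old < val &&
 *          !g_atomic_int_compare_and_exchange (atomic, old, val));
 * }
 * ]|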
 *
 * Returns: %TRUE if the exchange took place
 **/
gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return g_atomic_int_compare_and_exchange (atomic, oldval, newval);
}
/**
 * g_atomic_int_add:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to add
 *
 * Atomically adds @val to the value of @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *atomic; *@atomic += @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Before version 2.30, this function did not return a value
 * (but g_atomic_int_exchange_and_add() did, and had the same meaning).
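 *
 * Since the old value is returned, this can be used, for example, to
 * hand out unique sequence numbers from a shared counter. A small
 * sketch (the counter and function are illustrative, not GLib API):
 *
 * |[
 * static volatile gint next_id = 0;
 *
 * static gint
 * allocate_id (void)
 * {
 *   return g_atomic_int_add (&next_id, 1);
 * }
 * ]|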
 *
 * Returns: the value of @atomic before the add, signed
 **/
gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  return g_atomic_int_add (atomic, val);
}
/**
 * g_atomic_int_and:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'and'
 *
 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *atomic; *@atomic &= @val; return tmp; }</literal>
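 *
 * Together with g_atomic_int_or(), this can be used to atomically set
 * and clear bits in a flag word. A brief sketch (the flag value is
 * hypothetical):
 *
 * |[
 * #define FLAG_BUSY 2u
 *
 * static volatile guint flags = 0;
 *
 * g_atomic_int_or (&flags, FLAG_BUSY);    // set the flag
 * g_atomic_int_and (&flags, ~FLAG_BUSY);  // clear the flag
 * ]|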
 *
 * Returns: the value of @atomic before the operation, unsigned
 **/
guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  return g_atomic_int_and (atomic, val);
}
/**
 * g_atomic_int_or:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'or'
 *
 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *atomic; *@atomic |= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 **/
guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  return g_atomic_int_or (atomic, val);
}
/**
 * g_atomic_int_xor:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'xor'
 *
 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *atomic; *@atomic ^= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 **/
guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  return g_atomic_int_xor (atomic, val);
}
/**
 * g_atomic_pointer_get:
 * @atomic: a pointer to a #gpointer-sized value
 *
 * Gets the current value of @atomic.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (before the get).
 *
 * Returns: the value of the pointer
 **/
gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  return g_atomic_pointer_get ((const volatile gpointer *) atomic);
}
/**
 * g_atomic_pointer_set:
 * @atomic: a pointer to a #gpointer-sized value
 * @newval: a new value to store
 *
 * Sets the value of @atomic to @newval.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (after the set).
 **/
void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  g_atomic_pointer_set ((volatile gpointer *) atomic, newval);
}
/**
 * g_atomic_pointer_compare_and_exchange:
 * @atomic: a pointer to a #gpointer-sized value
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * <literal>{ if (*@atomic == @oldval) { *@atomic = @newval; return TRUE; } else return FALSE; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
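 *
 * One common use is one-time initialisation of a shared pointer: each
 * thread creates a candidate value, and only the thread that succeeds
 * in exchanging %NULL keeps it. A sketch (create_object() and
 * free_object() are hypothetical helpers, not GLib API):
 *
 * |[
 * static volatile gpointer instance = NULL;
 *
 * gpointer candidate = create_object ();
 *
 * if (!g_atomic_pointer_compare_and_exchange (&instance, NULL, candidate))
 *   free_object (candidate);  // another thread won the race
 * ]|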
 *
 * Returns: %TRUE if the exchange took place
 **/
gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  return g_atomic_pointer_compare_and_exchange ((volatile gpointer *) atomic,
                                                oldval, newval);
}
/**
 * g_atomic_pointer_add:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to add
 *
 * Atomically adds @val to the value of @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *atomic; *@atomic += @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the add, signed
 **/
gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  return g_atomic_pointer_add ((volatile gpointer *) atomic, val);
}
/**
 * g_atomic_pointer_and:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to 'and'
 *
 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *atomic; *@atomic &= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 **/
gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  return g_atomic_pointer_and ((volatile gpointer *) atomic, val);
}
/**
 * g_atomic_pointer_or:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to 'or'
 *
 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *atomic; *@atomic |= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 **/
gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  return g_atomic_pointer_or ((volatile gpointer *) atomic, val);
}
/**
 * g_atomic_pointer_xor:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to 'xor'
 *
 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *atomic; *@atomic ^= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 **/
gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  return g_atomic_pointer_xor ((volatile gpointer *) atomic, val);
}
#elif defined (G_PLATFORM_WIN32)

#include <windows.h>
#if !defined(_M_AMD64) && !defined (_M_IA64) && !defined(_M_X64) && !(defined _MSC_VER && _MSC_VER <= 1200)
#define InterlockedAnd _InterlockedAnd
#define InterlockedOr _InterlockedOr
#define InterlockedXor _InterlockedXor
#endif

#if !defined (_MSC_VER) || _MSC_VER <= 1200
#include "gmessages.h"
/* Inlined versions for older compiler */
static gint
_gInterlockedAnd (volatile guint *atomic,
                  guint           val)
{
  gint i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i & val, i);
  } while (i != j);

  return j;
}
#define InterlockedAnd(a,b) _gInterlockedAnd(a,b)
static gint
_gInterlockedOr (volatile guint *atomic,
                 guint           val)
{
  gint i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i | val, i);
  } while (i != j);

  return j;
}
#define InterlockedOr(a,b) _gInterlockedOr(a,b)
static gint
_gInterlockedXor (volatile guint *atomic,
                  guint           val)
{
  gint i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i ^ val, i);
  } while (i != j);

  return j;
}
#define InterlockedXor(a,b) _gInterlockedXor(a,b)
#endif
/*
 * http://msdn.microsoft.com/en-us/library/ms684122(v=vs.85).aspx
 */
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  MemoryBarrier ();
  return *atomic;
}

void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  *atomic = newval;
  MemoryBarrier ();
}

void
(g_atomic_int_inc) (volatile gint *atomic)
{
  InterlockedIncrement (atomic);
}

gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  return InterlockedDecrement (atomic) == 0;
}
gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
}

gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  return InterlockedExchangeAdd (atomic, val);
}
guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  return InterlockedAnd (atomic, val);
}

guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  return InterlockedOr (atomic, val);
}

guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  return InterlockedXor (atomic, val);
}
gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  const volatile gpointer *ptr = atomic;

  MemoryBarrier ();
  return *ptr;
}

void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  volatile gpointer *ptr = atomic;

  *ptr = newval;
  MemoryBarrier ();
}

gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
}
gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedExchangeAdd64 (atomic, val);
#else
  return InterlockedExchangeAdd (atomic, val);
#endif
}
gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedAnd64 (atomic, val);
#else
  return InterlockedAnd (atomic, val);
#endif
}
gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedOr64 (atomic, val);
#else
  return InterlockedOr (atomic, val);
#endif
}
gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedXor64 (atomic, val);
#else
  return InterlockedXor (atomic, val);
#endif
}
#else

/* This error occurs when ./configure decided that we should be capable
 * of lock-free atomics but we find at compile-time that we are not.
 */
#error G_ATOMIC_LOCK_FREE defined, but incapable of lock-free atomics.

#endif /* defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) */
#else /* G_ATOMIC_LOCK_FREE */
/* We are not permitted to call into any GLib functions from here, so we
 * can not use GMutex.
 *
 * Fortunately, we already take care of the Windows case above, and all
 * non-Windows platforms on which glib runs have pthreads. Use those.
 */
#include <pthread.h>

static pthread_mutex_t g_atomic_lock = PTHREAD_MUTEX_INITIALIZER;
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  gint value;

  pthread_mutex_lock (&g_atomic_lock);
  value = *atomic;
  pthread_mutex_unlock (&g_atomic_lock);

  return value;
}
void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  pthread_mutex_lock (&g_atomic_lock);
  *atomic = newval;
  pthread_mutex_unlock (&g_atomic_lock);
}
void
(g_atomic_int_inc) (volatile gint *atomic)
{
  pthread_mutex_lock (&g_atomic_lock);
  (*atomic)++;
  pthread_mutex_unlock (&g_atomic_lock);
}
gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  gboolean is_zero;

  pthread_mutex_lock (&g_atomic_lock);
  is_zero = --(*atomic) == 0;
  pthread_mutex_unlock (&g_atomic_lock);

  return is_zero;
}
gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  gboolean success;

  pthread_mutex_lock (&g_atomic_lock);

  if ((success = (*atomic == oldval)))
    *atomic = newval;

  pthread_mutex_unlock (&g_atomic_lock);

  return success;
}
gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  gint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval + val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}
guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval & val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}
guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval | val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}
guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval ^ val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}
gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  const volatile gpointer *ptr = atomic;
  gpointer value;

  pthread_mutex_lock (&g_atomic_lock);
  value = *ptr;
  pthread_mutex_unlock (&g_atomic_lock);

  return value;
}
void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  volatile gpointer *ptr = atomic;

  pthread_mutex_lock (&g_atomic_lock);
  *ptr = newval;
  pthread_mutex_unlock (&g_atomic_lock);
}
gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  volatile gpointer *ptr = atomic;
  gboolean success;

  pthread_mutex_lock (&g_atomic_lock);

  if ((success = (*ptr == oldval)))
    *ptr = newval;

  pthread_mutex_unlock (&g_atomic_lock);

  return success;
}
gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  volatile gssize *ptr = atomic;
  gssize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval + val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}
gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval & val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}
gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval | val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}
gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval ^ val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}
#endif /* G_ATOMIC_LOCK_FREE */

/**
 * g_atomic_int_exchange_and_add:
 * @atomic: a pointer to a #gint
 * @val: the value to add
 *
 * This function existed before g_atomic_int_add() returned the prior
 * value of the integer (which it now does). It is retained only for
 * compatibility reasons. Don't use this function in new code.
 *
 * Returns: the value of @atomic before the add, signed
 * Deprecated: 2.30: Use g_atomic_int_add() instead.
 **/
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  return (g_atomic_int_add) (atomic, val);
}