/*
 * Copyright © 2011 Ryan Lortie
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * licence, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
 * USA.
 *
 * Author: Ryan Lortie <desrt@desrt.ca>
 */

#include "config.h"

#include "gatomic.h"
/**
 * SECTION:atomic_operations
 * @title: Atomic Operations
 * @short_description: basic atomic integer and pointer operations
 *
 * The following is a collection of compiler macros to provide atomic
 * access to integer and pointer-sized values.
 *
 * The macros that have 'int' in the name will operate on pointers to
 * #gint and #guint.  The macros with 'pointer' in the name will operate
 * on pointers to any pointer-sized value, including #gsize.  There is
 * no support for 64-bit operations on platforms with 32-bit pointers
 * because it is not generally possible to perform these operations
 * atomically.
 *
 * The get, set and exchange operations for integers and pointers
 * nominally operate on #gint and #gpointer, respectively.  Of the
 * arithmetic operations, the 'add' operation operates on (and returns)
 * signed integer values (#gint and #gssize) and the 'and', 'or', and
 * 'xor' operations operate on (and return) unsigned integer values
 * (#guint and #gsize).
 *
 * All of the operations act as a full compiler and (where appropriate)
 * hardware memory barrier.  Acquire and release or producer and
 * consumer barrier semantics are not available through this API.
 *
 * On GCC, these macros are implemented using GCC intrinsic operations.
 * On non-GCC compilers they will evaluate to function calls to
 * functions implemented by GLib.
 *
 * If GLib itself was compiled with GCC then these functions will again
 * be implemented by the GCC intrinsics.  On Windows without GCC, the
 * interlocked API is used to implement the functions.
 *
 * With non-GCC compilers on non-Windows systems, the functions are
 * currently incapable of implementing true atomic operations --
 * instead, they fall back to holding a global lock while performing the
 * operation.  This provides atomicity between the threads of one
 * process, but not between separate processes.  For this reason, one
 * should exercise caution when attempting to use these operations on
 * shared memory regions.
 *
 * It is very important that all accesses to a particular integer or
 * pointer be performed using only this API and that different sizes of
 * operation are not mixed or used on overlapping memory regions.  Never
 * read or assign directly from or to a value -- always use this API.
 *
 * For simple reference counting purposes you should use
 * g_atomic_int_inc() and g_atomic_int_dec_and_test().  Other uses that
 * fall outside of simple reference counting patterns are prone to
 * subtle bugs and occasionally undefined behaviour.  It is also worth
 * noting that since all of these operations require global
 * synchronisation of the entire machine, they can be quite slow.  In
 * the case of performing multiple atomic operations it can often be
 * faster to simply acquire a mutex lock around the critical area,
 * perform the operations normally and then release the lock.
 */
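/* As a brief illustration of the reference-counting advice above, here
 * is a minimal sketch.  The Foo type and the foo_ref()/foo_unref()
 * helpers are hypothetical, not part of GLib:
 *
 *   typedef struct { gint ref_count; gchar *data; } Foo;
 *
 *   static Foo *
 *   foo_ref (Foo *foo)
 *   {
 *     g_atomic_int_inc (&foo->ref_count);
 *     return foo;
 *   }
 *
 *   static void
 *   foo_unref (Foo *foo)
 *   {
 *     (the thread that observes the count reaching zero frees it)
 *     if (g_atomic_int_dec_and_test (&foo->ref_count))
 *       {
 *         g_free (foo->data);
 *         g_free (foo);
 *       }
 *   }
 */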
/* This file is the lowest-level part of GLib.
 *
 * Other low-level parts of GLib (threads, slice allocator, g_malloc,
 * messages, etc) call into these functions and macros to get work done.
 *
 * As such, these functions cannot call back into any part of GLib
 * without risking recursion.
 */
#ifdef G_ATOMIC_OP_USE_GCC_BUILTINS

#ifndef __GNUC__
#error Using GCC builtin atomic ops, but not compiling with GCC?
#endif
/**
 * g_atomic_int_get:
 * @atomic: a pointer to a #gint or #guint
 *
 * Gets the current value of @atomic.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (before the get).
 *
 * Returns: the value of the integer
 */
gint
(g_atomic_int_get) (volatile gint *atomic)
{
  return g_atomic_int_get (atomic);
}
/**
 * g_atomic_int_set:
 * @atomic: a pointer to a #gint or #guint
 * @newval: a new value to store
 *
 * Sets the value of @atomic to @newval.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (after the set).
 */
void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  g_atomic_int_set (atomic, newval);
}
/**
 * g_atomic_int_inc:
 * @atomic: a pointer to a #gint or #guint
 *
 * Increments the value of @atomic by 1.
 *
 * Think of this operation as an atomic version of
 * <literal>{ *@atomic += 1; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 */
void
(g_atomic_int_inc) (volatile gint *atomic)
{
  g_atomic_int_inc (atomic);
}
/**
 * g_atomic_int_dec_and_test:
 * @atomic: a pointer to a #gint or #guint
 *
 * Decrements the value of @atomic by 1.
 *
 * Think of this operation as an atomic version of
 * <literal>{ *@atomic -= 1; return (*@atomic == 0); }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the resultant value is zero
 */
gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  return g_atomic_int_dec_and_test (atomic);
}
/**
 * g_atomic_int_compare_and_exchange:
 * @atomic: a pointer to a #gint or #guint
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * <literal>{ if (*@atomic == @oldval) { *@atomic = @newval; return TRUE; } else return FALSE; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the exchange took place
 */
gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return g_atomic_int_compare_and_exchange (atomic, oldval, newval);
}
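/* A common use of compare-and-exchange is a retry loop that applies an
 * arbitrary read-modify-write update atomically.  A minimal sketch;
 * clamped_add() is a hypothetical helper, not GLib API:
 *
 *   static void
 *   clamped_add (volatile gint *atomic,
 *                gint           val,
 *                gint           max)
 *   {
 *     gint oldval, newval;
 *
 *     do
 *       {
 *         oldval = g_atomic_int_get (atomic);
 *         newval = MIN (oldval + val, max);
 *       }
 *     while (!g_atomic_int_compare_and_exchange (atomic, oldval, newval));
 *   }
 */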
/**
 * g_atomic_int_add:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to add
 *
 * Atomically adds @val to the value of @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic += @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Before version 2.30, this function did not return a value
 * (but g_atomic_int_exchange_and_add() did, and had the same meaning).
 *
 * Returns: the value of @atomic before the add, signed
 */
gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  return g_atomic_int_add (atomic, val);
}
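/* Because the add returns the prior value, it acts as a fetch-and-add
 * and can, for example, hand out unique ticket numbers.  A sketch;
 * next_ticket and take_ticket() are hypothetical names:
 *
 *   static volatile gint next_ticket = 0;
 *
 *   static gint
 *   take_ticket (void)
 *   {
 *     return g_atomic_int_add (&next_ticket, 1);
 *   }
 */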
/**
 * g_atomic_int_and:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'and'
 *
 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic &= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 */
guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint          val)
{
  return g_atomic_int_and (atomic, val);
}
/**
 * g_atomic_int_or:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'or'
 *
 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic |= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 */
guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint          val)
{
  return g_atomic_int_or (atomic, val);
}
/**
 * g_atomic_int_xor:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'xor'
 *
 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic ^= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 */
guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint          val)
{
  return g_atomic_int_xor (atomic, val);
}
/**
 * g_atomic_pointer_get:
 * @atomic: a pointer to a #gpointer-sized value
 *
 * Gets the current value of @atomic.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (before the get).
 *
 * Returns: the value of the pointer
 */
gpointer
(g_atomic_pointer_get) (volatile void *atomic)
{
  return g_atomic_pointer_get ((volatile gpointer *) atomic);
}
/**
 * g_atomic_pointer_set:
 * @atomic: a pointer to a #gpointer-sized value
 * @newval: a new value to store
 *
 * Sets the value of @atomic to @newval.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (after the set).
 */
void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  g_atomic_pointer_set ((volatile gpointer *) atomic, newval);
}
/**
 * g_atomic_pointer_compare_and_exchange:
 * @atomic: a pointer to a #gpointer-sized value
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * <literal>{ if (*@atomic == @oldval) { *@atomic = @newval; return TRUE; } else return FALSE; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the exchange took place
 */
gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  return g_atomic_pointer_compare_and_exchange ((volatile gpointer *) atomic,
                                                oldval, newval);
}
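/* Pointer compare-and-exchange can be used to publish a lazily-created,
 * immutable object exactly once.  A sketch; create_object() and
 * destroy_object() are hypothetical, and the loser of the race discards
 * its copy in favour of the winner's:
 *
 *   static volatile gpointer instance = NULL;
 *
 *   static gpointer
 *   get_instance (void)
 *   {
 *     if (g_atomic_pointer_get (&instance) == NULL)
 *       {
 *         gpointer mine = create_object ();
 *
 *         if (!g_atomic_pointer_compare_and_exchange (&instance, NULL, mine))
 *           destroy_object (mine);
 *       }
 *
 *     return g_atomic_pointer_get (&instance);
 *   }
 */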
/**
 * g_atomic_pointer_add:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to add
 *
 * Atomically adds @val to the value of @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic += @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the add, signed
 */
gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  return g_atomic_pointer_add ((volatile gpointer *) atomic, val);
}
/**
 * g_atomic_pointer_and:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to 'and'
 *
 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic &= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 */
gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  return g_atomic_pointer_and ((volatile gpointer *) atomic, val);
}
/**
 * g_atomic_pointer_or:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to 'or'
 *
 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic |= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 */
gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  return g_atomic_pointer_or ((volatile gpointer *) atomic, val);
}
/**
 * g_atomic_pointer_xor:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to 'xor'
 *
 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic ^= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 */
gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  return g_atomic_pointer_xor ((volatile gpointer *) atomic, val);
}
#elif defined (G_PLATFORM_WIN32) && defined (HAVE_WIN32_BUILTINS_FOR_ATOMIC_OPERATIONS)

#include <windows.h>

#if !defined (_M_AMD64) && !defined (_M_IA64) && !defined (_M_X64)
#define InterlockedAnd _InterlockedAnd
#define InterlockedOr _InterlockedOr
#define InterlockedXor _InterlockedXor
#endif

/*
 * http://msdn.microsoft.com/en-us/library/ms684122(v=vs.85).aspx
 */
gint
(g_atomic_int_get) (volatile gint *atomic)
{
  MemoryBarrier ();
  return *atomic;
}
void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  *atomic = newval;
  MemoryBarrier ();
}
void
(g_atomic_int_inc) (volatile gint *atomic)
{
  InterlockedIncrement (atomic);
}
gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  return InterlockedDecrement (atomic) == 0;
}
gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
}
gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  return InterlockedExchangeAdd (atomic, val);
}
guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint          val)
{
  return InterlockedAnd (atomic, val);
}
guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint          val)
{
  return InterlockedOr (atomic, val);
}
guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint          val)
{
  return InterlockedXor (atomic, val);
}
gpointer
(g_atomic_pointer_get) (volatile void *atomic)
{
  volatile gpointer *ptr = atomic;

  MemoryBarrier ();
  return *ptr;
}
void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  volatile gpointer *ptr = atomic;

  *ptr = newval;
  MemoryBarrier ();
}
gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
}
gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedExchangeAdd64 (atomic, val);
#else
  return InterlockedExchangeAdd (atomic, val);
#endif
}
gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedAnd64 (atomic, val);
#else
  return InterlockedAnd (atomic, val);
#endif
}
gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedOr64 (atomic, val);
#else
  return InterlockedOr (atomic, val);
#endif
}
gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedXor64 (atomic, val);
#else
  return InterlockedXor (atomic, val);
#endif
}
#else

/* We are not permitted to call into any GLib functions from here, so we
 * cannot use GMutex.
 *
 * Fortunately, we already take care of the Windows case above, and all
 * non-Windows platforms on which GLib runs have pthreads.  Use those.
 */
#include <pthread.h>

static pthread_mutex_t g_atomic_lock = PTHREAD_MUTEX_INITIALIZER;
gint
(g_atomic_int_get) (volatile gint *atomic)
{
  gint value;

  pthread_mutex_lock (&g_atomic_lock);
  value = *atomic;
  pthread_mutex_unlock (&g_atomic_lock);

  return value;
}
void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  pthread_mutex_lock (&g_atomic_lock);
  *atomic = newval;
  pthread_mutex_unlock (&g_atomic_lock);
}
void
(g_atomic_int_inc) (volatile gint *atomic)
{
  pthread_mutex_lock (&g_atomic_lock);
  (*atomic)++;
  pthread_mutex_unlock (&g_atomic_lock);
}
gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  gboolean is_zero;

  pthread_mutex_lock (&g_atomic_lock);
  is_zero = --(*atomic) == 0;
  pthread_mutex_unlock (&g_atomic_lock);

  return is_zero;
}
gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  gboolean success;

  pthread_mutex_lock (&g_atomic_lock);

  if ((success = (*atomic == oldval)))
    *atomic = newval;

  pthread_mutex_unlock (&g_atomic_lock);

  return success;
}
gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  gint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval + val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}
guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint          val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval & val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}
guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint          val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval | val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}
guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint          val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval ^ val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}
gpointer
(g_atomic_pointer_get) (volatile void *atomic)
{
  volatile gpointer *ptr = atomic;
  gpointer value;

  pthread_mutex_lock (&g_atomic_lock);
  value = *ptr;
  pthread_mutex_unlock (&g_atomic_lock);

  return value;
}
void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  volatile gpointer *ptr = atomic;

  pthread_mutex_lock (&g_atomic_lock);
  *ptr = newval;
  pthread_mutex_unlock (&g_atomic_lock);
}
gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  volatile gpointer *ptr = atomic;
  gboolean success;

  pthread_mutex_lock (&g_atomic_lock);

  if ((success = (*ptr == oldval)))
    *ptr = newval;

  pthread_mutex_unlock (&g_atomic_lock);

  return success;
}
gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  volatile gssize *ptr = atomic;
  gssize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval + val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}
gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval & val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}
gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval | val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}
gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval ^ val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

#endif
/**
 * g_atomic_int_exchange_and_add:
 * @atomic: a pointer to a #gint
 * @val: the value to add
 *
 * This function existed before g_atomic_int_add() returned the prior
 * value of the integer (which it now does).  It is retained only for
 * compatibility reasons.  Don't use this function in new code.
 *
 * Returns: the value of @atomic before the add, signed
 *
 * Deprecated: 2.30: Use g_atomic_int_add() instead.
 */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  return (g_atomic_int_add) (atomic, val);
}
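/* Migration sketch for the deprecation note above (counter and old are
 * hypothetical names used only for illustration).  Code that reads the
 * old value while adding, e.g.
 *
 *   old = g_atomic_int_exchange_and_add (&counter, 1);
 *
 * can simply be rewritten as
 *
 *   old = g_atomic_int_add (&counter, 1);
 *
 * since g_atomic_int_add() now returns the prior value as well.
 */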