-/* GLIB - Library of useful routines for C programming
- * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
- *
- * GAtomic: atomic integer operation.
- * Copyright (C) 2003 Sebastian Wilhelmi
+/*
+ * Copyright © 2011 Ryan Lortie
*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * licence, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * This library is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
- */
-
-/*
- * Modified by the GLib Team and others 1997-2000. See the AUTHORS
- * file for a list of people on the GLib Team. See the ChangeLog
- * files for a list of changes. These files are distributed with
- * GLib at ftp://ftp.gtk.org/pub/gtk/.
- */
-
-#ifndef __G_ATOMIC_H__
-#define __G_ATOMIC_H__
-
-#include <glib/gtypes.h>
-
-G_BEGIN_DECLS
-
-#ifdef G_THREADS_ENABLED
-
-gint32 g_atomic_int_exchange_and_add_fallback (gint32 *atomic,
- gint32 val);
-void g_atomic_int_add_fallback (gint32 *atomic,
- gint32 val);
-gboolean g_atomic_int_compare_and_exchange_fallback (gint32 *atomic,
- gint32 oldval,
- gint32 newval);
-gboolean g_atomic_pointer_compare_and_exchange_fallback (gpointer *atomic,
- gpointer oldval,
- gpointer newval);
-
-# if defined (__GNUC__)
-# if defined (G_ATOMIC_INLINED_IMPLEMENTATION_I486)
-/* Adapted from CVS version 1.10 of glibc's sysdeps/i386/i486/bits/atomic.h
- */
-static inline gint32
-g_atomic_int_exchange_and_add (gint32 *atomic,
- gint32 val)
-{
- gint32 result;
-
- __asm__ __volatile__ ("lock; xaddl %0,%1"
- : "=r" (result), "=m" (*atomic)
- : "0" (val), "m" (*atomic));
- return result;
-}
-
-static inline void
-g_atomic_int_add (gint32 *atomic,
- gint32 val)
-{
- __asm__ __volatile__ ("lock; addl %1,%0"
- : "=m" (*atomic)
- : "ir" (val), "m" (*atomic));
-}
-
-static inline gboolean
-g_atomic_int_compare_and_exchange (gint32 *atomic,
- gint32 oldval,
- gint32 newval)
-{
- gint32 result;
-
- __asm __volatile ("lock; cmpxchgl %2, %1"
- : "=a" (result), "=m" (*atomic)
- : "r" (newval), "m" (*atomic), "0" (oldval));
-
- return result == oldval;
-}
-
-/* The same code as above, as on i386 gpointer is 32 bit as well.
- * Duplicating the code here seems more natural than casting the
- * arguments and calling the former function */
-
-static inline gboolean
-g_atomic_pointer_compare_and_exchange (gpointer *atomic,
- gpointer oldval,
- gpointer newval)
-{
- gpointer result;
-
- __asm __volatile ("lock; cmpxchgl %2, %1"
- : "=a" (result), "=m" (*atomic)
- : "r" (newval), "m" (*atomic), "0" (oldval));
-
- return result == oldval;
-}
-
-# define G_ATOMIC_MEMORY_BARRIER() /* Not needed */
-
-# elif defined(G_ATOMIC_INLINED_IMPLEMENTATION_SPARCV9) \
- && (defined(__sparcv8) || defined(__sparcv9) || defined(__sparc_v9__))
-/* Adapted from CVS version 1.3 of glibc's sysdeps/sparc/sparc64/bits/atomic.h
- */
-/* Why the test for __sparcv8, wheras really the sparcv9 architecture
- * is required for the folowing assembler instructions? On
- * sparc-solaris the only difference detectable at compile time
- * between no -m and -mcpu=v9 is __sparcv8.
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*
- * However, in case -mcpu=v8 is set, the assembler will fail. This
- * should be rare however, as there are only very few v8-not-v9
- * machines still out there (and we can't do better).
+ * Author: Ryan Lortie <desrt@desrt.ca>
*/
-static inline gboolean
-g_atomic_int_compare_and_exchange (gint32 *atomic,
- gint32 oldval,
- gint32 newval)
-{
- gint32 result;
- __asm __volatile ("cas [%4], %2, %0"
- : "=r" (result), "=m" (*atomic)
- : "r" (oldval), "m" (*atomic), "r" (atomic),
- "0" (newval));
- return result != 0;
-}
-
-# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
-static inline gboolean
-g_atomic_pointer_compare_and_exchange (gpointer *atomic,
- gpointer oldval,
- gpointer newval)
-{
- gpointer result;
- __asm __volatile ("cas [%4], %2, %0"
- : "=r" (result), "=m" (*atomic)
- : "r" (oldval), "m" (*atomic), "r" (atomic),
- "0" (newval));
- return result != 0;
-}
-# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
-static inline gboolean
-g_atomic_pointer_compare_and_exchange (gpointer *atomic,
- gpointer oldval,
- gpointer newval)
-{
- gpointer result;
- gpointer *a = atomic;
- __asm __volatile ("casx [%4], %2, %0"
- : "=r" (result), "=m" (*a)
- : "r" (oldval), "m" (*a), "r" (a),
- "0" (newval));
- return result != 0;
-}
-# else /* What's that */
-# error "Your system has an unsupported pointer size"
-# endif /* GLIB_SIZEOF_VOID_P */
-static inline gint32
-g_atomic_int_exchange_and_add (gint32 *atomic,
- gint32 val)
-{
- gint32 result;
- do
- result = *atomic;
- while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
- return result;
-}
-
-static inline void
-g_atomic_int_add (gint32 *atomic,
- gint32 val)
-{
- g_atomic_int_exchange_and_add (atomic, val);
-}
-
-# define G_ATOMIC_MEMORY_BARRIER() \
- __asm __volatile ("membar #LoadLoad | #LoadStore" \
- " | #StoreLoad | #StoreStore" : : : "memory")
-
-# elif defined(G_ATOMIC_INLINED_IMPLEMENTATION_ALPHA)
-/* Adapted from CVS version 1.3 of glibc's sysdeps/alpha/bits/atomic.h
- */
-static inline gboolean
-g_atomic_int_compare_and_exchange (gint32 *atomic,
- gint32 oldval,
- gint32 newval)
-{
- gint32 result;
- gint32 prev;
- __asm__ __volatile__ (
- " mb\n"
- "1: ldl_l %0,%2\n"
- " cmpeq %0,%3,%1\n"
- " beq %1,2f\n"
- " mov %4,%1\n"
- " stl_c %1,%2\n"
- " beq %1,1b\n"
- " mb\n"
- "2:"
- : "=&r" (prev),
- "=&r" (result)
- : "m" (*atomic),
- "Ir" ((gint64)oldval),
- "Ir" (newval)
- : "memory");
- return result != 0;
-}
-# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
-static inline gboolean
-g_atomic_pointer_compare_and_exchange (gpointer *atomic,
- gpointer oldval,
- gpointer newval)
-{
- gint32 result;
- gpointer prev;
- __asm__ __volatile__ (
- " mb\n"
- "1: ldl_l %0,%2\n"
- " cmpeq %0,%3,%1\n"
- " beq %1,2f\n"
- " mov %4,%1\n"
- " stl_c %1,%2\n"
- " beq %1,1b\n"
- " mb\n"
- "2:"
- : "=&r" (prev),
- "=&r" (result)
- : "m" (*atomic),
- "Ir" ((gint64)oldval),
- "Ir" (newval)
- : "memory");
- return result != 0;
-}
-# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
-static inline gboolean
-g_atomic_pointer_compare_and_exchange (gpointer *atomic,
- gpointer oldval,
- gpointer newval)
-{
- gint32 result;
- gpointer prev;
- __asm__ __volatile__ (
- " mb\n"
- "1: ldq_l %0,%2\n"
- " cmpeq %0,%3,%1\n"
- " beq %1,2f\n"
- " mov %4,%1\n"
- " stq_c %1,%2\n"
- " beq %1,1b\n"
- " mb\n"
- "2:"
- : "=&r" (prev),
- "=&r" (result)
- : "m" (*atomic),
- "Ir" ((gint64)oldval),
- "Ir" (newval)
- : "memory");
- return result != 0;
-}
-# else /* What's that */
-# error "Your system has an unsupported pointer size"
-# endif /* GLIB_SIZEOF_VOID_P */
-static inline gint32
-g_atomic_int_exchange_and_add (gint32 *atomic,
- gint32 val)
-{
- gint32 result;
- do
- result = *atomic;
- while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
-
- return result;
-}
-
-static inline void
-g_atomic_int_add (gint32 *atomic,
- gint32 val)
-{
- g_atomic_int_exchange_and_add (atomic, val);
-}
-
-# define G_ATOMIC_MEMORY_BARRIER() __asm ("mb" : : : "memory")
-
-# elif defined(G_ATOMIC_INLINED_IMPLEMENTATION_X86_64)
-/* Adapted from CVS version 1.9 of glibc's sysdeps/x86_64/bits/atomic.h
- */
-static inline gint32
-g_atomic_int_exchange_and_add (gint32 *atomic,
- gint32 val)
-{
- gint32 result;
-
- __asm__ __volatile__ ("lock; xaddl %0,%1"
- : "=r" (result), "=m" (*atomic)
- : "0" (val), "m" (*atomic));
- return result;
-}
-
-static inline void
-g_atomic_int_add (gint32 *atomic,
- gint32 val)
-{
- __asm__ __volatile__ ("lock; addl %1,%0"
- : "=m" (*atomic)
- : "ir" (val), "m" (*atomic));
-}
-
-static inline gboolean
-g_atomic_int_compare_and_exchange (gint32 *atomic,
- gint32 oldval,
- gint32 newval)
-{
- gint32 result;
-
- __asm __volatile ("lock; cmpxchgl %2, %1"
- : "=a" (result), "=m" (*atomic)
- : "r" (newval), "m" (*atomic), "0" (oldval));
-
- return result == oldval;
-}
-
-static inline gboolean
-g_atomic_pointer_compare_and_exchange (gpointer *atomic,
- gpointer oldval,
- gpointer newval)
-{
- gpointer result;
-
- __asm __volatile ("lock; cmpxchgq %q2, %1"
- : "=a" (result), "=m" (*atomic)
- : "r" (newval), "m" (*atomic), "0" (oldval));
-
- return result == oldval;
-}
-
-# define G_ATOMIC_MEMORY_BARRIER() /* Not needed */
-
-# elif defined(G_ATOMIC_INLINED_IMPLEMENTATION_POWERPC)
-/* Adapted from CVS version 1.12 of glibc's sysdeps/powerpc/bits/atomic.h
- * and CVS version 1.3 of glibc's sysdeps/powerpc/powerpc32/bits/atomic.h
- * and CVS version 1.2 of glibc's sysdeps/powerpc/powerpc64/bits/atomic.h
- */
-static inline gint32
-g_atomic_int_exchange_and_add (gint32 *atomic,
- gint32 val)
-{
- gint32 result, temp;
- __asm __volatile ("1: lwarx %0,0,%3\n"
- " add %1,%0,%4\n"
- " stwcx. %1,0,%3\n"
- " bne- 1b"
- : "=&b" (result), "=&r" (temp), "=m" (*atomic)
- : "b" (atomic), "r" (val), "2" (*atomic)
- : "cr0", "memory");
- return result;
-}
-
-static inline void
-g_atomic_int_add (gint32 *atomic,
- gint32 val)
-{
- g_atomic_int_exchange_and_add (atomic, val);
-}
-
-# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
-static inline gboolean
-g_atomic_int_compare_and_exchange (gint32 *atomic,
- gint32 oldval,
- gint32 newval)
-{
- gint32 result;
- __asm __volatile ("sync\n"
- "1: lwarx %0,0,%1\n"
- " subf. %0,%2,%0\n"
- " bne 2f\n"
- " stwcx. %3,0,%1\n"
- " bne- 1b\n"
- "2: isync"
- : "=&r" (result)
- : "b" (atomic), "r" (oldval), "r" (newval)
- : "cr0", "memory");
- return result == 0;
-}
-
-static inline gboolean
-g_atomic_pointer_compare_and_exchange (gpointer *atomic,
- gpointer oldval,
- gpointer newval)
-{
- gpointer result;
- __asm __volatile ("sync\n"
- "1: lwarx %0,0,%1\n"
- " subf. %0,%2,%0\n"
- " bne 2f\n"
- " stwcx. %3,0,%1\n"
- " bne- 1b\n"
- "2: isync"
- : "=&r" (result)
- : "b" (atomic), "r" (oldval), "r" (newval)
- : "cr0", "memory");
- return result == 0;
-}
-# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
-static inline gboolean
-g_atomic_int_compare_and_exchange (gint32 *atomic,
- gint32 oldval,
- gint32 newval)
-{
- __asm __volatile ("sync\n"
- "1: lwarx %0,0,%1\n"
- " extsw %0,%0\n"
- " subf. %0,%2,%0\n"
- " bne 2f\n"
- " stwcx. %3,0,%1\n"
- " bne- 1b\n"
- "2: isync"
- : "=&r" (result)
- : "b" (atomic), "r" (oldval), "r" (newval)
- : "cr0", "memory");
- return result == 0;
-}
-
-static inline gboolean
-g_atomic_pointer_compare_and_exchange (gpointer *atomic,
- gpointer oldval,
- gpointer newval)
-{
- gpointer result;
- __asm __volatile ("sync\n"
- "1: ldarx %0,0,%1\n"
- " subf. %0,%2,%0\n"
- " bne 2f\n"
- " stdcx. %3,0,%1\n"
- " bne- 1b\n"
- "2: isync"
- : "=&r" (result)
- : "b" (atomic), "r" (oldval), "r" (newval)
- : "cr0", "memory");
- return result == 0;
-}
-# else /* What's that */
-# error "Your system has an unsupported pointer size"
-# endif /* GLIB_SIZEOF_VOID_P */
-
-# define G_ATOMIC_MEMORY_BARRIER() __asm ("sync" : : : "memory")
-
-# elif defined(G_ATOMIC_INLINED_IMPLEMENTATION_IA64)
-/* Adapted from CVS version 1.8 of glibc's sysdeps/ia64/bits/atomic.h
- */
-static inline gint32
-g_atomic_int_exchange_and_add (gint32 *atomic,
- gint32 val)
-{
- return __sync_fetch_and_add_si (atomic, val);
-}
-
-static inline void
-g_atomic_int_add (gint32 *atomic,
- gint32 val)
-{
- __sync_fetch_and_add_si (atomic, val);
-}
-
-static inline gboolean
-g_atomic_int_compare_and_exchange (gint32 *atomic,
- gint32 oldval,
- gint32 newval)
-{
- return __sync_bool_compare_and_exchange_si (atomic, oldval, newval);
-}
+#ifndef __G_ATOMIC_H__
+#define __G_ATOMIC_H__
-static inline gboolean
-g_atomic_pointer_compare_and_exchange (gpointer *atomic,
- gpointer oldval,
- gpointer newval)
-{
- return __sync_bool_compare_and_exchange_di ((long *)atomic,
- (long)oldval, (long)newval);
-}
+#if !defined (__GLIB_H_INSIDE__) && !defined (GLIB_COMPILATION)
+#error "Only <glib.h> can be included directly."
+#endif
-# define G_ATOMIC_MEMORY_BARRIER() __sync_synchronize ()
+#include <glib/gtypes.h>
-# else /* !G_ATOMIC_INLINED_IMPLEMENTATION_... */
-# define G_ATOMIC_USE_FALLBACK_IMPLEMENTATION
-# endif /* G_ATOMIC_INLINED_IMPLEMENTATION_... */
-# else /* !__GNU__ */
-# define G_ATOMIC_USE_FALLBACK_IMPLEMENTATION
-# endif /* __GNUC__ */
-#else /* !G_THREADS_ENABLED */
-gint32 g_atomic_int_exchange_and_add (gint32 *atomic, gint32 val);
-# define g_atomic_int_add(atomic, val) (void)(*(atomic) += (val))
-# define g_atomic_int_compare_and_exchange(atomic, oldval, newval) \
- (*(atomic) == (oldval) ? (*(atomic) = (newval), TRUE) : FALSE)
-# define g_atomic_pointer_compare_and_exchange(atomic, oldval, newval) \
- (*(atomic) == (oldval) ? (*(atomic) = (newval), TRUE) : FALSE)
-# define g_atomic_int_get(atomic) (*(atomic))
-# define g_atomic_pointer_get(atomic) (*(atomic))
-#endif /* G_THREADS_ENABLED */
+G_BEGIN_DECLS
-#ifdef G_ATOMIC_USE_FALLBACK_IMPLEMENTATION
-# define g_atomic_int_exchange_and_add \
- g_atomic_int_exchange_and_add_fallback
-# define g_atomic_int_add \
- g_atomic_int_add_fallback
-# define g_atomic_int_compare_and_exchange \
- g_atomic_int_compare_and_exchange_fallback
-# define g_atomic_pointer_compare_and_exchange \
- g_atomic_pointer_compare_and_exchange_fallback
-# define g_atomic_int_get \
- g_atomic_int_get_fallback
-# define g_atomic_pointer_get \
- g_atomic_pointer_get_fallback
-#else /* !G_ATOMIC_USE_FALLBACK_IMPLEMENTATION */
-static inline gint32
-g_atomic_int_get (gint32 *atomic)
-{
- gint32 result = *atomic;
- G_ATOMIC_MEMORY_BARRIER ();
- return result;
-}
+GLIB_AVAILABLE_IN_ALL
+gint g_atomic_int_get (const volatile gint *atomic);
+GLIB_AVAILABLE_IN_ALL
+void g_atomic_int_set (volatile gint *atomic,
+ gint newval);
+GLIB_AVAILABLE_IN_ALL
+void g_atomic_int_inc (volatile gint *atomic);
+GLIB_AVAILABLE_IN_ALL
+gboolean g_atomic_int_dec_and_test (volatile gint *atomic);
+GLIB_AVAILABLE_IN_ALL
+gboolean g_atomic_int_compare_and_exchange (volatile gint *atomic,
+ gint oldval,
+ gint newval);
+GLIB_AVAILABLE_IN_ALL
+gint g_atomic_int_add (volatile gint *atomic,
+ gint val);
+GLIB_AVAILABLE_IN_2_30
+guint g_atomic_int_and (volatile guint *atomic,
+ guint val);
+GLIB_AVAILABLE_IN_2_30
+guint g_atomic_int_or (volatile guint *atomic,
+ guint val);
+GLIB_AVAILABLE_IN_2_30
+guint g_atomic_int_xor (volatile guint *atomic,
+ guint val);
+
+GLIB_AVAILABLE_IN_ALL
+gpointer g_atomic_pointer_get (const volatile void *atomic);
+GLIB_AVAILABLE_IN_ALL
+void g_atomic_pointer_set (volatile void *atomic,
+ gpointer newval);
+GLIB_AVAILABLE_IN_ALL
+gboolean g_atomic_pointer_compare_and_exchange (volatile void *atomic,
+ gpointer oldval,
+ gpointer newval);
+GLIB_AVAILABLE_IN_ALL
+gssize g_atomic_pointer_add (volatile void *atomic,
+ gssize val);
+GLIB_AVAILABLE_IN_2_30
+gsize g_atomic_pointer_and (volatile void *atomic,
+ gsize val);
+GLIB_AVAILABLE_IN_2_30
+gsize g_atomic_pointer_or (volatile void *atomic,
+ gsize val);
+GLIB_AVAILABLE_IN_2_30
+gsize g_atomic_pointer_xor (volatile void *atomic,
+ gsize val);
+
+GLIB_DEPRECATED_IN_2_30_FOR(g_atomic_int_add)
+gint g_atomic_int_exchange_and_add (volatile gint *atomic,
+ gint val);
-static inline gpointer
-g_atomic_pointer_get (gpointer *atomic)
-{
- gpointer result = *atomic;
- G_ATOMIC_MEMORY_BARRIER ();
- return result;
-}
-#endif /* G_ATOMIC_USE_FALLBACK_IMPLEMENTATION */
+G_END_DECLS
-#define g_atomic_int_inc(atomic) (g_atomic_int_add ((atomic), 1))
-#define g_atomic_int_dec_and_test(atomic) \
- (g_atomic_int_exchange_and_add ((atomic), -1) == 1)
+#if defined(G_ATOMIC_LOCK_FREE) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+
+/* We prefer the new C11-style atomic extension of GCC if available */
+#if defined(__ATOMIC_SEQ_CST) && !defined(__clang__)
+
+#define g_atomic_int_get(atomic) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
+ (void) (0 ? *(atomic) ^ *(atomic) : 0); \
+ (gint) __atomic_load_4 ((atomic), __ATOMIC_SEQ_CST); \
+ }))
+#define g_atomic_int_set(atomic, newval) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
+ (void) (0 ? *(atomic) ^ (newval) : 0); \
+ __atomic_store_4 ((atomic), (newval), __ATOMIC_SEQ_CST); \
+ }))
+
+#if GLIB_SIZEOF_VOID_P == 8
+
+#define g_atomic_pointer_get(atomic) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
+ (gpointer) __atomic_load_8 ((atomic), __ATOMIC_SEQ_CST); \
+ }))
+#define g_atomic_pointer_set(atomic, newval) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
+ (void) (0 ? (gpointer) *(atomic) : 0); \
+ __atomic_store_8 ((atomic), (gsize) (newval), __ATOMIC_SEQ_CST); \
+ }))
+
+#else /* GLIB_SIZEOF_VOID_P == 8 */
+
+#define g_atomic_pointer_get(atomic) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
+ (gpointer) __atomic_load_4 ((atomic), __ATOMIC_SEQ_CST); \
+ }))
+#define g_atomic_pointer_set(atomic, newval) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
+ (void) (0 ? (gpointer) *(atomic) : 0); \
+ __atomic_store_4 ((atomic), (gsize) (newval), __ATOMIC_SEQ_CST); \
+ }))
+
+#endif /* GLIB_SIZEOF_VOID_P == 8 */
+
+#else /* defined(__ATOMIC_SEQ_CST) */
+
+#define g_atomic_int_get(atomic) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
+ (void) (0 ? *(atomic) ^ *(atomic) : 0); \
+ __sync_synchronize (); \
+ (gint) *(atomic); \
+ }))
+#define g_atomic_int_set(atomic, newval) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
+ (void) (0 ? *(atomic) ^ (newval) : 0); \
+ *(atomic) = (newval); \
+ __sync_synchronize (); \
+ }))
+#define g_atomic_pointer_get(atomic) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
+ __sync_synchronize (); \
+ (gpointer) *(atomic); \
+ }))
+#define g_atomic_pointer_set(atomic, newval) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
+ (void) (0 ? (gpointer) *(atomic) : 0); \
+ *(atomic) = (__typeof__ (*(atomic))) (gsize) (newval); \
+ __sync_synchronize (); \
+ }))
+
+#endif /* !defined(__ATOMIC_SEQ_CST) */
+
+#define g_atomic_int_inc(atomic) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
+ (void) (0 ? *(atomic) ^ *(atomic) : 0); \
+ (void) __sync_fetch_and_add ((atomic), 1); \
+ }))
+#define g_atomic_int_dec_and_test(atomic) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
+ (void) (0 ? *(atomic) ^ *(atomic) : 0); \
+ __sync_fetch_and_sub ((atomic), 1) == 1; \
+ }))
+#define g_atomic_int_compare_and_exchange(atomic, oldval, newval) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
+ (void) (0 ? *(atomic) ^ (newval) ^ (oldval) : 0); \
+ (gboolean) __sync_bool_compare_and_swap ((atomic), (oldval), (newval)); \
+ }))
+#define g_atomic_int_add(atomic, val) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
+ (void) (0 ? *(atomic) ^ (val) : 0); \
+ (gint) __sync_fetch_and_add ((atomic), (val)); \
+ }))
+#define g_atomic_int_and(atomic, val) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
+ (void) (0 ? *(atomic) ^ (val) : 0); \
+ (guint) __sync_fetch_and_and ((atomic), (val)); \
+ }))
+#define g_atomic_int_or(atomic, val) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
+ (void) (0 ? *(atomic) ^ (val) : 0); \
+ (guint) __sync_fetch_and_or ((atomic), (val)); \
+ }))
+#define g_atomic_int_xor(atomic, val) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
+ (void) (0 ? *(atomic) ^ (val) : 0); \
+ (guint) __sync_fetch_and_xor ((atomic), (val)); \
+ }))
+
+#define g_atomic_pointer_compare_and_exchange(atomic, oldval, newval) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
+ (void) (0 ? (gpointer) *(atomic) : 0); \
+ (gboolean) __sync_bool_compare_and_swap ((atomic), (oldval), (newval)); \
+ }))
+#define g_atomic_pointer_add(atomic, val) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
+ (void) (0 ? (gpointer) *(atomic) : 0); \
+ (void) (0 ? (val) ^ (val) : 0); \
+ (gssize) __sync_fetch_and_add ((atomic), (val)); \
+ }))
+#define g_atomic_pointer_and(atomic, val) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
+ (void) (0 ? (gpointer) *(atomic) : 0); \
+ (void) (0 ? (val) ^ (val) : 0); \
+ (gsize) __sync_fetch_and_and ((atomic), (val)); \
+ }))
+#define g_atomic_pointer_or(atomic, val) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
+ (void) (0 ? (gpointer) *(atomic) : 0); \
+ (void) (0 ? (val) ^ (val) : 0); \
+ (gsize) __sync_fetch_and_or ((atomic), (val)); \
+ }))
+#define g_atomic_pointer_xor(atomic, val) \
+ (G_GNUC_EXTENSION ({ \
+ G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
+ (void) (0 ? (gpointer) *(atomic) : 0); \
+ (void) (0 ? (val) ^ (val) : 0); \
+ (gsize) __sync_fetch_and_xor ((atomic), (val)); \
+ }))
+
+#else /* defined(G_ATOMIC_LOCK_FREE) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) */
+
+#define g_atomic_int_get(atomic) \
+ (g_atomic_int_get ((gint *) (atomic)))
+#define g_atomic_int_set(atomic, newval) \
+ (g_atomic_int_set ((gint *) (atomic), (gint) (newval)))
+#define g_atomic_int_compare_and_exchange(atomic, oldval, newval) \
+ (g_atomic_int_compare_and_exchange ((gint *) (atomic), (oldval), (newval)))
+#define g_atomic_int_add(atomic, val) \
+ (g_atomic_int_add ((gint *) (atomic), (val)))
+#define g_atomic_int_and(atomic, val) \
+ (g_atomic_int_and ((guint *) (atomic), (val)))
+#define g_atomic_int_or(atomic, val) \
+ (g_atomic_int_or ((guint *) (atomic), (val)))
+#define g_atomic_int_xor(atomic, val) \
+ (g_atomic_int_xor ((guint *) (atomic), (val)))
+#define g_atomic_int_inc(atomic) \
+ (g_atomic_int_inc ((gint *) (atomic)))
+#define g_atomic_int_dec_and_test(atomic) \
+ (g_atomic_int_dec_and_test ((gint *) (atomic)))
+
+#define g_atomic_pointer_get(atomic) \
+ (g_atomic_pointer_get (atomic))
+#define g_atomic_pointer_set(atomic, newval) \
+ (g_atomic_pointer_set ((atomic), (gpointer) (newval)))
+#define g_atomic_pointer_compare_and_exchange(atomic, oldval, newval) \
+ (g_atomic_pointer_compare_and_exchange ((atomic), (gpointer) (oldval), (gpointer) (newval)))
+#define g_atomic_pointer_add(atomic, val) \
+ (g_atomic_pointer_add ((atomic), (gssize) (val)))
+#define g_atomic_pointer_and(atomic, val) \
+ (g_atomic_pointer_and ((atomic), (gsize) (val)))
+#define g_atomic_pointer_or(atomic, val) \
+ (g_atomic_pointer_or ((atomic), (gsize) (val)))
+#define g_atomic_pointer_xor(atomic, val) \
+ (g_atomic_pointer_xor ((atomic), (gsize) (val)))
+
+#endif /* defined(G_ATOMIC_LOCK_FREE) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) */
-G_END_DECLS
-
#endif /* __G_ATOMIC_H__ */