X-Git-Url: http://review.tizen.org/git/?a=blobdiff_plain;f=glib%2Fgatomic.c;h=30a5c5d51f22af55eeb107a2d7ebd15eecb24695;hb=2a53b4d0e2c98a14aedf31e38f0ad1fb2e8fe26f;hp=afc532914cdf92dccc1a850571eba2e85bca73d8;hpb=3e847a090cfd8495add631d43388c461b1a85716;p=platform%2Fupstream%2Fglib.git

diff --git a/glib/gatomic.c b/glib/gatomic.c
index afc5329..30a5c5d 100644
--- a/glib/gatomic.c
+++ b/glib/gatomic.c
@@ -1,705 +1,917 @@
-/* GLIB - Library of useful routines for C programming
- * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+/*
+ * Copyright © 2011 Ryan Lortie
 *
- * g_atomic_*: atomic operations.
- * Copyright (C) 2003 Sebastian Wilhelmi
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * licence, or (at your option) any later version.
 *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * This library is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Ryan Lortie <desrt@desrt.ca>
 */
- 
+
 #include "config.h"
-#include "glib.h"
-#include "gthreadinit.h"
-#include "galias.h"
+#include "gatomic.h"
+
+/**
+ * SECTION:atomic_operations
+ * @title: Atomic Operations
+ * @short_description: basic atomic integer and pointer operations
+ * @see_also: #GMutex
+ *
+ * The following is a collection of compiler macros to provide atomic
+ * access to integer and pointer-sized values.
+ *
+ * The macros that have 'int' in the name will operate on pointers to
+ * #gint and #guint. The macros with 'pointer' in the name will operate
+ * on pointers to any pointer-sized value, including #gsize. There is
+ * no support for 64bit operations on platforms with 32bit pointers
+ * because it is not generally possible to perform these operations
+ * atomically.
+ *
+ * The get, set and exchange operations for integers and pointers
+ * nominally operate on #gint and #gpointer, respectively. Of the
+ * arithmetic operations, the 'add' operation operates on (and returns)
+ * signed integer values (#gint and #gssize) and the 'and', 'or', and
+ * 'xor' operations operate on (and return) unsigned integer values
+ * (#guint and #gsize).
+ *
+ * All of the operations act as a full compiler and (where appropriate)
+ * hardware memory barrier. Acquire and release or producer and
+ * consumer barrier semantics are not available through this API.
+ *
+ * It is very important that all accesses to a particular integer or
+ * pointer be performed using only this API and that different sizes of
+ * operation are not mixed or used on overlapping memory regions. Never
+ * read or assign directly from or to a value -- always use this API.
+ *
+ * For simple reference counting purposes you should use
+ * g_atomic_int_inc() and g_atomic_int_dec_and_test(). Other uses that
+ * fall outside of simple reference counting patterns are prone to
+ * subtle bugs and occasionally undefined behaviour. It is also worth
+ * noting that since all of these operations require global
+ * synchronisation of the entire machine, they can be quite slow. In
+ * the case of performing multiple atomic operations it can often be
+ * faster to simply acquire a mutex lock around the critical area,
+ * perform the operations normally and then release the lock.
+ **/
+
+/**
+ * G_ATOMIC_LOCK_FREE:
+ *
+ * This macro is defined if the atomic operations of GLib are
+ * implemented using real hardware atomic operations. This means that
+ * the GLib atomic API can be used between processes and safely mixed
+ * with other (hardware) atomic APIs.
+ *
+ * If this macro is not defined, the atomic operations may be
+ * emulated using a mutex. In that case, the GLib atomic operations are
+ * only atomic relative to themselves and within a single process.
+ **/
+
+/* NOTE CAREFULLY:
+ *
+ * This file is the lowest-level part of GLib.
+ *
+ * Other lowlevel parts of GLib (threads, slice allocator, g_malloc,
+ * messages, etc) call into these functions and macros to get work done.
+ *
+ * As such, these functions can not call back into any part of GLib
+ * without risking recursion.
+ */
+
+#ifdef G_ATOMIC_LOCK_FREE
-#if defined (__GNUC__)
-# if defined (G_ATOMIC_I486)
-/* Adapted from CVS version 1.10 of glibc's sysdeps/i386/i486/bits/atomic.h
+/* if G_ATOMIC_LOCK_FREE was defined by ./configure then we MUST
+ * implement the atomic operations in a lock-free manner.
 */
+
+#if defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+/**
+ * g_atomic_int_get:
+ * @atomic: a pointer to a #gint or #guint
+ *
+ * Gets the current value of @atomic.
+ *
+ * This call acts as a full compiler and hardware
+ * memory barrier (before the get).
+ *
+ * Returns: the value of the integer
+ *
+ * Since: 2.4
+ **/
 gint
-g_atomic_int_exchange_and_add (volatile gint *atomic,
-                               gint           val)
+(g_atomic_int_get) (const volatile gint *atomic)
 {
-  gint result;
+  return g_atomic_int_get (atomic);
+}
-  __asm__ __volatile__ ("lock; xaddl %0,%1"
-                        : "=r" (result), "=m" (*atomic)
-                        : "0" (val), "m" (*atomic));
-  return result;
+/**
+ * g_atomic_int_set:
+ * @atomic: a pointer to a #gint or #guint
+ * @newval: a new value to store
+ *
+ * Sets the value of @atomic to @newval.
+ *
+ * This call acts as a full compiler and hardware
+ * memory barrier (after the set).
+ *
+ * Since: 2.4
+ */
+void
+(g_atomic_int_set) (volatile gint *atomic,
+                    gint           newval)
+{
+  g_atomic_int_set (atomic, newval);
 }
- 
+/**
+ * g_atomic_int_inc:
+ * @atomic: a pointer to a #gint or #guint
+ *
+ * Increments the value of @atomic by 1.
+ *
+ * Think of this operation as an atomic version of `{ *atomic += 1; }`.
+ *
+ * This call acts as a full compiler and hardware memory barrier.
+ *
+ * Since: 2.4
+ **/
 void
-g_atomic_int_add (volatile gint *atomic,
-                  gint           val)
+(g_atomic_int_inc) (volatile gint *atomic)
 {
-  __asm__ __volatile__ ("lock; addl %1,%0"
-                        : "=m" (*atomic)
-                        : "ir" (val), "m" (*atomic));
+  g_atomic_int_inc (atomic);
 }
+/**
+ * g_atomic_int_dec_and_test:
+ * @atomic: a pointer to a #gint or #guint
+ *
+ * Decrements the value of @atomic by 1.
+ *
+ * Think of this operation as an atomic version of
+ * `{ *atomic -= 1; return (*atomic == 0); }`.
+ *
+ * This call acts as a full compiler and hardware memory barrier.
+ * + * Returns: %TRUE if the resultant value is zero + * + * Since: 2.4 + **/ gboolean -g_atomic_int_compare_and_exchange (volatile gint *atomic, - gint oldval, - gint newval) +(g_atomic_int_dec_and_test) (volatile gint *atomic) { - gint result; - - __asm__ __volatile__ ("lock; cmpxchgl %2, %1" - : "=a" (result), "=m" (*atomic) - : "r" (newval), "m" (*atomic), "0" (oldval)); + return g_atomic_int_dec_and_test (atomic); +} - return result == oldval; +/** + * g_atomic_int_compare_and_exchange: + * @atomic: a pointer to a #gint or #guint + * @oldval: the value to compare with + * @newval: the value to conditionally replace with + * + * Compares @atomic to @oldval and, if equal, sets it to @newval. + * If @atomic was not equal to @oldval then no change occurs. + * + * This compare and exchange is done atomically. + * + * Think of this operation as an atomic version of + * `{ if (*atomic == oldval) { *atomic = newval; return TRUE; } else return FALSE; }`. + * + * This call acts as a full compiler and hardware memory barrier. + * + * Returns: %TRUE if the exchange took place + * + * Since: 2.4 + **/ +gboolean +(g_atomic_int_compare_and_exchange) (volatile gint *atomic, + gint oldval, + gint newval) +{ + return g_atomic_int_compare_and_exchange (atomic, oldval, newval); } -/* The same code as above, as on i386 gpointer is 32 bit as well. - * Duplicating the code here seems more natural than casting the - * arguments and calling the former function */ +/** + * g_atomic_int_add: + * @atomic: a pointer to a #gint or #guint + * @val: the value to add + * + * Atomically adds @val to the value of @atomic. + * + * Think of this operation as an atomic version of + * `{ tmp = *atomic; *atomic += val; return tmp; }`. + * + * This call acts as a full compiler and hardware memory barrier. + * + * Before version 2.30, this function did not return a value + * (but g_atomic_int_exchange_and_add() did, and had the same meaning). + * + * Returns: the value of @atomic before the add, signed + * + * Since: 2.4 + **/ +gint +(g_atomic_int_add) (volatile gint *atomic, + gint val) +{ + return g_atomic_int_add (atomic, val); +} -gboolean -g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, - gpointer oldval, - gpointer newval) +/** + * g_atomic_int_and: + * @atomic: a pointer to a #gint or #guint + * @val: the value to 'and' + * + * Performs an atomic bitwise 'and' of the value of @atomic and @val, + * storing the result back in @atomic. + * + * This call acts as a full compiler and hardware memory barrier. + * + * Think of this operation as an atomic version of + * `{ tmp = *atomic; *atomic &= val; return tmp; }`. + * + * Returns: the value of @atomic before the operation, unsigned + * + * Since: 2.30 + **/ +guint +(g_atomic_int_and) (volatile guint *atomic, + guint val) { - gpointer result; - - __asm__ __volatile__ ("lock; cmpxchgl %2, %1" - : "=a" (result), "=m" (*atomic) - : "r" (newval), "m" (*atomic), "0" (oldval)); + return g_atomic_int_and (atomic, val); +} - return result == oldval; +/** + * g_atomic_int_or: + * @atomic: a pointer to a #gint or #guint + * @val: the value to 'or' + * + * Performs an atomic bitwise 'or' of the value of @atomic and @val, + * storing the result back in @atomic. + * + * Think of this operation as an atomic version of + * `{ tmp = *atomic; *atomic |= val; return tmp; }`. + * + * This call acts as a full compiler and hardware memory barrier. 
+ * + * Returns: the value of @atomic before the operation, unsigned + * + * Since: 2.30 + **/ +guint +(g_atomic_int_or) (volatile guint *atomic, + guint val) +{ + return g_atomic_int_or (atomic, val); } -# elif defined (G_ATOMIC_SPARCV9) -/* Adapted from CVS version 1.3 of glibc's sysdeps/sparc/sparc64/bits/atomic.h - */ -# define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval) \ - ({ \ - gint __result; \ - __asm__ __volatile__ ("cas [%4], %2, %0" \ - : "=r" (__result), "=m" (*(atomic)) \ - : "r" (oldval), "m" (*(atomic)), "r" (atomic),\ - "0" (newval)); \ - __result == oldval; \ - }) - -# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */ -gboolean -g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, - gpointer oldval, - gpointer newval) -{ - gpointer result; - __asm__ __volatile__ ("cas [%4], %2, %0" - : "=r" (result), "=m" (*atomic) - : "r" (oldval), "m" (*atomic), "r" (atomic), - "0" (newval)); - return result == oldval; -} -# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */ -gboolean -g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, - gpointer oldval, - gpointer newval) -{ - gpointer result; - gpointer *a = atomic; - __asm__ __volatile__ ("casx [%4], %2, %0" - : "=r" (result), "=m" (*a) - : "r" (oldval), "m" (*a), "r" (a), - "0" (newval)); - return result == oldval; -} -# else /* What's that */ -# error "Your system has an unsupported pointer size" -# endif /* GLIB_SIZEOF_VOID_P */ -# define G_ATOMIC_MEMORY_BARRIER \ - __asm__ __volatile__ ("membar #LoadLoad | #LoadStore" \ - " | #StoreLoad | #StoreStore" : : : "memory") - -# elif defined (G_ATOMIC_ALPHA) -/* Adapted from CVS version 1.3 of glibc's sysdeps/alpha/bits/atomic.h - */ -# define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval) \ - ({ \ - gint __result; \ - gint __prev; \ - __asm__ __volatile__ ( \ - " mb\n" \ - "1: ldl_l %0,%2\n" \ - " cmpeq %0,%3,%1\n" \ - " beq %1,2f\n" \ - " mov %4,%1\n" \ - " stl_c %1,%2\n" \ - " beq %1,1b\n" \ - " mb\n" \ - "2:" \ - : "=&r" (__prev), \ - "=&r" (__result) \ - : "m" (*(atomic)), \ - "Ir" (oldval), \ - "Ir" (newval) \ - : "memory"); \ - __result != 0; \ - }) -# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */ -gboolean -g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, - gpointer oldval, - gpointer newval) -{ - gint result; - gpointer prev; - __asm__ __volatile__ ( - " mb\n" - "1: ldl_l %0,%2\n" - " cmpeq %0,%3,%1\n" - " beq %1,2f\n" - " mov %4,%1\n" - " stl_c %1,%2\n" - " beq %1,1b\n" - " mb\n" - "2:" - : "=&r" (prev), - "=&r" (result) - : "m" (*atomic), - "Ir" (oldval), - "Ir" (newval) - : "memory"); - return result != 0; -} -# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */ -gboolean -g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, - gpointer oldval, - gpointer newval) -{ - gint result; - gpointer prev; - __asm__ __volatile__ ( - " mb\n" - "1: ldq_l %0,%2\n" - " cmpeq %0,%3,%1\n" - " beq %1,2f\n" - " mov %4,%1\n" - " stq_c %1,%2\n" - " beq %1,1b\n" - " mb\n" - "2:" - : "=&r" (prev), - "=&r" (result) - : "m" (*atomic), - "Ir" (oldval), - "Ir" (newval) - : "memory"); - return result != 0; -} -# else /* What's that */ -# error "Your system has an unsupported pointer size" -# endif /* GLIB_SIZEOF_VOID_P */ -# define G_ATOMIC_MEMORY_BARRIER __asm__ ("mb" : : : "memory") -# elif defined (G_ATOMIC_X86_64) -/* Adapted from CVS version 1.9 of glibc's sysdeps/x86_64/bits/atomic.h - */ -gint -g_atomic_int_exchange_and_add (volatile gint *atomic, - gint val) +/** + * g_atomic_int_xor: + * @atomic: a pointer to a #gint or #guint + * @val: the 
value to 'xor' + * + * Performs an atomic bitwise 'xor' of the value of @atomic and @val, + * storing the result back in @atomic. + * + * Think of this operation as an atomic version of + * `{ tmp = *atomic; *atomic ^= val; return tmp; }`. + * + * This call acts as a full compiler and hardware memory barrier. + * + * Returns: the value of @atomic before the operation, unsigned + * + * Since: 2.30 + **/ +guint +(g_atomic_int_xor) (volatile guint *atomic, + guint val) { - gint result; + return g_atomic_int_xor (atomic, val); +} + - __asm__ __volatile__ ("lock; xaddl %0,%1" - : "=r" (result), "=m" (*atomic) - : "0" (val), "m" (*atomic)); - return result; +/** + * g_atomic_pointer_get: + * @atomic: a pointer to a #gpointer-sized value + * + * Gets the current value of @atomic. + * + * This call acts as a full compiler and hardware + * memory barrier (before the get). + * + * Returns: the value of the pointer + * + * Since: 2.4 + **/ +gpointer +(g_atomic_pointer_get) (const volatile void *atomic) +{ + return g_atomic_pointer_get ((const volatile gpointer *) atomic); } - + +/** + * g_atomic_pointer_set: + * @atomic: a pointer to a #gpointer-sized value + * @newval: a new value to store + * + * Sets the value of @atomic to @newval. + * + * This call acts as a full compiler and hardware + * memory barrier (after the set). + * + * Since: 2.4 + **/ void -g_atomic_int_add (volatile gint *atomic, - gint val) +(g_atomic_pointer_set) (volatile void *atomic, + gpointer newval) { - __asm__ __volatile__ ("lock; addl %1,%0" - : "=m" (*atomic) - : "ir" (val), "m" (*atomic)); + g_atomic_pointer_set ((volatile gpointer *) atomic, newval); } +/** + * g_atomic_pointer_compare_and_exchange: + * @atomic: a pointer to a #gpointer-sized value + * @oldval: the value to compare with + * @newval: the value to conditionally replace with + * + * Compares @atomic to @oldval and, if equal, sets it to @newval. + * If @atomic was not equal to @oldval then no change occurs. + * + * This compare and exchange is done atomically. + * + * Think of this operation as an atomic version of + * `{ if (*atomic == oldval) { *atomic = newval; return TRUE; } else return FALSE; }`. + * + * This call acts as a full compiler and hardware memory barrier. + * + * Returns: %TRUE if the exchange took place + * + * Since: 2.4 + **/ gboolean -g_atomic_int_compare_and_exchange (volatile gint *atomic, - gint oldval, - gint newval) +(g_atomic_pointer_compare_and_exchange) (volatile void *atomic, + gpointer oldval, + gpointer newval) { - gint result; - - __asm__ __volatile__ ("lock; cmpxchgl %2, %1" - : "=a" (result), "=m" (*atomic) - : "r" (newval), "m" (*atomic), "0" (oldval)); + return g_atomic_pointer_compare_and_exchange ((volatile gpointer *) atomic, + oldval, newval); +} - return result == oldval; +/** + * g_atomic_pointer_add: + * @atomic: a pointer to a #gpointer-sized value + * @val: the value to add + * + * Atomically adds @val to the value of @atomic. + * + * Think of this operation as an atomic version of + * `{ tmp = *atomic; *atomic += val; return tmp; }`. + * + * This call acts as a full compiler and hardware memory barrier. 
+ * + * Returns: the value of @atomic before the add, signed + * + * Since: 2.30 + **/ +gssize +(g_atomic_pointer_add) (volatile void *atomic, + gssize val) +{ + return g_atomic_pointer_add ((volatile gpointer *) atomic, val); } -gboolean -g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, - gpointer oldval, - gpointer newval) +/** + * g_atomic_pointer_and: + * @atomic: a pointer to a #gpointer-sized value + * @val: the value to 'and' + * + * Performs an atomic bitwise 'and' of the value of @atomic and @val, + * storing the result back in @atomic. + * + * Think of this operation as an atomic version of + * `{ tmp = *atomic; *atomic &= val; return tmp; }`. + * + * This call acts as a full compiler and hardware memory barrier. + * + * Returns: the value of @atomic before the operation, unsigned + * + * Since: 2.30 + **/ +gsize +(g_atomic_pointer_and) (volatile void *atomic, + gsize val) +{ + return g_atomic_pointer_and ((volatile gpointer *) atomic, val); +} + +/** + * g_atomic_pointer_or: + * @atomic: a pointer to a #gpointer-sized value + * @val: the value to 'or' + * + * Performs an atomic bitwise 'or' of the value of @atomic and @val, + * storing the result back in @atomic. + * + * Think of this operation as an atomic version of + * `{ tmp = *atomic; *atomic |= val; return tmp; }`. + * + * This call acts as a full compiler and hardware memory barrier. + * + * Returns: the value of @atomic before the operation, unsigned + * + * Since: 2.30 + **/ +gsize +(g_atomic_pointer_or) (volatile void *atomic, + gsize val) +{ + return g_atomic_pointer_or ((volatile gpointer *) atomic, val); +} + +/** + * g_atomic_pointer_xor: + * @atomic: a pointer to a #gpointer-sized value + * @val: the value to 'xor' + * + * Performs an atomic bitwise 'xor' of the value of @atomic and @val, + * storing the result back in @atomic. + * + * Think of this operation as an atomic version of + * `{ tmp = *atomic; *atomic ^= val; return tmp; }`. + * + * This call acts as a full compiler and hardware memory barrier. 
+ *
+ * Returns: the value of @atomic before the operation, unsigned
+ *
+ * Since: 2.30
+ **/
+gsize
+(g_atomic_pointer_xor) (volatile void *atomic,
+                        gsize          val)
 {
-  gpointer result;
-
-  __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
-                        : "=a" (result), "=m" (*atomic)
-                        : "r" (newval), "m" (*atomic), "0" (oldval));
+  return g_atomic_pointer_xor ((volatile gpointer *) atomic, val);
+}
+
+#elif defined (G_PLATFORM_WIN32)
+
+#include <windows.h>
+#if !defined(_M_AMD64) && !defined (_M_IA64) && !defined(_M_X64) && !(defined _MSC_VER && _MSC_VER <= 1200)
+#define InterlockedAnd _InterlockedAnd
+#define InterlockedOr _InterlockedOr
+#define InterlockedXor _InterlockedXor
+#endif
+
+#if !defined (_MSC_VER) || _MSC_VER <= 1200
+#include "gmessages.h"
+/* Inlined versions for older compiler */
+static LONG
+_gInterlockedAnd (volatile guint *atomic,
+                  guint           val)
+{
+  LONG i, j;
+
+  j = *atomic;
+  do {
+    i = j;
+    j = InterlockedCompareExchange(atomic, i & val, i);
+  } while (i != j);
-  return result == oldval;
+  return j;
 }
+#define InterlockedAnd(a,b) _gInterlockedAnd(a,b)
+static LONG
+_gInterlockedOr (volatile guint *atomic,
+                 guint           val)
+{
+  LONG i, j;
+
+  j = *atomic;
+  do {
+    i = j;
+    j = InterlockedCompareExchange(atomic, i | val, i);
+  } while (i != j);
-# elif defined (G_ATOMIC_POWERPC)
-/* Adapted from CVS version 1.16 of glibc's sysdeps/powerpc/bits/atomic.h
- * and CVS version 1.4 of glibc's sysdeps/powerpc/powerpc32/bits/atomic.h
- * and CVS version 1.7 of glibc's sysdeps/powerpc/powerpc64/bits/atomic.h
+  return j;
+}
+#define InterlockedOr(a,b) _gInterlockedOr(a,b)
+static LONG
+_gInterlockedXor (volatile guint *atomic,
+                  guint           val)
+{
+  LONG i, j;
+
+  j = *atomic;
+  do {
+    i = j;
+    j = InterlockedCompareExchange(atomic, i ^ val, i);
+  } while (i != j);
+
+  return j;
+}
+#define InterlockedXor(a,b) _gInterlockedXor(a,b)
+#endif
+
+/*
+ * http://msdn.microsoft.com/en-us/library/ms684122(v=vs.85).aspx
 */
-# ifdef __OPTIMIZE__
-/* Non-optimizing compile bails on the following two asm statements
- * for reasons unknown to the author */
 gint
-g_atomic_int_exchange_and_add (volatile gint *atomic,
-                               gint           val)
-{
-  gint result, temp;
-  __asm__ __volatile__ ("1:       lwarx   %0,0,%3\n"
-                        "         add     %1,%0,%4\n"
-                        "         stwcx.  %1,0,%3\n"
-                        "         bne-    1b"
-                        : "=&b" (result), "=&r" (temp), "=m" (*atomic)
-                        : "b" (atomic), "r" (val), "m" (*atomic)
-                        : "cr0", "memory");
-  return result;
-}
-
-/* The same as above, to save a function call repeated here */
 void
-g_atomic_int_add (volatile gint *atomic,
-                  gint           val)
-{
-  gint result, temp;
-  __asm__ __volatile__ ("1:       lwarx   %0,0,%3\n"
-                        "         add     %1,%0,%4\n"
-                        "         stwcx. 
%1,0,%3\n" - " bne- 1b" - : "=&b" (result), "=&r" (temp), "=m" (*atomic) - : "b" (atomic), "r" (val), "m" (*atomic) - : "cr0", "memory"); -} -# else /* !__OPTIMIZE__ */ -gint -g_atomic_int_exchange_and_add (volatile gint *atomic, - gint val) +(g_atomic_int_get) (const volatile gint *atomic) { - gint result; - do - result = *atomic; - while (!g_atomic_int_compare_and_exchange (atomic, result, result + val)); + MemoryBarrier (); + return *atomic; +} - return result; +void +(g_atomic_int_set) (volatile gint *atomic, + gint newval) +{ + *atomic = newval; + MemoryBarrier (); } - + void -g_atomic_int_add (volatile gint *atomic, - gint val) +(g_atomic_int_inc) (volatile gint *atomic) { - gint result; - do - result = *atomic; - while (!g_atomic_int_compare_and_exchange (atomic, result, result + val)); + InterlockedIncrement (atomic); } -# endif /* !__OPTIMIZE__ */ -# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */ gboolean -g_atomic_int_compare_and_exchange (volatile gint *atomic, - gint oldval, - gint newval) -{ - gint result; - __asm__ __volatile__ ("sync\n" - "1: lwarx %0,0,%1\n" - " subf. %0,%2,%0\n" - " bne 2f\n" - " stwcx. %3,0,%1\n" - " bne- 1b\n" - "2: isync" - : "=&r" (result) - : "b" (atomic), "r" (oldval), "r" (newval) - : "cr0", "memory"); - return result == 0; +(g_atomic_int_dec_and_test) (volatile gint *atomic) +{ + return InterlockedDecrement (atomic) == 0; } gboolean -g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, - gpointer oldval, - gpointer newval) -{ - gpointer result; - __asm__ __volatile__ ("sync\n" - "1: lwarx %0,0,%1\n" - " subf. %0,%2,%0\n" - " bne 2f\n" - " stwcx. %3,0,%1\n" - " bne- 1b\n" - "2: isync" - : "=&r" (result) - : "b" (atomic), "r" (oldval), "r" (newval) - : "cr0", "memory"); - return result == 0; -} -# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */ -gboolean -g_atomic_int_compare_and_exchange (volatile gint *atomic, - gint oldval, - gint newval) -{ - gpointer result; - __asm__ __volatile__ ("sync\n" - "1: lwarx %0,0,%1\n" - " extsw %0,%0\n" - " subf. %0,%2,%0\n" - " bne 2f\n" - " stwcx. %3,0,%1\n" - " bne- 1b\n" - "2: isync" - : "=&r" (result) - : "b" (atomic), "r" (oldval), "r" (newval) - : "cr0", "memory"); - return result == 0; +(g_atomic_int_compare_and_exchange) (volatile gint *atomic, + gint oldval, + gint newval) +{ + return InterlockedCompareExchange (atomic, newval, oldval) == oldval; } -gboolean -g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, - gpointer oldval, - gpointer newval) -{ - gpointer result; - __asm__ __volatile__ ("sync\n" - "1: ldarx %0,0,%1\n" - " subf. %0,%2,%0\n" - " bne 2f\n" - " stdcx. 
%3,0,%1\n" - " bne- 1b\n" - "2: isync" - : "=&r" (result) - : "b" (atomic), "r" (oldval), "r" (newval) - : "cr0", "memory"); - return result == 0; -} -# else /* What's that */ -# error "Your system has an unsupported pointer size" -# endif /* GLIB_SIZEOF_VOID_P */ - -# define G_ATOMIC_MEMORY_BARRIER __asm__ ("sync" : : : "memory") - -# elif defined (G_ATOMIC_IA64) -/* Adapted from CVS version 1.8 of glibc's sysdeps/ia64/bits/atomic.h - */ gint -g_atomic_int_exchange_and_add (volatile gint *atomic, - gint val) +(g_atomic_int_add) (volatile gint *atomic, + gint val) { - return __sync_fetch_and_add_si (atomic, val); + return InterlockedExchangeAdd (atomic, val); } - -void -g_atomic_int_add (gint *atomic, - gint val) + +guint +(g_atomic_int_and) (volatile guint *atomic, + guint val) { - __sync_fetch_and_add_si (atomic, val); + return InterlockedAnd (atomic, val); } -gboolean -g_atomic_int_compare_and_exchange (volatile gint *atomic, - gint oldval, - gint newval) +guint +(g_atomic_int_or) (volatile guint *atomic, + guint val) { - return __sync_bool_compare_and_swap_si (atomic, oldval, newval); + return InterlockedOr (atomic, val); } -gboolean -g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, - gpointer oldval, - gpointer newval) +guint +(g_atomic_int_xor) (volatile guint *atomic, + guint val) { - return __sync_bool_compare_and_swap_di ((long *)atomic, - (long)oldval, (long)newval); + return InterlockedXor (atomic, val); } -# define G_ATOMIC_MEMORY_BARRIER __sync_synchronize () -# elif defined (G_ATOMIC_S390) -/* Adapted from glibc's sysdeps/s390/bits/atomic.h - */ -# define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval) \ - ({ \ - gint __result = oldval; \ - __asm__ __volatile__ ("cs %0, %2, %1" \ - : "+d" (__result), "=Q" (*(atomic)) \ - : "d" (newval), "m" (*(atomic)) : "cc" ); \ - __result == oldval; \ - }) - -# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */ -gboolean -g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, - gpointer oldval, - gpointer newval) + +gpointer +(g_atomic_pointer_get) (const volatile void *atomic) +{ + const volatile gpointer *ptr = atomic; + + MemoryBarrier (); + return *ptr; +} + +void +(g_atomic_pointer_set) (volatile void *atomic, + gpointer newval) { - gpointer result = oldval; - __asm__ __volatile__ ("cs %0, %2, %1" - : "+d" (result), "=Q" (*(atomic)) - : "d" (newval), "m" (*(atomic)) : "cc" ); - return result == oldval; + volatile gpointer *ptr = atomic; + + *ptr = newval; + MemoryBarrier (); } -# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */ + gboolean -g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, - gpointer oldval, - gpointer newval) -{ - gpointer result = oldval; - gpointer *a = atomic; - __asm__ __volatile__ ("csg %0, %2, %1" - : "+d" (result), "=Q" (*a) - : "d" ((long)(newval)), "m" (*a) : "cc" ); - return result == oldval; -} -# else /* What's that */ -# error "Your system has an unsupported pointer size" -# endif /* GLIB_SIZEOF_VOID_P */ -# else /* !G_ATOMIC_IA64 */ -# define DEFINE_WITH_MUTEXES -# endif /* G_ATOMIC_IA64 */ -#else /* !__GNUC__ */ -# ifdef G_PLATFORM_WIN32 -# define DEFINE_WITH_WIN32_INTERLOCKED -# else -# define DEFINE_WITH_MUTEXES -# endif -#endif /* __GNUC__ */ - -#ifdef DEFINE_WITH_WIN32_INTERLOCKED -# include -gint32 -g_atomic_int_exchange_and_add (volatile gint32 *atomic, - gint32 val) +(g_atomic_pointer_compare_and_exchange) (volatile void *atomic, + gpointer oldval, + gpointer newval) +{ + return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval; +} + +gssize 
+(g_atomic_pointer_add) (volatile void *atomic, + gssize val) { +#if GLIB_SIZEOF_VOID_P == 8 + return InterlockedExchangeAdd64 (atomic, val); +#else return InterlockedExchangeAdd (atomic, val); +#endif } -void -g_atomic_int_add (volatile gint32 *atomic, - gint32 val) +gsize +(g_atomic_pointer_and) (volatile void *atomic, + gsize val) { - InterlockedExchangeAdd (atomic, val); +#if GLIB_SIZEOF_VOID_P == 8 + return InterlockedAnd64 (atomic, val); +#else + return InterlockedAnd (atomic, val); +#endif } -gboolean -g_atomic_int_compare_and_exchange (volatile gint32 *atomic, - gint32 oldval, - gint32 newval) +gsize +(g_atomic_pointer_or) (volatile void *atomic, + gsize val) { - return (guint32) InterlockedCompareExchange ((PVOID*)atomic, - (PVOID)newval, - (PVOID)oldval) == oldval; +#if GLIB_SIZEOF_VOID_P == 8 + return InterlockedOr64 (atomic, val); +#else + return InterlockedOr (atomic, val); +#endif } -gboolean -g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, - gpointer oldval, - gpointer newval) +gsize +(g_atomic_pointer_xor) (volatile void *atomic, + gsize val) { -# if GLIB_SIZEOF_VOID_P != 4 /* no 32-bit system */ -# error "InterlockedCompareExchangePointer needed" -# else - return InterlockedCompareExchange (atomic, newval, oldval) == oldval; -# endif +#if GLIB_SIZEOF_VOID_P == 8 + return InterlockedXor64 (atomic, val); +#else + return InterlockedXor (atomic, val); +#endif } -#endif /* DEFINE_WITH_WIN32_INTERLOCKED */ +#else + +/* This error occurs when ./configure decided that we should be capable + * of lock-free atomics but we find at compile-time that we are not. + */ +#error G_ATOMIC_LOCK_FREE defined, but incapable of lock-free atomics. + +#endif /* defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) */ + +#else /* G_ATOMIC_LOCK_FREE */ + +/* We are not permitted to call into any GLib functions from here, so we + * can not use GMutex. + * + * Fortunately, we already take care of the Windows case above, and all + * non-Windows platforms on which glib runs have pthreads. Use those. 
+ */
+#include <pthread.h>
-#ifdef DEFINE_WITH_MUTEXES
-/* We have to use the slow, but safe locking method */
-static GMutex *g_atomic_mutex;
+static pthread_mutex_t g_atomic_lock = PTHREAD_MUTEX_INITIALIZER;
 gint
-g_atomic_int_exchange_and_add (volatile gint *atomic,
-                               gint           val)
+(g_atomic_int_get) (const volatile gint *atomic)
 {
-  gint result;
-
-  g_mutex_lock (g_atomic_mutex);
-  result = *atomic;
-  *atomic += val;
-  g_mutex_unlock (g_atomic_mutex);
+  gint value;
-  return result;
+  pthread_mutex_lock (&g_atomic_lock);
+  value = *atomic;
+  pthread_mutex_unlock (&g_atomic_lock);
+
+  return value;
 }
+void
+(g_atomic_int_set) (volatile gint *atomic,
+                    gint           value)
+{
+  pthread_mutex_lock (&g_atomic_lock);
+  *atomic = value;
+  pthread_mutex_unlock (&g_atomic_lock);
+}
 void
-g_atomic_int_add (volatile gint *atomic,
-                  gint           val)
+(g_atomic_int_inc) (volatile gint *atomic)
 {
-  g_mutex_lock (g_atomic_mutex);
-  *atomic += val;
-  g_mutex_unlock (g_atomic_mutex);
+  pthread_mutex_lock (&g_atomic_lock);
+  (*atomic)++;
+  pthread_mutex_unlock (&g_atomic_lock);
 }
 gboolean
-g_atomic_int_compare_and_exchange (volatile gint *atomic,
-                                   gint           oldval,
-                                   gint           newval)
+(g_atomic_int_dec_and_test) (volatile gint *atomic)
 {
-  gboolean result;
-
-  g_mutex_lock (g_atomic_mutex);
-  if (*atomic == oldval)
-    {
-      result = TRUE;
-      *atomic = newval;
-    }
-  else
-    result = FALSE;
-  g_mutex_unlock (g_atomic_mutex);
+  gboolean is_zero;
-  return result;
+  pthread_mutex_lock (&g_atomic_lock);
+  is_zero = --(*atomic) == 0;
+  pthread_mutex_unlock (&g_atomic_lock);
+
+  return is_zero;
 }
 gboolean
-g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
-                                       gpointer           oldval,
-                                       gpointer           newval)
-{
-  gboolean result;
-
-  g_mutex_lock (g_atomic_mutex);
-  if (*atomic == oldval)
-    {
-      result = TRUE;
-      *atomic = newval;
-    }
-  else
-    result = FALSE;
-  g_mutex_unlock (g_atomic_mutex);
-
-  return result;
-}
-
-#ifdef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
+(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
+                                     gint           oldval,
+                                     gint           newval)
+{
+  gboolean success;
+
+  pthread_mutex_lock (&g_atomic_lock);
+
+  if ((success = (*atomic == oldval)))
+    *atomic = newval;
+
+  pthread_mutex_unlock (&g_atomic_lock);
+
+  return success;
+}
+
 gint
-g_atomic_int_get (volatile gint *atomic)
+(g_atomic_int_add) (volatile gint *atomic,
+                    gint           val)
 {
-  gint result;
+  gint oldval;
-  g_mutex_lock (g_atomic_mutex);
-  result = *atomic;
-  g_mutex_unlock (g_atomic_mutex);
+  pthread_mutex_lock (&g_atomic_lock);
+  oldval = *atomic;
+  *atomic = oldval + val;
+  pthread_mutex_unlock (&g_atomic_lock);
-  return result;
+  return oldval;
 }
-gpointer
-g_atomic_pointer_get (volatile gpointer *atomic)
+guint
+(g_atomic_int_and) (volatile guint *atomic,
+                    guint           val)
 {
-  gpointer result;
+  guint oldval;
-  g_mutex_lock (g_atomic_mutex);
-  result = *atomic;
-  g_mutex_unlock (g_atomic_mutex);
+  pthread_mutex_lock (&g_atomic_lock);
+  oldval = *atomic;
+  *atomic = oldval & val;
+  pthread_mutex_unlock (&g_atomic_lock);
-  return result;
+  return oldval;
 }
-#endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
-#elif defined (G_ATOMIC_OP_MEMORY_BARRIER_NEEDED)
-gint
-g_atomic_int_get (volatile gint *atomic)
+
+guint
+(g_atomic_int_or) (volatile guint *atomic,
+                   guint           val)
 {
-  gint result = *atomic;
+  guint oldval;
-  G_ATOMIC_MEMORY_BARRIER;
+  pthread_mutex_lock (&g_atomic_lock);
+  oldval = *atomic;
+  *atomic = oldval | val;
+  pthread_mutex_unlock (&g_atomic_lock);
-  return result;
+  return oldval;
 }
+guint
+(g_atomic_int_xor) (volatile guint *atomic,
+                    guint           val)
+{
+  guint oldval;
+
+  
pthread_mutex_lock (&g_atomic_lock); + oldval = *atomic; + *atomic = oldval ^ val; + pthread_mutex_unlock (&g_atomic_lock); + + return oldval; +} + + gpointer -g_atomic_pointer_get (volatile gpointer *atomic) +(g_atomic_pointer_get) (const volatile void *atomic) { - gpointer result = *atomic; + const volatile gpointer *ptr = atomic; + gpointer value; - G_ATOMIC_MEMORY_BARRIER; + pthread_mutex_lock (&g_atomic_lock); + value = *ptr; + pthread_mutex_unlock (&g_atomic_lock); - return result; -} -#endif /* DEFINE_WITH_MUTEXES || G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */ + return value; +} -#ifdef ATOMIC_INT_CMP_XCHG -gboolean -g_atomic_int_compare_and_exchange (volatile gint *atomic, - gint oldval, - gint newval) +void +(g_atomic_pointer_set) (volatile void *atomic, + gpointer newval) { - return ATOMIC_INT_CMP_XCHG (atomic, oldval, newval); + volatile gpointer *ptr = atomic; + + pthread_mutex_lock (&g_atomic_lock); + *ptr = newval; + pthread_mutex_unlock (&g_atomic_lock); } -gint -g_atomic_int_exchange_and_add (volatile gint *atomic, - gint val) +gboolean +(g_atomic_pointer_compare_and_exchange) (volatile void *atomic, + gpointer oldval, + gpointer newval) { - gint result; - do - result = *atomic; - while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val)); + volatile gpointer *ptr = atomic; + gboolean success; + + pthread_mutex_lock (&g_atomic_lock); + + if ((success = (*ptr == oldval))) + *ptr = newval; - return result; + pthread_mutex_unlock (&g_atomic_lock); + + return success; } - -void -g_atomic_int_add (volatile gint *atomic, - gint val) + +gssize +(g_atomic_pointer_add) (volatile void *atomic, + gssize val) { - gint result; - do - result = *atomic; - while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val)); + volatile gssize *ptr = atomic; + gssize oldval; + + pthread_mutex_lock (&g_atomic_lock); + oldval = *ptr; + *ptr = oldval + val; + pthread_mutex_unlock (&g_atomic_lock); + + return oldval; } -#endif /* ATOMIC_INT_CMP_XCHG */ -void -_g_atomic_thread_init (void) +gsize +(g_atomic_pointer_and) (volatile void *atomic, + gsize val) { -#ifdef DEFINE_WITH_MUTEXES - g_atomic_mutex = g_mutex_new (); -#endif /* DEFINE_WITH_MUTEXES */ + volatile gsize *ptr = atomic; + gsize oldval; + + pthread_mutex_lock (&g_atomic_lock); + oldval = *ptr; + *ptr = oldval & val; + pthread_mutex_unlock (&g_atomic_lock); + + return oldval; } -#ifndef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED -gint -(g_atomic_int_get) (volatile gint *atomic) +gsize +(g_atomic_pointer_or) (volatile void *atomic, + gsize val) { - return g_atomic_int_get (atomic); + volatile gsize *ptr = atomic; + gsize oldval; + + pthread_mutex_lock (&g_atomic_lock); + oldval = *ptr; + *ptr = oldval | val; + pthread_mutex_unlock (&g_atomic_lock); + + return oldval; } -gpointer -(g_atomic_pointer_get) (volatile gpointer *atomic) +gsize +(g_atomic_pointer_xor) (volatile void *atomic, + gsize val) { - return g_atomic_pointer_get (atomic); + volatile gsize *ptr = atomic; + gsize oldval; + + pthread_mutex_lock (&g_atomic_lock); + oldval = *ptr; + *ptr = oldval ^ val; + pthread_mutex_unlock (&g_atomic_lock); + + return oldval; } -#endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */ -#define __G_ATOMIC_C__ -#include "galiasdef.c" +#endif + +/** + * g_atomic_int_exchange_and_add: + * @atomic: a pointer to a #gint + * @val: the value to add + * + * This function existed before g_atomic_int_add() returned the prior + * value of the integer (which it now does). It is retained only for + * compatibility reasons. Don't use this function in new code. 
+ * + * Returns: the value of @atomic before the add, signed + * Since: 2.4 + * Deprecated: 2.30: Use g_atomic_int_add() instead. + **/ +gint +g_atomic_int_exchange_and_add (volatile gint *atomic, + gint val) +{ + return (g_atomic_int_add) (atomic, val); +}
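Editor's note, for readers of this diff: the reference-counting and one-time-publication patterns that the new SECTION comment recommends look roughly like the sketch below. This is an illustration written against the public API documented in the patch, not part of the patch itself; the MyBuf type and the my_buf_* helpers are hypothetical names.

/* Sketch: a minimal reference-counted struct built on the documented
 * atomic API. MyBuf and my_buf_* are hypothetical, for illustration. */
#include <glib.h>

typedef struct {
  gint  ref_count;   /* touched only via g_atomic_int_*() once shared */
  gchar data[64];
} MyBuf;

static MyBuf *
my_buf_new (void)
{
  MyBuf *buf = g_new0 (MyBuf, 1);
  buf->ref_count = 1;   /* plain write: not yet visible to other threads */
  return buf;
}

static MyBuf *
my_buf_ref (MyBuf *buf)
{
  g_atomic_int_inc (&buf->ref_count);
  return buf;
}

static void
my_buf_unref (MyBuf *buf)
{
  /* full barrier; frees exactly once, when the count reaches zero */
  if (g_atomic_int_dec_and_test (&buf->ref_count))
    g_free (buf);
}

/* One-time publication of a shared instance: the compare-and-exchange
 * succeeds for exactly one thread; any loser discards its copy and
 * reuses the winner's. */
static MyBuf *
my_buf_get_shared (void)
{
  static volatile gpointer shared = NULL;

  if (g_atomic_pointer_get (&shared) == NULL)
    {
      MyBuf *buf = my_buf_new ();
      if (!g_atomic_pointer_compare_and_exchange (&shared, NULL, buf))
        g_free (buf);   /* another thread installed its buffer first */
    }
  return g_atomic_pointer_get (&shared);
}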