1 /* GLIB - Library of useful routines for C programming
2 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
4 * g_atomic_*: atomic operations.
5 * Copyright (C) 2003 Sebastian Wilhelmi
6 * Copyright (C) 2007 Nokia Corporation
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the
20 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
21 * Boston, MA 02111-1307, USA.
24 #if defined (G_ATOMIC_ARM)
31 #include "gthreadprivate.h"
34 #if defined (__GNUC__)
35 # if defined (G_ATOMIC_I486)
36 /* Adapted from CVS version 1.10 of glibc's sysdeps/i386/i486/bits/atomic.h
/* NOTE(review): this file is a lossy extraction -- interior lines of the
 * definitions below (return types, locals, braces) are missing from view. */
/* Fetch-and-add: atomically adds `val` to *atomic and yields the value
 * *atomic held before the add, via x86 `lock; xadd`.  The "0" (val)
 * constraint seeds the result register with `val` before the exchange. */
39 g_atomic_int_exchange_and_add (volatile gint *atomic,
44 __asm__ __volatile__ ("lock; xaddl %0,%1"
45 : "=r" (result), "=m" (*atomic)
46 : "0" (val), "m" (*atomic));
/* Atomic add with no return value: a single `lock; addl` on *atomic. */
51 g_atomic_int_add (volatile gint *atomic,
54 __asm__ __volatile__ ("lock; addl %1,%0"
56 : "ir" (val), "m" (*atomic));
/* 32-bit compare-and-swap: `lock; cmpxchgl` compares *atomic against
 * oldval (held in EAX via the "=a"/"0" constraints) and stores newval
 * on a match; returns TRUE iff the swap happened. */
60 g_atomic_int_compare_and_exchange (volatile gint *atomic,
66 __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
67 : "=a" (result), "=m" (*atomic)
68 : "r" (newval), "m" (*atomic), "0" (oldval));
70 return result == oldval;
73 /* The same code as above, as on i386 gpointer is 32 bit as well.
74 * Duplicating the code here seems more natural than casting the
75 * arguments and calling the former function */
/* Pointer-sized CAS for i386: identical `lock; cmpxchgl` instruction,
 * valid because gpointer is 32 bits on this target. */
78 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
84 __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
85 : "=a" (result), "=m" (*atomic)
86 : "r" (newval), "m" (*atomic), "0" (oldval));
88 return result == oldval;
91 # elif defined (G_ATOMIC_SPARCV9)
92 /* Adapted from CVS version 1.3 of glibc's sysdeps/sparc/sparc64/bits/atomic.h
/* Statement-expression macro: 32-bit CAS via the SPARC V9 `cas`
 * instruction; the expression evaluates to true iff *atomic matched
 * oldval and was replaced.  (Macro body partly elided in this view.) */
94 # define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval) \
97 __asm__ __volatile__ ("cas [%4], %2, %0" \
98 : "=r" (__result), "=m" (*(atomic)) \
99 : "r" (oldval), "m" (*(atomic)), "r" (atomic),\
101 __result == oldval; \
104 # if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* Pointer CAS on 32-bit SPARC: the word-sized `cas` instruction
 * suffices since pointers are 32 bits here. */
106 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
111 __asm__ __volatile__ ("cas [%4], %2, %0"
112 : "=r" (result), "=m" (*atomic)
113 : "r" (oldval), "m" (*atomic), "r" (atomic),
115 return result == oldval;
117 # elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
/* Pointer CAS on 64-bit SPARC uses `casx`, the doubleword variant. */
119 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
124 gpointer *a = atomic;
125 __asm__ __volatile__ ("casx [%4], %2, %0"
126 : "=r" (result), "=m" (*a)
127 : "r" (oldval), "m" (*a), "r" (a),
129 return result == oldval;
131 # else /* What's that */
132 # error "Your system has an unsupported pointer size"
133 # endif /* GLIB_SIZEOF_VOID_P */
/* Full barrier on SPARC V9: `membar` with all four ordering bits set. */
134 # define G_ATOMIC_MEMORY_BARRIER \
135 __asm__ __volatile__ ("membar #LoadLoad | #LoadStore" \
136 " | #StoreLoad | #StoreStore" : : : "memory")
138 # elif defined (G_ATOMIC_ALPHA)
139 /* Adapted from CVS version 1.3 of glibc's sysdeps/alpha/bits/atomic.h
/* LL/SC-based CAS macro; only the `cmpeq` fragment of the asm survives
 * in this extraction -- presumably an ldl_l/stl_c retry loop, TODO
 * confirm against glibc's alpha atomic.h. */
141 # define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval) \
145 __asm__ __volatile__ ( \
148 " cmpeq %0,%3,%1\n" \
163 # if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* 32-bit pointer CAS (asm body elided in this view). */
165 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
171 __asm__ __volatile__ (
189 # elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
/* 64-bit pointer CAS (asm body elided in this view). */
191 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
197 __asm__ __volatile__ (
215 # else /* What's that */
216 # error "Your system has an unsupported pointer size"
217 # endif /* GLIB_SIZEOF_VOID_P */
/* Alpha's full memory-barrier instruction. */
218 # define G_ATOMIC_MEMORY_BARRIER __asm__ ("mb" : : : "memory")
219 # elif defined (G_ATOMIC_X86_64)
220 /* Adapted from CVS version 1.9 of glibc's sysdeps/x86_64/bits/atomic.h
/* Fetch-and-add via `lock; xaddl` -- same as the i486 variant, since
 * gint remains 32 bits on x86-64. */
223 g_atomic_int_exchange_and_add (volatile gint *atomic,
228 __asm__ __volatile__ ("lock; xaddl %0,%1"
229 : "=r" (result), "=m" (*atomic)
230 : "0" (val), "m" (*atomic));
/* Atomic add, discarding the previous value. */
235 g_atomic_int_add (volatile gint *atomic,
238 __asm__ __volatile__ ("lock; addl %1,%0"
240 : "ir" (val), "m" (*atomic));
/* 32-bit CAS on x86-64; returns TRUE iff *atomic equalled oldval and
 * newval was stored. */
244 g_atomic_int_compare_and_exchange (volatile gint *atomic,
250 __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
251 : "=a" (result), "=m" (*atomic)
252 : "r" (newval), "m" (*atomic), "0" (oldval));
254 return result == oldval;
/* Pointer CAS uses the 64-bit `cmpxchgq` (the %q2 modifier forces the
 * 64-bit register name for the newval operand). */
258 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
264 __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
265 : "=a" (result), "=m" (*atomic)
266 : "r" (newval), "m" (*atomic), "0" (oldval));
268 return result == oldval;
271 # elif defined (G_ATOMIC_POWERPC)
272 /* Adapted from CVS version 1.16 of glibc's sysdeps/powerpc/bits/atomic.h
273 * and CVS version 1.4 of glibc's sysdeps/powerpc/powerpc32/bits/atomic.h
274 * and CVS version 1.7 of glibc's sysdeps/powerpc/powerpc64/bits/atomic.h
277 /* Non-optimizing compile bails on the following two asm statements
278 * for reasons unknown to the author */
/* Fetch-and-add via an lwarx/stwcx. reservation loop; the add/store/
 * retry tail of the asm is elided in this extraction. */
280 g_atomic_int_exchange_and_add (volatile gint *atomic,
284 __asm__ __volatile__ (".Lieaa%=: lwarx %0,0,%3\n"
288 : "=&b" (result), "=&r" (temp), "=m" (*atomic)
289 : "b" (atomic), "r" (val), "m" (*atomic)
294 /* The same as above, to save a function call repeated here */
/* Atomic add, old value discarded (lwarx/stwcx. loop, tail elided). */
296 g_atomic_int_add (volatile gint *atomic,
300 __asm__ __volatile__ (".Lia%=: lwarx %0,0,%3\n"
304 : "=&b" (result), "=&r" (temp), "=m" (*atomic)
305 : "b" (atomic), "r" (val), "m" (*atomic)
308 # else /* !__OPTIMIZE__ */
/* Unoptimized fallback: loop on CAS until the add is published. */
310 g_atomic_int_exchange_and_add (volatile gint *atomic,
316 while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
/* Same CAS-retry technique for the void add. */
322 g_atomic_int_add (volatile gint *atomic,
328 while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
330 # endif /* !__OPTIMIZE__ */
332 # if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* 32-bit CAS: a full `sync` barrier precedes the lwarx/stwcx. sequence
 * (the compare/store/retry tail is elided in this extraction). */
334 g_atomic_int_compare_and_exchange (volatile gint *atomic,
339 __asm__ __volatile__ ("sync\n"
340 ".L1icae%=: lwarx %0,0,%1\n"
347 : "b" (atomic), "r" (oldval), "r" (newval)
/* Pointer CAS; word-sized lwarx works because pointers are 32 bits
 * in this branch. */
353 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
358 __asm__ __volatile__ ("sync\n"
359 ".L1pcae%=: lwarx %0,0,%1\n"
366 : "b" (atomic), "r" (oldval), "r" (newval)
370 # elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
/* gint CAS on 64-bit PowerPC still uses the 32-bit lwarx, since gint
 * stays 32 bits. */
372 g_atomic_int_compare_and_exchange (volatile gint *atomic,
377 __asm__ __volatile__ ("sync\n"
378 ".L1icae%=: lwarx %0,0,%1\n"
386 : "b" (atomic), "r" (oldval), "r" (newval)
/* Pointer CAS switches to the doubleword `ldarx` reservation load. */
392 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
397 __asm__ __volatile__ ("sync\n"
398 ".L1pcae%=: ldarx %0,0,%1\n"
405 : "b" (atomic), "r" (oldval), "r" (newval)
409 # else /* What's that */
410 # error "Your system has an unsupported pointer size"
411 # endif /* GLIB_SIZEOF_VOID_P */
/* `sync` acts as the full memory barrier on PowerPC. */
413 # define G_ATOMIC_MEMORY_BARRIER __asm__ ("sync" : : : "memory")
415 # elif defined (G_ATOMIC_IA64)
416 /* Adapted from CVS version 1.8 of glibc's sysdeps/ia64/bits/atomic.h
/* IA64 relies entirely on GCC's __sync_* builtins; no hand-written asm. */
/* Fetch-and-add: returns the value held before the addition. */
419 g_atomic_int_exchange_and_add (volatile gint *atomic,
422 return __sync_fetch_and_add (atomic, val);
/* Add, ignoring the fetched old value. */
426 g_atomic_int_add (volatile gint *atomic,
429 __sync_fetch_and_add (atomic, val);
/* gint CAS via the boolean builtin: TRUE iff the swap occurred. */
433 g_atomic_int_compare_and_exchange (volatile gint *atomic,
437 return __sync_bool_compare_and_swap (atomic, oldval, newval);
/* Pointer CAS: values are round-tripped through long, which is
 * pointer-sized on this LP64 target. */
441 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
445 return __sync_bool_compare_and_swap ((long *)atomic,
446 (long)oldval, (long)newval);
449 # define G_ATOMIC_MEMORY_BARRIER __sync_synchronize ()
450 # elif defined (G_ATOMIC_S390)
451 /* Adapted from glibc's sysdeps/s390/bits/atomic.h
/* gint CAS macro: the s390 `cs` (compare-and-swap) instruction updates
 * __result in place ("+d"); the expression is true iff *atomic matched
 * oldval.  `cs` sets the condition code, hence the "cc" clobber. */
453 # define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval) \
455 gint __result = oldval; \
456 __asm__ __volatile__ ("cs %0, %2, %1" \
457 : "+d" (__result), "=Q" (*(atomic)) \
458 : "d" (newval), "m" (*(atomic)) : "cc" ); \
459 __result == oldval; \
462 # if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* 32-bit pointer CAS: same `cs` instruction, pointer-typed operands. */
464 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
468 gpointer result = oldval;
469 __asm__ __volatile__ ("cs %0, %2, %1"
470 : "+d" (result), "=Q" (*(atomic))
471 : "d" (newval), "m" (*(atomic)) : "cc" );
472 return result == oldval;
474 # elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
/* 64-bit pointer CAS: `csg` is the doubleword compare-and-swap. */
476 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
480 gpointer result = oldval;
481 gpointer *a = atomic;
482 __asm__ __volatile__ ("csg %0, %2, %1"
483 : "+d" (result), "=Q" (*a)
484 : "d" ((long)(newval)), "m" (*a) : "cc" );
485 return result == oldval;
487 # else /* What's that */
488 # error "Your system has an unsupported pointer size"
489 # endif /* GLIB_SIZEOF_VOID_P */
490 # elif defined (G_ATOMIC_ARM)
/* Pre-ARMv6 has no CAS instruction, so all atomic ops below serialize
 * through one global spinlock. */
491 static volatile int atomic_spin = 0;
/* Try to take the lock once; the asm body (presumably a `swp` on
 * &atomic_spin) is elided in this extraction -- TODO confirm. */
493 static int atomic_spin_trylock (void)
500 : "r,0" (1), "r,r" (&atomic_spin)
/* Spin (body elided; likely sched_yield between attempts) until the
 * trylock succeeds. */
508 static void atomic_spin_lock (void)
510 while (atomic_spin_trylock())
/* Release the spinlock (body elided in this view). */
514 static void atomic_spin_unlock (void)
/* Fetch-and-add under the spinlock. */
520 g_atomic_int_exchange_and_add (volatile gint *atomic,
528 atomic_spin_unlock();
/* Plain add under the spinlock. */
534 g_atomic_int_add (volatile gint *atomic,
539 atomic_spin_unlock();
/* gint CAS under the spinlock: swap only when *atomic == oldval. */
543 g_atomic_int_compare_and_exchange (volatile gint *atomic,
550 if (*atomic == oldval)
557 atomic_spin_unlock();
/* Pointer CAS under the same spinlock. */
563 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
570 if (*atomic == oldval)
577 atomic_spin_unlock();
/* Fallback selection: GCC on an unknown CPU uses mutexes; non-GCC
 * uses Win32 Interlocked* on Windows, mutexes everywhere else. */
581 # else /* !G_ATOMIC_ARM */
582 # define DEFINE_WITH_MUTEXES
583 # endif /* G_ATOMIC_IA64 */
584 #else /* !__GNUC__ */
585 # ifdef G_PLATFORM_WIN32
586 # define DEFINE_WITH_WIN32_INTERLOCKED
588 # define DEFINE_WITH_MUTEXES
590 #endif /* __GNUC__ */
592 #ifdef DEFINE_WITH_WIN32_INTERLOCKED
593 # include <windows.h>
594 /* Following indicates that InterlockedCompareExchangePointer is
595 * declared in winbase.h (included by windows.h) and needs to be
596 * commented out if not true. It is defined iff WINVER > 0x0400,
597 * which is usually correct but can be wrong if WINVER is set before
598 * windows.h is included.
601 # define HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
/* Fetch-and-add: InterlockedExchangeAdd returns the prior value. */
605 g_atomic_int_exchange_and_add (volatile gint32 *atomic,
608 return InterlockedExchangeAdd (atomic, val);
/* Same API call, return value discarded. */
612 g_atomic_int_add (volatile gint32 *atomic,
615 InterlockedExchangeAdd (atomic, val);
/* gint32 CAS: older SDKs only declare the PVOID-typed
 * InterlockedCompareExchange, hence the cast branch. */
619 g_atomic_int_compare_and_exchange (volatile gint32 *atomic,
623 #ifndef HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
624 return (guint32) InterlockedCompareExchange ((PVOID*)atomic,
626 (PVOID)oldval) == oldval;
628 return InterlockedCompareExchange (atomic,
/* Pointer CAS: prefer InterlockedCompareExchangePointer when declared;
 * otherwise fall back to the 32-bit form, which is only valid when
 * pointers are 32 bits (enforced by the #error below). */
635 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
639 # ifdef HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
640 return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
642 # if GLIB_SIZEOF_VOID_P != 4 /* no 32-bit system */
643 # error "InterlockedCompareExchangePointer needed"
645 return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
649 #endif /* DEFINE_WITH_WIN32_INTERLOCKED */
651 #ifdef DEFINE_WITH_MUTEXES
652 /* We have to use the slow, but safe locking method */
/* One process-global mutex guards every "atomic" operation below;
 * it is created in _g_atomic_thread_init(). */
653 static GMutex *g_atomic_mutex;
/* Fetch-and-add under the global mutex. */
656 g_atomic_int_exchange_and_add (volatile gint *atomic,
661 g_mutex_lock (g_atomic_mutex);
664 g_mutex_unlock (g_atomic_mutex);
/* Plain add under the global mutex. */
671 g_atomic_int_add (volatile gint *atomic,
674 g_mutex_lock (g_atomic_mutex);
676 g_mutex_unlock (g_atomic_mutex);
/* gint CAS under the global mutex: store newval only on a match. */
680 g_atomic_int_compare_and_exchange (volatile gint *atomic,
686 g_mutex_lock (g_atomic_mutex);
687 if (*atomic == oldval)
694 g_mutex_unlock (g_atomic_mutex);
/* Pointer CAS under the same global mutex. */
700 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
706 g_mutex_lock (g_atomic_mutex);
707 if (*atomic == oldval)
714 g_mutex_unlock (g_atomic_mutex);
719 #ifdef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
/* On architectures needing barriers, even plain get/set must take the
 * global mutex so they order correctly against the ops above. */
721 g_atomic_int_get (volatile gint *atomic)
725 g_mutex_lock (g_atomic_mutex);
727 g_mutex_unlock (g_atomic_mutex);
/* Mutex-protected store of a gint. */
733 g_atomic_int_set (volatile gint *atomic,
736 g_mutex_lock (g_atomic_mutex);
738 g_mutex_unlock (g_atomic_mutex);
/* Mutex-protected load of a pointer. */
742 g_atomic_pointer_get (volatile gpointer *atomic)
746 g_mutex_lock (g_atomic_mutex);
748 g_mutex_unlock (g_atomic_mutex);
/* Mutex-protected store of a pointer. */
754 g_atomic_pointer_set (volatile gpointer *atomic,
757 g_mutex_lock (g_atomic_mutex);
759 g_mutex_unlock (g_atomic_mutex);
761 #endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
762 #elif defined (G_ATOMIC_OP_MEMORY_BARRIER_NEEDED)
/* Lock-free backends that still need ordering: each get/set pairs the
 * plain access with the per-architecture G_ATOMIC_MEMORY_BARRIER. */
764 g_atomic_int_get (volatile gint *atomic)
766 G_ATOMIC_MEMORY_BARRIER;
/* Store then barrier (surrounding statements elided in this view). */
771 g_atomic_int_set (volatile gint *atomic,
775 G_ATOMIC_MEMORY_BARRIER;
/* Barrier-ordered pointer load. */
779 g_atomic_pointer_get (volatile gpointer *atomic)
781 G_ATOMIC_MEMORY_BARRIER;
/* Barrier-ordered pointer store. */
786 g_atomic_pointer_set (volatile gpointer *atomic,
790 G_ATOMIC_MEMORY_BARRIER;
792 #endif /* DEFINE_WITH_MUTEXES || G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
794 #ifdef ATOMIC_INT_CMP_XCHG
/* Backends that only supplied a CAS macro (SPARC, Alpha, S390) derive
 * the remaining gint operations from it here. */
796 g_atomic_int_compare_and_exchange (volatile gint *atomic,
800 return ATOMIC_INT_CMP_XCHG (atomic, oldval, newval);
/* Fetch-and-add as a CAS retry loop. */
804 g_atomic_int_exchange_and_add (volatile gint *atomic,
810 while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
/* Void add, same CAS retry loop. */
816 g_atomic_int_add (volatile gint *atomic,
822 while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
824 #endif /* ATOMIC_INT_CMP_XCHG */
/* One-time init hook called by the GLib threading setup: creates the
 * global mutex when the mutex-based fallback is in use; otherwise a
 * no-op. */
827 _g_atomic_thread_init (void)
829 #ifdef DEFINE_WITH_MUTEXES
830 g_atomic_mutex = g_mutex_new ();
831 #endif /* DEFINE_WITH_MUTEXES */
834 #ifndef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
/* When no barrier is needed, the public get/set names are macros in the
 * header; parenthesizing the names below suppresses macro expansion so
 * real out-of-line functions are still emitted for ABI completeness. */
836 (g_atomic_int_get) (volatile gint *atomic)
838 return g_atomic_int_get (atomic);
/* Out-of-line wrapper around the g_atomic_int_set macro. */
842 (g_atomic_int_set) (volatile gint *atomic,
845 g_atomic_int_set (atomic, newval);
/* Out-of-line wrapper around the g_atomic_pointer_get macro. */
849 (g_atomic_pointer_get) (volatile gpointer *atomic)
851 return g_atomic_pointer_get (atomic);
/* Out-of-line wrapper around the g_atomic_pointer_set macro. */
855 (g_atomic_pointer_set) (volatile gpointer *atomic,
858 g_atomic_pointer_set (atomic, newval);
860 #endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
/* GLib symbol-aliasing machinery: must come after all definitions. */
862 #define __G_ATOMIC_C__
863 #include "galiasdef.c"