1 /* GLIB - Library of useful routines for C programming
2 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
4 * g_atomic_*: atomic operations.
5 * Copyright (C) 2003 Sebastian Wilhelmi
6 * Copyright (C) 2007 Nokia Corporation
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the
20 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
21 * Boston, MA 02111-1307, USA.
26 #if defined (G_ATOMIC_ARM)
31 #include "gthreadprivate.h"
34 # if HAVE_API_WIN32_BASE
42 #if defined (__GNUC__)
43 # if defined (G_ATOMIC_I486)
/* NOTE(review): this listing is a sampled excerpt; interior lines of the
 * original file are elided between the numbered lines below, so each
 * function body is incomplete as shown. */
44 /* Adapted from CVS version 1.10 of glibc's sysdeps/i386/i486/bits/atomic.h
/* Atomically add `val` to *atomic and return the PREVIOUS value, using the
 * x86 locked exchange-and-add instruction (`lock; xaddl`). */
47 g_atomic_int_exchange_and_add (volatile gint *atomic,
52 __asm__ __volatile__ ("lock; xaddl %0,%1"
53 : "=r" (result), "=m" (*atomic)
54 : "0" (val), "m" (*atomic));
/* Atomically add `val` to *atomic; no result needed, so a plain locked
 * add suffices. */
59 g_atomic_int_add (volatile gint *atomic,
62 __asm__ __volatile__ ("lock; addl %1,%0"
64 : "ir" (val), "m" (*atomic));
/* Compare-and-swap a gint via `lock; cmpxchgl`: EAX ("a"/"0") carries
 * oldval in and the previous memory value out, so the swap succeeded
 * exactly when result == oldval. */
68 g_atomic_int_compare_and_exchange (volatile gint *atomic,
74 __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
75 : "=a" (result), "=m" (*atomic)
76 : "r" (newval), "m" (*atomic), "0" (oldval));
78 return result == oldval;
81 /* The same code as above, as on i386 gpointer is 32 bit as well.
82 * Duplicating the code here seems more natural than casting the
83 * arguments and calling the former function */
86 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
92 __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
93 : "=a" (result), "=m" (*atomic)
94 : "r" (newval), "m" (*atomic), "0" (oldval));
96 return result == oldval;
99 # elif defined (G_ATOMIC_SPARCV9)
100 /* Adapted from CVS version 1.3 of glibc's sysdeps/sparc/sparc64/bits/atomic.h
/* Generic int compare-and-swap macro built on the SPARC V9 `cas`
 * instruction; the expression evaluates to TRUE when the swap happened
 * (lines of the macro body are elided in this excerpt). */
102 # define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval) \
105 __asm__ __volatile__ ("cas [%4], %2, %0" \
106 : "=r" (__result), "=m" (*(atomic)) \
107 : "r" (oldval), "m" (*(atomic)), "r" (atomic),\
109 __result == oldval; \
112 # if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* 32-bit pointer CAS: `cas` operates on 32-bit words. */
114 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
119 __asm__ __volatile__ ("cas [%4], %2, %0"
120 : "=r" (result), "=m" (*atomic)
121 : "r" (oldval), "m" (*atomic), "r" (atomic),
123 return result == oldval;
125 # elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
/* 64-bit pointer CAS uses `casx` (the 64-bit compare-and-swap form). */
127 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
132 gpointer *a = atomic;
133 __asm__ __volatile__ ("casx [%4], %2, %0"
134 : "=r" (result), "=m" (*a)
135 : "r" (oldval), "m" (*a), "r" (a),
137 return result == oldval;
139 # else /* What's that */
140 # error "Your system has an unsupported pointer size"
141 # endif /* GLIB_SIZEOF_VOID_P */
/* Full fence: `membar` ordering all four load/store combinations. */
142 # define G_ATOMIC_MEMORY_BARRIER \
143 __asm__ __volatile__ ("membar #LoadLoad | #LoadStore" \
144 " | #StoreLoad | #StoreStore" : : : "memory")
146 # elif defined (G_ATOMIC_ALPHA)
/* NOTE(review): almost all of the Alpha asm bodies are elided in this
 * excerpt. The visible `cmpeq` is consistent with a load-locked /
 * store-conditional CAS retry loop, but confirm against the full source
 * before relying on that description. */
147 /* Adapted from CVS version 1.3 of glibc's sysdeps/alpha/bits/atomic.h
149 # define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval) \
153 __asm__ __volatile__ ( \
156 " cmpeq %0,%3,%1\n" \
171 # if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
173 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
179 __asm__ __volatile__ (
197 # elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
199 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
205 __asm__ __volatile__ (
223 # else /* What's that */
224 # error "Your system has an unsupported pointer size"
225 # endif /* GLIB_SIZEOF_VOID_P */
/* Alpha full memory-barrier instruction. */
226 # define G_ATOMIC_MEMORY_BARRIER __asm__ ("mb" : : : "memory")
227 # elif defined (G_ATOMIC_X86_64)
228 /* Adapted from CVS version 1.9 of glibc's sysdeps/x86_64/bits/atomic.h
/* gint is 32-bit here too, so the integer operations are identical to the
 * i486 versions: locked xaddl / addl / cmpxchgl. */
231 g_atomic_int_exchange_and_add (volatile gint *atomic,
236 __asm__ __volatile__ ("lock; xaddl %0,%1"
237 : "=r" (result), "=m" (*atomic)
238 : "0" (val), "m" (*atomic));
243 g_atomic_int_add (volatile gint *atomic,
246 __asm__ __volatile__ ("lock; addl %1,%0"
248 : "ir" (val), "m" (*atomic));
252 g_atomic_int_compare_and_exchange (volatile gint *atomic,
258 __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
259 : "=a" (result), "=m" (*atomic)
260 : "r" (newval), "m" (*atomic), "0" (oldval));
262 return result == oldval;
/* Pointers are 64-bit on x86-64, so the pointer CAS must use the
 * quad-word form `cmpxchgq` (the `%q2` modifier forces the 64-bit
 * register name for operand 2). */
266 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
272 __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
273 : "=a" (result), "=m" (*atomic)
274 : "r" (newval), "m" (*atomic), "0" (oldval));
276 return result == oldval;
279 # elif defined (G_ATOMIC_POWERPC)
280 /* Adapted from CVS version 1.16 of glibc's sysdeps/powerpc/bits/atomic.h
281 * and CVS version 1.4 of glibc's sysdeps/powerpc/powerpc32/bits/atomic.h
282 * and CVS version 1.7 of glibc's sysdeps/powerpc/powerpc64/bits/atomic.h
285 /* Non-optimizing compile bails on the following two asm statements
286 * for reasons unknown to the author */
/* Each asm exists in two spellings selected by ASM_NUMERIC_LABELS:
 * numeric local labels ("1:") vs. assembler local symbols (".Lxxx%=").
 * The visible `lwarx` (load-and-reserve) is the start of a reservation
 * retry loop; the matching store-conditional lines are elided in this
 * excerpt. */
288 g_atomic_int_exchange_and_add (volatile gint *atomic,
292 #if ASM_NUMERIC_LABELS
293 __asm__ __volatile__ ("1: lwarx %0,0,%3\n"
297 : "=&b" (result), "=&r" (temp), "=m" (*atomic)
298 : "b" (atomic), "r" (val), "m" (*atomic)
301 __asm__ __volatile__ (".Lieaa%=: lwarx %0,0,%3\n"
305 : "=&b" (result), "=&r" (temp), "=m" (*atomic)
306 : "b" (atomic), "r" (val), "m" (*atomic)
312 /* The same as above, to save a function call repeated here */
314 g_atomic_int_add (volatile gint *atomic,
318 #if ASM_NUMERIC_LABELS
319 __asm__ __volatile__ ("1: lwarx %0,0,%3\n"
323 : "=&b" (result), "=&r" (temp), "=m" (*atomic)
324 : "b" (atomic), "r" (val), "m" (*atomic)
327 __asm__ __volatile__ (".Lia%=: lwarx %0,0,%3\n"
331 : "=&b" (result), "=&r" (temp), "=m" (*atomic)
332 : "b" (atomic), "r" (val), "m" (*atomic)
336 # else /* !__OPTIMIZE__ */
/* Unoptimized builds: fall back to a CAS retry loop instead of the
 * hand-written asm above. */
338 g_atomic_int_exchange_and_add (volatile gint *atomic,
344 while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
350 g_atomic_int_add (volatile gint *atomic,
356 while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
358 # endif /* !__OPTIMIZE__ */
360 # if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* CAS variants begin with `sync` so the operation acts as a full
 * barrier before the reservation loop. */
362 g_atomic_int_compare_and_exchange (volatile gint *atomic,
367 #if ASM_NUMERIC_LABELS
368 __asm__ __volatile__ ("sync\n"
376 : "b" (atomic), "r" (oldval), "r" (newval)
379 __asm__ __volatile__ ("sync\n"
380 ".L1icae%=: lwarx %0,0,%1\n"
387 : "b" (atomic), "r" (oldval), "r" (newval)
394 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
399 #if ASM_NUMERIC_LABELS
400 __asm__ __volatile__ ("sync\n"
408 : "b" (atomic), "r" (oldval), "r" (newval)
411 __asm__ __volatile__ ("sync\n"
412 ".L1pcae%=: lwarx %0,0,%1\n"
419 : "b" (atomic), "r" (oldval), "r" (newval)
424 # elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
426 g_atomic_int_compare_and_exchange (volatile gint *atomic,
431 #if ASM_NUMERIC_LABELS
432 __asm__ __volatile__ ("sync\n"
441 : "b" (atomic), "r" (oldval), "r" (newval)
444 __asm__ __volatile__ ("sync\n"
445 ".L1icae%=: lwarx %0,0,%1\n"
453 : "b" (atomic), "r" (oldval), "r" (newval)
/* 64-bit pointer CAS uses `ldarx` (doubleword load-and-reserve). */
460 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
465 #if ASM_NUMERIC_LABELS
466 __asm__ __volatile__ ("sync\n"
474 : "b" (atomic), "r" (oldval), "r" (newval)
477 __asm__ __volatile__ ("sync\n"
478 ".L1pcae%=: ldarx %0,0,%1\n"
485 : "b" (atomic), "r" (oldval), "r" (newval)
490 # else /* What's that */
491 # error "Your system has an unsupported pointer size"
492 # endif /* GLIB_SIZEOF_VOID_P */
494 # define G_ATOMIC_MEMORY_BARRIER __asm__ ("sync" : : : "memory")
496 # elif defined (G_ATOMIC_IA64)
497 /* Adapted from CVS version 1.8 of glibc's sysdeps/ia64/bits/atomic.h
/* IA64 simply delegates to the GCC __sync_* atomic builtins. */
500 g_atomic_int_exchange_and_add (volatile gint *atomic,
503 return __sync_fetch_and_add (atomic, val);
507 g_atomic_int_add (volatile gint *atomic,
510 __sync_fetch_and_add (atomic, val);
514 g_atomic_int_compare_and_exchange (volatile gint *atomic,
518 return __sync_bool_compare_and_swap (atomic, oldval, newval);
/* Pointer CAS goes through (long *) so the builtin sees an integral
 * type of pointer width. */
522 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
526 return __sync_bool_compare_and_swap ((long *)atomic,
527 (long)oldval, (long)newval);
530 # define G_ATOMIC_MEMORY_BARRIER __sync_synchronize ()
531 # elif defined (G_ATOMIC_S390)
532 /* Adapted from glibc's sysdeps/s390/bits/atomic.h
/* Int compare-and-swap via the s390 `cs` (compare-and-swap) instruction;
 * __result is updated in place ("+d") with the previous memory value, so
 * the macro's value is TRUE when the swap happened. */
534 # define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval) \
536 gint __result = oldval; \
537 __asm__ __volatile__ ("cs %0, %2, %1" \
538 : "+d" (__result), "=Q" (*(atomic)) \
539 : "d" (newval), "m" (*(atomic)) : "cc" ); \
540 __result == oldval; \
543 # if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* 32-bit pointer CAS: same `cs` instruction as the int macro. */
545 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
549 gpointer result = oldval;
550 __asm__ __volatile__ ("cs %0, %2, %1"
551 : "+d" (result), "=Q" (*(atomic))
552 : "d" (newval), "m" (*(atomic)) : "cc" );
553 return result == oldval;
555 # elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
/* 64-bit pointer CAS uses `csg` (compare-and-swap, 64-bit grande form). */
557 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
561 gpointer result = oldval;
562 gpointer *a = atomic;
563 __asm__ __volatile__ ("csg %0, %2, %1"
564 : "+d" (result), "=Q" (*a)
565 : "d" ((long)(newval)), "m" (*a) : "cc" );
566 return result == oldval;
568 # else /* What's that */
569 # error "Your system has an unsupported pointer size"
570 # endif /* GLIB_SIZEOF_VOID_P */
571 # elif defined (G_ATOMIC_ARM)
/* ARM (pre-ARMv6, no native CAS here): all atomic operations are
 * serialized through one global spinlock. */
572 static volatile int atomic_spin = 0;
/* Try to acquire the spinlock once; asm body elided in this excerpt
 * (presumably a swap of 1 into atomic_spin — confirm in full source). */
574 static int atomic_spin_trylock (void)
581 : "r,0" (1), "r,r" (&atomic_spin)
/* Spin until the trylock succeeds. */
589 static void atomic_spin_lock (void)
591 while (atomic_spin_trylock())
595 static void atomic_spin_unlock (void)
/* Each operation below runs under the spinlock; the lock/compute lines
 * between the signature and the unlock are elided in this excerpt. */
601 g_atomic_int_exchange_and_add (volatile gint *atomic,
609 atomic_spin_unlock();
615 g_atomic_int_add (volatile gint *atomic,
620 atomic_spin_unlock();
624 g_atomic_int_compare_and_exchange (volatile gint *atomic,
631 if (*atomic == oldval)
638 atomic_spin_unlock();
644 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
651 if (*atomic == oldval)
658 atomic_spin_unlock();
/* No recognized architecture: pick a generic fallback — Win32
 * Interlocked* when available, otherwise the mutex-based path. The same
 * choice is made for non-GCC compilers below. */
662 # elif defined(G_PLATFORM_WIN32)
663 # define DEFINE_WITH_WIN32_INTERLOCKED
665 # define DEFINE_WITH_MUTEXES
666 # endif /* G_ATOMIC_IA64 */
667 #else /* !__GNUC__ */
668 # ifdef G_PLATFORM_WIN32
669 # define DEFINE_WITH_WIN32_INTERLOCKED
671 # define DEFINE_WITH_MUTEXES
673 #endif /* __GNUC__ */
675 #ifdef DEFINE_WITH_WIN32_INTERLOCKED
676 # include <windows.h>
677 /* Following indicates that InterlockedCompareExchangePointer is
678 * declared in winbase.h (included by windows.h) and needs to be
679 * commented out if not true. It is defined iff WINVER > 0x0400,
680 * which is usually correct but can be wrong if WINVER is set before
681 * windows.h is included.
684 # define HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
/* Fetch-and-add via the Win32 InterlockedExchangeAdd API, which returns
 * the previous value. */
688 g_atomic_int_exchange_and_add (volatile gint32 *atomic,
691 return InterlockedExchangeAdd (atomic, val);
695 g_atomic_int_add (volatile gint32 *atomic,
698 InterlockedExchangeAdd (atomic, val);
/* Int CAS: old SDKs only declare the PVOID-typed
 * InterlockedCompareExchange, hence the cast-heavy branch. */
702 g_atomic_int_compare_and_exchange (volatile gint32 *atomic,
706 #ifndef HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
707 return (guint32) InterlockedCompareExchange ((PVOID*)atomic,
709 (PVOID)oldval) == oldval;
711 return InterlockedCompareExchange (atomic,
/* Pointer CAS: prefer InterlockedCompareExchangePointer; otherwise the
 * 32-bit integer form is only valid when pointers are 32-bit. */
718 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
722 # ifdef HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
723 return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
725 # if GLIB_SIZEOF_VOID_P != 4 /* no 32-bit system */
726 # error "InterlockedCompareExchangePointer needed"
728 return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
732 #endif /* DEFINE_WITH_WIN32_INTERLOCKED */
734 #ifdef DEFINE_WITH_MUTEXES
735 /* We have to use the slow, but safe locking method */
/* One process-global mutex serializes every atomic operation; it is
 * allocated in _g_atomic_thread_init(). */
736 static GMutex *g_atomic_mutex;
/* Each operation below takes the mutex, does the plain read/modify/write
 * (those lines are elided in this excerpt), and releases the mutex. */
739 g_atomic_int_exchange_and_add (volatile gint *atomic,
744 g_mutex_lock (g_atomic_mutex);
747 g_mutex_unlock (g_atomic_mutex);
754 g_atomic_int_add (volatile gint *atomic,
757 g_mutex_lock (g_atomic_mutex);
759 g_mutex_unlock (g_atomic_mutex);
763 g_atomic_int_compare_and_exchange (volatile gint *atomic,
769 g_mutex_lock (g_atomic_mutex);
770 if (*atomic == oldval)
777 g_mutex_unlock (g_atomic_mutex);
783 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
789 g_mutex_lock (g_atomic_mutex);
790 if (*atomic == oldval)
797 g_mutex_unlock (g_atomic_mutex);
/* get/set variants. In the mutex fallback they lock the global mutex;
 * on barrier-needing architectures they are plain accesses bracketed by
 * G_ATOMIC_MEMORY_BARRIER (defined per-arch above). */
802 #ifdef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
804 g_atomic_int_get (volatile gint *atomic)
808 g_mutex_lock (g_atomic_mutex);
810 g_mutex_unlock (g_atomic_mutex);
816 g_atomic_int_set (volatile gint *atomic,
819 g_mutex_lock (g_atomic_mutex);
821 g_mutex_unlock (g_atomic_mutex);
825 g_atomic_pointer_get (volatile gpointer *atomic)
829 g_mutex_lock (g_atomic_mutex);
831 g_mutex_unlock (g_atomic_mutex);
837 g_atomic_pointer_set (volatile gpointer *atomic,
840 g_mutex_lock (g_atomic_mutex);
842 g_mutex_unlock (g_atomic_mutex);
844 #endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
845 #elif defined (G_ATOMIC_OP_MEMORY_BARRIER_NEEDED)
847 g_atomic_int_get (volatile gint *atomic)
849 G_ATOMIC_MEMORY_BARRIER;
854 g_atomic_int_set (volatile gint *atomic,
858 G_ATOMIC_MEMORY_BARRIER;
862 g_atomic_pointer_get (volatile gpointer *atomic)
864 G_ATOMIC_MEMORY_BARRIER;
869 g_atomic_pointer_set (volatile gpointer *atomic,
873 G_ATOMIC_MEMORY_BARRIER;
875 #endif /* DEFINE_WITH_MUTEXES || G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
/* Architectures that only provided ATOMIC_INT_CMP_XCHG (sparc, alpha,
 * s390 above) get the remaining int operations derived from it here:
 * CAS directly, and add/exchange-and-add as CAS retry loops. */
877 #ifdef ATOMIC_INT_CMP_XCHG
879 g_atomic_int_compare_and_exchange (volatile gint *atomic,
883 return ATOMIC_INT_CMP_XCHG (atomic, oldval, newval);
887 g_atomic_int_exchange_and_add (volatile gint *atomic,
893 while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
899 g_atomic_int_add (volatile gint *atomic,
905 while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
907 #endif /* ATOMIC_INT_CMP_XCHG */
/* One-time init hook: the mutex fallback needs its global mutex created
 * before any atomic call. */
910 _g_atomic_thread_init (void)
912 #ifdef DEFINE_WITH_MUTEXES
913 g_atomic_mutex = g_mutex_new ();
914 #endif /* DEFINE_WITH_MUTEXES */
/* When no barrier is needed, the public get/set names are macros; the
 * parenthesized definitions below suppress macro expansion so real,
 * addressable functions with the same behavior also exist. */
917 #ifndef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
919 (g_atomic_int_get) (volatile gint *atomic)
921 return g_atomic_int_get (atomic);
925 (g_atomic_int_set) (volatile gint *atomic,
928 g_atomic_int_set (atomic, newval);
932 (g_atomic_pointer_get) (volatile gpointer *atomic)
934 return g_atomic_pointer_get (atomic);
938 (g_atomic_pointer_set) (volatile gpointer *atomic,
941 g_atomic_pointer_set (atomic, newval);
943 #endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
945 #define __G_ATOMIC_C__
946 #include "galiasdef.c"