/* GLIB - Library of useful routines for C programming
 * Copyright (C) 1995-1997  Peter Mattis, Spencer Kimball and Josh MacDonald
 *
 * g_atomic_*: atomic operations.
 * Copyright (C) 2003 Sebastian Wilhelmi
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include "config.h"

#include "glib.h"
#include "gthreadinit.h"
#include "galias.h"

#if defined (__GNUC__)
# if defined (G_ATOMIC_I486)
/* Adapted from CVS version 1.10 of glibc's sysdeps/i386/i486/bits/atomic.h
 */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;

  __asm__ __volatile__ ("lock; xaddl %0,%1"
                        : "=r" (result), "=m" (*atomic)
                        : "0" (val), "m" (*atomic));
  return result;
}
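/* Editorial note (not part of the original source): "lock; xaddl"
 * atomically adds val to *atomic and leaves the previous value in the
 * register operand, so the function returns the value *atomic held
 * before the addition.  A typical, purely hypothetical caller:
 *
 *   if (g_atomic_int_exchange_and_add (&ref_count, -1) == 1)
 *     free_object (object);   // we just dropped the last reference
 */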

void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  __asm__ __volatile__ ("lock; addl %1,%0"
                        : "=m" (*atomic)
                        : "ir" (val), "m" (*atomic));
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gint result;

  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                        : "=a" (result), "=m" (*atomic)
                        : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}
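/* Editorial note (not part of the original source): the compare-and-exchange
 * primitive above is typically driven from a retry loop.  A hypothetical
 * lock-free update of a shared counter might look like:
 *
 *   gint old;
 *   do
 *     old = g_atomic_int_get (&counter);
 *   while (!g_atomic_int_compare_and_exchange (&counter, old, old * 2));
 */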

/* The same code as above, as on i386 gpointer is 32 bit as well.
 * Duplicating the code here seems more natural than casting the
 * arguments and calling the former function */

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;

  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                        : "=a" (result), "=m" (*atomic)
                        : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

# elif defined (G_ATOMIC_SPARCV9)
/* Adapted from CVS version 1.3 of glibc's sysdeps/sparc/sparc64/bits/atomic.h
 */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
  ({                                                                    \
     gint __result;                                                     \
     __asm__ __volatile__ ("cas [%4], %2, %0"                           \
                           : "=r" (__result), "=m" (*(atomic))          \
                           : "r" (oldval), "m" (*(atomic)), "r" (atomic),\
                           "0" (newval));                               \
     __result == oldval;                                                \
  })
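/* Editorial note (not part of the original source): ATOMIC_INT_CMP_XCHG is
 * only a building block; the generic g_atomic_int_compare_and_exchange(),
 * g_atomic_int_exchange_and_add() and g_atomic_int_add() built on top of it
 * are defined once, near the end of this file, inside
 * #ifdef ATOMIC_INT_CMP_XCHG. */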

#  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;
  __asm__ __volatile__ ("cas [%4], %2, %0"
                        : "=r" (result), "=m" (*atomic)
                        : "r" (oldval), "m" (*atomic), "r" (atomic),
                        "0" (newval));
  return result == oldval;
}
#  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;
  gpointer *a = atomic;
  __asm__ __volatile__ ("casx [%4], %2, %0"
                        : "=r" (result), "=m" (*a)
                        : "r" (oldval), "m" (*a), "r" (a),
                        "0" (newval));
  return result == oldval;
}
#  else /* What's that */
#    error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */
#  define G_ATOMIC_MEMORY_BARRIER                                       \
  __asm__ __volatile__ ("membar #LoadLoad | #LoadStore"                 \
                        " | #StoreLoad | #StoreStore" : : : "memory")

# elif defined (G_ATOMIC_ALPHA)
/* Adapted from CVS version 1.3 of glibc's sysdeps/alpha/bits/atomic.h
 */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
  ({                                                                    \
     gint __result;                                                     \
     gint __prev;                                                       \
     __asm__ __volatile__ (                                             \
        "       mb\n"                                                   \
        "1:     ldl_l   %0,%2\n"                                        \
        "       cmpeq   %0,%3,%1\n"                                     \
        "       beq     %1,2f\n"                                        \
        "       mov     %4,%1\n"                                        \
        "       stl_c   %1,%2\n"                                        \
        "       beq     %1,1b\n"                                        \
        "       mb\n"                                                   \
        "2:"                                                            \
        : "=&r" (__prev),                                               \
          "=&r" (__result)                                              \
        : "m" (*(atomic)),                                              \
          "Ir" (oldval),                                                \
          "Ir" (newval)                                                 \
        : "memory");                                                    \
     __result != 0;                                                     \
  })
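/* Editorial note (not part of the original source): this is the usual Alpha
 * load-locked/store-conditional pattern: ldl_l loads *atomic and sets a
 * reservation, stl_c succeeds only if nothing else wrote the location in
 * between, and "beq %1,1b" retries when the conditional store fails.  The
 * mb instructions provide the memory barriers around the exchange. */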
#  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gint result;
  gpointer prev;
  __asm__ __volatile__ (
        "       mb\n"
        "1:     ldl_l   %0,%2\n"
        "       cmpeq   %0,%3,%1\n"
        "       beq     %1,2f\n"
        "       mov     %4,%1\n"
        "       stl_c   %1,%2\n"
        "       beq     %1,1b\n"
        "       mb\n"
        "2:"
        : "=&r" (prev),
          "=&r" (result)
        : "m" (*atomic),
          "Ir" (oldval),
          "Ir" (newval)
        : "memory");
  return result != 0;
}
#  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gint result;
  gpointer prev;
  __asm__ __volatile__ (
        "       mb\n"
        "1:     ldq_l   %0,%2\n"
        "       cmpeq   %0,%3,%1\n"
        "       beq     %1,2f\n"
        "       mov     %4,%1\n"
        "       stq_c   %1,%2\n"
        "       beq     %1,1b\n"
        "       mb\n"
        "2:"
        : "=&r" (prev),
          "=&r" (result)
        : "m" (*atomic),
          "Ir" (oldval),
          "Ir" (newval)
        : "memory");
  return result != 0;
}
#  else /* What's that */
#   error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */
#  define G_ATOMIC_MEMORY_BARRIER  __asm__ ("mb" : : : "memory")
# elif defined (G_ATOMIC_X86_64)
/* Adapted from CVS version 1.9 of glibc's sysdeps/x86_64/bits/atomic.h
 */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;

  __asm__ __volatile__ ("lock; xaddl %0,%1"
                        : "=r" (result), "=m" (*atomic)
                        : "0" (val), "m" (*atomic));
  return result;
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  __asm__ __volatile__ ("lock; addl %1,%0"
                        : "=m" (*atomic)
                        : "ir" (val), "m" (*atomic));
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gint result;

  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                        : "=a" (result), "=m" (*atomic)
                        : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;

  __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
                        : "=a" (result), "=m" (*atomic)
                        : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

# elif defined (G_ATOMIC_POWERPC)
/* Adapted from CVS version 1.16 of glibc's sysdeps/powerpc/bits/atomic.h
 * and CVS version 1.4 of glibc's sysdeps/powerpc/powerpc32/bits/atomic.h
 * and CVS version 1.7 of glibc's sysdeps/powerpc/powerpc64/bits/atomic.h
 */
#   ifdef __OPTIMIZE__
/* Non-optimizing compile bails on the following two asm statements
 * for reasons unknown to the author */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result, temp;
  __asm__ __volatile__ ("1:       lwarx   %0,0,%3\n"
                        "         add     %1,%0,%4\n"
                        "         stwcx.  %1,0,%3\n"
                        "         bne-    1b"
                        : "=&b" (result), "=&r" (temp), "=m" (*atomic)
                        : "b" (atomic), "r" (val), "m" (*atomic)
                        : "cr0", "memory");
  return result;
}

/* The same as above, repeated here to save a function call */
void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  gint result, temp;
  __asm__ __volatile__ ("1:       lwarx   %0,0,%3\n"
                        "         add     %1,%0,%4\n"
                        "         stwcx.  %1,0,%3\n"
                        "         bne-    1b"
                        : "=&b" (result), "=&r" (temp), "=m" (*atomic)
                        : "b" (atomic), "r" (val), "m" (*atomic)
                        : "cr0", "memory");
}
#   else /* !__OPTIMIZE__ */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;
  do
    result = *atomic;
  while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));

  return result;
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  gint result;
  do
    result = *atomic;
  while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
}
#   endif /* !__OPTIMIZE__ */

#   if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gint result;
  __asm__ __volatile__ ("sync\n"
                        "1: lwarx   %0,0,%1\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     2f\n"
                        "   stwcx.  %3,0,%1\n"
                        "   bne-    1b\n"
                        "2: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
  return result == 0;
}
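/* Editorial note (not part of the original source): lwarx/stwcx. form
 * PowerPC's load-reserve/store-conditional pair.  The subtraction leaves 0
 * in the result register only when *atomic equalled oldval, and the store
 * is retried if the reservation was lost; the leading "sync" and trailing
 * "isync" provide the memory ordering around the operation. */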

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;
  __asm__ __volatile__ ("sync\n"
                        "1: lwarx   %0,0,%1\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     2f\n"
                        "   stwcx.  %3,0,%1\n"
                        "   bne-    1b\n"
                        "2: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
  return result == 0;
}
#   elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gpointer result;
  __asm__ __volatile__ ("sync\n"
                        "1: lwarx   %0,0,%1\n"
                        "   extsw   %0,%0\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     2f\n"
                        "   stwcx.  %3,0,%1\n"
                        "   bne-    1b\n"
                        "2: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
  return result == 0;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;
  __asm__ __volatile__ ("sync\n"
                        "1: ldarx   %0,0,%1\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     2f\n"
                        "   stdcx.  %3,0,%1\n"
                        "   bne-    1b\n"
                        "2: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
  return result == 0;
}
#  else /* What's that */
#   error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */

#  define G_ATOMIC_MEMORY_BARRIER __asm__ ("sync" : : : "memory")

# elif defined (G_ATOMIC_IA64)
/* Adapted from CVS version 1.8 of glibc's sysdeps/ia64/bits/atomic.h
 */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  return __sync_fetch_and_add (atomic, val);
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint val)
{
  __sync_fetch_and_add (atomic, val);
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  return __sync_bool_compare_and_swap (atomic, oldval, newval);
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  return __sync_bool_compare_and_swap ((long *)atomic,
                                       (long)oldval, (long)newval);
}
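/* Editorial note (not part of the original source): on IA64 the compiler's
 * __sync builtins are used directly; __sync_bool_compare_and_swap() returns
 * TRUE exactly when the swap happened, which matches the contract of the
 * g_atomic_*_compare_and_exchange() functions. */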

#  define G_ATOMIC_MEMORY_BARRIER __sync_synchronize ()
# elif defined (G_ATOMIC_S390)
/* Adapted from glibc's sysdeps/s390/bits/atomic.h
 */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
  ({                                                                    \
     gint __result = oldval;                                            \
     __asm__ __volatile__ ("cs %0, %2, %1"                              \
                           : "+d" (__result), "=Q" (*(atomic))          \
                           : "d" (newval), "m" (*(atomic)) : "cc" );    \
     __result == oldval;                                                \
  })
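/* Editorial note (not part of the original source): the s390 CS instruction
 * compares __result with *atomic; if they are equal it stores newval into
 * *atomic, otherwise it loads the current value of *atomic into __result.
 * The "+d" constraint reflects that __result is both read and written, so
 * comparing it with oldval afterwards tells whether the swap happened. */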

#  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result = oldval;
  __asm__ __volatile__ ("cs %0, %2, %1"
                        : "+d" (result), "=Q" (*(atomic))
                        : "d" (newval), "m" (*(atomic)) : "cc" );
  return result == oldval;
}
#  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result = oldval;
  gpointer *a = atomic;
  __asm__ __volatile__ ("csg %0, %2, %1"
                        : "+d" (result), "=Q" (*a)
                        : "d" ((long)(newval)), "m" (*a) : "cc" );
  return result == oldval;
}
#  else /* What's that */
#    error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */
# else /* !G_ATOMIC_S390 */
#  define DEFINE_WITH_MUTEXES
# endif /* G_ATOMIC_S390 */
#else /* !__GNUC__ */
# ifdef G_PLATFORM_WIN32
#  define DEFINE_WITH_WIN32_INTERLOCKED
# else
#  define DEFINE_WITH_MUTEXES
# endif
#endif /* __GNUC__ */

#ifdef DEFINE_WITH_WIN32_INTERLOCKED
# include <windows.h>
/* The following define indicates that InterlockedCompareExchangePointer
 * is declared in winbase.h (included by windows.h); comment it out if
 * that is not the case. It is defined iff WINVER > 0x0400, which is
 * usually correct but can be wrong if WINVER is set before windows.h
 * is included.
 */
# if WINVER > 0x0400
#  define HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
# endif

gint32
g_atomic_int_exchange_and_add (volatile gint32 *atomic,
                               gint32           val)
{
  return InterlockedExchangeAdd (atomic, val);
}

void
g_atomic_int_add (volatile gint32 *atomic,
                  gint32           val)
{
  InterlockedExchangeAdd (atomic, val);
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint32 *atomic,
                                   gint32           oldval,
                                   gint32           newval)
{
#ifndef HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
  return (guint32) InterlockedCompareExchange ((PVOID*)atomic,
                                               (PVOID)newval,
                                               (PVOID)oldval) == oldval;
#else
  return InterlockedCompareExchange (atomic,
                                     newval,
                                     oldval) == oldval;
#endif
}
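/* Editorial note (not part of the original source): the two branches above
 * cope with the changed prototype of InterlockedCompareExchange(): older
 * SDK headers (which also lack InterlockedCompareExchangePointer, i.e.
 * WINVER <= 0x0400) declare it with PVOID arguments, hence the casts in
 * the first branch; newer headers take and return LONG values directly. */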

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
# ifdef HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
  return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
# else
#  if GLIB_SIZEOF_VOID_P != 4 /* no 32-bit system */
#   error "InterlockedCompareExchangePointer needed"
#  else
   return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
#  endif
# endif
}
#endif /* DEFINE_WITH_WIN32_INTERLOCKED */

#ifdef DEFINE_WITH_MUTEXES
/* We have to use the slow, but safe locking method */
static GMutex *g_atomic_mutex;

gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  *atomic += val;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}


void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  g_mutex_lock (g_atomic_mutex);
  *atomic += val;
  g_mutex_unlock (g_atomic_mutex);
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gboolean result;

  g_mutex_lock (g_atomic_mutex);
  if (*atomic == oldval)
    {
      result = TRUE;
      *atomic = newval;
    }
  else
    result = FALSE;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gboolean result;

  g_mutex_lock (g_atomic_mutex);
  if (*atomic == oldval)
    {
      result = TRUE;
      *atomic = newval;
    }
  else
    result = FALSE;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}
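/* Editorial note (not part of the original source): in this fallback every
 * atomic operation serializes on the single global g_atomic_mutex, which is
 * created in _g_atomic_thread_init() below; correct but slow, as the
 * comment above says. */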

#ifdef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
gint
g_atomic_int_get (volatile gint *atomic)
{
  gint result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

void
g_atomic_int_set (volatile gint *atomic,
                  gint           newval)
{
  g_mutex_lock (g_atomic_mutex);
  *atomic = newval;
  g_mutex_unlock (g_atomic_mutex);
}

gpointer
g_atomic_pointer_get (volatile gpointer *atomic)
{
  gpointer result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

void
g_atomic_pointer_set (volatile gpointer *atomic,
                      gpointer           newval)
{
  g_mutex_lock (g_atomic_mutex);
  *atomic = newval;
  g_mutex_unlock (g_atomic_mutex);
}
#endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
#elif defined (G_ATOMIC_OP_MEMORY_BARRIER_NEEDED)
gint
g_atomic_int_get (volatile gint *atomic)
{
  G_ATOMIC_MEMORY_BARRIER;
  return *atomic;
}

void
g_atomic_int_set (volatile gint *atomic,
                  gint           newval)
{
  *atomic = newval;
  G_ATOMIC_MEMORY_BARRIER;
}

gpointer
g_atomic_pointer_get (volatile gpointer *atomic)
{
  G_ATOMIC_MEMORY_BARRIER;
  return *atomic;
}

void
g_atomic_pointer_set (volatile gpointer *atomic,
                      gpointer           newval)
{
  *atomic = newval;
  G_ATOMIC_MEMORY_BARRIER;
}
#endif /* DEFINE_WITH_MUTEXES || G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */

#ifdef ATOMIC_INT_CMP_XCHG
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  return ATOMIC_INT_CMP_XCHG (atomic, oldval, newval);
}

gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;
  do
    result = *atomic;
  while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));

  return result;
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  gint result;
  do
    result = *atomic;
  while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
}
#endif /* ATOMIC_INT_CMP_XCHG */
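/* Editorial note (not part of the original source): these generic versions
 * turn the per-architecture ATOMIC_INT_CMP_XCHG macro into the full integer
 * API; the do/while loops re-read *atomic and retry the compare-and-exchange
 * until no other thread has modified the value in between. */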

void
_g_atomic_thread_init (void)
{
#ifdef DEFINE_WITH_MUTEXES
  g_atomic_mutex = g_mutex_new ();
#endif /* DEFINE_WITH_MUTEXES */
}
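/* Editorial note (not part of the original source): when no memory barrier
 * is needed, gatomic.h implements g_atomic_int_get(), g_atomic_int_set(),
 * g_atomic_pointer_get() and g_atomic_pointer_set() as macros.  The
 * parenthesized names below suppress that macro expansion, so real,
 * linkable functions with the same behaviour are emitted as well. */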

#ifndef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
gint
(g_atomic_int_get) (volatile gint *atomic)
{
  return g_atomic_int_get (atomic);
}

void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  g_atomic_int_set (atomic, newval);
}

gpointer
(g_atomic_pointer_get) (volatile gpointer *atomic)
{
  return g_atomic_pointer_get (atomic);
}

void
(g_atomic_pointer_set) (volatile gpointer *atomic,
                        gpointer           newval)
{
  g_atomic_pointer_set (atomic, newval);
}
#endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */

#define __G_ATOMIC_C__
#include "galiasdef.c"