Add arm atomic operations
[platform/upstream/glib.git] / glib / gatomic.c
/* GLIB - Library of useful routines for C programming
 * Copyright (C) 1995-1997  Peter Mattis, Spencer Kimball and Josh MacDonald
 *
 * g_atomic_*: atomic operations.
 * Copyright (C) 2003 Sebastian Wilhelmi
 * Copyright (C) 2007 Nokia Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include "config.h"

/* G_ATOMIC_ARM is defined by configure (via config.h), so it can only
 * be tested after config.h has been included. */
#if defined (G_ATOMIC_ARM)
#include <sched.h>
#endif

#include "glib.h"
#include "gthreadprivate.h"
#include "galias.h"

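/* Illustrative usage sketch (not part of the implementation): the entry
 * points defined below are typically used for lock-free reference
 * counting along these lines, where `ref_count', my_object_ref() and
 * my_object_unref() are hypothetical caller-side names rather than
 * anything defined in this file.  my_object_unref() returns TRUE when
 * the last reference was dropped, since g_atomic_int_exchange_and_add()
 * returns the value the counter held before the addition:
 *
 *   static volatile gint ref_count = 1;
 *
 *   void
 *   my_object_ref (void)
 *   {
 *     g_atomic_int_add (&ref_count, 1);
 *   }
 *
 *   gboolean
 *   my_object_unref (void)
 *   {
 *     return g_atomic_int_exchange_and_add (&ref_count, -1) == 1;
 *   }
 */
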
#if defined (__GNUC__)
# if defined (G_ATOMIC_I486)
/* Adapted from CVS version 1.10 of glibc's sysdeps/i386/i486/bits/atomic.h
 */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;

  __asm__ __volatile__ ("lock; xaddl %0,%1"
                        : "=r" (result), "=m" (*atomic)
                        : "0" (val), "m" (*atomic));
  return result;
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  __asm__ __volatile__ ("lock; addl %1,%0"
                        : "=m" (*atomic)
                        : "ir" (val), "m" (*atomic));
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gint result;

  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                        : "=a" (result), "=m" (*atomic)
                        : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

/* The same code as above, as on i386 gpointer is 32 bit as well.
 * Duplicating the code here seems more natural than casting the
 * arguments and calling the former function */

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;

  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                        : "=a" (result), "=m" (*atomic)
                        : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

# elif defined (G_ATOMIC_SPARCV9)
/* Adapted from CVS version 1.3 of glibc's sysdeps/sparc/sparc64/bits/atomic.h
 */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
  ({                                                                    \
     gint __result;                                                     \
     __asm__ __volatile__ ("cas [%4], %2, %0"                           \
                           : "=r" (__result), "=m" (*(atomic))          \
                           : "r" (oldval), "m" (*(atomic)), "r" (atomic),\
                           "0" (newval));                               \
     __result == oldval;                                                \
  })
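/* Note: architectures that only provide ATOMIC_INT_CMP_XCHG (this one,
 * G_ATOMIC_ALPHA and G_ATOMIC_S390) get their g_atomic_int_* operations
 * from the generic compare-and-swap retry loops in the
 * "#ifdef ATOMIC_INT_CMP_XCHG" section at the end of this file. */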

#  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;
  __asm__ __volatile__ ("cas [%4], %2, %0"
                        : "=r" (result), "=m" (*atomic)
                        : "r" (oldval), "m" (*atomic), "r" (atomic),
                        "0" (newval));
  return result == oldval;
}
#  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;
  gpointer *a = atomic;
  __asm__ __volatile__ ("casx [%4], %2, %0"
                        : "=r" (result), "=m" (*a)
                        : "r" (oldval), "m" (*a), "r" (a),
                        "0" (newval));
  return result == oldval;
}
#  else /* What's that */
#    error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */
#  define G_ATOMIC_MEMORY_BARRIER                                       \
  __asm__ __volatile__ ("membar #LoadLoad | #LoadStore"                 \
                        " | #StoreLoad | #StoreStore" : : : "memory")

# elif defined (G_ATOMIC_ALPHA)
/* Adapted from CVS version 1.3 of glibc's sysdeps/alpha/bits/atomic.h
 */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
  ({                                                                    \
     gint __result;                                                     \
     gint __prev;                                                       \
     __asm__ __volatile__ (                                             \
        "       mb\n"                                                   \
        "1:     ldl_l   %0,%2\n"                                        \
        "       cmpeq   %0,%3,%1\n"                                     \
        "       beq     %1,2f\n"                                        \
        "       mov     %4,%1\n"                                        \
        "       stl_c   %1,%2\n"                                        \
        "       beq     %1,1b\n"                                        \
        "       mb\n"                                                   \
        "2:"                                                            \
        : "=&r" (__prev),                                               \
          "=&r" (__result)                                              \
        : "m" (*(atomic)),                                              \
          "Ir" (oldval),                                                \
          "Ir" (newval)                                                 \
        : "memory");                                                    \
     __result != 0;                                                     \
  })
#  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gint result;
  gpointer prev;
  __asm__ __volatile__ (
        "       mb\n"
        "1:     ldl_l   %0,%2\n"
        "       cmpeq   %0,%3,%1\n"
        "       beq     %1,2f\n"
        "       mov     %4,%1\n"
        "       stl_c   %1,%2\n"
        "       beq     %1,1b\n"
        "       mb\n"
        "2:"
        : "=&r" (prev),
          "=&r" (result)
        : "m" (*atomic),
          "Ir" (oldval),
          "Ir" (newval)
        : "memory");
  return result != 0;
}
#  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gint result;
  gpointer prev;
  __asm__ __volatile__ (
        "       mb\n"
        "1:     ldq_l   %0,%2\n"
        "       cmpeq   %0,%3,%1\n"
        "       beq     %1,2f\n"
        "       mov     %4,%1\n"
        "       stq_c   %1,%2\n"
        "       beq     %1,1b\n"
        "       mb\n"
        "2:"
        : "=&r" (prev),
          "=&r" (result)
        : "m" (*atomic),
          "Ir" (oldval),
          "Ir" (newval)
        : "memory");
  return result != 0;
}
#  else /* What's that */
#   error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */
#  define G_ATOMIC_MEMORY_BARRIER  __asm__ ("mb" : : : "memory")
# elif defined (G_ATOMIC_X86_64)
/* Adapted from CVS version 1.9 of glibc's sysdeps/x86_64/bits/atomic.h
 */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;

  __asm__ __volatile__ ("lock; xaddl %0,%1"
                        : "=r" (result), "=m" (*atomic)
                        : "0" (val), "m" (*atomic));
  return result;
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  __asm__ __volatile__ ("lock; addl %1,%0"
                        : "=m" (*atomic)
                        : "ir" (val), "m" (*atomic));
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gint result;

  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                        : "=a" (result), "=m" (*atomic)
                        : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;

  __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
                        : "=a" (result), "=m" (*atomic)
                        : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

# elif defined (G_ATOMIC_POWERPC)
/* Adapted from CVS version 1.16 of glibc's sysdeps/powerpc/bits/atomic.h
 * and CVS version 1.4 of glibc's sysdeps/powerpc/powerpc32/bits/atomic.h
 * and CVS version 1.7 of glibc's sysdeps/powerpc/powerpc64/bits/atomic.h
 */
#   ifdef __OPTIMIZE__
/* Non-optimizing compile bails on the following two asm statements
 * for reasons unknown to the author */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result, temp;
  __asm__ __volatile__ (".Lieaa%=:       lwarx   %0,0,%3\n"
                        "         add     %1,%0,%4\n"
                        "         stwcx.  %1,0,%3\n"
                        "         bne-    .Lieaa%="
                        : "=&b" (result), "=&r" (temp), "=m" (*atomic)
                        : "b" (atomic), "r" (val), "m" (*atomic)
                        : "cr0", "memory");
  return result;
}

/* The same as above, repeated here to save a function call */
void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  gint result, temp;
  __asm__ __volatile__ (".Lia%=:       lwarx   %0,0,%3\n"
                        "         add     %1,%0,%4\n"
                        "         stwcx.  %1,0,%3\n"
                        "         bne-    .Lia%="
                        : "=&b" (result), "=&r" (temp), "=m" (*atomic)
                        : "b" (atomic), "r" (val), "m" (*atomic)
                        : "cr0", "memory");
}
#   else /* !__OPTIMIZE__ */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;
  do
    result = *atomic;
  while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));

  return result;
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  gint result;
  do
    result = *atomic;
  while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
}
#   endif /* !__OPTIMIZE__ */

#   if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gint result;
  __asm__ __volatile__ ("sync\n"
                        ".L1icae%=: lwarx   %0,0,%1\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     .L2icae%=\n"
                        "   stwcx.  %3,0,%1\n"
                        "   bne-    .L1icae%=\n"
                        ".L2icae%=: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
  return result == 0;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;
  __asm__ __volatile__ ("sync\n"
                        ".L1pcae%=: lwarx   %0,0,%1\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     .L2pcae%=\n"
                        "   stwcx.  %3,0,%1\n"
                        "   bne-    .L1pcae%=\n"
                        ".L2pcae%=: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
  return result == 0;
}
#   elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gpointer result;
  __asm__ __volatile__ ("sync\n"
                        ".L1icae%=: lwarx   %0,0,%1\n"
                        "   extsw   %0,%0\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     .L2icae%=\n"
                        "   stwcx.  %3,0,%1\n"
                        "   bne-    .L1icae%=\n"
                        ".L2icae%=: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
  return result == 0;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;
  __asm__ __volatile__ ("sync\n"
                        ".L1pcae%=: ldarx   %0,0,%1\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     .L2pcae%=\n"
                        "   stdcx.  %3,0,%1\n"
                        "   bne-    .L1pcae%=\n"
                        ".L2pcae%=: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
  return result == 0;
}
#  else /* What's that */
#   error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */

#  define G_ATOMIC_MEMORY_BARRIER __asm__ ("sync" : : : "memory")

# elif defined (G_ATOMIC_IA64)
/* Adapted from CVS version 1.8 of glibc's sysdeps/ia64/bits/atomic.h
 */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  return __sync_fetch_and_add (atomic, val);
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  __sync_fetch_and_add (atomic, val);
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  return __sync_bool_compare_and_swap (atomic, oldval, newval);
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  return __sync_bool_compare_and_swap ((long *)atomic,
                                       (long)oldval, (long)newval);
}

#  define G_ATOMIC_MEMORY_BARRIER __sync_synchronize ()
# elif defined (G_ATOMIC_S390)
/* Adapted from glibc's sysdeps/s390/bits/atomic.h
 */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
  ({                                                                    \
     gint __result = oldval;                                            \
     __asm__ __volatile__ ("cs %0, %2, %1"                              \
                           : "+d" (__result), "=Q" (*(atomic))          \
                           : "d" (newval), "m" (*(atomic)) : "cc" );    \
     __result == oldval;                                                \
  })

#  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result = oldval;
  __asm__ __volatile__ ("cs %0, %2, %1"
                        : "+d" (result), "=Q" (*(atomic))
                        : "d" (newval), "m" (*(atomic)) : "cc" );
  return result == oldval;
}
#  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result = oldval;
  gpointer *a = atomic;
  __asm__ __volatile__ ("csg %0, %2, %1"
                        : "+d" (result), "=Q" (*a)
                        : "d" ((long)(newval)), "m" (*a) : "cc" );
  return result == oldval;
}
#  else /* What's that */
#    error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */
# elif defined (G_ATOMIC_ARM)
/* The ARM "swp" instruction atomically swaps a register with a word in
 * memory, which is enough to build a simple global spin lock.  All
 * g_atomic_* operations below are then performed under that lock. */
static volatile int atomic_spin = 0;

static int
atomic_spin_trylock (void)
{
  int result;

  /* Atomically store 1 into atomic_spin and fetch its previous value;
   * a previous value of 0 means we acquired the lock. */
  __asm__ __volatile__ (
    "swp %0, %1, [%2]\n"
    : "=&r,&r" (result)
    : "r,0" (1), "r,r" (&atomic_spin)
    : "memory");
  if (result == 0)
    return 0;
  else
    return -1;
}

static void
atomic_spin_lock (void)
{
  /* Yield the processor between attempts instead of busy-waiting. */
  while (atomic_spin_trylock ())
    sched_yield ();
}

static void
atomic_spin_unlock (void)
{
  atomic_spin = 0;
}

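/* With the spin lock in place, every operation below follows the same
 * pattern: take the lock, perform the plain C read-modify-write, drop
 * the lock.  This is atomic only with respect to other g_atomic_*
 * calls, not with respect to code that modifies the same variables
 * directly. */
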
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;

  atomic_spin_lock ();
  result = *atomic;
  *atomic += val;
  atomic_spin_unlock ();

  return result;
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  atomic_spin_lock ();
  *atomic += val;
  atomic_spin_unlock ();
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gboolean result;

  atomic_spin_lock ();
  if (*atomic == oldval)
    {
      result = TRUE;
      *atomic = newval;
    }
  else
    result = FALSE;
  atomic_spin_unlock ();

  return result;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gboolean result;

  atomic_spin_lock ();
  if (*atomic == oldval)
    {
      result = TRUE;
      *atomic = newval;
    }
  else
    result = FALSE;
  atomic_spin_unlock ();

  return result;
}
# else /* !G_ATOMIC_* */
#  define DEFINE_WITH_MUTEXES
# endif /* G_ATOMIC_* */
#else /* !__GNUC__ */
# ifdef G_PLATFORM_WIN32
#  define DEFINE_WITH_WIN32_INTERLOCKED
# else
#  define DEFINE_WITH_MUTEXES
# endif
#endif /* __GNUC__ */

#ifdef DEFINE_WITH_WIN32_INTERLOCKED
# include <windows.h>
/* The following define indicates that InterlockedCompareExchangePointer
 * is declared in winbase.h (included by windows.h); comment it out if
 * that is not the case.  It is defined iff WINVER > 0x0400, which is
 * usually correct but can be wrong if WINVER is set before windows.h
 * is included.
 */
# if WINVER > 0x0400
#  define HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
# endif

gint32
g_atomic_int_exchange_and_add (volatile gint32 *atomic,
                               gint32           val)
{
  return InterlockedExchangeAdd (atomic, val);
}

void
g_atomic_int_add (volatile gint32 *atomic,
                  gint32           val)
{
  InterlockedExchangeAdd (atomic, val);
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint32 *atomic,
                                   gint32           oldval,
                                   gint32           newval)
{
#ifndef HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
  return (guint32) InterlockedCompareExchange ((PVOID*)atomic,
                                               (PVOID)newval,
                                               (PVOID)oldval) == oldval;
#else
  return InterlockedCompareExchange (atomic,
                                     newval,
                                     oldval) == oldval;
#endif
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
# ifdef HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
  return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
# else
#  if GLIB_SIZEOF_VOID_P != 4 /* not a 32-bit system */
#   error "InterlockedCompareExchangePointer needed"
#  else
  return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
#  endif
# endif
}
#endif /* DEFINE_WITH_WIN32_INTERLOCKED */

#ifdef DEFINE_WITH_MUTEXES
/* We have to use the slow, but safe locking method */
static GMutex *g_atomic_mutex;

gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  *atomic += val;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  g_mutex_lock (g_atomic_mutex);
  *atomic += val;
  g_mutex_unlock (g_atomic_mutex);
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gboolean result;

  g_mutex_lock (g_atomic_mutex);
  if (*atomic == oldval)
    {
      result = TRUE;
      *atomic = newval;
    }
  else
    result = FALSE;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gboolean result;

  g_mutex_lock (g_atomic_mutex);
  if (*atomic == oldval)
    {
      result = TRUE;
      *atomic = newval;
    }
  else
    result = FALSE;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

#ifdef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
gint
g_atomic_int_get (volatile gint *atomic)
{
  gint result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

void
g_atomic_int_set (volatile gint *atomic,
                  gint           newval)
{
  g_mutex_lock (g_atomic_mutex);
  *atomic = newval;
  g_mutex_unlock (g_atomic_mutex);
}

gpointer
g_atomic_pointer_get (volatile gpointer *atomic)
{
  gpointer result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

void
g_atomic_pointer_set (volatile gpointer *atomic,
                      gpointer           newval)
{
  g_mutex_lock (g_atomic_mutex);
  *atomic = newval;
  g_mutex_unlock (g_atomic_mutex);
}
#endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
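/* The branch below handles architectures that provide their own atomic
 * primitives above but still define G_ATOMIC_OP_MEMORY_BARRIER_NEEDED:
 * there, plain loads and stores are not ordered strongly enough on
 * their own, so the g_atomic_*_get() functions issue the
 * per-architecture G_ATOMIC_MEMORY_BARRIER before the read and the
 * g_atomic_*_set() functions issue it after the write. */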
#elif defined (G_ATOMIC_OP_MEMORY_BARRIER_NEEDED)
gint
g_atomic_int_get (volatile gint *atomic)
{
  G_ATOMIC_MEMORY_BARRIER;
  return *atomic;
}

void
g_atomic_int_set (volatile gint *atomic,
                  gint           newval)
{
  *atomic = newval;
  G_ATOMIC_MEMORY_BARRIER;
}

gpointer
g_atomic_pointer_get (volatile gpointer *atomic)
{
  G_ATOMIC_MEMORY_BARRIER;
  return *atomic;
}

void
g_atomic_pointer_set (volatile gpointer *atomic,
                      gpointer           newval)
{
  *atomic = newval;
  G_ATOMIC_MEMORY_BARRIER;
}
#endif /* DEFINE_WITH_MUTEXES || G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */

#ifdef ATOMIC_INT_CMP_XCHG
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  return ATOMIC_INT_CMP_XCHG (atomic, oldval, newval);
}

gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;
  do
    result = *atomic;
  while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));

  return result;
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  gint result;
  do
    result = *atomic;
  while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
}
#endif /* ATOMIC_INT_CMP_XCHG */

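/* _g_atomic_thread_init() is the private initialization hook declared
 * in gthreadprivate.h and run as part of GLib's thread-system setup.
 * In the mutex-based fallback it must run before any of the functions
 * above are called, since they rely on g_atomic_mutex. */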
void
_g_atomic_thread_init (void)
{
#ifdef DEFINE_WITH_MUTEXES
  g_atomic_mutex = g_mutex_new ();
#endif /* DEFINE_WITH_MUTEXES */
}

#ifndef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
gint
(g_atomic_int_get) (volatile gint *atomic)
{
  return g_atomic_int_get (atomic);
}

void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  g_atomic_int_set (atomic, newval);
}

gpointer
(g_atomic_pointer_get) (volatile gpointer *atomic)
{
  return g_atomic_pointer_get (atomic);
}

void
(g_atomic_pointer_set) (volatile gpointer *atomic,
                        gpointer           newval)
{
  g_atomic_pointer_set (atomic, newval);
}
#endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */

#define __G_ATOMIC_C__
#include "galiasdef.c"