Add a missing volatile for the IA64 implementation.
[platform/upstream/glib.git] / glib / gatomic.c
/* GLIB - Library of useful routines for C programming
 * Copyright (C) 1995-1997  Peter Mattis, Spencer Kimball and Josh MacDonald
 *
 * g_atomic_*: atomic operations.
 * Copyright (C) 2003 Sebastian Wilhelmi
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include "config.h"

#include "glib.h"
#include "gthreadinit.h"
#include "galias.h"

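/* Implementation overview: with GCC, each supported architecture gets a
 * backend built from inline assembly or compiler intrinsics below; on
 * Win32 without GCC the Interlocked* API is used; everything else falls
 * back to a single global mutex (DEFINE_WITH_MUTEXES). */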
#if defined (__GNUC__)
# if defined (G_ATOMIC_I486)
/* Adapted from CVS version 1.10 of glibc's sysdeps/i386/i486/bits/atomic.h
 */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;

  __asm__ __volatile__ ("lock; xaddl %0,%1"
                        : "=r" (result), "=m" (*atomic)
                        : "0" (val), "m" (*atomic));
  return result;
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  __asm__ __volatile__ ("lock; addl %1,%0"
                        : "=m" (*atomic)
                        : "ir" (val), "m" (*atomic));
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gint result;

  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                        : "=a" (result), "=m" (*atomic)
                        : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

/* The same code as above, as on i386 gpointer is 32 bit as well.
 * Duplicating the code here seems more natural than casting the
 * arguments and calling the former function */

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;

  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                        : "=a" (result), "=m" (*atomic)
                        : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

# elif defined (G_ATOMIC_SPARCV9)
/* Adapted from CVS version 1.3 of glibc's sysdeps/sparc/sparc64/bits/atomic.h
 */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
  ({                                                                    \
     gint __result;                                                     \
     __asm__ __volatile__ ("cas [%4], %2, %0"                           \
                           : "=r" (__result), "=m" (*(atomic))          \
                           : "r" (oldval), "m" (*(atomic)), "r" (atomic),\
                           "0" (newval));                               \
     __result == oldval;                                                \
  })

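/* ATOMIC_INT_CMP_XCHG is consumed by the generic g_atomic_int_*
 * implementations at the bottom of this file; only the pointer
 * compare-and-exchange has to be written out per word size here. */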
#  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;
  __asm__ __volatile__ ("cas [%4], %2, %0"
                        : "=r" (result), "=m" (*atomic)
                        : "r" (oldval), "m" (*atomic), "r" (atomic),
                        "0" (newval));
  return result == oldval;
}
#  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;
  gpointer *a = atomic;
  __asm__ __volatile__ ("casx [%4], %2, %0"
                        : "=r" (result), "=m" (*a)
                        : "r" (oldval), "m" (*a), "r" (a),
                        "0" (newval));
  return result == oldval;
}
#  else /* What's that */
#    error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */
#  define G_ATOMIC_MEMORY_BARRIER                                       \
  __asm__ __volatile__ ("membar #LoadLoad | #LoadStore"                 \
                        " | #StoreLoad | #StoreStore" : : : "memory")

# elif defined (G_ATOMIC_ALPHA)
/* Adapted from CVS version 1.3 of glibc's sysdeps/alpha/bits/atomic.h
 */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
  ({                                                                    \
     gint __result;                                                     \
     gint __prev;                                                       \
     __asm__ __volatile__ (                                             \
        "       mb\n"                                                   \
        "1:     ldl_l   %0,%2\n"                                        \
        "       cmpeq   %0,%3,%1\n"                                     \
        "       beq     %1,2f\n"                                        \
        "       mov     %4,%1\n"                                        \
        "       stl_c   %1,%2\n"                                        \
        "       beq     %1,1b\n"                                        \
        "       mb\n"                                                   \
        "2:"                                                            \
        : "=&r" (__prev),                                               \
          "=&r" (__result)                                              \
        : "m" (*(atomic)),                                              \
          "Ir" (oldval),                                                \
          "Ir" (newval)                                                 \
        : "memory");                                                    \
     __result != 0;                                                     \
  })
#  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gint result;
  gpointer prev;
  __asm__ __volatile__ (
        "       mb\n"
        "1:     ldl_l   %0,%2\n"
        "       cmpeq   %0,%3,%1\n"
        "       beq     %1,2f\n"
        "       mov     %4,%1\n"
        "       stl_c   %1,%2\n"
        "       beq     %1,1b\n"
        "       mb\n"
        "2:"
        : "=&r" (prev),
          "=&r" (result)
        : "m" (*atomic),
          "Ir" (oldval),
          "Ir" (newval)
        : "memory");
  return result != 0;
}
#  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gint result;
  gpointer prev;
  __asm__ __volatile__ (
        "       mb\n"
        "1:     ldq_l   %0,%2\n"
        "       cmpeq   %0,%3,%1\n"
        "       beq     %1,2f\n"
        "       mov     %4,%1\n"
        "       stq_c   %1,%2\n"
        "       beq     %1,1b\n"
        "       mb\n"
        "2:"
        : "=&r" (prev),
          "=&r" (result)
        : "m" (*atomic),
          "Ir" (oldval),
          "Ir" (newval)
        : "memory");
  return result != 0;
}
#  else /* What's that */
#   error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */
#  define G_ATOMIC_MEMORY_BARRIER  __asm__ ("mb" : : : "memory")
# elif defined (G_ATOMIC_X86_64)
/* Adapted from CVS version 1.9 of glibc's sysdeps/x86_64/bits/atomic.h
 */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;

  __asm__ __volatile__ ("lock; xaddl %0,%1"
                        : "=r" (result), "=m" (*atomic)
                        : "0" (val), "m" (*atomic));
  return result;
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  __asm__ __volatile__ ("lock; addl %1,%0"
                        : "=m" (*atomic)
                        : "ir" (val), "m" (*atomic));
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gint result;

  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                        : "=a" (result), "=m" (*atomic)
                        : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;

  __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
                        : "=a" (result), "=m" (*atomic)
                        : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

# elif defined (G_ATOMIC_POWERPC)
/* Adapted from CVS version 1.16 of glibc's sysdeps/powerpc/bits/atomic.h
 * and CVS version 1.4 of glibc's sysdeps/powerpc/powerpc32/bits/atomic.h
 * and CVS version 1.7 of glibc's sysdeps/powerpc/powerpc64/bits/atomic.h
 */
#   ifdef __OPTIMIZE__
/* Non-optimizing compile bails on the following two asm statements
 * for reasons unknown to the author */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result, temp;
  __asm__ __volatile__ ("1:       lwarx   %0,0,%3\n"
                        "         add     %1,%0,%4\n"
                        "         stwcx.  %1,0,%3\n"
                        "         bne-    1b"
                        : "=&b" (result), "=&r" (temp), "=m" (*atomic)
                        : "b" (atomic), "r" (val), "m" (*atomic)
                        : "cr0", "memory");
  return result;
}

/* The same as above, to save a function call repeated here */
void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  gint result, temp;
  __asm__ __volatile__ ("1:       lwarx   %0,0,%3\n"
                        "         add     %1,%0,%4\n"
                        "         stwcx.  %1,0,%3\n"
                        "         bne-    1b"
                        : "=&b" (result), "=&r" (temp), "=m" (*atomic)
                        : "b" (atomic), "r" (val), "m" (*atomic)
                        : "cr0", "memory");
}
#   else /* !__OPTIMIZE__ */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;
  do
    result = *atomic;
  while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));

  return result;
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  gint result;
  do
    result = *atomic;
  while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
}
#   endif /* !__OPTIMIZE__ */

#   if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gint result;
  __asm__ __volatile__ ("sync\n"
                        "1: lwarx   %0,0,%1\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     2f\n"
                        "   stwcx.  %3,0,%1\n"
                        "   bne-    1b\n"
                        "2: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
  return result == 0;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;
  __asm__ __volatile__ ("sync\n"
                        "1: lwarx   %0,0,%1\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     2f\n"
                        "   stwcx.  %3,0,%1\n"
                        "   bne-    1b\n"
                        "2: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
  return result == 0;
}
#   elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gpointer result;
  __asm__ __volatile__ ("sync\n"
                        "1: lwarx   %0,0,%1\n"
                        "   extsw   %0,%0\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     2f\n"
                        "   stwcx.  %3,0,%1\n"
                        "   bne-    1b\n"
                        "2: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
  return result == 0;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;
  __asm__ __volatile__ ("sync\n"
                        "1: ldarx   %0,0,%1\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     2f\n"
                        "   stdcx.  %3,0,%1\n"
                        "   bne-    1b\n"
                        "2: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
  return result == 0;
}
#  else /* What's that */
#   error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */

#  define G_ATOMIC_MEMORY_BARRIER __asm__ ("sync" : : : "memory")

# elif defined (G_ATOMIC_IA64)
/* Adapted from CVS version 1.8 of glibc's sysdeps/ia64/bits/atomic.h
 */
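/* The ia64 backend relies on the __sync_* intrinsics provided by the
 * compiler, so no inline assembly is needed here. */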
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  return __sync_fetch_and_add_si (atomic, val);
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint val)
{
  __sync_fetch_and_add_si (atomic, val);
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  return __sync_bool_compare_and_swap_si (atomic, oldval, newval);
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  return __sync_bool_compare_and_swap_di ((long *)atomic,
                                          (long)oldval, (long)newval);
}

#  define G_ATOMIC_MEMORY_BARRIER __sync_synchronize ()
# elif defined (G_ATOMIC_S390)
/* Adapted from glibc's sysdeps/s390/bits/atomic.h
 */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
  ({                                                                    \
     gint __result = oldval;                                            \
     __asm__ __volatile__ ("cs %0, %2, %1"                              \
                           : "+d" (__result), "=Q" (*(atomic))          \
                           : "d" (newval), "m" (*(atomic)) : "cc" );    \
     __result == oldval;                                                \
  })

#  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result = oldval;
  __asm__ __volatile__ ("cs %0, %2, %1"
                        : "+d" (result), "=Q" (*(atomic))
                        : "d" (newval), "m" (*(atomic)) : "cc" );
  return result == oldval;
}
#  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result = oldval;
  gpointer *a = atomic;
  __asm__ __volatile__ ("csg %0, %2, %1"
                        : "+d" (result), "=Q" (*a)
                        : "d" ((long)(newval)), "m" (*a) : "cc" );
  return result == oldval;
}
#  else /* What's that */
#    error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */
# else /* !G_ATOMIC_IA64 */
#  define DEFINE_WITH_MUTEXES
# endif /* G_ATOMIC_IA64 */
#else /* !__GNUC__ */
# ifdef G_PLATFORM_WIN32
#  define DEFINE_WITH_WIN32_INTERLOCKED
# else
#  define DEFINE_WITH_MUTEXES
# endif
#endif /* __GNUC__ */

#ifdef DEFINE_WITH_WIN32_INTERLOCKED
# include <windows.h>
gint32
g_atomic_int_exchange_and_add (volatile gint32 *atomic,
                               gint32           val)
{
  return InterlockedExchangeAdd (atomic, val);
}

void
g_atomic_int_add (volatile gint32 *atomic,
                  gint32           val)
{
  InterlockedExchangeAdd (atomic, val);
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint32 *atomic,
                                   gint32           oldval,
                                   gint32           newval)
{
  return (guint32) InterlockedCompareExchange ((PVOID*)atomic,
                                               (PVOID)newval,
                                               (PVOID)oldval) == oldval;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
# if GLIB_SIZEOF_VOID_P != 4 /* no 32-bit system */
#  error "InterlockedCompareExchangePointer needed"
# else
   return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
# endif
}
#endif /* DEFINE_WITH_WIN32_INTERLOCKED */

#ifdef DEFINE_WITH_MUTEXES
/* We have to use the slow, but safe locking method */
static GMutex *g_atomic_mutex;

gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  *atomic += val;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}


void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  g_mutex_lock (g_atomic_mutex);
  *atomic += val;
  g_mutex_unlock (g_atomic_mutex);
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gboolean result;

  g_mutex_lock (g_atomic_mutex);
  if (*atomic == oldval)
    {
      result = TRUE;
      *atomic = newval;
    }
  else
    result = FALSE;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gboolean result;

  g_mutex_lock (g_atomic_mutex);
  if (*atomic == oldval)
    {
      result = TRUE;
      *atomic = newval;
    }
  else
    result = FALSE;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

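/* On architectures that need explicit memory barriers, even the plain
 * get operations must take the mutex, so that a reader is guaranteed to
 * see values published by a preceding atomic operation. */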
#ifdef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
gint
g_atomic_int_get (volatile gint *atomic)
{
  gint result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

gpointer
g_atomic_pointer_get (volatile gpointer *atomic)
{
  gpointer result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}
#endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
#elif defined (G_ATOMIC_OP_MEMORY_BARRIER_NEEDED)
gint
g_atomic_int_get (volatile gint *atomic)
{
  gint result = *atomic;

  G_ATOMIC_MEMORY_BARRIER;

  return result;
}

gpointer
g_atomic_pointer_get (volatile gpointer *atomic)
{
  gpointer result = *atomic;

  G_ATOMIC_MEMORY_BARRIER;

  return result;
}
#endif /* DEFINE_WITH_MUTEXES || G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */

#ifdef ATOMIC_INT_CMP_XCHG
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  return ATOMIC_INT_CMP_XCHG (atomic, oldval, newval);
}

gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;
  do
    result = *atomic;
  while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));

  return result;
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  gint result;
  do
    result = *atomic;
  while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
}
#endif /* ATOMIC_INT_CMP_XCHG */

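/* Hooked into GLib's thread-system initialisation (see gthreadinit.h);
 * only the mutex-based fallback has any per-process setup to do. */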
void
_g_atomic_thread_init (void)
{
#ifdef DEFINE_WITH_MUTEXES
  g_atomic_mutex = g_mutex_new ();
#endif /* DEFINE_WITH_MUTEXES */
}

#ifndef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
gint
(g_atomic_int_get) (volatile gint *atomic)
{
  return g_atomic_int_get (atomic);
}

gpointer
(g_atomic_pointer_get) (volatile gpointer *atomic)
{
  return g_atomic_pointer_get (atomic);
}
#endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */

#define __G_ATOMIC_C__
#include "galiasdef.c"
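
A minimal usage sketch (not part of gatomic.c; the Shared struct and the shared_ref/shared_unref helpers are invented for illustration): a thread-safe reference count needs nothing beyond the g_atomic_int_add and g_atomic_int_exchange_and_add entry points defined above.

#include <glib.h>

typedef struct
{
  volatile gint ref_count;
  /* ... shared payload ... */
} Shared;

/* Take a new reference; atomic, so any thread may call it. */
static void
shared_ref (Shared *s)
{
  g_atomic_int_add (&s->ref_count, 1);
}

/* Drop a reference; returns TRUE if the caller released the last one
 * and therefore has to free the payload. */
static gboolean
shared_unref (Shared *s)
{
  /* g_atomic_int_exchange_and_add returns the value held *before* the
   * addition, so exactly one thread observes the count going 1 -> 0. */
  return g_atomic_int_exchange_and_add (&s->ref_count, -1) == 1;
}

The compare-and-exchange entry points are used the same way whenever the new value depends on the old one; the mutex-based and ATOMIC_INT_CMP_XCHG fallbacks above already show that retry loop.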