navit/support/glib/gatomic.c

/* GLIB - Library of useful routines for C programming
 * Copyright (C) 1995-1997  Peter Mattis, Spencer Kimball and Josh MacDonald
 *
 * g_atomic_*: atomic operations.
 * Copyright (C) 2003 Sebastian Wilhelmi
 * Copyright (C) 2007 Nokia Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include "config.h"

#if defined (G_ATOMIC_ARM)
#include <sched.h>
#endif

#include "glib.h"
#include "gthreadprivate.h"
#include "galias.h"

#if HAVE_API_WIN32_BASE
#include <windows.h>
#endif

#if USE_POSIX_THREADS
#include <pthread.h>
#endif

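/* Illustrative usage sketch: the entry points implemented below are commonly
 * used for lock-free reference counting, along the lines of
 *
 *   static volatile gint ref_count = 1;
 *
 *   void     object_ref   (void) { g_atomic_int_add (&ref_count, 1); }
 *   gboolean object_unref (void)
 *   {
 *     return g_atomic_int_exchange_and_add (&ref_count, -1) == 1;
 *   }
 *
 * object_unref returns TRUE when the last reference was dropped, because
 * g_atomic_int_exchange_and_add returns the value the counter held before
 * the addition.  The object_ref/object_unref names are hypothetical; only
 * the g_atomic_* calls are provided by this file.
 */
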
#if defined (__GNUC__)
# if defined (G_ATOMIC_I486)
/* Adapted from CVS version 1.10 of glibc's sysdeps/i386/i486/bits/atomic.h
 */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;

  __asm__ __volatile__ ("lock; xaddl %0,%1"
                        : "=r" (result), "=m" (*atomic)
                        : "0" (val), "m" (*atomic));
  return result;
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  __asm__ __volatile__ ("lock; addl %1,%0"
                        : "=m" (*atomic)
                        : "ir" (val), "m" (*atomic));
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gint result;

  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                        : "=a" (result), "=m" (*atomic)
                        : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

/* The same code as above, as on i386 gpointer is 32 bit as well.
 * Duplicating the code here seems more natural than casting the
 * arguments and calling the former function */

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;

  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                        : "=a" (result), "=m" (*atomic)
                        : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

# elif defined (G_ATOMIC_SPARCV9)
/* Adapted from CVS version 1.3 of glibc's sysdeps/sparc/sparc64/bits/atomic.h
 */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
  ({                                                                    \
     gint __result;                                                     \
     __asm__ __volatile__ ("cas [%4], %2, %0"                           \
                           : "=r" (__result), "=m" (*(atomic))          \
                           : "r" (oldval), "m" (*(atomic)), "r" (atomic),\
                           "0" (newval));                               \
     __result == oldval;                                                \
  })
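
/* Note: on architectures such as this one that only provide
 * ATOMIC_INT_CMP_XCHG, the g_atomic_int_compare_and_exchange,
 * g_atomic_int_exchange_and_add and g_atomic_int_add entry points are
 * supplied by the generic ATOMIC_INT_CMP_XCHG-based section near the end of
 * this file. */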

#  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;
  __asm__ __volatile__ ("cas [%4], %2, %0"
                        : "=r" (result), "=m" (*atomic)
                        : "r" (oldval), "m" (*atomic), "r" (atomic),
                        "0" (newval));
  return result == oldval;
}
#  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;
  gpointer *a = atomic;
  __asm__ __volatile__ ("casx [%4], %2, %0"
                        : "=r" (result), "=m" (*a)
                        : "r" (oldval), "m" (*a), "r" (a),
                        "0" (newval));
  return result == oldval;
}
#  else /* What's that */
#    error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */
#  define G_ATOMIC_MEMORY_BARRIER                                       \
  __asm__ __volatile__ ("membar #LoadLoad | #LoadStore"                 \
                        " | #StoreLoad | #StoreStore" : : : "memory")

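/* Note: G_ATOMIC_MEMORY_BARRIER, defined here and for several other
 * architectures below, is only consumed by the g_atomic_int/pointer get and
 * set implementations in the G_ATOMIC_OP_MEMORY_BARRIER_NEEDED section near
 * the end of this file. */
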
# elif defined (G_ATOMIC_ALPHA)
/* Adapted from CVS version 1.3 of glibc's sysdeps/alpha/bits/atomic.h
 */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
  ({                                                                    \
     gint __result;                                                     \
     gint __prev;                                                       \
     __asm__ __volatile__ (                                             \
        "       mb\n"                                                   \
        "1:     ldl_l   %0,%2\n"                                        \
        "       cmpeq   %0,%3,%1\n"                                     \
        "       beq     %1,2f\n"                                        \
        "       mov     %4,%1\n"                                        \
        "       stl_c   %1,%2\n"                                        \
        "       beq     %1,1b\n"                                        \
        "       mb\n"                                                   \
        "2:"                                                            \
        : "=&r" (__prev),                                               \
          "=&r" (__result)                                              \
        : "m" (*(atomic)),                                              \
          "Ir" (oldval),                                                \
          "Ir" (newval)                                                 \
        : "memory");                                                    \
     __result != 0;                                                     \
  })
#  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gint result;
  gpointer prev;
  __asm__ __volatile__ (
        "       mb\n"
        "1:     ldl_l   %0,%2\n"
        "       cmpeq   %0,%3,%1\n"
        "       beq     %1,2f\n"
        "       mov     %4,%1\n"
        "       stl_c   %1,%2\n"
        "       beq     %1,1b\n"
        "       mb\n"
        "2:"
        : "=&r" (prev),
          "=&r" (result)
        : "m" (*atomic),
          "Ir" (oldval),
          "Ir" (newval)
        : "memory");
  return result != 0;
}
#  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gint result;
  gpointer prev;
  __asm__ __volatile__ (
        "       mb\n"
        "1:     ldq_l   %0,%2\n"
        "       cmpeq   %0,%3,%1\n"
        "       beq     %1,2f\n"
        "       mov     %4,%1\n"
        "       stq_c   %1,%2\n"
        "       beq     %1,1b\n"
        "       mb\n"
        "2:"
        : "=&r" (prev),
          "=&r" (result)
        : "m" (*atomic),
          "Ir" (oldval),
          "Ir" (newval)
        : "memory");
  return result != 0;
}
#  else /* What's that */
#   error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */
#  define G_ATOMIC_MEMORY_BARRIER  __asm__ ("mb" : : : "memory")
# elif defined (G_ATOMIC_X86_64)
/* Adapted from CVS version 1.9 of glibc's sysdeps/x86_64/bits/atomic.h
 */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;

  __asm__ __volatile__ ("lock; xaddl %0,%1"
                        : "=r" (result), "=m" (*atomic)
                        : "0" (val), "m" (*atomic));
  return result;
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  __asm__ __volatile__ ("lock; addl %1,%0"
                        : "=m" (*atomic)
                        : "ir" (val), "m" (*atomic));
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gint result;

  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                        : "=a" (result), "=m" (*atomic)
                        : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;

  __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
                        : "=a" (result), "=m" (*atomic)
                        : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

# elif defined (G_ATOMIC_POWERPC)
/* Adapted from CVS version 1.16 of glibc's sysdeps/powerpc/bits/atomic.h
 * and CVS version 1.4 of glibc's sysdeps/powerpc/powerpc32/bits/atomic.h
 * and CVS version 1.7 of glibc's sysdeps/powerpc/powerpc64/bits/atomic.h
 */
#   ifdef __OPTIMIZE__
/* Non-optimizing compile bails on the following two asm statements
 * for reasons unknown to the author */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result, temp;
#if ASM_NUMERIC_LABELS
  __asm__ __volatile__ ("1:       lwarx   %0,0,%3\n"
                        "         add     %1,%0,%4\n"
                        "         stwcx.  %1,0,%3\n"
                        "         bne-    1b"
                        : "=&b" (result), "=&r" (temp), "=m" (*atomic)
                        : "b" (atomic), "r" (val), "m" (*atomic)
                        : "cr0", "memory");
#else
  __asm__ __volatile__ (".Lieaa%=:       lwarx   %0,0,%3\n"
                        "         add     %1,%0,%4\n"
                        "         stwcx.  %1,0,%3\n"
                        "         bne-    .Lieaa%="
                        : "=&b" (result), "=&r" (temp), "=m" (*atomic)
                        : "b" (atomic), "r" (val), "m" (*atomic)
                        : "cr0", "memory");
#endif
  return result;
}

/* The same as above, to save a function call repeated here */
void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  gint result, temp;
#if ASM_NUMERIC_LABELS
  __asm__ __volatile__ ("1:       lwarx   %0,0,%3\n"
                        "         add     %1,%0,%4\n"
                        "         stwcx.  %1,0,%3\n"
                        "         bne-    1b"
                        : "=&b" (result), "=&r" (temp), "=m" (*atomic)
                        : "b" (atomic), "r" (val), "m" (*atomic)
                        : "cr0", "memory");
#else
  __asm__ __volatile__ (".Lia%=:       lwarx   %0,0,%3\n"
                        "         add     %1,%0,%4\n"
                        "         stwcx.  %1,0,%3\n"
                        "         bne-    .Lia%="
                        : "=&b" (result), "=&r" (temp), "=m" (*atomic)
                        : "b" (atomic), "r" (val), "m" (*atomic)
                        : "cr0", "memory");
#endif
}
#   else /* !__OPTIMIZE__ */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;
  do
    result = *atomic;
  while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));

  return result;
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  gint result;
  do
    result = *atomic;
  while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
}
#   endif /* !__OPTIMIZE__ */

#   if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gint result;
#if ASM_NUMERIC_LABELS
  __asm__ __volatile__ ("sync\n"
                        "1: lwarx   %0,0,%1\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     2f\n"
                        "   stwcx.  %3,0,%1\n"
                        "   bne-    1b\n"
                        "2: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
#else
  __asm__ __volatile__ ("sync\n"
                        ".L1icae%=: lwarx   %0,0,%1\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     .L2icae%=\n"
                        "   stwcx.  %3,0,%1\n"
                        "   bne-    .L1icae%=\n"
                        ".L2icae%=: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
#endif
  return result == 0;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;
#if ASM_NUMERIC_LABELS
  __asm__ __volatile__ ("sync\n"
                        "1: lwarx   %0,0,%1\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     2f\n"
                        "   stwcx.  %3,0,%1\n"
                        "   bne-    1b\n"
                        "2: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
#else
  __asm__ __volatile__ ("sync\n"
                        ".L1pcae%=: lwarx   %0,0,%1\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     .L2pcae%=\n"
                        "   stwcx.  %3,0,%1\n"
                        "   bne-    .L1pcae%=\n"
                        ".L2pcae%=: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
#endif
  return result == 0;
}
#   elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gpointer result;
#if ASM_NUMERIC_LABELS
  __asm__ __volatile__ ("sync\n"
                        "1: lwarx   %0,0,%1\n"
                        "   extsw   %0,%0\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     2f\n"
                        "   stwcx.  %3,0,%1\n"
                        "   bne-    1b\n"
                        "2: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
#else
  __asm__ __volatile__ ("sync\n"
                        ".L1icae%=: lwarx   %0,0,%1\n"
                        "   extsw   %0,%0\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     .L2icae%=\n"
                        "   stwcx.  %3,0,%1\n"
                        "   bne-    .L1icae%=\n"
                        ".L2icae%=: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
#endif
  return result == 0;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result;
#if ASM_NUMERIC_LABELS
  __asm__ __volatile__ ("sync\n"
                        "1: ldarx   %0,0,%1\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     2f\n"
                        "   stdcx.  %3,0,%1\n"
                        "   bne-    1b\n"
                        "2: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
#else
  __asm__ __volatile__ ("sync\n"
                        ".L1pcae%=: ldarx   %0,0,%1\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     .L2pcae%=\n"
                        "   stdcx.  %3,0,%1\n"
                        "   bne-    .L1pcae%=\n"
                        ".L2pcae%=: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
#endif
  return result == 0;
}
#  else /* What's that */
#   error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */

#  define G_ATOMIC_MEMORY_BARRIER __asm__ ("sync" : : : "memory")

# elif defined (G_ATOMIC_IA64)
/* Adapted from CVS version 1.8 of glibc's sysdeps/ia64/bits/atomic.h
 */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  return __sync_fetch_and_add (atomic, val);
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint val)
{
  __sync_fetch_and_add (atomic, val);
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  return __sync_bool_compare_and_swap (atomic, oldval, newval);
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  return __sync_bool_compare_and_swap ((long *)atomic,
                                       (long)oldval, (long)newval);
}

#  define G_ATOMIC_MEMORY_BARRIER __sync_synchronize ()
# elif defined (G_ATOMIC_S390)
/* Adapted from glibc's sysdeps/s390/bits/atomic.h
 */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
  ({                                                                    \
     gint __result = oldval;                                            \
     __asm__ __volatile__ ("cs %0, %2, %1"                              \
                           : "+d" (__result), "=Q" (*(atomic))          \
                           : "d" (newval), "m" (*(atomic)) : "cc" );    \
     __result == oldval;                                                \
  })

#  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result = oldval;
  __asm__ __volatile__ ("cs %0, %2, %1"
                        : "+d" (result), "=Q" (*(atomic))
                        : "d" (newval), "m" (*(atomic)) : "cc" );
  return result == oldval;
}
#  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gpointer result = oldval;
  gpointer *a = atomic;
  __asm__ __volatile__ ("csg %0, %2, %1"
                        : "+d" (result), "=Q" (*a)
                        : "d" ((long)(newval)), "m" (*a) : "cc" );
  return result == oldval;
}
#  else /* What's that */
#    error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */
# elif defined (G_ATOMIC_ARM)
static volatile int atomic_spin = 0;

static int atomic_spin_trylock (void)
{
  int result;

  asm volatile (
    "swp %0, %1, [%2]\n"
    : "=&r,&r" (result)
    : "r,0" (1), "r,r" (&atomic_spin)
    : "memory");
  if (result == 0)
    return 0;
  else
    return -1;
}

static void atomic_spin_lock (void)
{
  while (atomic_spin_trylock())
    sched_yield();
}

static void atomic_spin_unlock (void)
{
  atomic_spin = 0;
}
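
/* Note: every ARM g_atomic_* operation below is serialised through the single
 * global spinlock above.  The lock itself is taken with the swp instruction,
 * which is deprecated on newer ARM architecture revisions, so this fallback
 * is aimed at older cores. */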

gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;

  atomic_spin_lock();
  result = *atomic;
  *atomic += val;
  atomic_spin_unlock();

  return result;
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  atomic_spin_lock();
  *atomic += val;
  atomic_spin_unlock();
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gboolean result;

  atomic_spin_lock();
  if (*atomic == oldval)
    {
      result = TRUE;
      *atomic = newval;
    }
  else
    result = FALSE;
  atomic_spin_unlock();

  return result;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gboolean result;

  atomic_spin_lock();
  if (*atomic == oldval)
    {
      result = TRUE;
      *atomic = newval;
    }
  else
    result = FALSE;
  atomic_spin_unlock();

  return result;
}
# elif defined(G_PLATFORM_WIN32)
#  define DEFINE_WITH_WIN32_INTERLOCKED
# else
#  define DEFINE_WITH_MUTEXES
# endif /* G_ATOMIC_* */
#else /* !__GNUC__ */
# ifdef G_PLATFORM_WIN32
#  define DEFINE_WITH_WIN32_INTERLOCKED
# else
#  define DEFINE_WITH_MUTEXES
# endif
#endif /* __GNUC__ */
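
/* Summary of the dispatch above: with GCC on one of the architectures handled
 * explicitly, the inline-assembly implementations are used; otherwise the
 * operations fall back either to the Win32 Interlocked* API
 * (DEFINE_WITH_WIN32_INTERLOCKED) or to a single global GMutex
 * (DEFINE_WITH_MUTEXES) below. */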

#ifdef DEFINE_WITH_WIN32_INTERLOCKED
# include <windows.h>
/* Following indicates that InterlockedCompareExchangePointer is
 * declared in winbase.h (included by windows.h) and needs to be
 * commented out if not true. It is defined iff WINVER > 0x0400,
 * which is usually correct but can be wrong if WINVER is set before
 * windows.h is included.
 */
# if WINVER > 0x0400
#  define HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
# endif
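
/* Illustrative sketch (an assumption, not part of the original build setup):
 * if a toolchain sets WINVER too low for InterlockedCompareExchangePointer
 * to be declared, the usual fix is to raise it before windows.h is pulled in,
 * e.g.
 *
 *   #ifndef WINVER
 *   #define WINVER 0x0500
 *   #endif
 *   #include <windows.h>
 *
 * rather than editing the HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER guard
 * above by hand. */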

gint32
g_atomic_int_exchange_and_add (volatile gint32 *atomic,
                               gint32           val)
{
  return InterlockedExchangeAdd (atomic, val);
}

void
g_atomic_int_add (volatile gint32 *atomic,
                  gint32           val)
{
  InterlockedExchangeAdd (atomic, val);
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint32 *atomic,
                                   gint32           oldval,
                                   gint32           newval)
{
#ifndef HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
  return (guint32) InterlockedCompareExchange ((PVOID*)atomic,
                                               (PVOID)newval,
                                               (PVOID)oldval) == oldval;
#else
  return InterlockedCompareExchange (atomic,
                                     newval,
                                     oldval) == oldval;
#endif
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
# ifdef HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
  return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
# else
#  if GLIB_SIZEOF_VOID_P != 4 /* no 32-bit system */
#   error "InterlockedCompareExchangePointer needed"
#  else
   return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
#  endif
# endif
}
#endif /* DEFINE_WITH_WIN32_INTERLOCKED */

#ifdef DEFINE_WITH_MUTEXES
/* We have to use the slow, but safe locking method */
static GMutex *g_atomic_mutex;

gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  *atomic += val;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}


void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  g_mutex_lock (g_atomic_mutex);
  *atomic += val;
  g_mutex_unlock (g_atomic_mutex);
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  gboolean result;

  g_mutex_lock (g_atomic_mutex);
  if (*atomic == oldval)
    {
      result = TRUE;
      *atomic = newval;
    }
  else
    result = FALSE;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
  gboolean result;

  g_mutex_lock (g_atomic_mutex);
  if (*atomic == oldval)
    {
      result = TRUE;
      *atomic = newval;
    }
  else
    result = FALSE;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

#ifdef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
gint
g_atomic_int_get (volatile gint *atomic)
{
  gint result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

void
g_atomic_int_set (volatile gint *atomic,
                  gint           newval)
{
  g_mutex_lock (g_atomic_mutex);
  *atomic = newval;
  g_mutex_unlock (g_atomic_mutex);
}

gpointer
g_atomic_pointer_get (volatile gpointer *atomic)
{
  gpointer result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

void
g_atomic_pointer_set (volatile gpointer *atomic,
                      gpointer           newval)
{
  g_mutex_lock (g_atomic_mutex);
  *atomic = newval;
  g_mutex_unlock (g_atomic_mutex);
}
#endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
#elif defined (G_ATOMIC_OP_MEMORY_BARRIER_NEEDED)
gint
g_atomic_int_get (volatile gint *atomic)
{
  G_ATOMIC_MEMORY_BARRIER;
  return *atomic;
}

void
g_atomic_int_set (volatile gint *atomic,
                  gint           newval)
{
  *atomic = newval;
  G_ATOMIC_MEMORY_BARRIER;
}

gpointer
g_atomic_pointer_get (volatile gpointer *atomic)
{
  G_ATOMIC_MEMORY_BARRIER;
  return *atomic;
}

void
g_atomic_pointer_set (volatile gpointer *atomic,
                      gpointer           newval)
{
  *atomic = newval;
  G_ATOMIC_MEMORY_BARRIER;
}
#endif /* DEFINE_WITH_MUTEXES || G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */

#ifdef ATOMIC_INT_CMP_XCHG
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  return ATOMIC_INT_CMP_XCHG (atomic, oldval, newval);
}

gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  gint result;
  do
    result = *atomic;
  while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));

  return result;
}

void
g_atomic_int_add (volatile gint *atomic,
                  gint           val)
{
  gint result;
  do
    result = *atomic;
  while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
}
#endif /* ATOMIC_INT_CMP_XCHG */

void
_g_atomic_thread_init (void)
{
#ifdef DEFINE_WITH_MUTEXES
  g_atomic_mutex = g_mutex_new ();
#endif /* DEFINE_WITH_MUTEXES */
}
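
/* Note: on the DEFINE_WITH_MUTEXES fallback the operations above rely on
 * g_atomic_mutex, which is only created here as part of GLib's thread-system
 * initialisation; until that has run, the mutex-based fallback provides no
 * real locking, which only matters once additional threads exist. */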

#ifndef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
gint
(g_atomic_int_get) (volatile gint *atomic)
{
  return g_atomic_int_get (atomic);
}

void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  g_atomic_int_set (atomic, newval);
}

gpointer
(g_atomic_pointer_get) (volatile gpointer *atomic)
{
  return g_atomic_pointer_get (atomic);
}

void
(g_atomic_pointer_set) (volatile gpointer *atomic,
                        gpointer           newval)
{
  g_atomic_pointer_set (atomic, newval);
}
#endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */

#define __G_ATOMIC_C__
#include "galiasdef.c"