/* Add an implementation for the CRIS and CRISv32 architectures, by Peter */
/* [platform/upstream/glib.git] / glib / gatomic.c */
1 /* GLIB - Library of useful routines for C programming
2  * Copyright (C) 1995-1997  Peter Mattis, Spencer Kimball and Josh MacDonald
3  *
4  * g_atomic_*: atomic operations.
5  * Copyright (C) 2003 Sebastian Wilhelmi
6  * Copyright (C) 2007 Nokia Corporation
7  *
8  * This library is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2 of the License, or (at your option) any later version.
12  *
13  * This library is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with this library; if not, write to the
20  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
21  * Boston, MA 02111-1307, USA.
22  */
23
24 #include "config.h"
25
26 #if defined (G_ATOMIC_ARM)
27 #include <sched.h>
28 #endif
29
30 #include "glib.h"
31 #include "gthreadprivate.h"
32 #include "galias.h"
33
34 #if defined (__GNUC__)
35 # if defined (G_ATOMIC_I486)
36 /* Adapted from CVS version 1.10 of glibc's sysdeps/i386/i486/bits/atomic.h 
37  */
/* Fetch-and-add: atomically add VAL to *ATOMIC and return the value
 * *ATOMIC held before the addition.  The LOCK prefix makes XADDL an
 * indivisible read-modify-write on SMP x86. */
38 gint
39 g_atomic_int_exchange_and_add (volatile gint *atomic, 
40                                gint           val)
41 {
42   gint result;
43
   /* XADDL exchanges %0 and %1 and stores their sum in %1, so the
    * previous value of *atomic ends up in RESULT ("0" ties VAL's input
    * register to RESULT's output register). */
44   __asm__ __volatile__ ("lock; xaddl %0,%1"
45                         : "=r" (result), "=m" (*atomic) 
46                         : "0" (val), "m" (*atomic));
47   return result;
48 }
49  
   /* Atomically add VAL to *ATOMIC; the old value is not returned. */
50 void
51 g_atomic_int_add (volatile gint *atomic, 
52                   gint           val)
53 {
54   __asm__ __volatile__ ("lock; addl %1,%0"
55                         : "=m" (*atomic) 
56                         : "ir" (val), "m" (*atomic));
57 }
58
   /* Compare-and-swap: if *ATOMIC equals OLDVAL, store NEWVAL and
    * return TRUE; otherwise leave *ATOMIC untouched and return FALSE.
    * CMPXCHGL compares against EAX ("a"/"0" constraints) and leaves
    * the value actually found in EAX. */
59 gboolean
60 g_atomic_int_compare_and_exchange (volatile gint *atomic, 
61                                    gint           oldval, 
62                                    gint           newval)
63 {
64   gint result;
65  
66   __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
67                         : "=a" (result), "=m" (*atomic)
68                         : "r" (newval), "m" (*atomic), "0" (oldval)); 
69
   /* RESULT == OLDVAL exactly when the store happened. */
70   return result == oldval;
71 }
72
73 /* The same code as above, as on i386 gpointer is 32 bit as well.
74  * Duplicating the code here seems more natural than casting the
75  * arguments and calling the former function */
76
   /* Pointer-sized compare-and-swap; identical to the gint version
    * because pointers are 32 bit on i386. */
77 gboolean
78 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
79                                        gpointer           oldval, 
80                                        gpointer           newval)
81 {
82   gpointer result;
83  
84   __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
85                         : "=a" (result), "=m" (*atomic)
86                         : "r" (newval), "m" (*atomic), "0" (oldval)); 
87
88   return result == oldval;
89 }
90
91 # elif defined (G_ATOMIC_SPARCV9)
92 /* Adapted from CVS version 1.3 of glibc's sysdeps/sparc/sparc64/bits/atomic.h
93  */
/* Compare-and-swap a gint with the SPARC V9 CAS instruction.
 * CAS compares [%4] with OLDVAL and, on match, swaps in the value
 * tied to %0; %0 always receives what was previously in memory, so
 * comparing it against OLDVAL reports whether the swap happened. */
94 #  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
95   ({                                                                    \
96      gint __result;                                                     \
97      __asm__ __volatile__ ("cas [%4], %2, %0"                           \
98                            : "=r" (__result), "=m" (*(atomic))          \
99                            : "r" (oldval), "m" (*(atomic)), "r" (atomic),\
100                            "0" (newval));                               \
101      __result == oldval;                                                \
102   })
103
104 #  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* Pointer CAS; with 32-bit pointers the word-sized CAS form applies. */
105 gboolean
106 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
107                                        gpointer           oldval, 
108                                        gpointer           newval)
109 {
110   gpointer result;
111   __asm__ __volatile__ ("cas [%4], %2, %0"
112                         : "=r" (result), "=m" (*atomic)
113                         : "r" (oldval), "m" (*atomic), "r" (atomic),
114                         "0" (newval));
115   return result == oldval;
116 }
117 #  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
/* 64-bit pointers need the extended-word CASX form. */
118 gboolean
119 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
120                                        gpointer           oldval, 
121                                        gpointer           newval)
122 {
123   gpointer result;
124   gpointer *a = atomic;
125   __asm__ __volatile__ ("casx [%4], %2, %0"
126                         : "=r" (result), "=m" (*a)
127                         : "r" (oldval), "m" (*a), "r" (a),
128                         "0" (newval));
129   return result == oldval;
130 }
131 #  else /* What's that */
132 #    error "Your system has an unsupported pointer size"
133 #  endif /* GLIB_SIZEOF_VOID_P */
/* Full fence: orders loads and stores on both sides of the barrier. */
134 #  define G_ATOMIC_MEMORY_BARRIER                                       \
135   __asm__ __volatile__ ("membar #LoadLoad | #LoadStore"                 \
136                         " | #StoreLoad | #StoreStore" : : : "memory")
137
138 # elif defined (G_ATOMIC_ALPHA)
139 /* Adapted from CVS version 1.3 of glibc's sysdeps/alpha/bits/atomic.h
140  */
/* LL/SC compare-and-swap: load-locked *ATOMIC, bail out if it differs
 * from OLDVAL, otherwise store-conditional NEWVAL and retry when the
 * reservation was lost (stl_c writes 0 into %1 on failure).  The MB
 * instructions are Alpha full memory barriers on both sides.
 * Evaluates to TRUE when the store happened. */
141 #  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
142   ({                                                                    \
143      gint __result;                                                     \
144      gint __prev;                                                       \
145      __asm__ __volatile__ (                                             \
146         "       mb\n"                                                   \
147         "1:     ldl_l   %0,%2\n"                                        \
148         "       cmpeq   %0,%3,%1\n"                                     \
149         "       beq     %1,2f\n"                                        \
150         "       mov     %4,%1\n"                                        \
151         "       stl_c   %1,%2\n"                                        \
152         "       beq     %1,1b\n"                                        \
153         "       mb\n"                                                   \
154         "2:"                                                            \
155         : "=&r" (__prev),                                               \
156           "=&r" (__result)                                              \
157         : "m" (*(atomic)),                                              \
158           "Ir" (oldval),                                                \
159           "Ir" (newval)                                                 \
160         : "memory");                                                    \
161      __result != 0;                                                     \
162   })
163 #  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* Pointer CAS via the longword ldl_l/stl_c pair (pointers are 4 bytes
 * on this configuration). */
164 gboolean
165 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
166                                        gpointer           oldval, 
167                                        gpointer           newval)
168 {
169   gint result;
170   gpointer prev;
171   __asm__ __volatile__ (
172         "       mb\n"
173         "1:     ldl_l   %0,%2\n"
174         "       cmpeq   %0,%3,%1\n"
175         "       beq     %1,2f\n"
176         "       mov     %4,%1\n"
177         "       stl_c   %1,%2\n"
178         "       beq     %1,1b\n"
179         "       mb\n"
180         "2:"
181         : "=&r" (prev), 
182           "=&r" (result)
183         : "m" (*atomic),
184           "Ir" (oldval),
185           "Ir" (newval)
186         : "memory");
187   return result != 0;
188 }
189 #  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
/* Same sequence with the quadword ldq_l/stq_c pair for 8-byte
 * pointers. */
190 gboolean
191 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
192                                        gpointer           oldval, 
193                                        gpointer           newval)
194 {
195   gint result;
196   gpointer prev;
197   __asm__ __volatile__ (
198         "       mb\n"
199         "1:     ldq_l   %0,%2\n"
200         "       cmpeq   %0,%3,%1\n"
201         "       beq     %1,2f\n"
202         "       mov     %4,%1\n"
203         "       stq_c   %1,%2\n"
204         "       beq     %1,1b\n"
205         "       mb\n"
206         "2:"
207         : "=&r" (prev), 
208           "=&r" (result)
209         : "m" (*atomic),
210           "Ir" (oldval),
211           "Ir" (newval)
212         : "memory");
213   return result != 0;
214 }
215 #  else /* What's that */
216 #   error "Your system has an unsupported pointer size"
217 #  endif /* GLIB_SIZEOF_VOID_P */
/* MB is the Alpha full memory barrier instruction. */
218 #  define G_ATOMIC_MEMORY_BARRIER  __asm__ ("mb" : : : "memory")
219 # elif defined (G_ATOMIC_X86_64)
220 /* Adapted from CVS version 1.9 of glibc's sysdeps/x86_64/bits/atomic.h 
221  */
/* Fetch-and-add via LOCK XADDL (gint is still 32 bit on x86-64);
 * returns the value *ATOMIC held before VAL was added. */
222 gint
223 g_atomic_int_exchange_and_add (volatile gint *atomic,
224                                gint           val)
225 {
226   gint result;
227
228   __asm__ __volatile__ ("lock; xaddl %0,%1"
229                         : "=r" (result), "=m" (*atomic) 
230                         : "0" (val), "m" (*atomic));
231   return result;
232 }
233  
/* Atomically add VAL to *ATOMIC, discarding the old value. */
234 void
235 g_atomic_int_add (volatile gint *atomic, 
236                   gint           val)
237 {
238   __asm__ __volatile__ ("lock; addl %1,%0"
239                         : "=m" (*atomic) 
240                         : "ir" (val), "m" (*atomic));
241 }
242
/* 32-bit CAS: CMPXCHGL compares against EAX ("a"/"0" constraints),
 * storing NEWVAL on match; EAX afterwards holds the value actually
 * found, so the final comparison reports success. */
243 gboolean
244 g_atomic_int_compare_and_exchange (volatile gint *atomic, 
245                                    gint           oldval, 
246                                    gint           newval)
247 {
248   gint result;
249  
250   __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
251                         : "=a" (result), "=m" (*atomic)
252                         : "r" (newval), "m" (*atomic), "0" (oldval)); 
253
254   return result == oldval;
255 }
256
/* Pointer CAS: the %q operand modifier forces the 64-bit CMPXCHGQ
 * register form. */
257 gboolean
258 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
259                                        gpointer           oldval, 
260                                        gpointer           newval)
261 {
262   gpointer result;
263  
264   __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
265                         : "=a" (result), "=m" (*atomic)
266                         : "r" (newval), "m" (*atomic), "0" (oldval)); 
267
268   return result == oldval;
269 }
270
271 # elif defined (G_ATOMIC_POWERPC)
272 /* Adapted from CVS version 1.16 of glibc's sysdeps/powerpc/bits/atomic.h 
273  * and CVS version 1.4 of glibc's sysdeps/powerpc/powerpc32/bits/atomic.h 
274  * and CVS version 1.7 of glibc's sysdeps/powerpc/powerpc64/bits/atomic.h 
275  */
276 #   ifdef __OPTIMIZE__
277 /* Non-optimizing compile bails on the following two asm statements
278  * for reasons unknown to the author */
/* Fetch-and-add with a lwarx/stwcx. reservation loop: reload and
 * retry until the store-conditional succeeds.  Two label flavours are
 * provided because some assemblers reject numeric local labels
 * (selected by ASM_NUMERIC_LABELS).  Returns the pre-add value. */
279 gint
280 g_atomic_int_exchange_and_add (volatile gint *atomic, 
281                                gint           val)
282 {
283   gint result, temp;
284 #if ASM_NUMERIC_LABELS
285   __asm__ __volatile__ ("1:       lwarx   %0,0,%3\n"
286                         "         add     %1,%0,%4\n"
287                         "         stwcx.  %1,0,%3\n"
288                         "         bne-    1b"
289                         : "=&b" (result), "=&r" (temp), "=m" (*atomic)
290                         : "b" (atomic), "r" (val), "m" (*atomic)
291                         : "cr0", "memory");
292 #else
293   __asm__ __volatile__ (".Lieaa%=:       lwarx   %0,0,%3\n"
294                         "         add     %1,%0,%4\n"
295                         "         stwcx.  %1,0,%3\n"
296                         "         bne-    .Lieaa%="
297                         : "=&b" (result), "=&r" (temp), "=m" (*atomic)
298                         : "b" (atomic), "r" (val), "m" (*atomic)
299                         : "cr0", "memory");
300 #endif
301   return result;
302 }
303  
304 /* The same as above, to save a function call repeated here */
/* Atomic add without returning the old value; identical reservation
 * loop, only the fetched value is discarded. */
305 void
306 g_atomic_int_add (volatile gint *atomic, 
307                   gint           val)
308 {
309   gint result, temp;  
310 #if ASM_NUMERIC_LABELS
311   __asm__ __volatile__ ("1:       lwarx   %0,0,%3\n"
312                         "         add     %1,%0,%4\n"
313                         "         stwcx.  %1,0,%3\n"
314                         "         bne-    1b"
315                         : "=&b" (result), "=&r" (temp), "=m" (*atomic)
316                         : "b" (atomic), "r" (val), "m" (*atomic)
317                         : "cr0", "memory");
318 #else
319   __asm__ __volatile__ (".Lia%=:       lwarx   %0,0,%3\n"
320                         "         add     %1,%0,%4\n"
321                         "         stwcx.  %1,0,%3\n"
322                         "         bne-    .Lia%="
323                         : "=&b" (result), "=&r" (temp), "=m" (*atomic)
324                         : "b" (atomic), "r" (val), "m" (*atomic)
325                         : "cr0", "memory");
326 #endif
327 }
328 #   else /* !__OPTIMIZE__ */
329 gint
330 g_atomic_int_exchange_and_add (volatile gint *atomic, 
331                                gint           val)
332 {
333   gint result;
334   do
335     result = *atomic;
336   while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
337
338   return result;
339 }
340  
341 void
342 g_atomic_int_add (volatile gint *atomic,
343                   gint           val)
344 {
345   gint result;
346   do
347     result = *atomic;
348   while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
349 }
350 #   endif /* !__OPTIMIZE__ */
351
352 #   if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* 32-bit CAS: sync before, isync after, lwarx/stwcx. reservation loop
 * in between.  After the subf. RESULT holds *atomic - oldval, so a
 * zero result means the values matched and NEWVAL was stored. */
353 gboolean
354 g_atomic_int_compare_and_exchange (volatile gint *atomic, 
355                                    gint           oldval, 
356                                    gint           newval)
357 {
358   gint result;
359 #if ASM_NUMERIC_LABELS
360   __asm__ __volatile__ ("sync\n"
361                         "1: lwarx   %0,0,%1\n"
362                         "   subf.   %0,%2,%0\n"
363                         "   bne     2f\n"
364                         "   stwcx.  %3,0,%1\n"
365                         "   bne-    1b\n"
366                         "2: isync"
367                         : "=&r" (result)
368                         : "b" (atomic), "r" (oldval), "r" (newval)
369                         : "cr0", "memory"); 
370 #else
371   __asm__ __volatile__ ("sync\n"
372                         ".L1icae%=: lwarx   %0,0,%1\n"
373                         "   subf.   %0,%2,%0\n"
374                         "   bne     .L2icae%=\n"
375                         "   stwcx.  %3,0,%1\n"
376                         "   bne-    .L1icae%=\n"
377                         ".L2icae%=: isync"
378                         : "=&r" (result)
379                         : "b" (atomic), "r" (oldval), "r" (newval)
380                         : "cr0", "memory"); 
381 #endif
382   return result == 0;
383 }
384
/* Pointer CAS; identical to the gint version because pointers are
 * 32 bit on this configuration. */
385 gboolean
386 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
387                                        gpointer           oldval, 
388                                        gpointer           newval)
389 {
390   gpointer result;
391 #if ASM_NUMERIC_LABELS
392   __asm__ __volatile__ ("sync\n"
393                         "1: lwarx   %0,0,%1\n"
394                         "   subf.   %0,%2,%0\n"
395                         "   bne     2f\n"
396                         "   stwcx.  %3,0,%1\n"
397                         "   bne-    1b\n"
398                         "2: isync"
399                         : "=&r" (result)
400                         : "b" (atomic), "r" (oldval), "r" (newval)
401                         : "cr0", "memory"); 
402 #else
403   __asm__ __volatile__ ("sync\n"
404                         ".L1pcae%=: lwarx   %0,0,%1\n"
405                         "   subf.   %0,%2,%0\n"
406                         "   bne     .L2pcae%=\n"
407                         "   stwcx.  %3,0,%1\n"
408                         "   bne-    .L1pcae%=\n"
409                         ".L2pcae%=: isync"
410                         : "=&r" (result)
411                         : "b" (atomic), "r" (oldval), "r" (newval)
412                         : "cr0", "memory"); 
413 #endif
414   return result == 0;
415 }
416 #   elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
/* 64-bit build: lwarx loads 32 bits, and extsw sign-extends them
 * before the 64-bit subf. comparison against OLDVAL; RESULT is
 * pointer-sized so it matches the full-width registers. */
417 gboolean
418 g_atomic_int_compare_and_exchange (volatile gint *atomic,
419                                    gint           oldval, 
420                                    gint           newval)
421 {
422   gpointer result;
423 #if ASM_NUMERIC_LABELS
424   __asm__ __volatile__ ("sync\n"
425                         "1: lwarx   %0,0,%1\n"
426                         "   extsw   %0,%0\n"
427                         "   subf.   %0,%2,%0\n"
428                         "   bne     2f\n"
429                         "   stwcx.  %3,0,%1\n"
430                         "   bne-    1b\n"
431                         "2: isync"
432                         : "=&r" (result)
433                         : "b" (atomic), "r" (oldval), "r" (newval)
434                         : "cr0", "memory"); 
435 #else
436   __asm__ __volatile__ ("sync\n"
437                         ".L1icae%=: lwarx   %0,0,%1\n"
438                         "   extsw   %0,%0\n"
439                         "   subf.   %0,%2,%0\n"
440                         "   bne     .L2icae%=\n"
441                         "   stwcx.  %3,0,%1\n"
442                         "   bne-    .L1icae%=\n"
443                         ".L2icae%=: isync"
444                         : "=&r" (result)
445                         : "b" (atomic), "r" (oldval), "r" (newval)
446                         : "cr0", "memory"); 
447 #endif
448   return result == 0;
449 }
450
/* Pointer CAS with the doubleword ldarx/stdcx. pair. */
451 gboolean
452 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
453                                        gpointer           oldval, 
454                                        gpointer           newval)
455 {
456   gpointer result;
457 #if ASM_NUMERIC_LABELS
458   __asm__ __volatile__ ("sync\n"
459                         "1: ldarx   %0,0,%1\n"
460                         "   subf.   %0,%2,%0\n"
461                         "   bne     2f\n"
462                         "   stdcx.  %3,0,%1\n"
463                         "   bne-    1b\n"
464                         "2: isync"
465                         : "=&r" (result)
466                         : "b" (atomic), "r" (oldval), "r" (newval)
467                         : "cr0", "memory"); 
468 #else
469   __asm__ __volatile__ ("sync\n"
470                         ".L1pcae%=: ldarx   %0,0,%1\n"
471                         "   subf.   %0,%2,%0\n"
472                         "   bne     .L2pcae%=\n"
473                         "   stdcx.  %3,0,%1\n"
474                         "   bne-    .L1pcae%=\n"
475                         ".L2pcae%=: isync"
476                         : "=&r" (result)
477                         : "b" (atomic), "r" (oldval), "r" (newval)
478                         : "cr0", "memory"); 
479 #endif
480   return result == 0;
481 }
482 #  else /* What's that */
483 #   error "Your system has an unsupported pointer size"
484 #  endif /* GLIB_SIZEOF_VOID_P */
485
/* sync is the PowerPC full memory barrier. */
486 #  define G_ATOMIC_MEMORY_BARRIER __asm__ ("sync" : : : "memory")
487
488 # elif defined (G_ATOMIC_IA64)
489 /* Adapted from CVS version 1.8 of glibc's sysdeps/ia64/bits/atomic.h
490  */
491 gint
492 g_atomic_int_exchange_and_add (volatile gint *atomic,
493                                gint           val)
494 {
495   return __sync_fetch_and_add (atomic, val);
496 }
497  
498 void
499 g_atomic_int_add (volatile gint *atomic, 
500                   gint val)
501 {
502   __sync_fetch_and_add (atomic, val);
503 }
504
505 gboolean
506 g_atomic_int_compare_and_exchange (volatile gint *atomic,
507                                    gint           oldval, 
508                                    gint           newval)
509 {
510   return __sync_bool_compare_and_swap (atomic, oldval, newval);
511 }
512
513 gboolean
514 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
515                                        gpointer           oldval, 
516                                        gpointer           newval)
517 {
518   return __sync_bool_compare_and_swap ((long *)atomic, 
519                                        (long)oldval, (long)newval);
520 }
521
522 #  define G_ATOMIC_MEMORY_BARRIER __sync_synchronize ()
523 # elif defined (G_ATOMIC_S390)
524 /* Adapted from glibc's sysdeps/s390/bits/atomic.h
525  */
/* CS (compare-and-swap) compares __result — preloaded with OLDVAL —
 * against *ATOMIC and, on match, stores NEWVAL; either way __result
 * receives the value that was in memory, so the final comparison
 * reports success.  The condition code is clobbered. */
526 #  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
527   ({                                                                    \
528      gint __result = oldval;                                    \
529      __asm__ __volatile__ ("cs %0, %2, %1"                              \
530                            : "+d" (__result), "=Q" (*(atomic))          \
531                            : "d" (newval), "m" (*(atomic)) : "cc" );    \
532      __result == oldval;                                                \
533   })
534
535 #  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* Pointer CAS with the word-sized CS instruction. */
536 gboolean
537 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
538                                        gpointer           oldval,
539                                        gpointer           newval)
540 {
541   gpointer result = oldval;
542   __asm__ __volatile__ ("cs %0, %2, %1"
543                         : "+d" (result), "=Q" (*(atomic))
544                         : "d" (newval), "m" (*(atomic)) : "cc" );
545   return result == oldval;
546 }
547 #  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
/* 64-bit pointers use the doubleword CSG form. */
548 gboolean
549 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
550                                        gpointer           oldval,
551                                        gpointer           newval)
552 {
553   gpointer result = oldval;
554   gpointer *a = atomic;
555   __asm__ __volatile__ ("csg %0, %2, %1"
556                         : "+d" (result), "=Q" (*a)
557                         : "d" ((long)(newval)), "m" (*a) : "cc" );
558   return result == oldval;
559 }
560 #  else /* What's that */
561 #    error "Your system has an unsupported pointer size"
562 #  endif /* GLIB_SIZEOF_VOID_P */
563 # elif defined (G_ATOMIC_ARM)
/* This ARM variant has no native CAS, only SWP; every atomic
 * operation below is serialised through this one global spinlock. */
564 static volatile int atomic_spin = 0;
565
/* Attempt to take the spinlock: SWP atomically swaps 1 into
 * atomic_spin and yields its previous value, so 0 means the lock was
 * free and is now ours.  Returns 0 on success, -1 when already held. */
566 static int atomic_spin_trylock (void)
567 {
568   int result;
569
570   asm volatile (
571     "swp %0, %1, [%2]\n"
572     : "=&r,&r" (result)
573     : "r,0" (1), "r,r" (&atomic_spin)
574     : "memory");
575   if (result == 0)
576     return 0;
577   else
578     return -1;
579 }
580
/* Spin until the lock is acquired, yielding the CPU between tries. */
581 static void atomic_spin_lock (void)
582 {
583   while (atomic_spin_trylock())
584     sched_yield();
585 }
586
/* Release the lock; a plain store suffices here. */
587 static void atomic_spin_unlock (void)
588 {
589   atomic_spin = 0;
590 }
591
592 gint
593 g_atomic_int_exchange_and_add (volatile gint *atomic, 
594                                gint           val)
595 {
596   gint result;
597  
598   atomic_spin_lock();  
599   result = *atomic;
600   *atomic += val;
601   atomic_spin_unlock();
602
603   return result;
604 }
605
606 void
607 g_atomic_int_add (volatile gint *atomic,
608                   gint           val)
609 {
610   atomic_spin_lock();
611   *atomic += val;
612   atomic_spin_unlock();
613 }
614
615 gboolean
616 g_atomic_int_compare_and_exchange (volatile gint *atomic, 
617                                    gint           oldval, 
618                                    gint           newval)
619 {
620   gboolean result;
621
622   atomic_spin_lock();
623   if (*atomic == oldval)
624     {
625       result = TRUE;
626       *atomic = newval;
627     }
628   else
629     result = FALSE;
630   atomic_spin_unlock();
631
632   return result;
633 }
634
635 gboolean
636 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
637                                        gpointer           oldval, 
638                                        gpointer           newval)
639 {
640   gboolean result;
641  
642   atomic_spin_lock();
643   if (*atomic == oldval)
644     {
645       result = TRUE;
646       *atomic = newval;
647     }
648   else
649     result = FALSE;
650   atomic_spin_unlock();
651
652   return result;
653 }
654 # elif defined (G_ATOMIC_CRIS) || defined (G_ATOMIC_CRISV32)
655 #  ifdef G_ATOMIC_CRIS
/* CRIS v10 CAS: compare *ATOMIC against OLDVAL and, on match, store
 * NEWVAL under the "ax" prefix; bwf restarts the sequence if it was
 * broken.  seq turns the equality flag into the gboolean result. */
656 #   define CRIS_ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)             \
657   ({                                                                    \
658      gboolean __result;                                                 \
659      __asm__ __volatile__ ("\n"                                         \
660                            "0:\tclearf\n\t"                             \
661                            "cmp.d [%[Atomic]], %[OldVal]\n\t"           \
662                            "bne 1f\n\t"                                 \
663                            "ax\n\t"                                     \
664                            "move.d %[NewVal], [%[Atomic]]\n\t"          \
665                            "bwf 0b\n"                                   \
666                            "1:\tseq %[Result]"                          \
667                            : [Result] "=&r" (__result),                 \
668                                       "=m" (*(atomic))                  \
669                            : [Atomic] "r" (atomic),                     \
670                              [OldVal] "r" (oldval),                     \
671                              [NewVal] "r" (newval),                     \
672                                       "g" (*(gpointer*) (atomic))       \
673                            : "memory");                                 \
674      __result;                                                          \
675   })
676 #  else
/* CRISv32 variant: clearf takes the "p" flag and the retry branch is
 * bcs instead of bwf; otherwise the same sequence as above. */
677 #   define CRIS_ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)             \
678   ({                                                                    \
679      gboolean __result;                                                 \
680      __asm__ __volatile__ ("\n"                                         \
681                            "0:\tclearf p\n\t"                           \
682                            "cmp.d [%[Atomic]], %[OldVal]\n\t"           \
683                            "bne 1f\n\t"                                 \
684                            "ax\n\t"                                     \
685                            "move.d %[NewVal], [%[Atomic]]\n\t"          \
686                            "bcs 0b\n"                                   \
687                            "1:\tseq %[Result]"                          \
688                            : [Result] "=&r" (__result),                 \
689                                       "=m" (*(atomic))                  \
690                            : [Atomic] "r" (atomic),                     \
691                              [OldVal] "r" (oldval),                     \
692                              [NewVal] "r" (newval),                     \
693                                       "g" (*(gpointer*) (atomic))       \
694                            : "memory");                                 \
695      __result;                                                          \
696   })
697 #  endif
698
/* TRUE when an access of sizeof(atomic) bytes (the pointer size here)
 * starting at ATOMIC would cross a 32-byte cache line; such accesses
 * cannot be made atomic on CRIS (see the comment further down). */
699 #define CRIS_CACHELINE_SIZE 32
700 #define CRIS_ATOMIC_BREAKS_CACHELINE(atomic) \
701   (((gulong)(atomic) & (CRIS_CACHELINE_SIZE - 1)) > (CRIS_CACHELINE_SIZE - sizeof (atomic)))
702
/* Mutex-based fallbacks, used when the variable straddles a cache
 * line; they are produced by the generic mutex implementations at the
 * bottom of this file via the DEFINE_WITH_MUTEXES renames below. */
703 gint     __g_atomic_int_exchange_and_add         (volatile gint   *atomic,
704                                                   gint             val);
705 void     __g_atomic_int_add                      (volatile gint   *atomic,
706                                                   gint             val);
707 gboolean __g_atomic_int_compare_and_exchange     (volatile gint   *atomic,
708                                                   gint             oldval,
709                                                   gint             newval);
710 gboolean __g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
711                                                   gpointer         oldval,
712                                                   gpointer         newval);
713
714 gboolean
715 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
716                                        gpointer           oldval,
717                                        gpointer           newval)
718 {
719   if (G_UNLIKELY (CRIS_ATOMIC_BREAKS_CACHELINE (atomic)))
720     return __g_atomic_pointer_compare_and_exchange (atomic, oldval, newval);
721
722   return CRIS_ATOMIC_INT_CMP_XCHG (atomic, oldval, newval);
723 }
724
725 gboolean
726 g_atomic_int_compare_and_exchange (volatile gint *atomic,
727                                    gint           oldval,
728                                    gint           newval)
729 {
730   if (G_UNLIKELY (CRIS_ATOMIC_BREAKS_CACHELINE (atomic)))
731     return __g_atomic_int_compare_and_exchange (atomic, oldval, newval);
732
733   return CRIS_ATOMIC_INT_CMP_XCHG (atomic, oldval, newval);
734 }
735
736 gint
737 g_atomic_int_exchange_and_add (volatile gint *atomic,
738                                gint           val)
739 {
740   gint result;
741
742   if (G_UNLIKELY (CRIS_ATOMIC_BREAKS_CACHELINE (atomic)))
743     return __g_atomic_int_exchange_and_add (atomic, val);
744
745   do
746     result = *atomic;
747   while (!CRIS_ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
748
749   return result;
750 }
751
752 void
753 g_atomic_int_add (volatile gint *atomic,
754                   gint           val)
755 {
756   gint result;
757
758   if (G_UNLIKELY (CRIS_ATOMIC_BREAKS_CACHELINE (atomic)))
759     return __g_atomic_int_add (atomic, val);
760
761   do
762     result = *atomic;
763   while (!CRIS_ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
764 }
765
766 /* We need the atomic mutex for atomic operations where the atomic variable
767  * breaks the 32 byte cache line since the CRIS architecture does not support
768  * atomic operations on such variables. Fortunately this should be rare.
769  */
770 #  define DEFINE_WITH_MUTEXES
/* Redirect the public names so that the generic mutex-based
 * implementations further down in this file define the __g_atomic_*
 * fallback symbols declared above, not the public functions (those
 * were already defined directly above). */
771 #  define g_atomic_int_exchange_and_add __g_atomic_int_exchange_and_add
772 #  define g_atomic_int_add __g_atomic_int_add
773 #  define g_atomic_int_compare_and_exchange __g_atomic_int_compare_and_exchange
774 #  define g_atomic_pointer_compare_and_exchange __g_atomic_pointer_compare_and_exchange
775
776 # else /* !G_ATOMIC_* */
777 #  define DEFINE_WITH_MUTEXES
778 # endif /* G_ATOMIC_* */
779 #else /* !__GNUC__ */
780 # ifdef G_PLATFORM_WIN32
781 #  define DEFINE_WITH_WIN32_INTERLOCKED
782 # else
783 #  define DEFINE_WITH_MUTEXES
784 # endif
785 #endif /* __GNUC__ */
786
787 #ifdef DEFINE_WITH_WIN32_INTERLOCKED
788 # include <windows.h>
789 /* Following indicates that InterlockedCompareExchangePointer is
790  * declared in winbase.h (included by windows.h) and needs to be
791  * commented out if not true. It is defined iff WINVER > 0x0400,
792  * which is usually correct but can be wrong if WINVER is set before
793  * windows.h is included.
794  */
795 # if WINVER > 0x0400
796 #  define HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
797 # endif
798
799 gint32
800 g_atomic_int_exchange_and_add (volatile gint32 *atomic,
801                                gint32           val)
802 {
803   return InterlockedExchangeAdd (atomic, val);
804 }
805
806 void     
807 g_atomic_int_add (volatile gint32 *atomic, 
808                   gint32           val)
809 {
810   InterlockedExchangeAdd (atomic, val);
811 }
812
/* Atomically set '*atomic' to 'newval' iff it still equals 'oldval';
 * returns TRUE when the store happened.
 *
 * NOTE(review): the first branch presumably targets old SDKs where
 * InterlockedCompareExchange took PVOID arguments (hence the casts);
 * confirm against the Platform SDK headers in use.
 */
gboolean 
g_atomic_int_compare_and_exchange (volatile gint32 *atomic,
                                   gint32           oldval,
                                   gint32           newval)
{
#ifndef HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
  /* InterlockedCompareExchange returns the previous value of *atomic;
   * the swap happened iff that previous value equals 'oldval'. */
  return (guint32) InterlockedCompareExchange ((PVOID*)atomic, 
                                               (PVOID)newval, 
                                               (PVOID)oldval) == oldval;
#else
  return InterlockedCompareExchange (atomic, 
                                     newval, 
                                     oldval) == oldval;
#endif
}
828
/* Pointer-sized compare-and-swap: set '*atomic' to 'newval' iff it
 * still equals 'oldval'; returns TRUE when the store happened.
 */
gboolean 
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
                                       gpointer           oldval,
                                       gpointer           newval)
{
# ifdef HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
  return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
# else
#  if GLIB_SIZEOF_VOID_P != 4 /* not a 32-bit system */
#   error "InterlockedCompareExchangePointer needed"
#  else
   /* On 32-bit Windows a pointer fits the integer primitive, so it
    * can substitute for the missing pointer variant. */
   return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
#  endif
# endif
}
844 #endif /* DEFINE_WITH_WIN32_INTERLOCKED */
845
846 #ifdef DEFINE_WITH_MUTEXES
847 /* We have to use the slow, but safe locking method */
848 static GMutex *g_atomic_mutex; 
849
850 gint
851 g_atomic_int_exchange_and_add (volatile gint *atomic, 
852                                gint           val)
853 {
854   gint result;
855     
856   g_mutex_lock (g_atomic_mutex);
857   result = *atomic;
858   *atomic += val;
859   g_mutex_unlock (g_atomic_mutex);
860
861   return result;
862 }
863
864
865 void
866 g_atomic_int_add (volatile gint *atomic,
867                   gint           val)
868 {
869   g_mutex_lock (g_atomic_mutex);
870   *atomic += val;
871   g_mutex_unlock (g_atomic_mutex);
872 }
873
874 gboolean
875 g_atomic_int_compare_and_exchange (volatile gint *atomic, 
876                                    gint           oldval, 
877                                    gint           newval)
878 {
879   gboolean result;
880     
881   g_mutex_lock (g_atomic_mutex);
882   if (*atomic == oldval)
883     {
884       result = TRUE;
885       *atomic = newval;
886     }
887   else
888     result = FALSE;
889   g_mutex_unlock (g_atomic_mutex);
890
891   return result;
892 }
893
894 gboolean
895 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
896                                        gpointer           oldval, 
897                                        gpointer           newval)
898 {
899   gboolean result;
900     
901   g_mutex_lock (g_atomic_mutex);
902   if (*atomic == oldval)
903     {
904       result = TRUE;
905       *atomic = newval;
906     }
907   else
908     result = FALSE;
909   g_mutex_unlock (g_atomic_mutex);
910
911   return result;
912 }
913
914 #ifdef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
915 gint
916 g_atomic_int_get (volatile gint *atomic)
917 {
918   gint result;
919
920   g_mutex_lock (g_atomic_mutex);
921   result = *atomic;
922   g_mutex_unlock (g_atomic_mutex);
923
924   return result;
925 }
926
/* Mutex-protected store of 'newval' into '*atomic'.  Only compiled
 * when G_ATOMIC_OP_MEMORY_BARRIER_NEEDED is defined, i.e. when a bare
 * store would not be ordered correctly by itself.
 */
void
g_atomic_int_set (volatile gint *atomic,
                  gint           newval)
{
  g_mutex_lock (g_atomic_mutex);
  *atomic = newval;
  g_mutex_unlock (g_atomic_mutex);
}
935
936 gpointer
937 g_atomic_pointer_get (volatile gpointer *atomic)
938 {
939   gpointer result;
940
941   g_mutex_lock (g_atomic_mutex);
942   result = *atomic;
943   g_mutex_unlock (g_atomic_mutex);
944
945   return result;
946 }
947
/* Mutex-protected store of 'newval' into '*atomic'.  Only compiled
 * when G_ATOMIC_OP_MEMORY_BARRIER_NEEDED is defined, i.e. when a bare
 * store would not be ordered correctly by itself.
 */
void
g_atomic_pointer_set (volatile gpointer *atomic,
                      gpointer           newval)
{
  g_mutex_lock (g_atomic_mutex);
  *atomic = newval;
  g_mutex_unlock (g_atomic_mutex);
}
956 #endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */   
957 #elif defined (G_ATOMIC_OP_MEMORY_BARRIER_NEEDED)
/* Read '*atomic' behind a memory barrier so that writes made by other
 * processors before a matching atomic operation are visible here.
 * G_ATOMIC_MEMORY_BARRIER is defined per-architecture earlier in this
 * file — presumably a full barrier; confirm for new ports.
 */
gint
g_atomic_int_get (volatile gint *atomic)
{
  G_ATOMIC_MEMORY_BARRIER;  /* barrier BEFORE the load */
  return *atomic;
}
964
/* Publish 'newval': the store is followed by a barrier so it becomes
 * visible to other processors before subsequent operations here.
 */
void
g_atomic_int_set (volatile gint *atomic,
                  gint           newval)
{
  *atomic = newval;
  G_ATOMIC_MEMORY_BARRIER;  /* barrier AFTER the store */
}
972
/* Barrier-then-load variant of g_atomic_pointer_get; mirrors
 * g_atomic_int_get above for pointer-sized values.
 */
gpointer
g_atomic_pointer_get (volatile gpointer *atomic)
{
  G_ATOMIC_MEMORY_BARRIER;  /* barrier BEFORE the load */
  return *atomic;
}   
979
/* Store-then-barrier variant of g_atomic_pointer_set; mirrors
 * g_atomic_int_set above for pointer-sized values.
 */
void
g_atomic_pointer_set (volatile gpointer *atomic,
                      gpointer           newval)
{
  *atomic = newval;
  G_ATOMIC_MEMORY_BARRIER;  /* barrier AFTER the store */
}
987 #endif /* DEFINE_WITH_MUTEXES || G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
988
989 #ifdef ATOMIC_INT_CMP_XCHG
/* Out-of-line compare-and-swap built on the architecture's
 * ATOMIC_INT_CMP_XCHG macro (defined earlier in this file): set
 * '*atomic' to 'newval' iff it equals 'oldval'; TRUE on success.
 */
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
                                   gint           oldval,
                                   gint           newval)
{
  return ATOMIC_INT_CMP_XCHG (atomic, oldval, newval);
}
997
998 gint
999 g_atomic_int_exchange_and_add (volatile gint *atomic,
1000                                gint           val)
1001 {
1002   gint result;
1003   do
1004     result = *atomic;
1005   while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
1006
1007   return result;
1008 }
1009  
1010 void
1011 g_atomic_int_add (volatile gint *atomic,
1012                   gint           val)
1013 {
1014   gint result;
1015   do
1016     result = *atomic;
1017   while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
1018 }
1019 #endif /* ATOMIC_INT_CMP_XCHG */
1020
/* Thread-system initialization hook (declared in gthreadprivate.h;
 * presumably invoked during g_thread_init — confirm at the caller).
 * Creates the global mutex the mutex-based fallbacks serialize on.
 */
void 
_g_atomic_thread_init (void)
{
#ifdef DEFINE_WITH_MUTEXES
  g_atomic_mutex = g_mutex_new ();
#endif /* DEFINE_WITH_MUTEXES */
}
1028
1029 #ifndef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
/* Out-of-line version of g_atomic_int_get.  The parentheses around
 * the name suppress expansion of any same-named macro, so a real
 * linkable function is emitted that forwards to the macro/inline.
 */
gint
(g_atomic_int_get) (volatile gint *atomic)
{
  return g_atomic_int_get (atomic);
}
1035
/* Out-of-line version of g_atomic_int_set; parenthesized name
 * suppresses macro expansion (see g_atomic_int_get note style).
 */
void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  g_atomic_int_set (atomic, newval);
}
1042
/* Out-of-line version of g_atomic_pointer_get; parenthesized name
 * suppresses macro expansion so a real function symbol exists.
 */
gpointer
(g_atomic_pointer_get) (volatile gpointer *atomic)
{
  return g_atomic_pointer_get (atomic);
}
1048
/* Out-of-line version of g_atomic_pointer_set; parenthesized name
 * suppresses macro expansion so a real function symbol exists.
 */
void
(g_atomic_pointer_set) (volatile gpointer *atomic,
                        gpointer           newval)
{
  g_atomic_pointer_set (atomic, newval);
}
1055 #endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
1056
1057 #define __G_ATOMIC_C__
1058 #include "galiasdef.c"