/* glib/gatomic.c — gthreadinit.h is included below to see the declaration
 * of _g_atomic_thread_init(). */
1 /* GLIB - Library of useful routines for C programming
2  * Copyright (C) 1995-1997  Peter Mattis, Spencer Kimball and Josh MacDonald
3  *
4  * g_atomic_*: atomic operations.
5  * Copyright (C) 2003 Sebastian Wilhelmi
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, write to the
19  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20  * Boston, MA 02111-1307, USA.
21  */
22  
23 #include "config.h"
24
25 #include "glib.h"
26 #include "gthreadinit.h"
27
28 #if defined (__GNUC__)
29 # if defined (G_ATOMIC_I486)
30 /* Adapted from CVS version 1.10 of glibc's sysdeps/i386/i486/bits/atomic.h 
31  */
/* Atomically add `val` to `*atomic` and return the value `*atomic` held
 * immediately before the addition (fetch-and-add).
 * The "0" constraint loads `val` into the same register as `result`;
 * `lock; xaddl` then atomically exchanges that register with `*atomic`
 * while adding, leaving the old memory value in `result`. */
gint
g_atomic_int_exchange_and_add (gint *atomic, 
                               gint val)
{
  gint result;

  __asm__ __volatile__ ("lock; xaddl %0,%1"
                        : "=r" (result), "=m" (*atomic) 
                        : "0" (val), "m" (*atomic));
  return result;
}
43  
/* Atomically add `val` to `*atomic`; the previous value is not needed,
 * so a plain `lock; addl` suffices (no xadd round-trip). */
void
g_atomic_int_add (gint *atomic, 
                  gint val)
{
  __asm__ __volatile__ ("lock; addl %1,%0"
                        : "=m" (*atomic) 
                        : "ir" (val), "m" (*atomic));
}
52
/* Atomically set `*atomic` to `newval` iff it currently equals `oldval`.
 * Returns TRUE when the swap happened.
 * `lock; cmpxchgl`: the "a"/"0" constraints put `oldval` into EAX on
 * entry and the previous memory value into `result` on exit, so the
 * exchange succeeded exactly when result == oldval. */
gboolean
g_atomic_int_compare_and_exchange (gint *atomic, 
                                   gint oldval, 
                                   gint newval)
{
  gint result;
 
  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                        : "=a" (result), "=m" (*atomic)
                        : "r" (newval), "m" (*atomic), "0" (oldval)); 

  return result == oldval;
}
66
67 /* The same code as above, as on i386 gpointer is 32 bit as well.
68  * Duplicating the code here seems more natural than casting the
69  * arguments and calling the former function */
70
/* Pointer-sized compare-and-exchange; on i386 this is the same 32-bit
 * `lock; cmpxchgl` as the gint version (see the comment above).  EAX
 * carries `oldval` in and the previous `*atomic` value out. */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic, 
                                       gpointer  oldval, 
                                       gpointer  newval)
{
  gpointer result;
 
  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                        : "=a" (result), "=m" (*atomic)
                        : "r" (newval), "m" (*atomic), "0" (oldval)); 

  return result == oldval;
}
84
85 # elif defined (G_ATOMIC_SPARCV9)
86 /* Adapted from CVS version 1.3 of glibc's sysdeps/sparc/sparc64/bits/atomic.h
87  */
/* Compare-and-exchange on a gint via the SPARC V9 `cas` instruction:
 * cas compares *(atomic) with %2 (oldval) and swaps in %0 (newval,
 * tied via "0"); afterwards %0 holds the previous memory value, so the
 * swap succeeded exactly when __result == oldval.
 * Fix: parenthesize the `oldval` argument in the final expression so
 * the macro expands safely for compound argument expressions. */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
  ({                                                                    \
     gint __result;                                                     \
     __asm__ __volatile__ ("cas [%4], %2, %0"                           \
                           : "=r" (__result), "=m" (*(atomic))          \
                           : "r" (oldval), "m" (*(atomic)), "r" (atomic),\
                           "0" (newval));                               \
     __result == (oldval);                                              \
  })
97
98 #  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* 32-bit pointer compare-and-exchange using SPARC `cas`: %0 carries
 * `newval` in ("0" ties it) and the previous memory value out, so
 * success is reported when result == oldval. */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic, 
                                       gpointer  oldval, 
                                       gpointer  newval)
{
  gpointer result;
  __asm__ __volatile__ ("cas [%4], %2, %0"
                        : "=r" (result), "=m" (*atomic)
                        : "r" (oldval), "m" (*atomic), "r" (atomic),
                        "0" (newval));
  return result == oldval;
}
111 #  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
112 gboolean
113 g_atomic_pointer_compare_and_exchange (gpointer *atomic, 
114                                        gpointer  oldval, 
115                                        gpointer  newval)
116 {
117   gpointer result;
118   gpointer *a = atomic;
119   __asm__ __volatile__ ("casx [%4], %2, %0"
120                         : "=r" (result), "=m" (*a)
121                         : "r" (oldval), "m" (*a), "r" (a),
122                         "0" (newval));
123   return result != 0;
124 }
125 #  else /* What's that */
126 #    error "Your system has an unsupported pointer size"
127 #  endif /* GLIB_SIZEOF_VOID_P */
/* Full memory barrier: `membar` with all four ordering constraints,
 * plus a "memory" clobber so the compiler does not reorder around it. */
#  define G_ATOMIC_MEMORY_BARRIER                                       \
  __asm__ __volatile__ ("membar #LoadLoad | #LoadStore"                 \
                        " | #StoreLoad | #StoreStore" : : : "memory")
131
132 # elif defined (G_ATOMIC_ALPHA)
133 /* Adapted from CVS version 1.3 of glibc's sysdeps/alpha/bits/atomic.h
134  */
/* Compare-and-exchange via Alpha load-locked/store-conditional:
 *   ldl_l  loads *(atomic) with a reservation into __prev;
 *   cmpeq  sets __result to 1 iff __prev == oldval, else 0;
 *   on mismatch, branch to 2: with __result == 0 (failure);
 *   stl_c  conditionally stores newval, writing 1 into __result on
 *          success and 0 on a lost reservation (which retries at 1:).
 * So after the asm, __result != 0 exactly when the swap happened.
 * The surrounding `mb` barriers order the operation against other
 * memory accesses. */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
  ({                                                                    \
     gint __result;                                                     \
     gint __prev;                                                       \
     __asm__ __volatile__ (                                             \
        "       mb\n"                                                   \
        "1:     ldl_l   %0,%2\n"                                        \
        "       cmpeq   %0,%3,%1\n"                                     \
        "       beq     %1,2f\n"                                        \
        "       mov     %4,%1\n"                                        \
        "       stl_c   %1,%2\n"                                        \
        "       beq     %1,1b\n"                                        \
        "       mb\n"                                                   \
        "2:"                                                            \
        : "=&r" (__prev),                                               \
          "=&r" (__result)                                              \
        : "m" (*(atomic)),                                              \
          "Ir" (oldval),                                                \
          "Ir" (newval)                                                 \
        : "memory");                                                    \
     __result != 0;                                                     \
  })
157 #  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* 32-bit pointer compare-and-exchange; same ll/sc sequence as
 * ATOMIC_INT_CMP_XCHG above (32-bit ldl_l/stl_c, which matches the
 * pointer size in this GLIB_SIZEOF_VOID_P == 4 branch).  `result` is
 * nonzero exactly when the conditional store succeeded. */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic, 
                                       gpointer  oldval, 
                                       gpointer  newval)
{
  gint result;
  gpointer prev;
  __asm__ __volatile__ (
        "       mb\n"
        "1:     ldl_l   %0,%2\n"
        "       cmpeq   %0,%3,%1\n"
        "       beq     %1,2f\n"
        "       mov     %4,%1\n"
        "       stl_c   %1,%2\n"
        "       beq     %1,1b\n"
        "       mb\n"
        "2:"
        : "=&r" (prev), 
          "=&r" (result)
        : "m" (*atomic),
          "Ir" (oldval),
          "Ir" (newval)
        : "memory");
  return result != 0;
}
183 #  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
/* 64-bit pointer compare-and-exchange: identical structure to the
 * 32-bit variant but with quadword ldq_l/stq_c to cover the full
 * 64-bit pointer.  `result` is nonzero exactly when the conditional
 * store succeeded. */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic, 
                                       gpointer  oldval, 
                                       gpointer  newval)
{
  gint result;
  gpointer prev;
  __asm__ __volatile__ (
        "       mb\n"
        "1:     ldq_l   %0,%2\n"
        "       cmpeq   %0,%3,%1\n"
        "       beq     %1,2f\n"
        "       mov     %4,%1\n"
        "       stq_c   %1,%2\n"
        "       beq     %1,1b\n"
        "       mb\n"
        "2:"
        : "=&r" (prev), 
          "=&r" (result)
        : "m" (*atomic),
          "Ir" (oldval),
          "Ir" (newval)
        : "memory");
  return result != 0;
}
209 #  else /* What's that */
210 #   error "Your system has an unsupported pointer size"
211 #  endif /* GLIB_SIZEOF_VOID_P */
/* Alpha full memory barrier instruction. */
#  define G_ATOMIC_MEMORY_BARRIER  __asm__ ("mb" : : : "memory")
213 # elif defined (G_ATOMIC_X86_64)
214 /* Adapted from CVS version 1.9 of glibc's sysdeps/x86_64/bits/atomic.h 
215  */
/* Fetch-and-add for x86-64; gint is still 32 bits, so this is the
 * same `lock; xaddl` sequence as the i486 version: the "0" constraint
 * loads `val` into the result register, xaddl atomically exchanges it
 * with `*atomic` while adding, and `result` receives the old value. */
gint
g_atomic_int_exchange_and_add (gint *atomic, 
                               gint val)
{
  gint result;

  __asm__ __volatile__ ("lock; xaddl %0,%1"
                        : "=r" (result), "=m" (*atomic) 
                        : "0" (val), "m" (*atomic));
  return result;
}
227  
/* Atomic 32-bit add on x86-64; previous value not needed, so plain
 * `lock; addl`. */
void
g_atomic_int_add (gint *atomic, 
                  gint val)
{
  __asm__ __volatile__ ("lock; addl %1,%0"
                        : "=m" (*atomic) 
                        : "ir" (val), "m" (*atomic));
}
236
/* 32-bit compare-and-exchange on x86-64 (`lock; cmpxchgl`): EAX
 * ("a"/"0") carries `oldval` in and the previous memory value out, so
 * success <=> result == oldval. */
gboolean
g_atomic_int_compare_and_exchange (gint *atomic, 
                                   gint oldval, 
                                   gint newval)
{
  gint result;
 
  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                        : "=a" (result), "=m" (*atomic)
                        : "r" (newval), "m" (*atomic), "0" (oldval)); 

  return result == oldval;
}
250
/* 64-bit pointer compare-and-exchange: `cmpxchgq` with the "%q2"
 * modifier forcing the 64-bit register name for newval.  RAX carries
 * `oldval` in and the previous memory value out. */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic, 
                                       gpointer  oldval, 
                                       gpointer  newval)
{
  gpointer result;
 
  __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
                        : "=a" (result), "=m" (*atomic)
                        : "r" (newval), "m" (*atomic), "0" (oldval)); 

  return result == oldval;
}
264
265 # elif defined (G_ATOMIC_POWERPC)
266 /* Adapted from CVS version 1.12 of glibc's sysdeps/powerpc/bits/atomic.h 
267  * and CVS version 1.3 of glibc's sysdeps/powerpc/powerpc32/bits/atomic.h 
268  * and CVS version 1.2 of glibc's sysdeps/powerpc/powerpc64/bits/atomic.h 
269  */
270 #   ifdef __OPTIMIZE__
271 /* Non-optimizing compile bails on the following two asm statements
272  * for reasons unknown to the author */
/* Fetch-and-add via a PowerPC lwarx/stwcx. reservation loop:
 * load *atomic with reservation into `result`, compute result+val in
 * `temp`, conditionally store; bne- retries if the reservation was
 * lost.  Returns the value loaded (i.e. the pre-add value). */
gint
g_atomic_int_exchange_and_add (gint *atomic, 
                               gint val)
{
  gint result, temp;
  __asm__ __volatile__ ("1:       lwarx   %0,0,%3\n"
                        "         add     %1,%0,%4\n"
                        "         stwcx.  %1,0,%3\n"
                        "         bne-    1b"
                        : "=&b" (result), "=&r" (temp), "=m" (*atomic)
                        : "b" (atomic), "r" (val), "2" (*atomic)
                        : "cr0", "memory");
  return result;
}
287  
288 /* The same as above, to save a function call repeated here */
/* Same lwarx/stwcx. loop as g_atomic_int_exchange_and_add, duplicated
 * here (per the original comment) to avoid a function call; the loaded
 * value is simply discarded. */
void
g_atomic_int_add (gint *atomic, 
                  gint val)
{
  gint result, temp;  
  __asm__ __volatile__ ("1:       lwarx   %0,0,%3\n"
                        "         add     %1,%0,%4\n"
                        "         stwcx.  %1,0,%3\n"
                        "         bne-    1b"
                        : "=&b" (result), "=&r" (temp), "=m" (*atomic)
                        : "b" (atomic), "r" (val), "2" (*atomic)
                        : "cr0", "memory");
}
302 #   else /* !__OPTIMIZE__ */
303 gint
304 g_atomic_int_exchange_and_add (gint *atomic, 
305                                gint val)
306 {
307   gint result;
308   do
309     result = *atomic;
310   while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
311
312   return result;
313 }
314  
315 void
316 g_atomic_int_add (gint *atomic, 
317                   gint val)
318 {
319   gint result;
320   do
321     result = *atomic;
322   while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
323 }
324 #   endif /* !__OPTIMIZE__ */
325
326 #   if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* 32-bit compare-and-exchange on PowerPC: lwarx loads with
 * reservation, `subf.` computes loaded - oldval (so result == 0 iff
 * they matched), bne exits on mismatch, stwcx. conditionally stores
 * newval and bne- retries on a lost reservation.  The leading `sync`
 * and trailing `isync` provide the memory ordering. */
gboolean
g_atomic_int_compare_and_exchange (gint *atomic, 
                                   gint oldval, 
                                   gint newval)
{
  gint result;
  __asm__ __volatile__ ("sync\n"
                        "1: lwarx   %0,0,%1\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     2f\n"
                        "   stwcx.  %3,0,%1\n"
                        "   bne-    1b\n"
                        "2: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory"); 
  return result == 0;
}
345
/* Pointer compare-and-exchange for 32-bit PowerPC; pointers are
 * word-sized here, so the sequence is identical to the gint version
 * above (result == 0 iff the loaded value matched oldval). */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic, 
                                       gpointer  oldval, 
                                       gpointer  newval)
{
  gpointer result;
  __asm__ __volatile__ ("sync\n"
                        "1: lwarx   %0,0,%1\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     2f\n"
                        "   stwcx.  %3,0,%1\n"
                        "   bne-    1b\n"
                        "2: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory"); 
  return result == 0;
}
364 #   elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
/* 32-bit compare-and-exchange on 64-bit PowerPC.  lwarx loads the
 * 32-bit word; `extsw` sign-extends it to 64 bits before the 64-bit
 * `subf.` against oldval, hence the pointer-sized (64-bit) `result`
 * temporary rather than a gint.  result == 0 iff the values matched
 * and the store path was taken. */
gboolean
g_atomic_int_compare_and_exchange (gint *atomic, 
                                   gint oldval, 
                                   gint newval)
{
  gpointer result;
  __asm__ __volatile__ ("sync\n"
                        "1: lwarx   %0,0,%1\n"
                        "   extsw   %0,%0\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     2f\n"
                        "   stwcx.  %3,0,%1\n"
                        "   bne-    1b\n"
                        "2: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory"); 
  return result == 0;
}
384
/* 64-bit pointer compare-and-exchange on PowerPC64: doubleword
 * ldarx/stdcx. instead of lwarx/stwcx., otherwise the same
 * reservation loop (result == 0 iff the loaded value matched oldval). */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic, 
                                       gpointer  oldval, 
                                       gpointer  newval)
{
  gpointer result;
  __asm__ __volatile__ ("sync\n"
                        "1: ldarx   %0,0,%1\n"
                        "   subf.   %0,%2,%0\n"
                        "   bne     2f\n"
                        "   stdcx.  %3,0,%1\n"
                        "   bne-    1b\n"
                        "2: isync"
                        : "=&r" (result)
                        : "b" (atomic), "r" (oldval), "r" (newval)
                        : "cr0", "memory"); 
  return result == 0;
}
403 #  else /* What's that */
404 #   error "Your system has an unsupported pointer size"
405 #  endif /* GLIB_SIZEOF_VOID_P */
406
/* PowerPC full memory barrier instruction. */
#  define G_ATOMIC_MEMORY_BARRIER __asm__ ("sync" : : : "memory")
408
409 # elif defined (G_ATOMIC_IA64)
410 /* Adapted from CVS version 1.8 of glibc's sysdeps/ia64/bits/atomic.h
411  */
/* Fetch-and-add delegated to the compiler intrinsic; the `_si` suffix
 * is the ia64-era GCC spelling (presumably the 4-byte variant — this
 * predates the generic __sync builtins). */
gint
g_atomic_int_exchange_and_add (gint *atomic, 
                               gint val)
{
  return __sync_fetch_and_add_si (atomic, val);
}
418  
/* Atomic add via the same intrinsic, return value discarded. */
void
g_atomic_int_add (gint *atomic, 
                  gint val)
{
  __sync_fetch_and_add_si (atomic, val);
}
425
/* Compare-and-exchange delegated to the ia64 intrinsic; returns TRUE
 * when the swap happened. */
gboolean
g_atomic_int_compare_and_exchange (gint *atomic, 
                                   gint oldval, 
                                   gint newval)
{
  return __sync_bool_compare_and_swap_si (atomic, oldval, newval);
}
433
/* Pointer compare-and-exchange via the 8-byte (`_di`) intrinsic;
 * pointers are cast through `long`, which on ia64 is 64 bits wide
 * like gpointer. */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic, 
                                       gpointer  oldval, 
                                       gpointer  newval)
{
  return __sync_bool_compare_and_swap_di ((long *)atomic, 
                                          (long)oldval, (long)newval);
}
442
/* Full memory barrier via the compiler intrinsic. */
#  define G_ATOMIC_MEMORY_BARRIER __sync_synchronize ()
444 # else /* !G_ATOMIC */
445 #  define DEFINE_WITH_MUTEXES
446 # endif /* G_ATOMIC */
447 #else /* !__GNUC__ */
448 # ifdef G_PLATFORM_WIN32
449 #  define DEFINE_WITH_WIN32_INTERLOCKED
450 # else
451 #  define DEFINE_WITH_MUTEXES
452 # endif
453 #endif /* __GNUC__ */
454
455 #ifdef DEFINE_WITH_WIN32_INTERLOCKED
456 # include <windows.h>
/* Fetch-and-add via the Win32 Interlocked API; InterlockedExchangeAdd
 * returns the value *atomic held before the addition. */
gint32   
g_atomic_int_exchange_and_add (gint32   *atomic, 
                               gint32    val)
{
  return InterlockedExchangeAdd (atomic, val);
}
463
/* Atomic add via InterlockedExchangeAdd, return value discarded. */
void     
g_atomic_int_add (gint32   *atomic, 
                  gint32    val)
{
  InterlockedExchangeAdd (atomic, val);
}
470
/* Compare-and-exchange via InterlockedCompareExchange, which returns
 * the previous value of *atomic; the swap happened iff that equals
 * oldval.  NOTE(review): the PVOID casts match the original (pre-XP)
 * pointer-typed InterlockedCompareExchange signature — confirm against
 * the targeted Windows SDK, where later versions take LONG arguments. */
gboolean 
g_atomic_int_compare_and_exchange (gint32   *atomic, 
                                   gint32    oldval, 
                                   gint32    newval)
{
  return (guint32)InterlockedCompareExchange ((PVOID*)atomic, 
                                              (PVOID)newval, 
                                              (PVOID)oldval) == oldval;
}
480
/* Pointer compare-and-exchange on Win32.  Only 32-bit targets are
 * handled: there a pointer fits in the same InterlockedCompareExchange
 * used for gint32; 64-bit targets fail at compile time until an
 * InterlockedCompareExchangePointer-based version is added. */
gboolean 
g_atomic_pointer_compare_and_exchange (gpointer *atomic, 
                                       gpointer  oldval, 
                                       gpointer  newval)
{
# if GLIB_SIZEOF_VOID_P != 4 /* no 32-bit system */
#  error "InterlockedCompareExchangePointer needed"
# else
   return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
# endif
}
492 #endif /* DEFINE_WITH_WIN32_INTERLOCKED */
493
494 #ifdef DEFINE_WITH_MUTEXES
495 /* We have to use the slow, but safe locking method */
496 static GMutex *g_atomic_mutex; 
497
498 gint
499 g_atomic_int_exchange_and_add (gint *atomic, 
500                                gint  val)
501 {
502   gint result;
503     
504   g_mutex_lock (g_atomic_mutex);
505   result = *atomic;
506   *atomic += val;
507   g_mutex_unlock (g_atomic_mutex);
508
509   return result;
510 }
511
512
513 void
514 g_atomic_int_add (gint *atomic,
515                   gint  val)
516 {
517   g_mutex_lock (g_atomic_mutex);
518   *atomic += val;
519   g_mutex_unlock (g_atomic_mutex);
520 }
521
522 gboolean
523 g_atomic_int_compare_and_exchange (gint *atomic, 
524                                    gint  oldval, 
525                                    gint  newval)
526 {
527   gboolean result;
528     
529   g_mutex_lock (g_atomic_mutex);
530   if (*atomic == oldval)
531     {
532       result = TRUE;
533       *atomic = newval;
534     }
535   else
536     result = FALSE;
537   g_mutex_unlock (g_atomic_mutex);
538
539   return result;
540 }
541
542 gboolean
543 g_atomic_pointer_compare_and_exchange (gpointer *atomic, 
544                                        gpointer  oldval, 
545                                        gpointer  newval)
546 {
547   gboolean result;
548     
549   g_mutex_lock (g_atomic_mutex);
550   if (*atomic == oldval)
551     {
552       result = TRUE;
553       *atomic = newval;
554     }
555   else
556     result = FALSE;
557   g_mutex_unlock (g_atomic_mutex);
558
559   return result;
560 }
561
562 #ifdef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
/* Read *atomic under the global lock so the read is ordered with the
 * mutex-based updates on architectures that need a barrier. */
gint
g_atomic_int_get (gint *atomic)
{
  gint result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}
574
/* Pointer read under the global lock; see g_atomic_int_get. */
gpointer
g_atomic_pointer_get (gpointer *atomic)
{
  gpointer result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}
586 #endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */   
587 #elif defined (G_ATOMIC_OP_MEMORY_BARRIER_NEEDED)
/* Lock-free read followed by the architecture's memory barrier, for
 * platforms where plain loads need explicit ordering. */
gint
g_atomic_int_get (gint *atomic)
{
  gint result = *atomic;

  G_ATOMIC_MEMORY_BARRIER;

  return result;
}
597
/* Lock-free pointer read plus barrier; see g_atomic_int_get. */
gpointer
g_atomic_pointer_get (gpointer *atomic)
{
  gpointer result = *atomic;

  G_ATOMIC_MEMORY_BARRIER;

  return result;
}   
607 #endif /* DEFINE_WITH_MUTEXES || G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
608
609 #ifdef ATOMIC_INT_CMP_XCHG
/* Generic compare-and-exchange for architectures that only provide
 * the ATOMIC_INT_CMP_XCHG primitive macro. */
gboolean
g_atomic_int_compare_and_exchange (gint *atomic, 
                                   gint oldval, 
                                   gint newval)
{
  return ATOMIC_INT_CMP_XCHG (atomic, oldval, newval);
}
617
/* Generic fetch-and-add built as a CAS retry loop on top of
 * ATOMIC_INT_CMP_XCHG; returns the value read just before the
 * successful swap. */
gint
g_atomic_int_exchange_and_add (gint *atomic, 
                               gint val)
{
  gint result;
  do
    result = *atomic;
  while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));

  return result;
}
629  
/* Generic atomic add: same CAS retry loop, result discarded. */
void
g_atomic_int_add (gint *atomic, 
                  gint val)
{
  gint result;
  do
    result = *atomic;
  while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
}
639 #endif /* ATOMIC_INT_CMP_XCHG */
640
/* One-time initialisation hook for the atomic-ops module, called from
 * GLib's thread-system setup.  Only the mutex-based fallback has any
 * state to set up: the global lock serializing all atomic operations.
 * Fix: declare the parameter list as (void) instead of the old-style
 * empty (), so the definition also serves as a proper prototype. */
void 
_g_atomic_thread_init (void)
{
#ifdef DEFINE_WITH_MUTEXES
  g_atomic_mutex = g_mutex_new ();
#endif /* DEFINE_WITH_MUTEXES */
}