Patch by Sebastian Wilhelmi to fix infinite recursion in g_atomic.
[platform/upstream/glib.git] / glib / gatomic.c
/* GLIB - Library of useful routines for C programming
 * Copyright (C) 1995-1997  Peter Mattis, Spencer Kimball and Josh MacDonald
 *
 * g_atomic_*: atomic operations.
 * Copyright (C) 2003 Sebastian Wilhelmi
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include "config.h"

#include "glib.h"

#if defined (__GNUC__)
# if defined (G_ATOMIC_I486)
/* Adapted from CVS version 1.10 of glibc's sysdeps/i386/i486/bits/atomic.h
 */
gint
g_atomic_int_exchange_and_add (gint *atomic,
                               gint val)
{
  gint result;

  __asm__ __volatile__ ("lock; xaddl %0,%1"
                        : "=r" (result), "=m" (*atomic)
                        : "0" (val), "m" (*atomic));
  return result;
}
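/* "lock; xaddl" atomically adds val to *atomic and leaves the previous
 * value of *atomic in the output register, which is why the old value
 * can be returned without a separate read. */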

void
g_atomic_int_add (gint *atomic,
                  gint val)
{
  __asm__ __volatile__ ("lock; addl %1,%0"
                        : "=m" (*atomic)
                        : "ir" (val), "m" (*atomic));
}

gboolean
g_atomic_int_compare_and_exchange (gint *atomic,
                                   gint oldval,
                                   gint newval)
{
  gint result;

  __asm __volatile ("lock; cmpxchgl %2, %1"
                    : "=a" (result), "=m" (*atomic)
                    : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}
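/* cmpxchgl compares *atomic against the value in EAX (loaded with oldval
 * through the "0"/"=a" constraints); on a match it stores newval, otherwise
 * it loads the current value into EAX.  Either way EAX ends up holding the
 * old contents of *atomic, so "result == oldval" is the success test. */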

/* The same code as above, since on i386 gpointer is 32 bits wide as well.
 * Duplicating the code here seems more natural than casting the
 * arguments and calling the former function */

gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
  gpointer result;

  __asm __volatile ("lock; cmpxchgl %2, %1"
                    : "=a" (result), "=m" (*atomic)
                    : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

# elif defined (G_ATOMIC_SPARCV9)
/* Adapted from CVS version 1.3 of glibc's sysdeps/sparc/sparc64/bits/atomic.h
 */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
  ({                                                                    \
     gint __result;                                                     \
     __asm __volatile ("cas [%4], %2, %0"                               \
                       : "=r" (__result), "=m" (*(atomic))              \
                       : "r" (oldval), "m" (*(atomic)), "r" (atomic),   \
                         "0" (newval));                                 \
     __result == oldval;                                                \
  })
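/* The SPARC v9 "cas [mem], rs2, rd" instruction compares the word at [mem]
 * with rs2 (oldval) and, if they match, swaps it with rd (initially newval).
 * rd always receives the old memory contents, so comparing it against
 * oldval tells us whether the exchange actually happened. */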

#  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
  gpointer result;
  __asm __volatile ("cas [%4], %2, %0"
                    : "=r" (result), "=m" (*atomic)
                    : "r" (oldval), "m" (*atomic), "r" (atomic),
                      "0" (newval));
  return result == oldval;
}
#  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
  gpointer result;
  gpointer *a = atomic;
  __asm __volatile ("casx [%4], %2, %0"
                    : "=r" (result), "=m" (*a)
                    : "r" (oldval), "m" (*a), "r" (a),
                      "0" (newval));
  return result == oldval;
}
#  else /* What's that */
#    error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */
#  define G_ATOMIC_MEMORY_BARRIER                                       \
  __asm __volatile ("membar #LoadLoad | #LoadStore"                     \
                    " | #StoreLoad | #StoreStore" : : : "memory")

# elif defined (G_ATOMIC_ALPHA)
/* Adapted from CVS version 1.3 of glibc's sysdeps/alpha/bits/atomic.h
 */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
  ({                                                                    \
     gint __result;                                                     \
     gint __prev;                                                       \
     __asm__ __volatile__ (                                             \
        "       mb\n"                                                   \
        "1:     ldl_l   %0,%2\n"                                        \
        "       cmpeq   %0,%3,%1\n"                                     \
        "       beq     %1,2f\n"                                        \
        "       mov     %4,%1\n"                                        \
        "       stl_c   %1,%2\n"                                        \
        "       beq     %1,1b\n"                                        \
        "       mb\n"                                                   \
        "2:"                                                            \
        : "=&r" (__prev),                                               \
          "=&r" (__result)                                              \
        : "m" (*(atomic)),                                              \
          "Ir" (oldval),                                                \
          "Ir" (newval)                                                 \
        : "memory");                                                    \
     __result != 0;                                                     \
  })
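/* Alpha has no single compare-and-swap instruction; the sequence above uses
 * load-locked (ldl_l) / store-conditional (stl_c) instead.  stl_c succeeds
 * only if nothing wrote the location since the ldl_l and stores its status
 * (1 on success, 0 on failure) back into %1, so a failed store loops back
 * to 1:, while a failed comparison jumps to 2: with %1 == 0.  __result is
 * therefore non-zero exactly when the new value was stored. */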
#  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
  gint result;
  gpointer prev;
  __asm__ __volatile__ (
        "       mb\n"
        "1:     ldl_l   %0,%2\n"
        "       cmpeq   %0,%3,%1\n"
        "       beq     %1,2f\n"
        "       mov     %4,%1\n"
        "       stl_c   %1,%2\n"
        "       beq     %1,1b\n"
        "       mb\n"
        "2:"
        : "=&r" (prev),
          "=&r" (result)
        : "m" (*atomic),
          "Ir" (oldval),
          "Ir" (newval)
        : "memory");
  return result != 0;
}
#  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
  gint result;
  gpointer prev;
  __asm__ __volatile__ (
        "       mb\n"
        "1:     ldq_l   %0,%2\n"
        "       cmpeq   %0,%3,%1\n"
        "       beq     %1,2f\n"
        "       mov     %4,%1\n"
        "       stq_c   %1,%2\n"
        "       beq     %1,1b\n"
        "       mb\n"
        "2:"
        : "=&r" (prev),
          "=&r" (result)
        : "m" (*atomic),
          "Ir" (oldval),
          "Ir" (newval)
        : "memory");
  return result != 0;
}
#  else /* What's that */
#   error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */
#  define G_ATOMIC_MEMORY_BARRIER  __asm ("mb" : : : "memory")
# elif defined (G_ATOMIC_X86_64)
/* Adapted from CVS version 1.9 of glibc's sysdeps/x86_64/bits/atomic.h
 */
gint
g_atomic_int_exchange_and_add (gint *atomic,
                               gint val)
{
  gint result;

  __asm__ __volatile__ ("lock; xaddl %0,%1"
                        : "=r" (result), "=m" (*atomic)
                        : "0" (val), "m" (*atomic));
  return result;
}

void
g_atomic_int_add (gint *atomic,
                  gint val)
{
  __asm__ __volatile__ ("lock; addl %1,%0"
                        : "=m" (*atomic)
                        : "ir" (val), "m" (*atomic));
}

gboolean
g_atomic_int_compare_and_exchange (gint *atomic,
                                   gint oldval,
                                   gint newval)
{
  gint result;

  __asm __volatile ("lock; cmpxchgl %2, %1"
                    : "=a" (result), "=m" (*atomic)
                    : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
  gpointer result;

  __asm __volatile ("lock; cmpxchgq %q2, %1"
                    : "=a" (result), "=m" (*atomic)
                    : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

# elif defined (G_ATOMIC_POWERPC)
/* Adapted from CVS version 1.12 of glibc's sysdeps/powerpc/bits/atomic.h
 * and CVS version 1.3 of glibc's sysdeps/powerpc/powerpc32/bits/atomic.h
 * and CVS version 1.2 of glibc's sysdeps/powerpc/powerpc64/bits/atomic.h
 */
gint
g_atomic_int_exchange_and_add (gint *atomic,
                               gint val)
{
  gint result, temp;
  __asm __volatile ("1:       lwarx   %0,0,%3\n"
                    "         add     %1,%0,%4\n"
                    "         stwcx.  %1,0,%3\n"
                    "         bne-    1b"
                    : "=&b" (result), "=&r" (temp), "=m" (*atomic)
                    : "b" (atomic), "r" (val), "2" (*atomic)
                    : "cr0", "memory");
  return result;
}

/* Same as above, repeated here to save a function call */
void
g_atomic_int_add (gint *atomic,
                  gint val)
{
  gint result, temp;
  __asm __volatile ("1:       lwarx   %0,0,%3\n"
                    "         add     %1,%0,%4\n"
                    "         stwcx.  %1,0,%3\n"
                    "         bne-    1b"
                    : "=&b" (result), "=&r" (temp), "=m" (*atomic)
                    : "b" (atomic), "r" (val), "2" (*atomic)
                    : "cr0", "memory");
}
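/* lwarx/stwcx. are PowerPC's load-reserved/store-conditional pair: stwcx.
 * only succeeds if the reservation taken by lwarx is still intact, and sets
 * cr0 so that "bne- 1b" retries the whole read-modify-write whenever another
 * processor touched *atomic in between. */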

#  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_int_compare_and_exchange (gint *atomic,
                                   gint oldval,
                                   gint newval)
{
  gint result;
  __asm __volatile ("sync\n"
                    "1: lwarx   %0,0,%1\n"
                    "   subf.   %0,%2,%0\n"
                    "   bne     2f\n"
                    "   stwcx.  %3,0,%1\n"
                    "   bne-    1b\n"
                    "2: isync"
                    : "=&r" (result)
                    : "b" (atomic), "r" (oldval), "r" (newval)
                    : "cr0", "memory");
  return result == 0;
}

gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
  gpointer result;
  __asm __volatile ("sync\n"
                    "1: lwarx   %0,0,%1\n"
                    "   subf.   %0,%2,%0\n"
                    "   bne     2f\n"
                    "   stwcx.  %3,0,%1\n"
                    "   bne-    1b\n"
                    "2: isync"
                    : "=&r" (result)
                    : "b" (atomic), "r" (oldval), "r" (newval)
                    : "cr0", "memory");
  return result == 0;
}
#  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_int_compare_and_exchange (gint *atomic,
                                   gint oldval,
                                   gint newval)
{
  gpointer result;
  __asm __volatile ("sync\n"
                    "1: lwarx   %0,0,%1\n"
                    "   extsw   %0,%0\n"
                    "   subf.   %0,%2,%0\n"
                    "   bne     2f\n"
                    "   stwcx.  %3,0,%1\n"
                    "   bne-    1b\n"
                    "2: isync"
                    : "=&r" (result)
                    : "b" (atomic), "r" (oldval), "r" (newval)
                    : "cr0", "memory");
  return result == 0;
}

gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
  gpointer result;
  __asm __volatile ("sync\n"
                    "1: ldarx   %0,0,%1\n"
                    "   subf.   %0,%2,%0\n"
                    "   bne     2f\n"
                    "   stdcx.  %3,0,%1\n"
                    "   bne-    1b\n"
                    "2: isync"
                    : "=&r" (result)
                    : "b" (atomic), "r" (oldval), "r" (newval)
                    : "cr0", "memory");
  return result == 0;
}
#  else /* What's that */
#   error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */

#  define G_ATOMIC_MEMORY_BARRIER __asm ("sync" : : : "memory")

# elif defined (G_ATOMIC_IA64)
/* Adapted from CVS version 1.8 of glibc's sysdeps/ia64/bits/atomic.h
 */
gint
g_atomic_int_exchange_and_add (gint *atomic,
                               gint val)
{
  return __sync_fetch_and_add_si (atomic, val);
}

void
g_atomic_int_add (gint *atomic,
                  gint val)
{
  __sync_fetch_and_add_si (atomic, val);
}

gboolean
g_atomic_int_compare_and_exchange (gint *atomic,
                                   gint oldval,
                                   gint newval)
{
  return __sync_bool_compare_and_swap_si (atomic, oldval, newval);
}

gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
  return __sync_bool_compare_and_swap_di ((long *)atomic,
                                          (long)oldval, (long)newval);
}

#  define G_ATOMIC_MEMORY_BARRIER __sync_synchronize ()
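/* The __sync_*_si and __sync_*_di calls above are the ia64 atomic intrinsics
 * provided by GCC (the _si/_di suffixes select 32-bit and 64-bit operands),
 * so no inline assembly is needed on this platform; they compile down to the
 * architecture's fetchadd/cmpxchg instructions. */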
# else /* !G_ATOMIC */
#  define DEFINE_WITH_MUTEXES
# endif /* G_ATOMIC */
#else /* !__GNUC__ */
# ifdef G_PLATFORM_WIN32
#  define DEFINE_WITH_WIN32_INTERLOCKED
# else
#  define DEFINE_WITH_MUTEXES
# endif
#endif /* __GNUC__ */

#ifdef DEFINE_WITH_WIN32_INTERLOCKED
# include <windows.h>
gint32
g_atomic_int_exchange_and_add (gint32   *atomic,
                               gint32    val)
{
  return InterlockedExchangeAdd (atomic, val);
}

void
g_atomic_int_add (gint32   *atomic,
                  gint32    val)
{
  InterlockedExchangeAdd (atomic, val);
}

gboolean
g_atomic_int_compare_and_exchange (gint32   *atomic,
                                   gint32    oldval,
                                   gint32    newval)
{
  return (guint32)InterlockedCompareExchange ((PVOID*)atomic,
                                              (PVOID)newval,
                                              (PVOID)oldval) == oldval;
}

gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
# if GLIB_SIZEOF_VOID_P != 4 /* not a 32-bit system */
#  error "InterlockedCompareExchangePointer needed"
# else
   return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
# endif
}
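/* InterlockedExchangeAdd and InterlockedCompareExchange both return the
 * previous contents of *atomic, matching the semantics of the GCC-based
 * implementations above.  A 64-bit Windows build would need
 * InterlockedCompareExchangePointer for the pointer variant, hence the
 * #error in the != 4 branch. */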
#endif /* DEFINE_WITH_WIN32_INTERLOCKED */

#ifdef DEFINE_WITH_MUTEXES
/* We have to use the slow, but safe locking method */
static GMutex *g_atomic_mutex;
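/* The mutex is only allocated in _g_atomic_thread_init() at the bottom of
 * this file, i.e. when the GLib threading system is brought up; until then
 * the g_mutex_lock()/g_mutex_unlock() calls below have no effect, since
 * GLib's mutex calls only take real effect once threads are initialized,
 * so the still-NULL mutex is never dereferenced. */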

gint
g_atomic_int_exchange_and_add (gint *atomic,
                               gint  val)
{
  gint result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  *atomic += val;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}


void
g_atomic_int_add (gint *atomic,
                  gint  val)
{
  g_mutex_lock (g_atomic_mutex);
  *atomic += val;
  g_mutex_unlock (g_atomic_mutex);
}

gboolean
g_atomic_int_compare_and_exchange (gint *atomic,
                                   gint  oldval,
                                   gint  newval)
{
  gboolean result;

  g_mutex_lock (g_atomic_mutex);
  if (*atomic == oldval)
    {
      result = TRUE;
      *atomic = newval;
    }
  else
    result = FALSE;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
  gboolean result;

  g_mutex_lock (g_atomic_mutex);
  if (*atomic == oldval)
    {
      result = TRUE;
      *atomic = newval;
    }
  else
    result = FALSE;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

#ifdef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
gint
g_atomic_int_get (gint *atomic)
{
  gint result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

gpointer
g_atomic_pointer_get (gpointer *atomic)
{
  gpointer result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}
#endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
#elif defined (G_ATOMIC_OP_MEMORY_BARRIER_NEEDED)
gint
g_atomic_int_get (gint *atomic)
{
  gint result = *atomic;

  G_ATOMIC_MEMORY_BARRIER;

  return result;
}

gpointer
g_atomic_pointer_get (gpointer *atomic)
{
  gpointer result = *atomic;

  G_ATOMIC_MEMORY_BARRIER;

  return result;
}
#endif /* DEFINE_WITH_MUTEXES || G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
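/* Architectures that define G_ATOMIC_OP_MEMORY_BARRIER_NEEDED can reorder
 * an ordinary load with earlier memory accesses, so the _get functions
 * above issue G_ATOMIC_MEMORY_BARRIER after reading.  On the remaining
 * platforms g_atomic_int_get()/g_atomic_pointer_get() are presumably just
 * plain reads supplied by the gatomic.h header and are not compiled here. */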

#ifdef ATOMIC_INT_CMP_XCHG
gboolean
g_atomic_int_compare_and_exchange (gint *atomic,
                                   gint oldval,
                                   gint newval)
{
  return ATOMIC_INT_CMP_XCHG (atomic, oldval, newval);
}

gint
g_atomic_int_exchange_and_add (gint *atomic,
                               gint val)
{
  gint result;
  do
    result = *atomic;
  while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));

  return result;
}

void
g_atomic_int_add (gint *atomic,
                  gint val)
{
  gint result;
  do
    result = *atomic;
  while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
}
#endif /* ATOMIC_INT_CMP_XCHG */
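/* When only ATOMIC_INT_CMP_XCHG is available (SPARC, Alpha), the add
 * operations above are built from a classic compare-and-swap retry loop:
 * read the current value, try to install value + val, and start over if
 * another thread raced in between.  Whatever the backend, callers see the
 * same interface; e.g. a reference-count drop (illustrative names only):
 *
 *   if (g_atomic_int_exchange_and_add (&obj->ref_count, -1) == 1)
 *     my_object_free (obj);   -- the last reference was just released
 */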

void
_g_atomic_thread_init (void)
{
#ifdef DEFINE_WITH_MUTEXES
  g_atomic_mutex = g_mutex_new ();
#endif /* DEFINE_WITH_MUTEXES */
}