Fixed missing definition for ppc code. Fix by Mark McLoughlin
[platform/upstream/glib.git] / glib / gatomic.c
/* GLIB - Library of useful routines for C programming
 * Copyright (C) 1995-1997  Peter Mattis, Spencer Kimball and Josh MacDonald
 *
 * g_atomic_*: atomic operations.
 * Copyright (C) 2003 Sebastian Wilhelmi
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include "config.h"

#include "glib.h"

#if defined (__GNUC__)
# if defined (G_ATOMIC_I486)
/* Adapted from CVS version 1.10 of glibc's sysdeps/i386/i486/bits/atomic.h
 */
gint
g_atomic_int_exchange_and_add (gint *atomic,
                               gint val)
{
  gint result;

  __asm__ __volatile__ ("lock; xaddl %0,%1"
                        : "=r" (result), "=m" (*atomic)
                        : "0" (val), "m" (*atomic));
  return result;
}
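
/* Illustrative sketch only (plain C like this would not be atomic): the
 * locked xaddl above behaves as if the following ran as one indivisible
 * step, returning the value *atomic held before the addition:
 *
 *   gint old = *atomic;
 *   *atomic = old + val;
 *   return old;
 */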

void
g_atomic_int_add (gint *atomic,
                  gint val)
{
  __asm__ __volatile__ ("lock; addl %1,%0"
                        : "=m" (*atomic)
                        : "ir" (val), "m" (*atomic));
}

gboolean
g_atomic_int_compare_and_exchange (gint *atomic,
                                   gint oldval,
                                   gint newval)
{
  gint result;

  __asm __volatile ("lock; cmpxchgl %2, %1"
                    : "=a" (result), "=m" (*atomic)
                    : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}
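
/* Illustrative sketch only: cmpxchgl compares *atomic against the value
 * in EAX (loaded from oldval through the "0" constraint) and stores
 * newval on a match, otherwise EAX receives the current contents of
 * *atomic.  As one indivisible step this amounts to:
 *
 *   if (*atomic == oldval) { *atomic = newval; return TRUE; }
 *   return FALSE;
 */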

/* The same code as above, as on i386 gpointer is 32 bit as well.
 * Duplicating the code here seems more natural than casting the
 * arguments and calling the former function */

gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
  gpointer result;

  __asm __volatile ("lock; cmpxchgl %2, %1"
                    : "=a" (result), "=m" (*atomic)
                    : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

# elif defined (G_ATOMIC_SPARCV9)
/* Adapted from CVS version 1.3 of glibc's sysdeps/sparc/sparc64/bits/atomic.h
 */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
  ({                                                                    \
     gint __result;                                                     \
     __asm __volatile ("cas [%4], %2, %0"                               \
                       : "=r" (__result), "=m" (*(atomic))              \
                       : "r" (oldval), "m" (*(atomic)), "r" (atomic),   \
                         "0" (newval));                                 \
     __result == oldval;                                                \
  })
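
/* "cas [mem], %2, %0" compares the word at mem with %2 (oldval) and, if
 * they match, swaps it with %0 (initially newval via the "0" constraint);
 * either way %0 ends up holding the previous memory contents, so
 * "__result == oldval" signals that the exchange took place.  Only the
 * pointer-sized compare-and-exchange is spelled out below; the integer
 * operations for this architecture are built from ATOMIC_INT_CMP_XCHG by
 * the generic loops at the end of this file.
 */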

#  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
  gpointer result;
  __asm __volatile ("cas [%4], %2, %0"
                    : "=r" (result), "=m" (*atomic)
                    : "r" (oldval), "m" (*atomic), "r" (atomic),
                      "0" (newval));
  return result == oldval;
}
#  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
  gpointer result;
  gpointer *a = atomic;
  __asm __volatile ("casx [%4], %2, %0"
                    : "=r" (result), "=m" (*a)
                    : "r" (oldval), "m" (*a), "r" (a),
                      "0" (newval));
  return result == oldval;
}
#  else /* What's that */
#    error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */
#  define G_ATOMIC_MEMORY_BARRIER                                       \
  __asm __volatile ("membar #LoadLoad | #LoadStore"                     \
                    " | #StoreLoad | #StoreStore" : : : "memory")

# elif defined (G_ATOMIC_ALPHA)
/* Adapted from CVS version 1.3 of glibc's sysdeps/alpha/bits/atomic.h
 */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)                   \
  ({                                                                    \
     gint __result;                                                     \
     gint __prev;                                                       \
     __asm__ __volatile__ (                                             \
        "       mb\n"                                                   \
        "1:     ldl_l   %0,%2\n"                                        \
        "       cmpeq   %0,%3,%1\n"                                     \
        "       beq     %1,2f\n"                                        \
        "       mov     %4,%1\n"                                        \
        "       stl_c   %1,%2\n"                                        \
        "       beq     %1,1b\n"                                        \
        "       mb\n"                                                   \
        "2:"                                                            \
        : "=&r" (__prev),                                               \
          "=&r" (__result)                                              \
        : "m" (*(atomic)),                                              \
          "Ir" (oldval),                                                \
          "Ir" (newval)                                                 \
        : "memory");                                                    \
     __result != 0;                                                     \
  })
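
/* ldl_l/stl_c form a load-locked/store-conditional pair: ldl_l loads
 * *atomic and opens a reservation, cmpeq checks the loaded value against
 * oldval, and stl_c succeeds only if nothing else touched the location in
 * the meantime (otherwise the beq retries at 1:).  __result is therefore
 * non-zero exactly when the values matched and the conditional store went
 * through, hence the final "__result != 0".
 */
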
#  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
  gint result;
  gpointer prev;
  __asm__ __volatile__ (
        "       mb\n"
        "1:     ldl_l   %0,%2\n"
        "       cmpeq   %0,%3,%1\n"
        "       beq     %1,2f\n"
        "       mov     %4,%1\n"
        "       stl_c   %1,%2\n"
        "       beq     %1,1b\n"
        "       mb\n"
        "2:"
        : "=&r" (prev),
          "=&r" (result)
        : "m" (*atomic),
          "Ir" (oldval),
          "Ir" (newval)
        : "memory");
  return result != 0;
}
#  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
  gint result;
  gpointer prev;
  __asm__ __volatile__ (
        "       mb\n"
        "1:     ldq_l   %0,%2\n"
        "       cmpeq   %0,%3,%1\n"
        "       beq     %1,2f\n"
        "       mov     %4,%1\n"
        "       stq_c   %1,%2\n"
        "       beq     %1,1b\n"
        "       mb\n"
        "2:"
        : "=&r" (prev),
          "=&r" (result)
        : "m" (*atomic),
          "Ir" (oldval),
          "Ir" (newval)
        : "memory");
  return result != 0;
}
#  else /* What's that */
#   error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */
#  define G_ATOMIC_MEMORY_BARRIER  __asm ("mb" : : : "memory")
# elif defined (G_ATOMIC_X86_64)
/* Adapted from CVS version 1.9 of glibc's sysdeps/x86_64/bits/atomic.h
 */
gint
g_atomic_int_exchange_and_add (gint *atomic,
                               gint val)
{
  gint result;

  __asm__ __volatile__ ("lock; xaddl %0,%1"
                        : "=r" (result), "=m" (*atomic)
                        : "0" (val), "m" (*atomic));
  return result;
}

void
g_atomic_int_add (gint *atomic,
                  gint val)
{
  __asm__ __volatile__ ("lock; addl %1,%0"
                        : "=m" (*atomic)
                        : "ir" (val), "m" (*atomic));
}

gboolean
g_atomic_int_compare_and_exchange (gint *atomic,
                                   gint oldval,
                                   gint newval)
{
  gint result;

  __asm __volatile ("lock; cmpxchgl %2, %1"
                    : "=a" (result), "=m" (*atomic)
                    : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
  gpointer result;

  __asm __volatile ("lock; cmpxchgq %q2, %1"
                    : "=a" (result), "=m" (*atomic)
                    : "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

# elif defined (G_ATOMIC_POWERPC)
/* Adapted from CVS version 1.12 of glibc's sysdeps/powerpc/bits/atomic.h
 * and CVS version 1.3 of glibc's sysdeps/powerpc/powerpc32/bits/atomic.h
 * and CVS version 1.2 of glibc's sysdeps/powerpc/powerpc64/bits/atomic.h
 */
gint
g_atomic_int_exchange_and_add (gint *atomic,
                               gint val)
{
  gint result, temp;
  __asm __volatile ("1:       lwarx   %0,0,%3\n"
                    "         add     %1,%0,%4\n"
                    "         stwcx.  %1,0,%3\n"
                    "         bne-    1b"
                    : "=&b" (result), "=&r" (temp), "=m" (*atomic)
                    : "b" (atomic), "r" (val), "2" (*atomic)
                    : "cr0", "memory");
  return result;
}
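
/* lwarx/stwcx. are PowerPC's load-reserve/store-conditional pair: lwarx
 * loads *atomic and places a reservation on it, stwcx. stores the sum only
 * if that reservation still holds, and bne- retries otherwise.  As a rough
 * sketch (not atomic as plain C; conditional_store is just shorthand for
 * the stwcx. step):
 *
 *   do
 *     result = *atomic;
 *   while (!conditional_store (atomic, result + val));
 *   return result;
 */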

/* The same as above, repeated here to save a function call */
void
g_atomic_int_add (gint *atomic,
                  gint val)
{
  gint result, temp;
  __asm __volatile ("1:       lwarx   %0,0,%3\n"
                    "         add     %1,%0,%4\n"
                    "         stwcx.  %1,0,%3\n"
                    "         bne-    1b"
                    : "=&b" (result), "=&r" (temp), "=m" (*atomic)
                    : "b" (atomic), "r" (val), "2" (*atomic)
                    : "cr0", "memory");
}

#   if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_int_compare_and_exchange (gint *atomic,
                                   gint oldval,
                                   gint newval)
{
  gint result;
  __asm __volatile ("sync\n"
                    "1: lwarx   %0,0,%1\n"
                    "   subf.   %0,%2,%0\n"
                    "   bne     2f\n"
                    "   stwcx.  %3,0,%1\n"
                    "   bne-    1b\n"
                    "2: isync"
                    : "=&r" (result)
                    : "b" (atomic), "r" (oldval), "r" (newval)
                    : "cr0", "memory");
  return result == 0;
}
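
/* The "sync" before the loop and the "isync" after it provide the memory
 * barriers around the operation; subf. subtracts oldval from the loaded
 * value and sets cr0, so the branch to 2: skips the store whenever the
 * values differ, which is why success is reported as "result == 0".
 */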

gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
  gpointer result;
  __asm __volatile ("sync\n"
                    "1: lwarx   %0,0,%1\n"
                    "   subf.   %0,%2,%0\n"
                    "   bne     2f\n"
                    "   stwcx.  %3,0,%1\n"
                    "   bne-    1b\n"
                    "2: isync"
                    : "=&r" (result)
                    : "b" (atomic), "r" (oldval), "r" (newval)
                    : "cr0", "memory");
  return result == 0;
}
#   elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_int_compare_and_exchange (gint *atomic,
                                   gint oldval,
                                   gint newval)
{
  gpointer result;
  __asm __volatile ("sync\n"
                    "1: lwarx   %0,0,%1\n"
                    "   extsw   %0,%0\n"
                    "   subf.   %0,%2,%0\n"
                    "   bne     2f\n"
                    "   stwcx.  %3,0,%1\n"
                    "   bne-    1b\n"
                    "2: isync"
                    : "=&r" (result)
                    : "b" (atomic), "r" (oldval), "r" (newval)
                    : "cr0", "memory");
  return result == 0;
}

gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
  gpointer result;
  __asm __volatile ("sync\n"
                    "1: ldarx   %0,0,%1\n"
                    "   subf.   %0,%2,%0\n"
                    "   bne     2f\n"
                    "   stdcx.  %3,0,%1\n"
                    "   bne-    1b\n"
                    "2: isync"
                    : "=&r" (result)
                    : "b" (atomic), "r" (oldval), "r" (newval)
                    : "cr0", "memory");
  return result == 0;
}
#  else /* What's that */
#   error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */

#  define G_ATOMIC_MEMORY_BARRIER __asm ("sync" : : : "memory")

# elif defined (G_ATOMIC_IA64)
/* Adapted from CVS version 1.8 of glibc's sysdeps/ia64/bits/atomic.h
 */
gint
g_atomic_int_exchange_and_add (gint *atomic,
                               gint val)
{
  return __sync_fetch_and_add_si (atomic, val);
}

void
g_atomic_int_add (gint *atomic,
                  gint val)
{
  __sync_fetch_and_add_si (atomic, val);
}

gboolean
g_atomic_int_compare_and_exchange (gint *atomic,
                                   gint oldval,
                                   gint newval)
{
  return __sync_bool_compare_and_swap_si (atomic, oldval, newval);
}

gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
  return __sync_bool_compare_and_swap_di ((long *)atomic,
                                          (long)oldval, (long)newval);
}
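
/* On ia64 the work is left to compiler intrinsics
 * (__sync_fetch_and_add_si, __sync_bool_compare_and_swap_si/_di and
 * __sync_synchronize below), which the compiler expands to the
 * corresponding fetchadd, cmpxchg and mf instructions, so no inline
 * assembly is needed here.
 */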

#  define G_ATOMIC_MEMORY_BARRIER __sync_synchronize ()
# else /* !G_ATOMIC */
#  define DEFINE_WITH_MUTEXES
# endif /* G_ATOMIC */
#else /* !__GNUC__ */
# ifdef G_PLATFORM_WIN32
#  define DEFINE_WITH_WIN32_INTERLOCKED
# else
#  define DEFINE_WITH_MUTEXES
# endif
#endif /* __GNUC__ */

#ifdef DEFINE_WITH_WIN32_INTERLOCKED
# include <windows.h>
gint32
g_atomic_int_exchange_and_add (gint32   *atomic,
                               gint32    val)
{
  return InterlockedExchangeAdd (atomic, val);
}

void
g_atomic_int_add (gint32   *atomic,
                  gint32    val)
{
  InterlockedExchangeAdd (atomic, val);
}

gboolean
g_atomic_int_compare_and_exchange (gint32   *atomic,
                                   gint32    oldval,
                                   gint32    newval)
{
  return (guint32)InterlockedCompareExchange ((PVOID*)atomic,
                                              (PVOID)newval,
                                              (PVOID)oldval) == oldval;
}
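
/* The PVOID casts are needed because older Platform SDK headers declare
 * InterlockedCompareExchange in terms of PVOID rather than LONG; the call
 * returns the previous contents of *atomic, so comparing it with oldval
 * tells us whether the exchange actually happened.
 */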

gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
# if GLIB_SIZEOF_VOID_P != 4 /* not a 32-bit system */
#  error "InterlockedCompareExchangePointer needed"
# else
   return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
# endif
}
#endif /* DEFINE_WITH_WIN32_INTERLOCKED */

#ifdef DEFINE_WITH_MUTEXES
/* We have to use the slow, but safe locking method */
G_LOCK_DEFINE_STATIC (g_atomic_lock);
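
/* One process-wide lock serializes every operation in this fallback, so
 * each function below follows the same pattern:
 *
 *   G_LOCK (g_atomic_lock);
 *   ... read and/or modify *atomic ...
 *   G_UNLOCK (g_atomic_lock);
 *
 * This is correct wherever GLib's threading primitives work, just slower
 * than the assembly implementations above.
 */
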
gint
g_atomic_int_exchange_and_add (gint *atomic,
                               gint  val)
{
  gint result;

  G_LOCK (g_atomic_lock);
  result = *atomic;
  *atomic += val;
  G_UNLOCK (g_atomic_lock);

  return result;
}


void
g_atomic_int_add (gint *atomic,
                  gint  val)
{
  G_LOCK (g_atomic_lock);
  *atomic += val;
  G_UNLOCK (g_atomic_lock);
}

gboolean
g_atomic_int_compare_and_exchange (gint *atomic,
                                   gint  oldval,
                                   gint  newval)
{
  gboolean result;

  G_LOCK (g_atomic_lock);
  if (*atomic == oldval)
    {
      result = TRUE;
      *atomic = newval;
    }
  else
    result = FALSE;
  G_UNLOCK (g_atomic_lock);

  return result;
}

gboolean
g_atomic_pointer_compare_and_exchange (gpointer *atomic,
                                       gpointer  oldval,
                                       gpointer  newval)
{
  gboolean result;

  G_LOCK (g_atomic_lock);
  if (*atomic == oldval)
    {
      result = TRUE;
      *atomic = newval;
    }
  else
    result = FALSE;
  G_UNLOCK (g_atomic_lock);

  return result;
}

#ifdef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
gint
g_atomic_int_get (gint *atomic)
{
  gint result;

  G_LOCK (g_atomic_lock);
  result = *atomic;
  G_UNLOCK (g_atomic_lock);

  return result;
}

gpointer
g_atomic_pointer_get (gpointer *atomic)
{
  gpointer result;

  G_LOCK (g_atomic_lock);
  result = *atomic;
  G_UNLOCK (g_atomic_lock);

  return result;
}
#endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
#elif defined (G_ATOMIC_OP_MEMORY_BARRIER_NEEDED)
gint
g_atomic_int_get (gint *atomic)
{
  gint result = *atomic;

  G_ATOMIC_MEMORY_BARRIER;

  return result;
}

gpointer
g_atomic_pointer_get (gpointer *atomic)
{
  gpointer result = *atomic;

  G_ATOMIC_MEMORY_BARRIER;

  return result;
}
#endif /* DEFINE_WITH_MUTEXES || G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */

#ifdef ATOMIC_INT_CMP_XCHG
gboolean
g_atomic_int_compare_and_exchange (gint *atomic,
                                   gint oldval,
                                   gint newval)
{
  return ATOMIC_INT_CMP_XCHG (atomic, oldval, newval);
}

gint
g_atomic_int_exchange_and_add (gint *atomic,
                               gint val)
{
  gint result;
  do
    result = *atomic;
  while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));

  return result;
}
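
/* These compare-and-swap loops supply the integer operations on the
 * architectures above that only define ATOMIC_INT_CMP_XCHG (sparcv9 and
 * alpha): the current value is re-read and the exchange retried until no
 * other thread modified *atomic between the read and the swap.
 */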

void
g_atomic_int_add (gint *atomic,
                  gint val)
{
  gint result;
  do
    result = *atomic;
  while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
}
#endif /* ATOMIC_INT_CMP_XCHG */