1 #ifndef foopulseatomichfoo
2 #define foopulseatomichfoo
5 This file is part of PulseAudio.
7 Copyright 2006-2008 Lennart Poettering
8 Copyright 2008 Nokia Corporation
10 PulseAudio is free software; you can redistribute it and/or modify
11 it under the terms of the GNU Lesser General Public License as
12 published by the Free Software Foundation; either version 2.1 of the
13 License, or (at your option) any later version.
15 PulseAudio is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
20 You should have received a copy of the GNU Lesser General Public
21 License along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
24 #include <pulsecore/macro.h>
 * atomic_ops guarantees us that sizeof(AO_t) == sizeof(void*). It is
 * not guaranteed however, that sizeof(AO_t) == sizeof(size_t). It is
 * however very likely.
 * For now we do only full memory barriers. Eventually we might want
 * to support more elaborate memory barriers, in which case we will add
 * suffixes to the function names.
 *
 * On gcc >= 4.1 we use the builtin atomic functions. Otherwise we use
 * libatomic_ops.
40 #error "Please include config.h before including this file!"
43 #ifdef HAVE_ATOMIC_BUILTINS
45 /* __sync based implementation */
47 typedef struct pa_atomic {
51 #define PA_ATOMIC_INIT(v) { .value = (v) }
53 #ifdef HAVE_ATOMIC_BUILTINS_MEMORY_MODEL
55 /* __atomic based implementation */
57 static inline int pa_atomic_load(const pa_atomic_t *a) {
58 return __atomic_load_n(&a->value, __ATOMIC_SEQ_CST);
61 static inline void pa_atomic_store(pa_atomic_t *a, int i) {
62 __atomic_store_n(&a->value, i, __ATOMIC_SEQ_CST);
67 static inline int pa_atomic_load(const pa_atomic_t *a) {
72 static inline void pa_atomic_store(pa_atomic_t *a, int i) {
80 /* Returns the previously set value */
81 static inline int pa_atomic_add(pa_atomic_t *a, int i) {
82 return __sync_fetch_and_add(&a->value, i);
85 /* Returns the previously set value */
86 static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
87 return __sync_fetch_and_sub(&a->value, i);
90 /* Returns the previously set value */
91 static inline int pa_atomic_inc(pa_atomic_t *a) {
92 return pa_atomic_add(a, 1);
95 /* Returns the previously set value */
96 static inline int pa_atomic_dec(pa_atomic_t *a) {
97 return pa_atomic_sub(a, 1);
100 /* Returns true when the operation was successful. */
101 static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
102 return __sync_bool_compare_and_swap(&a->value, old_i, new_i);
/* A pointer stored as an unsigned long so the integer __sync/__atomic
 * builtins can operate on it (sizeof(unsigned long) == sizeof(void*) on
 * all supported LP64/ILP32 targets). */
typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

/* FIX: cast through unsigned long — the field is unsigned long, and a
 * (signed) long conversion of a high pointer is implementation-defined. */
#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }
111 #ifdef HAVE_ATOMIC_BUILTINS_MEMORY_MODEL
113 /* __atomic based implementation */
115 static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
116 return (void*) __atomic_load_n(&a->value, __ATOMIC_SEQ_CST);
119 static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void* p) {
120 __atomic_store_n(&a->value, (unsigned long) p, __ATOMIC_SEQ_CST);
125 static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
126 __sync_synchronize();
127 return (void*) a->value;
130 static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
131 a->value = (unsigned long) p;
132 __sync_synchronize();
137 static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
138 return __sync_bool_compare_and_swap(&a->value, (long) old_p, (long) new_p);
#elif defined(__NetBSD__) && defined(HAVE_SYS_ATOMIC_H)

/* NetBSD 5.0+ atomic_ops(3) implementation */

#include <sys/atomic.h>

/* NOTE(review): this region appears truncated — struct terminators,
 * closing braces, memory-barrier calls and several return statements are
 * not visible. Reconcile against the pristine file before building. */

typedef struct pa_atomic {
    volatile unsigned int value;   /* unsigned: the atomic_*_uint ops below operate on unsigned int */

#define PA_ATOMIC_INIT(v) { .value = (unsigned int) (v) }

/* Read the counter (any barrier line is not visible here — presumably truncated). */
static inline int pa_atomic_load(const pa_atomic_t *a) {
    return (int) a->value;

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = (unsigned int) i;

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    /* atomic_add_int_nv() yields the NEW value; the old value would be
       nv - i (the return statement is not visible — presumably truncated). */
    int nv = (int) atomic_add_int_nv(&a->value, i);

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    int nv = (int) atomic_add_int_nv(&a->value, -i);

/* Returns the previously set value */
static inline int pa_atomic_inc(pa_atomic_t *a) {
    int nv = (int) atomic_inc_uint_nv(&a->value);

/* Returns the previously set value */
static inline int pa_atomic_dec(pa_atomic_t *a) {
    int nv = (int) atomic_dec_uint_nv(&a->value);

/* Returns true when the operation was successful. */
static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    /* atomic_cas_uint() returns the value found at the address; the CAS
       succeeded exactly when that equals the expected old value. */
    unsigned int r = atomic_cas_uint(&a->value, (unsigned int) old_i, (unsigned int) new_i);
    return (int) r == old_i;

typedef struct pa_atomic_ptr {
    volatile void *value;

#define PA_ATOMIC_PTR_INIT(v) { .value = (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    return (void *) a->value;

/* Body not visible here — presumably truncated. */
static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {

/* Returns true when the operation was successful (return statement not visible). */
static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    void *r = atomic_cas_ptr(&a->value, old_p, new_p);
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/param.h>
#include <machine/atomic.h>

/* FreeBSD machine/atomic.h based implementation.
 * NOTE(review): this region appears truncated — closing braces and the
 * #else/#endif lines of the 64/32-bit selection blocks are not visible.
 * Reconcile against the pristine file before building. */

typedef struct pa_atomic {
    volatile unsigned long value;  /* NOTE(review): long-sized storage, but the int
                                      ops below cast to unsigned int* — verify intent */

#define PA_ATOMIC_INIT(v) { .value = (v) }

/* acquire-load of the int value */
static inline int pa_atomic_load(const pa_atomic_t *a) {
    return (int) atomic_load_acq_int((unsigned int *) &a->value);

/* release-store of the int value */
static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    atomic_store_rel_int((unsigned int *) &a->value, i);

/* atomic_fetchadd_int() returns the value prior to the addition */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    return atomic_fetchadd_int((unsigned int *) &a->value, i);

static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return atomic_fetchadd_int((unsigned int *) &a->value, -(i));

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return atomic_fetchadd_int((unsigned int *) &a->value, 1);

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return atomic_fetchadd_int((unsigned int *) &a->value, -1);

/* atomic_cmpset_int() returns non-zero when the swap was performed */
static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    return atomic_cmpset_int((unsigned int *) &a->value, old_i, new_i);

typedef struct pa_atomic_ptr {
    volatile unsigned long value;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

/* atomic_load_acq_64 is only defined where 64-bit atomics exist; it is
   used here as a feature test to pick the pointer-width variant. The
   matching #else/#endif lines are not visible — presumably truncated. */
static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
#ifdef atomic_load_acq_64
    return (void*) atomic_load_acq_ptr((unsigned long *) &a->value);
    return (void*) atomic_load_acq_ptr((unsigned int *) &a->value);

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
#ifdef atomic_load_acq_64
    atomic_store_rel_ptr(&a->value, (unsigned long) p);
    atomic_store_rel_ptr((unsigned int *) &a->value, (unsigned int) p);

static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
#ifdef atomic_load_acq_64
    return atomic_cmpset_ptr(&a->value, (unsigned long) old_p, (unsigned long) new_p);
    return atomic_cmpset_ptr((unsigned int *) &a->value, (unsigned int) old_p, (unsigned int) new_p);
285 #elif defined(__GNUC__) && (defined(__amd64__) || defined(__x86_64__))
287 #warn "The native atomic operations implementation for AMD64 has not been tested thoroughly. libatomic_ops is known to not work properly on AMD64 and your gcc version is too old for the gcc-builtin atomic ops support. You have three options now: test the native atomic operations implementation for AMD64, fix libatomic_ops, or upgrade your GCC."
/* Adapted from glibc */

/* Hand-written x86-64 inline assembly implementation.
 * NOTE(review): this region appears truncated — the "int result;"
 * declarations, closing braces and the pa_atomic struct body are not
 * visible. Reconcile against the pristine file before building. */

typedef struct pa_atomic {

#define PA_ATOMIC_INIT(v) { .value = (v) }

/* Body not visible here — presumably truncated. */
static inline int pa_atomic_load(const pa_atomic_t *a) {

static inline void pa_atomic_store(pa_atomic_t *a, int i) {

/* Returns the previously set value: "lock; xaddl" exchanges and adds,
   leaving the old memory value in the register operand (%0 / result). */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    __asm __volatile ("lock; xaddl %0, %1"
                      : "=r" (result), "=m" (a->value)
                      : "0" (i), "m" (a->value));

static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return pa_atomic_add(a, -i);

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);

/* cmpxchgl compares %eax (seeded with old_i via the "0" constraint) with
   the memory operand and stores new_i on match; afterwards %eax holds the
   value that was in memory, so equality with old_i signals success. */
static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                          : "=a" (result), "=m" (a->value)
                          : "r" (new_i), "m" (a->value), "0" (old_i));

    return result == old_i;

typedef struct pa_atomic_ptr {
    volatile unsigned long value;

#define PA_ATOMIC_PTR_INIT(v) { .value = (long) (v) }

/* Plain word access; no barrier appears in the visible code. */
static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    return (void*) a->value;

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;

/* Same cmpxchg pattern at pointer width (cmpxchgq, 64-bit). */
static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
                          : "=a" (result), "=m" (a->value)
                          : "r" (new_p), "m" (a->value), "0" (old_p));

    return result == old_p;
#elif defined(ATOMIC_ARM_INLINE_ASM)

/* ARM LDREX/STREX based implementation.
   These should only be enabled if we have ARMv6 or better.

   NOTE(review): this region appears truncated — "do {" lines, the
   arithmetic/compare asm instructions between LDREX and STREX, clobber
   lists, closing braces and return statements are not visible.
   Reconcile against the pristine file before building. */

typedef struct pa_atomic {

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline void pa_memory_barrier(void) {
#ifdef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
    /* CP15 c7/c10/5 write: Data Memory Barrier on ARMv6 (predates the
       dedicated DMB instruction, as the "@ dmb" comment notes). */
    asm volatile ("mcr p15, 0, r0, c7, c10, 5 @ dmb");

/* Body not visible here — presumably truncated. */
static inline int pa_atomic_load(const pa_atomic_t *a) {

static inline void pa_atomic_store(pa_atomic_t *a, int i) {

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    unsigned long not_exclusive;   /* STREX status flag: 0 = store succeeded */
    int new_val, old_val;

        /* LDREX/STREX retry loop; repeats until the exclusive store wins. */
        asm volatile ("ldrex %0, [%3]\n"
                      "strex %1, %2, [%3]\n"
                      : "=&r" (old_val), "=&r" (not_exclusive), "=&r" (new_val)
                      : "r" (&a->value), "Ir" (i)
    } while(not_exclusive);

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    unsigned long not_exclusive;
    int new_val, old_val;

        asm volatile ("ldrex %0, [%3]\n"
                      "strex %1, %2, [%3]\n"
                      : "=&r" (old_val), "=&r" (not_exclusive), "=&r" (new_val)
                      : "r" (&a->value), "Ir" (i)
    } while(not_exclusive);

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);

/* CAS via LDREX + conditional STREXEQ: the store only happens when the
   loaded value compared equal to old_i. */
static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    unsigned long not_equal, not_exclusive;

        asm volatile ("ldrex %0, [%2]\n"
                      "strexeq %0, %4, [%2]\n"
                      : "=&r" (not_exclusive), "=&r" (not_equal)
                      : "r" (&a->value), "Ir" (old_i), "r" (new_i)
    } while(not_exclusive && !not_equal);

typedef struct pa_atomic_ptr {
    volatile unsigned long value;

#define PA_ATOMIC_PTR_INIT(v) { .value = (long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    return (void*) a->value;

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;

/* Pointer CAS, same LDREX/STREXEQ pattern as pa_atomic_cmpxchg above. */
static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    unsigned long not_equal, not_exclusive;

        asm volatile ("ldrex %0, [%2]\n"
                      "strexeq %0, %4, [%2]\n"
                      : "=&r" (not_exclusive), "=&r" (not_equal)
                      : "r" (&a->value), "Ir" (old_p), "r" (new_p)
    } while(not_exclusive && !not_equal);
#elif defined(ATOMIC_ARM_LINUX_HELPERS)

/* See file arch/arm/kernel/entry-armv.S in your kernel sources for more
   information about these functions. The arm kernel helper functions first
   Apply --disable-atomic-arm-linux-helpers flag to configure if you prefer
   inline asm implementation or you have an obsolete Linux kernel.

   NOTE(review): this region appears truncated — the comment above is cut
   mid-sentence, and function bodies, "do {" lines, local declarations,
   closing braces and return statements are not visible. Reconcile
   against the pristine file before building. */

/* Kernel-provided user helper entry point at a fixed address near the top
   of the address space (see the entry-armv.S reference above). */
typedef void (__kernel_dmb_t)(void);
#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)

/* Body not visible here — presumably truncated. */
static inline void pa_memory_barrier(void) {
#ifndef ATOMIC_ARM_MEMORY_BARRIER_ENABLED

/* Atomic exchange (__kernel_cmpxchg_t contains memory barriers if needed) */
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

/* This is just to get rid of all warnings */
typedef int (__kernel_cmpxchg_u_t)(unsigned long oldval, unsigned long newval, volatile unsigned long *ptr);
#define __kernel_cmpxchg_u (*(__kernel_cmpxchg_u_t *)0xffff0fc0)

typedef struct pa_atomic {

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {

static inline void pa_atomic_store(pa_atomic_t *a, int i) {

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    /* retry loop: __kernel_cmpxchg returns 0 on a successful swap */
    } while(__kernel_cmpxchg(old_val, old_val + i, &a->value));

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    } while(__kernel_cmpxchg(old_val, old_val - i, &a->value));

/* Returns the previously set value */
static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);

/* Returns the previously set value */
static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);

/* Returns true when the operation was successful. */
static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    /* keep retrying only while the stored value still matches old_i */
    failed = !!__kernel_cmpxchg(old_i, new_i, &a->value);
    } while(failed && a->value == old_i);

typedef struct pa_atomic_ptr {
    volatile unsigned long value;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    return (void*) a->value;

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;

/* Returns true when the operation was successful. */
static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    failed = !!__kernel_cmpxchg_u((unsigned long) old_p, (unsigned long) new_p, &a->value);
    } while(failed && a->value == (unsigned long) old_p);
/* libatomic_ops based implementation (final fallback branch).
   All operations use the _full variants, i.e. full memory barriers.

   NOTE(review): this region appears truncated — the preceding #else, the
   struct bodies, closing braces and the trailing #endif of the big
   conditional are not visible. Reconcile against the pristine file. */

#include <atomic_ops.h>

typedef struct pa_atomic {

#define PA_ATOMIC_INIT(v) { .value = (AO_t) (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    return (int) AO_load_full((AO_t*) &a->value);

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    AO_store_full(&a->value, (AO_t) i);

/* AO_fetch_and_add_full returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    return (int) AO_fetch_and_add_full(&a->value, (AO_t) i);

static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return (int) AO_fetch_and_add_full(&a->value, (AO_t) -i);

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return (int) AO_fetch_and_add1_full(&a->value);

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return (int) AO_fetch_and_sub1_full(&a->value);

/* AO_compare_and_swap_full returns non-zero when the swap was performed */
static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    return AO_compare_and_swap_full(&a->value, (unsigned long) old_i, (unsigned long) new_i);

typedef struct pa_atomic_ptr {

#define PA_ATOMIC_PTR_INIT(v) { .value = (AO_t) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    return (void*) AO_load_full((AO_t*) &a->value);

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    AO_store_full(&a->value, (AO_t) p);

static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
    return AO_compare_and_swap_full(&a->value, (AO_t) old_p, (AO_t) new_p);