#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		((v)->counter = (i))
#define atomic64_set(v,i)	((v)->counter = (i))
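
/*
 * Illustrative usage sketch (not part of this header): a statically
 * initialized counter accessed through the plain, unordered helpers
 * above.  The variable name "active_users" is hypothetical.
 *
 *	static atomic_t active_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&active_users, 1);
 *	if (atomic_read(&active_users) > 0)
 *		atomic_inc(&active_users);
 */
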
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

/*
 * Same as above, but return the result value
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"
	"	addl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%3,%2\n"
	"	addq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%3,%2\n"
	"	subq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}
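
/*
 * Note: unlike the void atomic_add()/atomic_sub() family above, each
 * *_return() variant brackets its ll/sc sequence with smp_mb(), so it
 * also acts as a full memory barrier.
 */
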
#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
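
/*
 * Illustrative only: a typical compare-and-swap retry loop built on
 * atomic_cmpxchg().  The helper name "bounded_inc" and its "max"
 * argument are hypothetical, not part of this header.
 *
 *	static inline int bounded_inc(atomic_t *v, int max)
 *	{
 *		int old, c = atomic_read(v);
 *
 *		while (c < max && (old = atomic_cmpxchg(v, c, c + 1)) != c)
 *			c = old;
 *		return c;
 *	}
 */
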
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addl	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stl_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}
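
/*
 * Illustrative only: generic code typically wraps this helper as
 * atomic_add_unless()/atomic_inc_not_zero(); a common pattern is
 * taking a reference only while the count has not already dropped
 * to zero.  The names "obj" and "refs" are hypothetical.
 *
 *	if (!atomic_inc_not_zero(&obj->refs))
 *		return NULL;
 */
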
/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true iff @v was not @u.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[tmp],%[mem]\n"
	"	cmpeq	%[tmp],%[u],%[c]\n"
	"	addq	%[tmp],%[a],%[tmp]\n"
	"	bne	%[c],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [tmp] "=&r"(tmp), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return !c;
}

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	subq	%[old],1,%[tmp]\n"
	"	ble	%[old],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}
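
/*
 * Illustrative only: because the return value is old - 1 whether or
 * not the store happened, a negative result means the count was not
 * positive and nothing was consumed.  The name "free_slots" is
 * hypothetical.
 *
 *	if (atomic64_dec_if_positive(&free_slots) < 0)
 *		return -EBUSY;
 */
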
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
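
/*
 * These expand to full barriers because the plain atomic_inc()/
 * atomic_dec() above provide no ordering of their own; callers that
 * need ordering bracket the operation explicitly.  Illustrative only,
 * the field "obj->pending" is hypothetical:
 *
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->pending);
 */
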
#endif /* _ALPHA_ATOMIC_H */