/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP

#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
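
/*
 * Illustrative sketch (not part of this header; the addresses and a
 * 64-byte L1_CACHE_BYTES are assumed for the arithmetic): atomic_ts
 * on different cachelines hash to different locks, so they can be
 * updated concurrently:
 *
 *	atomic_t *a = (atomic_t *) 0x1000;
 *	atomic_t *b = (atomic_t *) 0x1040;	// one cacheline later
 *	ATOMIC_HASH(a);	// (0x1000/64) & 3 == 0 -> &__atomic_hash[0]
 *	ATOMIC_HASH(b);	// (0x1040/64) & 3 == 1 -> &__atomic_hash[1]
 */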

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)

#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
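
/*
 * Usage sketch (illustrative): the lock and unlock sides must be given
 * the same pointer so that both hash to the same spinlock; every
 * helper below follows this shape:
 *
 *	unsigned long flags;
 *
 *	_atomic_spin_lock_irqsave(v, flags);
 *	// ... read-modify-write of v->counter ...
 *	_atomic_spin_unlock_irqrestore(v, flags);
 */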

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

/* It's possible to reduce all atomic operations to
 * __atomic_add_return, atomic_set and atomic_read (the latter
 * is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return (*(volatile int *)&(v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
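
/*
 * Usage sketch (illustrative; obj and its refcnt field are
 * hypothetical): the generic atomic_inc_not_zero() is built on this
 * helper, taking a reference only while the count is still non-zero:
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) != 0)
 *		// ... got a reference, obj cannot be freed under us ...
 */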

#define atomic_add(i,v)	((void)(__atomic_add_return( (i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-((int) (i)),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( (i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-(i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)
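
/*
 * Typical use (sketch; obj_put() and struct obj are hypothetical):
 * drop a reference and free the object when the last one goes away:
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_test(&o->refcnt))
 *			kfree(o);
 *	}
 */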

#define ATOMIC_INIT(i)	{ (i) }

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
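
/*
 * Sketch of why these exist (pattern from Documentation/memory-barriers.txt):
 * order a plain store against a following atomic_dec() so that another
 * CPU which sees the count drop also sees the store:
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->ref_count);
 */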

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return (*(volatile s64 *)&(v)->counter);
}

#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)(i)),(v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)(i)),(v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)(i)),(v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)(i)),(v)))
#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
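
/*
 * Usage sketch (illustrative; obj and its refcnt field are
 * hypothetical): pin an object only if its 64-bit refcount has not
 * already dropped to zero:
 *
 *	if (atomic64_inc_not_zero(&obj->refcnt))
 *		// ... obj is now pinned and safe to use ...
 */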

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
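
/*
 * Usage sketch (illustrative; pool is hypothetical): consume one unit
 * of a counted resource without letting the counter go negative:
 *
 *	if (atomic64_dec_if_positive(&pool->available) >= 0)
 *		// ... got a unit ...
 *	else
 *		// ... none left; the counter was not touched ...
 */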

#endif /* CONFIG_64BIT */

#endif /* _ASM_PARISC_ATOMIC_H_ */