// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
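
/*
 * Illustrative example (assuming L1_CACHE_SHIFT == 5 and NR_LOCKS == 16;
 * neither value is guaranteed): for v at address 0x1040, addr >> 5 ==
 * 0x82, then 0x82 ^ (0x82 >> 8) ^ (0x82 >> 16) == 0x82, and
 * 0x82 & 15 == 2, so lock_addr() returns &atomic64_lock[2].lock.
 * Variables in the same cacheline always hash to the same lock;
 * unrelated variables may also collide, which is safe and merely
 * serializes them unnecessarily.
 */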

s64 atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);

void atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);
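
/*
 * Note that even the plain read and set above must take the lock: on a
 * 32-bit machine a 64-bit load or store is two separate memory
 * accesses, so an unlocked atomic64_read() racing with a locked update
 * could observe a torn value.
 */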

#define ATOMIC64_OP(op, c_op)						\
void atomic64_##op(s64 a, atomic64_t *v)				\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)					\
s64 atomic64_##op##_return(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_##op##_return);

#define ATOMIC64_FETCH_OP(op, c_op)					\
s64 atomic64_fetch_##op(s64 a, atomic64_t *v)				\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = v->counter;						\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_fetch_##op);

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
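
/*
 * For reference, each ATOMIC64_OPS(add, +=) invocation above generates
 * atomic64_add(), atomic64_add_return() and atomic64_fetch_add().  The
 * generated atomic64_add() is simply:
 *
 *	void atomic64_add(s64 a, atomic64_t *v)
 *	{
 *		unsigned long flags;
 *		raw_spinlock_t *lock = lock_addr(v);
 *
 *		raw_spin_lock_irqsave(lock, flags);
 *		v->counter += a;
 *		raw_spin_unlock_irqrestore(lock, flags);
 *	}
 */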

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

s64 atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
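
/*
 * Typical caller pattern (sketch only; obj and free_obj() are made-up
 * names): drop a reference that must never go below zero, and tear
 * down on the final put.  A negative return means the counter was
 * already zero and was left unchanged:
 *
 *	if (atomic64_dec_if_positive(&obj->refs) == 0)
 *		free_obj(obj);
 */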

s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
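
/*
 * Sketch of the usual compare-and-swap retry loop built on top of this
 * primitive; transform() stands for whatever new value the caller
 * computes from the old one:
 *
 *	s64 old = atomic64_read(v);
 *	for (;;) {
 *		s64 seen = atomic64_cmpxchg(v, old, transform(old));
 *		if (seen == old)
 *			break;		// update applied
 *		old = seen;		// lost a race, retry with new value
 *	}
 */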

s64 atomic64_xchg(atomic64_t *v, s64 new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);

s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val != u)
		v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_fetch_add_unless);
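
/*
 * The conditional helpers the rest of the kernel uses are derived from
 * this primitive by the generic atomic wrappers (their exact location
 * under <linux/atomic.h> varies by kernel version), roughly:
 *
 *	atomic64_add_unless(v, a, u) == (atomic64_fetch_add_unless(v, a, u) != u)
 *	atomic64_inc_not_zero(v)     ==  atomic64_add_unless(v, 1, 0)
 */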