/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 *
 * NOTE: these functions do *not* follow the PCS and must explicitly
 * save any clobbered registers other than x0 (regardless of return
 * value).  This is achieved through -fcall-saved-* compiler flags for
 * this file, which unfortunately don't work on a per-function basis
 * (the optimize attribute silently ignores these options).
 */

#define ATOMIC_OP(op, asm_op)						\
__LL_SC_INLINE void							\
__LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v))			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "\n"				\
"	prfm	pstl1strm, %2\n"					\
"1:	ldxr	%w0, %2\n"						\
"	" #asm_op "	%w0, %w0, %w3\n"				\
"	stxr	%w1, %w0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i));							\
}									\
__LL_SC_EXPORT(arch_atomic_##op);

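/*
 * For illustration only: modulo the __LL_SC_* wrappers, ATOMIC_OP(add, add)
 * (instantiated via ATOMIC_OPS(add, add) further down) expands to roughly
 * the following (a sketch, not the exact preprocessor output):
 *
 *	void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long tmp;
 *		int result;
 *
 *		asm volatile("// atomic_add\n"
 *		"	prfm	pstl1strm, %2\n"	// prefetch for store
 *		"1:	ldxr	%w0, %2\n"		// load-exclusive counter
 *		"	add	%w0, %w0, %w3\n"	// apply the operation
 *		"	stxr	%w1, %w0, %2\n"		// store-exclusive
 *		"	cbnz	%w1, 1b"		// lost exclusivity: retry
 *		: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 *		: "Ir" (i));
 *	}
 */
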
#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)		\
__LL_SC_INLINE int							\
__LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v))	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "_return" #name "\n"		\
"	prfm	pstl1strm, %2\n"					\
"1:	ld" #acq "xr	%w0, %2\n"					\
"	" #asm_op "	%w0, %w0, %w3\n"				\
"	st" #rel "xr	%w1, %w0, %2\n"					\
"	cbnz	%w1, 1b\n"						\
"	" #mb								\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i)							\
	: cl);								\
									\
	return result;							\
}									\
__LL_SC_EXPORT(arch_atomic_##op##_return##name);

#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)		\
__LL_SC_INLINE int							\
__LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v))	\
{									\
	unsigned long tmp;						\
	int val, result;						\
									\
	asm volatile("// atomic_fetch_" #op #name "\n"			\
"	prfm	pstl1strm, %3\n"					\
"1:	ld" #acq "xr	%w0, %3\n"					\
"	" #asm_op "	%w1, %w0, %w4\n"				\
"	st" #rel "xr	%w2, %w1, %3\n"					\
"	cbnz	%w2, 1b\n"						\
"	" #mb								\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)	\
	: "Ir" (i)							\
	: cl);								\
									\
	return result;							\
}									\
__LL_SC_EXPORT(arch_atomic_fetch_##op##name);

#define ATOMIC_OPS(...)							\
	ATOMIC_OP(__VA_ARGS__)						\
	ATOMIC_OP_RETURN(        , dmb ish,  , l, "memory", __VA_ARGS__)\
	ATOMIC_OP_RETURN(_relaxed,        ,  ,  ,         , __VA_ARGS__)\
	ATOMIC_OP_RETURN(_acquire,        , a,  , "memory", __VA_ARGS__)\
	ATOMIC_OP_RETURN(_release,        ,  , l, "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
	ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)

#undef ATOMIC_OPS

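/*
 * A sketch of what each ATOMIC_OPS() invocation above generates; e.g.
 * ATOMIC_OPS(add, add) emits the plain op plus four orderings apiece of
 * the return and fetch forms:
 *
 *	void arch_atomic_add(int i, atomic_t *v);
 *	int arch_atomic_add_return(int i, atomic_t *v);		// stlxr + dmb ish
 *	int arch_atomic_add_return_relaxed(int i, atomic_t *v);	// no barriers
 *	int arch_atomic_add_return_acquire(int i, atomic_t *v);	// ldaxr
 *	int arch_atomic_add_return_release(int i, atomic_t *v);	// stlxr
 *	int arch_atomic_fetch_add(int i, atomic_t *v);		// ... and so on
 *	...							// for the fetch family
 */
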
#define ATOMIC_OPS(...)							\
	ATOMIC_OP(__VA_ARGS__)						\
	ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
	ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, orr)
ATOMIC_OPS(xor, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define ATOMIC64_OP(op, asm_op)						\
__LL_SC_INLINE void							\
__LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v))		\
{									\
	long result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "\n"				\
"	prfm	pstl1strm, %2\n"					\
"1:	ldxr	%0, %2\n"						\
"	" #asm_op "	%0, %0, %3\n"					\
"	stxr	%w1, %0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i));							\
}									\
__LL_SC_EXPORT(arch_atomic64_##op);

#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)		\
__LL_SC_INLINE long							\
__LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\
{									\
	long result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "_return" #name "\n"		\
"	prfm	pstl1strm, %2\n"					\
"1:	ld" #acq "xr	%0, %2\n"					\
"	" #asm_op "	%0, %0, %3\n"					\
"	st" #rel "xr	%w1, %0, %2\n"					\
"	cbnz	%w1, 1b\n"						\
"	" #mb								\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i)							\
	: cl);								\
									\
	return result;							\
}									\
__LL_SC_EXPORT(arch_atomic64_##op##_return##name);

#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)		\
__LL_SC_INLINE long							\
__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(long i, atomic64_t *v))	\
{									\
	long result, val;						\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_fetch_" #op #name "\n"		\
"	prfm	pstl1strm, %3\n"					\
"1:	ld" #acq "xr	%0, %3\n"					\
"	" #asm_op "	%1, %0, %4\n"					\
"	st" #rel "xr	%w2, %1, %3\n"					\
"	cbnz	%w2, 1b\n"						\
"	" #mb								\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)	\
	: "Ir" (i)							\
	: cl);								\
									\
	return result;							\
}									\
__LL_SC_EXPORT(arch_atomic64_fetch_##op##name);

#define ATOMIC64_OPS(...)						\
	ATOMIC64_OP(__VA_ARGS__)					\
	ATOMIC64_OP_RETURN(, dmb ish,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_relaxed,,  ,  ,         , __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_acquire,, a,  , "memory", __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_release,,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(...)						\
	ATOMIC64_OP(__VA_ARGS__)					\
	ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(andnot, bic)
ATOMIC64_OPS(or, orr)
ATOMIC64_OPS(xor, eor)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

__LL_SC_INLINE long
__LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"	prfm	pstl1strm, %2\n"
"1:	ldxr	%0, %2\n"
"	subs	%0, %0, #1\n"
"	b.lt	2f\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b\n"
"	dmb	ish\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}
__LL_SC_EXPORT(arch_atomic64_dec_if_positive);

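/*
 * Semantics sketch for a hypothetical caller (count is a made-up example
 * variable): the decrement is only stored when the result stays
 * non-negative; otherwise the counter is left untouched and the (negative)
 * would-be result is returned:
 *
 *	if (arch_atomic64_dec_if_positive(&count) < 0)
 *		;	// counter was already <= 0; nothing was written
 */
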
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl)		\
__LL_SC_INLINE u##sz							\
__LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr,		\
					 unsigned long old,		\
					 u##sz new))			\
{									\
	unsigned long tmp;						\
	u##sz oldval;							\
									\
	/*								\
	 * Sub-word sizes require explicit casting so that the compare	\
	 * part of the cmpxchg doesn't end up interpreting non-zero	\
	 * upper bits of the register containing "old".			\
	 */								\
	if (sz < 32)							\
		old = (u##sz)old;					\
									\
	asm volatile(							\
	"	prfm	pstl1strm, %[v]\n"				\
	"1:	ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n"		\
	"	eor	%" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"	\
	"	cbnz	%" #w "[tmp], 2f\n"				\
	"	st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n"	\
	"	cbnz	%w[tmp], 1b\n"					\
	"	" #mb "\n"						\
	"2:"								\
	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),			\
	  [v] "+Q" (*(u##sz *)ptr)					\
	: [old] "Kr" (old), [new] "r" (new)				\
	: cl);								\
									\
	return oldval;							\
}									\
__LL_SC_EXPORT(__cmpxchg_case_##name##sz);

__CMPXCHG_CASE(w, b,     ,  8,        ,  ,  ,         )
__CMPXCHG_CASE(w, h,     , 16,        ,  ,  ,         )
__CMPXCHG_CASE(w,  ,     , 32,        ,  ,  ,         )
__CMPXCHG_CASE( ,  ,     , 64,        ,  ,  ,         )
__CMPXCHG_CASE(w, b, acq_,  8,        , a,  , "memory")
__CMPXCHG_CASE(w, h, acq_, 16,        , a,  , "memory")
__CMPXCHG_CASE(w,  , acq_, 32,        , a,  , "memory")
__CMPXCHG_CASE( ,  , acq_, 64,        , a,  , "memory")
__CMPXCHG_CASE(w, b, rel_,  8,        ,  , l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16,        ,  , l, "memory")
__CMPXCHG_CASE(w,  , rel_, 32,        ,  , l, "memory")
__CMPXCHG_CASE( ,  , rel_, 64,        ,  , l, "memory")
__CMPXCHG_CASE(w, b,  mb_,  8, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w, h,  mb_, 16, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w,  ,  mb_, 32, dmb ish,  , l, "memory")
__CMPXCHG_CASE( ,  ,  mb_, 64, dmb ish,  , l, "memory")

#undef __CMPXCHG_CASE

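/*
 * Illustrative use (hypothetical caller; word, old and new are made-up
 * names): the fully-ordered 32-bit case generated above succeeds iff the
 * returned snapshot equals the expected value:
 *
 *	u32 seen = __cmpxchg_case_mb_32(&word, old, new);
 *	if (seen == old)
 *		;	// exchange happened, fully ordered (dmb ish)
 */
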
#define __CMPXCHG_DBL(name, mb, rel, cl)				\
__LL_SC_INLINE long							\
__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,		\
				      unsigned long old2,		\
				      unsigned long new1,		\
				      unsigned long new2,		\
				      volatile void *ptr))		\
{									\
	unsigned long tmp, ret;						\
									\
	asm volatile("// __cmpxchg_double" #name "\n"			\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ldxp	%0, %1, %2\n"					\
	"	eor	%0, %0, %3\n"					\
	"	eor	%1, %1, %4\n"					\
	"	orr	%1, %0, %1\n"					\
	"	cbnz	%1, 2f\n"					\
	"	st" #rel "xp	%w0, %5, %6, %2\n"			\
	"	cbnz	%w0, 1b\n"					\
	"	" #mb "\n"						\
	"2:"								\
	: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)	\
	: "r" (old1), "r" (old2), "r" (new1), "r" (new2)		\
	: cl);								\
									\
	return ret;							\
}									\
__LL_SC_EXPORT(__cmpxchg_double##name);

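/*
 * Return-value sketch: on success both EORs produce zero, so
 * __cmpxchg_double*() returns 0; a non-zero return means a compare failed
 * and nothing was stored. A hypothetical caller:
 *
 *	if (!__cmpxchg_double_mb(old1, old2, new1, new2, pair))
 *		;	// both words matched and were replaced atomically
 */
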
__CMPXCHG_DBL(   ,        ,  ,         )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")

#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LL_SC_H */