/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <stdint.h>	/* For the fixed-width and fast types below.  */
#include <tls.h>	/* For tcbhead_t.  */

typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

#ifdef UP
# define LOCK_PREFIX	/* nothing */
#else
# define LOCK_PREFIX "lock;"
#endif
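
/* Illustrative note (not in the original source): LOCK_PREFIX is glued to
   the instruction by C string concatenation, so
     LOCK_PREFIX "xaddl %0, %1"
   becomes "lock;xaddl %0, %1" on SMP builds and a plain "xaddl %0, %1"
   when UP (uniprocessor) is defined.  */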

#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __sync_val_compare_and_swap (mem, oldval, newval)
#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  (! __sync_bool_compare_and_swap (mem, oldval, newval))
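
/* Usage sketch (illustrative only, not part of the original header): the
   "val" form returns the value found in memory, while the "bool" form
   yields zero on success:

     int owner = 0;
     if (atomic_compare_and_exchange_bool_acq (&owner, 1, 0) == 0)
       ... the CAS succeeded and owner went from 0 to 1 ...
*/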

/* The __arch_c_ ("conditional lock") variants test
   tcbhead_t.multiple_threads in the TCB via %gs and branch over the lock
   prefix while the process has only one thread.  */
#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \
		       "je 0f\n\t" \
		       "lock\n" \
		       "0:\tcmpxchgb %b2, %1" \
		       : "=a" (ret), "=m" (*mem) \
		       : "q" (newval), "m" (*mem), "0" (oldval), \
			 "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \
		       "je 0f\n\t" \
		       "lock\n" \
		       "0:\tcmpxchgw %w2, %1" \
		       : "=a" (ret), "=m" (*mem) \
		       : "r" (newval), "m" (*mem), "0" (oldval), \
			 "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \
		       "je 0f\n\t" \
		       "lock\n" \
		       "0:\tcmpxchgl %2, %1" \
		       : "=a" (ret), "=m" (*mem) \
		       : "r" (newval), "m" (*mem), "0" (oldval), \
			 "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

/* XXX We do not really need 64-bit compare-and-exchange.  At least not at
   the moment.  Using it would mean causing portability problems, since not
   many other 32-bit architectures support such an operation.  So don't
   define any code for now.  If it is really going to be used, the code
   below can be used on Intel Pentium and later, but NOT on i486.  */
#if 0
# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret = *(mem); \
     abort (); \
     ret = (newval); \
     ret = (oldval); \
     ret; })

# define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret = *(mem); \
     abort (); \
     ret = (newval); \
     ret = (oldval); \
     ret; })
#else
# ifdef __PIC__
#  define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("xchgl %2, %%ebx\n\t" \
		       LOCK_PREFIX "cmpxchg8b %1\n\t" \
		       "xchgl %2, %%ebx" \
		       : "=A" (ret), "=m" (*mem) \
		       : "DS" (((unsigned long long int) (newval)) \
			       & 0xffffffff), \
			 "c" (((unsigned long long int) (newval)) >> 32), \
			 "m" (*mem), "a" (((unsigned long long int) (oldval)) \
					  & 0xffffffff), \
			 "d" (((unsigned long long int) (oldval)) >> 32)); \
     ret; })

#  define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("xchgl %2, %%ebx\n\t" \
		       "cmpl $0, %%gs:%P7\n\t" \
		       "je 0f\n\t" \
		       "lock\n" \
		       "0:\tcmpxchg8b %1\n\t" \
		       "xchgl %2, %%ebx" \
		       : "=A" (ret), "=m" (*mem) \
		       : "DS" (((unsigned long long int) (newval)) \
			       & 0xffffffff), \
			 "c" (((unsigned long long int) (newval)) >> 32), \
			 "m" (*mem), "a" (((unsigned long long int) (oldval)) \
					  & 0xffffffff), \
			 "d" (((unsigned long long int) (oldval)) >> 32), \
			 "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })
# else
#  define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile (LOCK_PREFIX "cmpxchg8b %1" \
		       : "=A" (ret), "=m" (*mem) \
		       : "b" (((unsigned long long int) (newval)) \
			      & 0xffffffff), \
			 "c" (((unsigned long long int) (newval)) >> 32), \
			 "m" (*mem), "a" (((unsigned long long int) (oldval)) \
					  & 0xffffffff), \
			 "d" (((unsigned long long int) (oldval)) >> 32)); \
     ret; })

#  define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%gs:%P7\n\t" \
		       "je 0f\n\t" \
		       "lock\n" \
		       "0:\tcmpxchg8b %1" \
		       : "=A" (ret), "=m" (*mem) \
		       : "b" (((unsigned long long int) (newval)) \
			      & 0xffffffff), \
			 "c" (((unsigned long long int) (newval)) >> 32), \
			 "m" (*mem), "a" (((unsigned long long int) (oldval)) \
					  & 0xffffffff), \
			 "d" (((unsigned long long int) (oldval)) >> 32), \
			 "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })
# endif
#endif

/* Note that we need no lock prefix: xchg with a memory operand is always
   implicitly locked.  */
#define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*mem) result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile ("xchgb %b0, %1" \
			 : "=q" (result), "=m" (*mem) \
			 : "0" (newvalue), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile ("xchgw %w0, %1" \
			 : "=r" (result), "=m" (*mem) \
			 : "0" (newvalue), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile ("xchgl %0, %1" \
			 : "=r" (result), "=m" (*mem) \
			 : "0" (newvalue), "m" (*mem)); \
     else \
       { \
	 result = 0; \
	 abort (); \
       } \
     result; })

#define __arch_exchange_and_add_body(lock, pfx, mem, value) \
  ({ __typeof (*mem) __result; \
     __typeof (value) __addval = (value); \
     if (sizeof (*mem) == 1) \
       __asm __volatile (lock "xaddb %b0, %1" \
			 : "=q" (__result), "=m" (*mem) \
			 : "0" (__addval), "m" (*mem), \
			   "i" (offsetof (tcbhead_t, multiple_threads))); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (lock "xaddw %w0, %1" \
			 : "=r" (__result), "=m" (*mem) \
			 : "0" (__addval), "m" (*mem), \
			   "i" (offsetof (tcbhead_t, multiple_threads))); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (lock "xaddl %0, %1" \
			 : "=r" (__result), "=m" (*mem) \
			 : "0" (__addval), "m" (*mem), \
			   "i" (offsetof (tcbhead_t, multiple_threads))); \
     else \
       { \
	 /* No xadd for 64-bit objects; fall back to a CAS loop.  */ \
	 __typeof (mem) __memp = (mem); \
	 __typeof (*mem) __tmpval; \
	 __result = *__memp; \
	 do \
	   __tmpval = __result; \
	 while ((__result = pfx##_compare_and_exchange_val_64_acq \
		 (__memp, __result + __addval, __result)) == __tmpval); \
       } \
     __result; })

#define atomic_exchange_and_add(mem, value) \
  __sync_fetch_and_add (mem, value)

#define __arch_exchange_and_add_cprefix \
  "cmpl $0, %%gs:%P4\n\tje 0f\n\tlock\n0:\t"

#define catomic_exchange_and_add(mem, value) \
  __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, __arch_c, \
				mem, value)
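
/* Example (illustrative only): atomic_exchange_and_add returns the old
   value, so with

     int counter = 5;
     int old = atomic_exchange_and_add (&counter, 3);

   old ends up as 5 and counter as 8.  catomic_exchange_and_add is the same
   operation, minus the lock prefix while the process is still
   single-threaded.  */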

#define __arch_add_body(lock, pfx, mem, value) \
  do { \
    if (__builtin_constant_p (value) && (value) == 1) \
      atomic_increment (mem); \
    else if (__builtin_constant_p (value) && (value) == -1) \
      atomic_decrement (mem); \
    else if (sizeof (*mem) == 1) \
      __asm __volatile (lock "addb %b1, %0" \
			: "=m" (*mem) \
			: "iq" (value), "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "addw %w1, %0" \
			: "=m" (*mem) \
			: "ir" (value), "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "addl %1, %0" \
			: "=m" (*mem) \
			: "ir" (value), "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      { \
	/* No 64-bit add; use a compare-and-exchange loop.  */ \
	__typeof (value) __addval = (value); \
	__typeof (mem) __memp = (mem); \
	__typeof (*mem) __oldval = *__memp; \
	__typeof (*mem) __tmpval; \
	do \
	  __tmpval = __oldval; \
	while ((__oldval = pfx##_compare_and_exchange_val_64_acq \
		(__memp, __oldval + __addval, __oldval)) == __tmpval); \
      } \
  } while (0)

#define atomic_add(mem, value) \
  __arch_add_body (LOCK_PREFIX, __arch, mem, value)

#define __arch_add_cprefix \
  "cmpl $0, %%gs:%P3\n\tje 0f\n\tlock\n0:\t"

#define catomic_add(mem, value) \
  __arch_add_body (__arch_add_cprefix, __arch_c, mem, value)
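
/* Example (illustrative): atomic_add returns nothing; constant operands of
   1 and -1 are diverted to the cheaper inc/dec forms at compile time via
   __builtin_constant_p:

     atomic_add (&counter, 10);
     atomic_add (&counter, 1);	 ... compiles as atomic_increment (&counter)
*/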

#define atomic_add_negative(mem, value) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "iq" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "ir" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "ir" (value), "m" (*mem)); \
     else \
       abort (); \
     __result; })

#define atomic_add_zero(mem, value) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "iq" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "ir" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "ir" (value), "m" (*mem)); \
     else \
       abort (); \
     __result; })

#define __arch_increment_body(lock, pfx, mem) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "incb %b0" \
			: "=m" (*mem) \
			: "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "incw %w0" \
			: "=m" (*mem) \
			: "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "incl %0" \
			: "=m" (*mem) \
			: "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      { \
	/* No 64-bit inc; use a compare-and-exchange loop.  */ \
	__typeof (mem) __memp = (mem); \
	__typeof (*mem) __oldval = *__memp; \
	__typeof (*mem) __tmpval; \
	do \
	  __tmpval = __oldval; \
	while ((__oldval = pfx##_compare_and_exchange_val_64_acq \
		(__memp, __oldval + 1, __oldval)) == __tmpval); \
      } \
  } while (0)

#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, __arch, mem)

#define __arch_increment_cprefix \
  "cmpl $0, %%gs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_increment(mem) \
  __arch_increment_body (__arch_increment_cprefix, __arch_c, mem)
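
/* Example (illustrative): catomic_increment (&counter) executes the same
   "incl" as atomic_increment (&counter), but branches over the lock prefix
   while tcbhead_t.multiple_threads is still zero.  */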

#define atomic_increment_and_test(mem) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "incb %0; sete %b1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "incw %0; sete %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "incl %0; sete %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "m" (*mem)); \
     else \
       abort (); \
     __result; })

#define __arch_decrement_body(lock, pfx, mem) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "decb %b0" \
			: "=m" (*mem) \
			: "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "decw %w0" \
			: "=m" (*mem) \
			: "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "decl %0" \
			: "=m" (*mem) \
			: "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      { \
	/* No 64-bit dec; use a compare-and-exchange loop.  */ \
	__typeof (mem) __memp = (mem); \
	__typeof (*mem) __oldval = *__memp; \
	__typeof (*mem) __tmpval; \
	do \
	  __tmpval = __oldval; \
	while ((__oldval = pfx##_compare_and_exchange_val_64_acq \
		(__memp, __oldval - 1, __oldval)) == __tmpval); \
      } \
  } while (0)

#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, __arch, mem)

#define __arch_decrement_cprefix \
  "cmpl $0, %%gs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_decrement(mem) \
  __arch_decrement_body (__arch_decrement_cprefix, __arch_c, mem)

#define atomic_decrement_and_test(mem) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "decb %b0; sete %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "decw %w0; sete %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "decl %0; sete %1" \
			 : "=m" (*mem), "=qm" (__result) \
			 : "m" (*mem)); \
     else \
       abort (); \
     __result; })

#define atomic_bit_set(mem, bit) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (LOCK_PREFIX "orb %b2, %0" \
			: "=m" (*mem) \
			: "m" (*mem), "iq" (1 << (bit))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (LOCK_PREFIX "orw %w2, %0" \
			: "=m" (*mem) \
			: "m" (*mem), "ir" (1 << (bit))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (LOCK_PREFIX "orl %2, %0" \
			: "=m" (*mem) \
			: "m" (*mem), "ir" (1 << (bit))); \
    else \
      abort (); \
  } while (0)

#define atomic_bit_test_set(mem, bit) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0" \
			 : "=q" (__result), "=m" (*mem) \
			 : "m" (*mem), "ir" (bit)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0" \
			 : "=q" (__result), "=m" (*mem) \
			 : "m" (*mem), "ir" (bit)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0" \
			 : "=q" (__result), "=m" (*mem) \
			 : "m" (*mem), "ir" (bit)); \
     else \
       abort (); \
     __result; })

/* "rep; nop" is the encoding of the "pause" instruction, a spin-wait
   hint.  */
#define atomic_delay() asm ("rep; nop")

#define __arch_and_body(lock, mem, mask) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "andb %b1, %0" \
			: "=m" (*mem) \
			: "iq" (mask), "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "andw %w1, %0" \
			: "=m" (*mem) \
			: "ir" (mask), "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "andl %1, %0" \
			: "=m" (*mem) \
			: "ir" (mask), "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      abort (); \
  } while (0)

#define __arch_cprefix \
  "cmpl $0, %%gs:%P3\n\tje 0f\n\tlock\n0:\t"

#define atomic_and(mem, mask) __arch_and_body (LOCK_PREFIX, mem, mask)

#define catomic_and(mem, mask) __arch_and_body (__arch_cprefix, mem, mask)
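
/* Example (illustrative): clearing a set of flag bits in one locked
   operation; FLAG_MASK is a hypothetical constant:

     atomic_and (&flags, ~FLAG_MASK);
*/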

#define __arch_or_body(lock, mem, mask) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "orb %b1, %0" \
			: "=m" (*mem) \
			: "iq" (mask), "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "orw %w1, %0" \
			: "=m" (*mem) \
			: "ir" (mask), "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "orl %1, %0" \
			: "=m" (*mem) \
			: "ir" (mask), "m" (*mem), \
			  "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      abort (); \
  } while (0)

#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)

#define catomic_or(mem, mask) __arch_or_body (__arch_cprefix, mem, mask)
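
/* Example (illustrative): atomic_or (&flags, 1 << 2) sets bit 2; the
   catomic_ variant again tests multiple_threads first and omits the lock
   prefix in single-threaded processes.  */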