/* Spin locks for communication between threads and signal handlers.
   Copyright (C) 2020-2021 Free Software Foundation, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <https://www.gnu.org/licenses/>.  */

/* Written by Bruno Haible <bruno@clisp.org>, 2020.  */

#include <config.h>

/* Specification.  */
#include "asyncsafe-spin.h"

#include <stdbool.h>
#include <stdlib.h>
#if defined _AIX
# include <sys/atomic_op.h>
#endif

#if defined _WIN32 && ! defined __CYGWIN__
/* Use Windows threads.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  glwthread_spin_init (lock);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  glwthread_spin_lock (lock);
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  if (glwthread_spin_unlock (lock))
    abort ();
}

void
asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
{
  glwthread_spin_destroy (lock);
}

#else

# if HAVE_PTHREAD_H
/* Use POSIX threads.  */

/* We don't use semaphores (although sem_post() is allowed in signal handlers),
   because that would require linking with -lrt on HP-UX 11, OSF/1, Solaris 10,
   and also because on macOS only named semaphores work.

   We don't use the C11 <stdatomic.h> (available in GCC >= 4.9) because that
   would require linking with -latomic.  */

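/* For reference only: a rough sketch of the rejected C11 <stdatomic.h>
   alternative, kept as a comment because this file deliberately avoids that
   dependency.  The helper names are hypothetical and an int-sized lock word
   is assumed:

     #include <stdatomic.h>

     static void
     stdatomic_spin_lock (atomic_int *lock)
     {
       int expected = 0;
       while (!atomic_compare_exchange_weak (lock, &expected, 1))
         expected = 0;
     }

     static void
     stdatomic_spin_unlock (atomic_int *lock)
     {
       atomic_store (lock, 0);
     }
*/
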
#  if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7) \
       || __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 1)) \
      && !defined __ibmxl__
/* Use GCC built-ins (available in GCC >= 4.7 and clang >= 3.1) that operate on
   the first byte of the lock.
   Documentation:
   <https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/_005f_005fatomic-Builtins.html>  */

#   if 1
/* An implementation that verifies the unlocks.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  __atomic_store_n (lock, 0, __ATOMIC_SEQ_CST);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  /* Wait until *lock becomes 0, then replace it with 1.  */
  asyncsafe_spinlock_t zero;
  while (!(zero = 0,
           __atomic_compare_exchange_n (lock, &zero, 1, false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  /* If *lock is 1, then replace it with 0.  */
  asyncsafe_spinlock_t one = 1;
  if (!__atomic_compare_exchange_n (lock, &one, 0, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
    abort ();
}

#   else
/* An implementation that is a little bit more optimized, but does not verify
   the unlocks.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  __atomic_clear (lock, __ATOMIC_SEQ_CST);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  while (__atomic_test_and_set (lock, __ATOMIC_SEQ_CST))
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  __atomic_clear (lock, __ATOMIC_SEQ_CST);
}

#   endif

#  elif (((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) \
          && !defined __sparc__) \
         || __clang_major__ >= 3) \
        && !defined __ibmxl__
/* Use GCC built-ins (available in GCC >= 4.1, except on SPARC, and
   clang >= 3.0).
   Documentation:
   <https://gcc.gnu.org/onlinedocs/gcc-4.1.2/gcc/Atomic-Builtins.html>  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
  __sync_synchronize ();
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  /* Wait until *lock becomes 0, then replace it with 1.  */
  while (__sync_val_compare_and_swap (lock, 0, 1) != 0)
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  /* If *lock is 1, then replace it with 0.  */
  if (__sync_val_compare_and_swap (lock, 1, 0) != 1)
    abort ();
}

#  elif defined _AIX
/* AIX.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  atomic_p vp = (int *) lock;
  _clear_lock (vp, 0);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  atomic_p vp = (int *) lock;
  while (_check_lock (vp, 0, 1))
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  atomic_p vp = (int *) lock;
  if (_check_lock (vp, 1, 0))
    abort ();
}

#  elif ((defined __GNUC__ || defined __clang__ || defined __SUNPRO_C) && (defined __sparc || defined __i386 || defined __x86_64__)) || (defined __TINYC__ && (defined __i386 || defined __x86_64__))
/* For older versions of GCC or clang, use inline assembly.
   GCC, clang, and the Oracle Studio C 12 compiler understand GCC's extended
   asm syntax, but the plain Oracle Studio C 11 compiler understands only
   simple asm.  */
/* An implementation that verifies the unlocks.  */

static void
memory_barrier (void)
{
#   if defined __GNUC__ || defined __clang__ || __SUNPRO_C >= 0x590 || defined __TINYC__
#    if defined __i386 || defined __x86_64__
#     if defined __TINYC__ && defined __i386
  /* Cannot use the SSE instruction "mfence" with this compiler.  */
  asm volatile ("lock orl $0,(%esp)");
#     else
  asm volatile ("mfence");
#     endif
#    endif
#    if defined __sparc
  asm volatile ("membar 2");
#    endif
#   else
  /* Plain Oracle Studio C 11: simple asm only.  */
#    if defined __i386 || defined __x86_64__
  asm ("mfence");
#    endif
#    if defined __sparc
  asm ("membar 2");
#    endif
#   endif
}

/* Store NEWVAL in *VP if the old value *VP is == CMP.
   Return the old value.  */
static unsigned int
atomic_compare_and_swap (volatile unsigned int *vp, unsigned int cmp,
                         unsigned int newval)
{
#   if defined __GNUC__ || defined __clang__ || __SUNPRO_C >= 0x590 || defined __TINYC__
  unsigned int oldval;
#    if defined __i386 || defined __x86_64__
  asm volatile (" lock\n cmpxchgl %3,(%1)"
                : "=a" (oldval) : "r" (vp), "a" (cmp), "r" (newval) : "memory");
#    endif
#    if defined __sparc
  asm volatile (" cas [%1],%2,%3\n"
                " mov %3,%0"
                : "=r" (oldval) : "r" (vp), "r" (cmp), "r" (newval) : "memory");
#    endif
  return oldval;
#   else /* __SUNPRO_C */
  /* With simple asm, the old value is left in the return register
     (%eax on x86, %i0 on SPARC).  */
#    if defined __x86_64__
  asm (" movl %esi,%eax\n"
       " lock\n cmpxchgl %edx,(%rdi)");
#    elif defined __i386
  asm (" movl 16(%ebp),%ecx\n"
       " movl 12(%ebp),%eax\n"
       " movl 8(%ebp),%edx\n"
       " lock\n cmpxchgl %ecx,(%edx)");
#    endif
#    if defined __sparc
  asm (" cas [%i0],%i1,%i2\n"
       " mov %i2,%i0");
#    endif
#   endif
}

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
  memory_barrier ();
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  while (atomic_compare_and_swap (vp, 0, 1) != 0)
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  if (atomic_compare_and_swap (vp, 1, 0) != 1)
    abort ();
}

#  else
/* Fallback code.  It has some race conditions.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  /* Racy: another thread or a signal handler may set *vp between the test
     and the store below.  */
  while (*vp)
    ;
  *vp = 1;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
}

#  endif

# else
/* Provide a dummy implementation for single-threaded applications.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
}

# endif

void
asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
{
}

#endif

void
asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
                     const sigset_t *mask, sigset_t *saved_mask)
{
  sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
  do_lock (lock);
}

void
asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
{
  do_unlock (lock);
  sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
}
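
/* Usage sketch (illustrative only; the identifiers below are hypothetical and
   not part of this module's API):

     static asyncsafe_spinlock_t counter_lock;   initialized once with
                                                 asyncsafe_spin_init
     static volatile sig_atomic_t counter;

     static void
     increment_counter (void)
     {
       sigset_t mask, saved_mask;
       sigfillset (&mask);
       asyncsafe_spin_lock (&counter_lock, &mask, &saved_mask);
       counter++;
       asyncsafe_spin_unlock (&counter_lock, &saved_mask);
     }

   While the lock is held, the signals in MASK are blocked in the calling
   thread, so a signal handler cannot interrupt its own thread's critical
   section; a handler delivered to another thread takes the same spin lock
   and simply waits until it is released.  */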