1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
30 #include <sys/resource.h> /* getrlimit() */
31 #include <unistd.h> /* getpagesize() */
40 #if defined(__GLIBC__) && !defined(__UCLIBC__)
41 #include <gnu/libc-version.h> /* gnu_get_libc_version() */
45 #define NANOSEC ((uint64_t) 1e9)
47 #if defined(PTHREAD_BARRIER_SERIAL_THREAD)
48 STATIC_ASSERT(sizeof(uv_barrier_t) == sizeof(pthread_barrier_t));
51 /* Note: guard clauses should match uv_barrier_t's in include/uv/unix.h. */
52 #if defined(_AIX) || \
53 defined(__OpenBSD__) || \
54 !defined(PTHREAD_BARRIER_SERIAL_THREAD)
55 int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
56 struct _uv_barrier* b;
59 if (barrier == NULL || count == 0)
62 b = uv__malloc(sizeof(*b));
70 rc = uv_mutex_init(&b->mutex);
74 rc = uv_cond_init(&b->cond);
82 uv_mutex_destroy(&b->mutex);
89 int uv_barrier_wait(uv_barrier_t* barrier) {
90 struct _uv_barrier* b;
93 if (barrier == NULL || barrier->b == NULL)
97 uv_mutex_lock(&b->mutex);
99 if (++b->in == b->threshold) {
101 b->out = b->threshold;
102 uv_cond_signal(&b->cond);
105 uv_cond_wait(&b->cond, &b->mutex);
109 last = (--b->out == 0);
110 uv_cond_signal(&b->cond);
112 uv_mutex_unlock(&b->mutex);
117 void uv_barrier_destroy(uv_barrier_t* barrier) {
118 struct _uv_barrier* b;
121 uv_mutex_lock(&b->mutex);
125 uv_cond_wait(&b->cond, &b->mutex);
130 uv_mutex_unlock(&b->mutex);
131 uv_mutex_destroy(&b->mutex);
132 uv_cond_destroy(&b->cond);
134 uv__free(barrier->b);
140 int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
141 return UV__ERR(pthread_barrier_init(barrier, NULL, count));
145 int uv_barrier_wait(uv_barrier_t* barrier) {
148 rc = pthread_barrier_wait(barrier);
150 if (rc != PTHREAD_BARRIER_SERIAL_THREAD)
153 return rc == PTHREAD_BARRIER_SERIAL_THREAD;
157 void uv_barrier_destroy(uv_barrier_t* barrier) {
158 if (pthread_barrier_destroy(barrier))
165 /* On MacOS, threads other than the main thread are created with a reduced
166 * stack size by default. Adjust to RLIMIT_STACK aligned to the page size.
168 * On Linux, threads created by musl have a much smaller stack than threads
169 * created by glibc (80 vs. 2048 or 4096 kB.) Follow glibc for consistency.
171 size_t uv__thread_stack_size(void) {
172 #if defined(__APPLE__) || defined(__linux__)
175 /* getrlimit() can fail on some aarch64 systems due to a glibc bug where
176 * the system call wrapper invokes the wrong system call. Don't treat
177 * that as fatal, just use the default stack size instead.
179 if (0 == getrlimit(RLIMIT_STACK, &lim) && lim.rlim_cur != RLIM_INFINITY) {
180 /* pthread_attr_setstacksize() expects page-aligned values. */
181 lim.rlim_cur -= lim.rlim_cur % (rlim_t) getpagesize();
183 /* Musl's PTHREAD_STACK_MIN is 2 KB on all architectures, which is
184 * too small to safely receive signals on.
186 * Musl's PTHREAD_STACK_MIN + MINSIGSTKSZ == 8192 on arm64 (which has
187 * the largest MINSIGSTKSZ of the architectures that musl supports) so
188 * let's use that as a lower bound.
190 * We use a hardcoded value because PTHREAD_STACK_MIN + MINSIGSTKSZ
191 * is between 28 and 133 KB when compiling against glibc, depending
192 * on the architecture.
194 if (lim.rlim_cur >= 8192)
195 if (lim.rlim_cur >= PTHREAD_STACK_MIN)
200 #if !defined(__linux__)
202 #elif defined(__PPC__) || defined(__ppc__) || defined(__powerpc__)
203 return 4 << 20; /* glibc default. */
205 return 2 << 20; /* glibc default. */
210 int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
211 uv_thread_options_t params;
212 params.flags = UV_THREAD_NO_FLAGS;
213 return uv_thread_create_ex(tid, ¶ms, entry, arg);
216 int uv_thread_create_ex(uv_thread_t* tid,
217 const uv_thread_options_t* params,
218 void (*entry)(void *arg),
221 pthread_attr_t* attr;
222 pthread_attr_t attr_storage;
226 /* Used to squelch a -Wcast-function-type warning. */
233 params->flags & UV_THREAD_HAS_STACK_SIZE ? params->stack_size : 0;
236 if (stack_size == 0) {
237 stack_size = uv__thread_stack_size();
239 pagesize = (size_t)getpagesize();
240 /* Round up to the nearest page boundary. */
241 stack_size = (stack_size + pagesize - 1) &~ (pagesize - 1);
242 #ifdef PTHREAD_STACK_MIN
243 if (stack_size < PTHREAD_STACK_MIN)
244 stack_size = PTHREAD_STACK_MIN;
248 if (stack_size > 0) {
249 attr = &attr_storage;
251 if (pthread_attr_init(attr))
254 if (pthread_attr_setstacksize(attr, stack_size))
259 err = pthread_create(tid, attr, f.out, arg);
262 pthread_attr_destroy(attr);
268 uv_thread_t uv_thread_self(void) {
269 return pthread_self();
272 int uv_thread_join(uv_thread_t *tid) {
273 return UV__ERR(pthread_join(*tid, NULL));
277 int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
278 return pthread_equal(*t1, *t2);
282 int uv_mutex_init(uv_mutex_t* mutex) {
283 #if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK)
284 return UV__ERR(pthread_mutex_init(mutex, NULL));
286 pthread_mutexattr_t attr;
289 if (pthread_mutexattr_init(&attr))
292 if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK))
295 err = pthread_mutex_init(mutex, &attr);
297 if (pthread_mutexattr_destroy(&attr))
305 int uv_mutex_init_recursive(uv_mutex_t* mutex) {
306 pthread_mutexattr_t attr;
309 if (pthread_mutexattr_init(&attr))
312 if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE))
315 err = pthread_mutex_init(mutex, &attr);
317 if (pthread_mutexattr_destroy(&attr))
324 void uv_mutex_destroy(uv_mutex_t* mutex) {
325 if (pthread_mutex_destroy(mutex))
330 void uv_mutex_lock(uv_mutex_t* mutex) {
331 if (pthread_mutex_lock(mutex))
336 int uv_mutex_trylock(uv_mutex_t* mutex) {
339 err = pthread_mutex_trylock(mutex);
341 if (err != EBUSY && err != EAGAIN)
350 void uv_mutex_unlock(uv_mutex_t* mutex) {
351 if (pthread_mutex_unlock(mutex))
356 int uv_rwlock_init(uv_rwlock_t* rwlock) {
357 return UV__ERR(pthread_rwlock_init(rwlock, NULL));
361 void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
362 if (pthread_rwlock_destroy(rwlock))
367 void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
368 if (pthread_rwlock_rdlock(rwlock))
373 int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
376 err = pthread_rwlock_tryrdlock(rwlock);
378 if (err != EBUSY && err != EAGAIN)
387 void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
388 if (pthread_rwlock_unlock(rwlock))
393 void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
394 if (pthread_rwlock_wrlock(rwlock))
399 int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
402 err = pthread_rwlock_trywrlock(rwlock);
404 if (err != EBUSY && err != EAGAIN)
413 void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
414 if (pthread_rwlock_unlock(rwlock))
419 void uv_once(uv_once_t* guard, void (*callback)(void)) {
420 if (pthread_once(guard, callback))
424 #if defined(__APPLE__) && defined(__MACH__)
426 int uv_sem_init(uv_sem_t* sem, unsigned int value) {
429 err = semaphore_create(mach_task_self(), sem, SYNC_POLICY_FIFO, value);
430 if (err == KERN_SUCCESS)
432 if (err == KERN_INVALID_ARGUMENT)
434 if (err == KERN_RESOURCE_SHORTAGE)
438 return UV_EINVAL; /* Satisfy the compiler. */
442 void uv_sem_destroy(uv_sem_t* sem) {
443 if (semaphore_destroy(mach_task_self(), *sem))
448 void uv_sem_post(uv_sem_t* sem) {
449 if (semaphore_signal(*sem))
454 void uv_sem_wait(uv_sem_t* sem) {
458 r = semaphore_wait(*sem);
459 while (r == KERN_ABORTED);
461 if (r != KERN_SUCCESS)
466 int uv_sem_trywait(uv_sem_t* sem) {
467 mach_timespec_t interval;
471 interval.tv_nsec = 0;
473 err = semaphore_timedwait(*sem, interval);
474 if (err == KERN_SUCCESS)
476 if (err == KERN_OPERATION_TIMED_OUT)
480 return UV_EINVAL; /* Satisfy the compiler. */
483 #else /* !(defined(__APPLE__) && defined(__MACH__)) */
485 #if defined(__GLIBC__) && !defined(__UCLIBC__)
487 /* Hack around https://sourceware.org/bugzilla/show_bug.cgi?id=12674
488 * by providing a custom implementation for glibc < 2.21 in terms of other
489 * concurrency primitives.
490 * Refs: https://github.com/nodejs/node/issues/19903 */
492 /* To preserve ABI compatibility, we treat the uv_sem_t as storage for
493 * a pointer to the actual struct we're using underneath. */
495 static uv_once_t glibc_version_check_once = UV_ONCE_INIT;
496 static int platform_needs_custom_semaphore = 0;
498 static void glibc_version_check(void) {
499 const char* version = gnu_get_libc_version();
500 platform_needs_custom_semaphore =
501 version[0] == '2' && version[1] == '.' &&
502 atoi(version + 2) < 21;
505 #elif defined(__MVS__)
507 #define platform_needs_custom_semaphore 1
509 #else /* !defined(__GLIBC__) && !defined(__MVS__) */
511 #define platform_needs_custom_semaphore 0
515 typedef struct uv_semaphore_s {
521 #if (defined(__GLIBC__) && !defined(__UCLIBC__)) || \
522 platform_needs_custom_semaphore
523 STATIC_ASSERT(sizeof(uv_sem_t) >= sizeof(uv_semaphore_t*));
526 static int uv__custom_sem_init(uv_sem_t* sem_, unsigned int value) {
530 sem = uv__malloc(sizeof(*sem));
534 if ((err = uv_mutex_init(&sem->mutex)) != 0) {
539 if ((err = uv_cond_init(&sem->cond)) != 0) {
540 uv_mutex_destroy(&sem->mutex);
546 *(uv_semaphore_t**)sem_ = sem;
551 static void uv__custom_sem_destroy(uv_sem_t* sem_) {
554 sem = *(uv_semaphore_t**)sem_;
555 uv_cond_destroy(&sem->cond);
556 uv_mutex_destroy(&sem->mutex);
561 static void uv__custom_sem_post(uv_sem_t* sem_) {
564 sem = *(uv_semaphore_t**)sem_;
565 uv_mutex_lock(&sem->mutex);
568 uv_cond_signal(&sem->cond);
569 uv_mutex_unlock(&sem->mutex);
573 static void uv__custom_sem_wait(uv_sem_t* sem_) {
576 sem = *(uv_semaphore_t**)sem_;
577 uv_mutex_lock(&sem->mutex);
578 while (sem->value == 0)
579 uv_cond_wait(&sem->cond, &sem->mutex);
581 uv_mutex_unlock(&sem->mutex);
585 static int uv__custom_sem_trywait(uv_sem_t* sem_) {
588 sem = *(uv_semaphore_t**)sem_;
589 if (uv_mutex_trylock(&sem->mutex) != 0)
592 if (sem->value == 0) {
593 uv_mutex_unlock(&sem->mutex);
598 uv_mutex_unlock(&sem->mutex);
603 static int uv__sem_init(uv_sem_t* sem, unsigned int value) {
604 if (sem_init(sem, 0, value))
605 return UV__ERR(errno);
610 static void uv__sem_destroy(uv_sem_t* sem) {
611 if (sem_destroy(sem))
616 static void uv__sem_post(uv_sem_t* sem) {
622 static void uv__sem_wait(uv_sem_t* sem) {
627 while (r == -1 && errno == EINTR);
634 static int uv__sem_trywait(uv_sem_t* sem) {
638 r = sem_trywait(sem);
639 while (r == -1 && errno == EINTR);
650 int uv_sem_init(uv_sem_t* sem, unsigned int value) {
651 #if defined(__GLIBC__) && !defined(__UCLIBC__)
652 uv_once(&glibc_version_check_once, glibc_version_check);
655 if (platform_needs_custom_semaphore)
656 return uv__custom_sem_init(sem, value);
658 return uv__sem_init(sem, value);
662 void uv_sem_destroy(uv_sem_t* sem) {
663 if (platform_needs_custom_semaphore)
664 uv__custom_sem_destroy(sem);
666 uv__sem_destroy(sem);
670 void uv_sem_post(uv_sem_t* sem) {
671 if (platform_needs_custom_semaphore)
672 uv__custom_sem_post(sem);
678 void uv_sem_wait(uv_sem_t* sem) {
679 if (platform_needs_custom_semaphore)
680 uv__custom_sem_wait(sem);
686 int uv_sem_trywait(uv_sem_t* sem) {
687 if (platform_needs_custom_semaphore)
688 return uv__custom_sem_trywait(sem);
690 return uv__sem_trywait(sem);
693 #endif /* defined(__APPLE__) && defined(__MACH__) */
696 #if defined(__APPLE__) && defined(__MACH__) || defined(__MVS__)
698 int uv_cond_init(uv_cond_t* cond) {
699 return UV__ERR(pthread_cond_init(cond, NULL));
702 #else /* !(defined(__APPLE__) && defined(__MACH__)) */
704 int uv_cond_init(uv_cond_t* cond) {
705 pthread_condattr_t attr;
708 err = pthread_condattr_init(&attr);
713 err = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
718 err = pthread_cond_init(cond, &attr);
722 err = pthread_condattr_destroy(&attr);
729 pthread_cond_destroy(cond);
731 pthread_condattr_destroy(&attr);
735 #endif /* defined(__APPLE__) && defined(__MACH__) */
737 void uv_cond_destroy(uv_cond_t* cond) {
738 #if defined(__APPLE__) && defined(__MACH__)
739 /* It has been reported that destroying condition variables that have been
740 * signalled but not waited on can sometimes result in application crashes.
741 * See https://codereview.chromium.org/1323293005.
743 pthread_mutex_t mutex;
747 if (pthread_mutex_init(&mutex, NULL))
750 if (pthread_mutex_lock(&mutex))
756 err = pthread_cond_timedwait_relative_np(cond, &mutex, &ts);
757 if (err != 0 && err != ETIMEDOUT)
760 if (pthread_mutex_unlock(&mutex))
763 if (pthread_mutex_destroy(&mutex))
765 #endif /* defined(__APPLE__) && defined(__MACH__) */
767 if (pthread_cond_destroy(cond))
771 void uv_cond_signal(uv_cond_t* cond) {
772 if (pthread_cond_signal(cond))
776 void uv_cond_broadcast(uv_cond_t* cond) {
777 if (pthread_cond_broadcast(cond))
781 void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
782 if (pthread_cond_wait(cond, mutex))
787 int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
794 #if defined(__APPLE__) && defined(__MACH__)
795 ts.tv_sec = timeout / NANOSEC;
796 ts.tv_nsec = timeout % NANOSEC;
797 r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
800 if (gettimeofday(&tv, NULL))
802 timeout += tv.tv_sec * NANOSEC + tv.tv_usec * 1e3;
804 timeout += uv__hrtime(UV_CLOCK_PRECISE);
806 ts.tv_sec = timeout / NANOSEC;
807 ts.tv_nsec = timeout % NANOSEC;
808 r = pthread_cond_timedwait(cond, mutex, &ts);
820 return UV_EINVAL; /* Satisfy the compiler. */
825 int uv_key_create(uv_key_t* key) {
826 return UV__ERR(pthread_key_create(key, NULL));
830 void uv_key_delete(uv_key_t* key) {
831 if (pthread_key_delete(*key))
836 void* uv_key_get(uv_key_t* key) {
837 return pthread_getspecific(*key);
841 void uv_key_set(uv_key_t* key, void* value) {
842 if (pthread_setspecific(*key, value))