/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#if defined(__MINGW64_VERSION_MAJOR)
/* MemoryBarrier expands to __mm_mfence in some cases (x86+sse2), which may
 * require this header in some versions of mingw64. */
#include <intrin.h>
#endif
35 static void uv__once_inner(uv_once_t* guard, void (*callback)(void)) {
37 HANDLE existing_event, created_event;
39 created_event = CreateEvent(NULL, 1, 0, NULL);
40 if (created_event == 0) {
41 /* Could fail in a low-memory situation? */
42 uv_fatal_error(GetLastError(), "CreateEvent");
45 existing_event = InterlockedCompareExchangePointer(&guard->event,
49 if (existing_event == NULL) {
53 result = SetEvent(created_event);
58 /* We lost the race. Destroy the event we created and wait for the existing
59 * one to become signaled. */
60 CloseHandle(created_event);
61 result = WaitForSingleObject(existing_event, INFINITE);
62 assert(result == WAIT_OBJECT_0);
67 void uv_once(uv_once_t* guard, void (*callback)(void)) {
68 /* Fast case - avoid WaitForSingleObject. */
73 uv__once_inner(guard, callback);
77 /* Verify that uv_thread_t can be stored in a TLS slot. */
78 STATIC_ASSERT(sizeof(uv_thread_t) <= sizeof(void*));
80 static uv_key_t uv__current_thread_key;
81 static uv_once_t uv__current_thread_init_guard = UV_ONCE_INIT;
84 static void uv__init_current_thread_key(void) {
85 if (uv_key_create(&uv__current_thread_key))
91 void (*entry)(void* arg);
97 static UINT __stdcall uv__thread_start(void* arg) {
98 struct thread_ctx *ctx_p;
99 struct thread_ctx ctx;
105 uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key);
106 uv_key_set(&uv__current_thread_key, ctx.self);
114 int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
115 uv_thread_options_t params;
116 params.flags = UV_THREAD_NO_FLAGS;
117 return uv_thread_create_ex(tid, ¶ms, entry, arg);
120 int uv_thread_create_ex(uv_thread_t* tid,
121 const uv_thread_options_t* params,
122 void (*entry)(void *arg),
124 struct thread_ctx* ctx;
132 params->flags & UV_THREAD_HAS_STACK_SIZE ? params->stack_size : 0;
134 if (stack_size != 0) {
135 GetNativeSystemInfo(&sysinfo);
136 pagesize = (size_t)sysinfo.dwPageSize;
137 /* Round up to the nearest page boundary. */
138 stack_size = (stack_size + pagesize - 1) &~ (pagesize - 1);
140 if ((unsigned)stack_size != stack_size)
144 ctx = uv__malloc(sizeof(*ctx));
151 /* Create the thread in suspended state so we have a chance to pass
152 * its own creation handle to it */
153 thread = (HANDLE) _beginthreadex(NULL,
154 (unsigned)stack_size,
159 if (thread == NULL) {
166 ResumeThread(thread);
184 uv_thread_t uv_thread_self(void) {
185 uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key);
186 uv_thread_t key = uv_key_get(&uv__current_thread_key);
188 /* If the thread wasn't started by uv_thread_create (such as the main
189 * thread), we assign an id to it now. */
190 if (!DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
191 GetCurrentProcess(), &key, 0,
192 FALSE, DUPLICATE_SAME_ACCESS)) {
193 uv_fatal_error(GetLastError(), "DuplicateHandle");
195 uv_key_set(&uv__current_thread_key, key);
201 int uv_thread_join(uv_thread_t *tid) {
202 if (WaitForSingleObject(*tid, INFINITE))
203 return uv_translate_sys_error(GetLastError());
207 MemoryBarrier(); /* For feature parity with pthread_join(). */
213 int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
218 int uv_mutex_init(uv_mutex_t* mutex) {
219 InitializeCriticalSection(mutex);
224 int uv_mutex_init_recursive(uv_mutex_t* mutex) {
225 return uv_mutex_init(mutex);
229 void uv_mutex_destroy(uv_mutex_t* mutex) {
230 DeleteCriticalSection(mutex);
234 void uv_mutex_lock(uv_mutex_t* mutex) {
235 EnterCriticalSection(mutex);
239 int uv_mutex_trylock(uv_mutex_t* mutex) {
240 if (TryEnterCriticalSection(mutex))
247 void uv_mutex_unlock(uv_mutex_t* mutex) {
248 LeaveCriticalSection(mutex);
251 /* Ensure that the ABI for this type remains stable in v1.x */
253 STATIC_ASSERT(sizeof(uv_rwlock_t) == 80);
255 STATIC_ASSERT(sizeof(uv_rwlock_t) == 48);
258 int uv_rwlock_init(uv_rwlock_t* rwlock) {
259 memset(rwlock, 0, sizeof(*rwlock));
260 InitializeSRWLock(&rwlock->read_write_lock_);
266 void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
267 /* SRWLock does not need explicit destruction so long as there are no waiting threads
268 See: https://docs.microsoft.com/windows/win32/api/synchapi/nf-synchapi-initializesrwlock#remarks */
272 void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
273 AcquireSRWLockShared(&rwlock->read_write_lock_);
277 int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
278 if (!TryAcquireSRWLockShared(&rwlock->read_write_lock_))
285 void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
286 ReleaseSRWLockShared(&rwlock->read_write_lock_);
290 void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
291 AcquireSRWLockExclusive(&rwlock->read_write_lock_);
295 int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
296 if (!TryAcquireSRWLockExclusive(&rwlock->read_write_lock_))
303 void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
304 ReleaseSRWLockExclusive(&rwlock->read_write_lock_);
308 int uv_sem_init(uv_sem_t* sem, unsigned int value) {
309 *sem = CreateSemaphore(NULL, value, INT_MAX, NULL);
311 return uv_translate_sys_error(GetLastError());
317 void uv_sem_destroy(uv_sem_t* sem) {
318 if (!CloseHandle(*sem))
323 void uv_sem_post(uv_sem_t* sem) {
324 if (!ReleaseSemaphore(*sem, 1, NULL))
329 void uv_sem_wait(uv_sem_t* sem) {
330 if (WaitForSingleObject(*sem, INFINITE) != WAIT_OBJECT_0)
335 int uv_sem_trywait(uv_sem_t* sem) {
336 DWORD r = WaitForSingleObject(*sem, 0);
338 if (r == WAIT_OBJECT_0)
341 if (r == WAIT_TIMEOUT)
345 return -1; /* Satisfy the compiler. */
349 int uv_cond_init(uv_cond_t* cond) {
350 InitializeConditionVariable(&cond->cond_var);
355 void uv_cond_destroy(uv_cond_t* cond) {
361 void uv_cond_signal(uv_cond_t* cond) {
362 WakeConditionVariable(&cond->cond_var);
366 void uv_cond_broadcast(uv_cond_t* cond) {
367 WakeAllConditionVariable(&cond->cond_var);
371 void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
372 if (!SleepConditionVariableCS(&cond->cond_var, mutex, INFINITE))
376 int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
377 if (SleepConditionVariableCS(&cond->cond_var, mutex, (DWORD)(timeout / 1e6)))
379 if (GetLastError() != ERROR_TIMEOUT)
385 int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
391 err = uv_mutex_init(&barrier->mutex);
395 err = uv_sem_init(&barrier->turnstile1, 0);
399 err = uv_sem_init(&barrier->turnstile2, 1);
406 uv_sem_destroy(&barrier->turnstile1);
408 uv_mutex_destroy(&barrier->mutex);
414 void uv_barrier_destroy(uv_barrier_t* barrier) {
415 uv_sem_destroy(&barrier->turnstile2);
416 uv_sem_destroy(&barrier->turnstile1);
417 uv_mutex_destroy(&barrier->mutex);
421 int uv_barrier_wait(uv_barrier_t* barrier) {
424 uv_mutex_lock(&barrier->mutex);
425 if (++barrier->count == barrier->n) {
426 uv_sem_wait(&barrier->turnstile2);
427 uv_sem_post(&barrier->turnstile1);
429 uv_mutex_unlock(&barrier->mutex);
431 uv_sem_wait(&barrier->turnstile1);
432 uv_sem_post(&barrier->turnstile1);
434 uv_mutex_lock(&barrier->mutex);
435 serial_thread = (--barrier->count == 0);
437 uv_sem_wait(&barrier->turnstile1);
438 uv_sem_post(&barrier->turnstile2);
440 uv_mutex_unlock(&barrier->mutex);
442 uv_sem_wait(&barrier->turnstile2);
443 uv_sem_post(&barrier->turnstile2);
444 return serial_thread;
448 int uv_key_create(uv_key_t* key) {
449 key->tls_index = TlsAlloc();
450 if (key->tls_index == TLS_OUT_OF_INDEXES)
456 void uv_key_delete(uv_key_t* key) {
457 if (TlsFree(key->tls_index) == FALSE)
459 key->tls_index = TLS_OUT_OF_INDEXES;
463 void* uv_key_get(uv_key_t* key) {
466 value = TlsGetValue(key->tls_index);
468 if (GetLastError() != ERROR_SUCCESS)
475 void uv_key_set(uv_key_t* key, void* value) {
476 if (TlsSetValue(key->tls_index, value) == FALSE)