/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

#include <stddef.h> /* NULL */
#include <stdio.h> /* printf */
#include <stdlib.h>
#include <string.h> /* strerror */
#include <errno.h>
#include <assert.h>
#include <unistd.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <limits.h> /* INT_MAX, PATH_MAX */
#include <sys/uio.h> /* writev */

#ifdef __linux__
# include <sys/ioctl.h>
#endif

#ifdef __sun
# include <sys/types.h>
# include <sys/wait.h>
#endif

#ifdef __APPLE__
# include <mach-o/dyld.h> /* _NSGetExecutablePath */
# include <sys/filio.h>
# include <sys/ioctl.h>
#endif

#ifdef __FreeBSD__
# include <sys/sysctl.h>
# include <sys/filio.h>
# include <sys/ioctl.h>
# include <sys/wait.h>
#endif

static void uv__run_pending(uv_loop_t* loop);

static uv_loop_t default_loop_struct;
static uv_loop_t* default_loop_ptr;

/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->base) ==
              sizeof(((struct iovec*) 0)->iov_base));
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->len) ==
              sizeof(((struct iovec*) 0)->iov_len));
STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));

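/* These layout checks are what let the stream code hand a uv_buf_t array
 * straight to writev()/readv() without first copying it into a separate
 * struct iovec array. A minimal illustrative sketch (not part of this file;
 * fd, hdr and body are placeholders):
 *
 *   uv_buf_t bufs[2];
 *   bufs[0] = uv_buf_init(hdr, hdr_len);
 *   bufs[1] = uv_buf_init(body, body_len);
 *   n = writev(fd, (struct iovec*) bufs, 2);
 */
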
uint64_t uv_hrtime(void) {
  return uv__hrtime();
}

void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  assert(!(handle->flags & (UV_CLOSING | UV_CLOSED)));

  handle->flags |= UV_CLOSING;
  handle->close_cb = close_cb;

  switch (handle->type) {
  case UV_NAMED_PIPE: uv__pipe_close((uv_pipe_t*)handle); break;
  case UV_TTY:        uv__stream_close((uv_stream_t*)handle); break;
  case UV_TCP:        uv__tcp_close((uv_tcp_t*)handle); break;
  case UV_UDP:        uv__udp_close((uv_udp_t*)handle); break;
  case UV_PREPARE:    uv__prepare_close((uv_prepare_t*)handle); break;
  case UV_CHECK:      uv__check_close((uv_check_t*)handle); break;
  case UV_IDLE:       uv__idle_close((uv_idle_t*)handle); break;
  case UV_ASYNC:      uv__async_close((uv_async_t*)handle); break;
  case UV_TIMER:      uv__timer_close((uv_timer_t*)handle); break;
  case UV_PROCESS:    uv__process_close((uv_process_t*)handle); break;
  case UV_FS_EVENT:   uv__fs_event_close((uv_fs_event_t*)handle); break;
  case UV_POLL:       uv__poll_close((uv_poll_t*)handle); break;
  case UV_FS_POLL:    uv__fs_poll_close((uv_fs_poll_t*)handle); break;

  case UV_SIGNAL:
    uv__signal_close((uv_signal_t*) handle);
    /* Signal handles may not be closed immediately. The signal code will
     * itself call uv__make_close_pending() whenever appropriate.
     */
    return;

  default:
    assert(0);
  }

  uv__make_close_pending(handle);
}

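/* Typical caller-side pattern (illustrative sketch, not part of this file):
 * the handle memory must stay valid until the close callback has run, so any
 * free() of a heap-allocated handle belongs in that callback.
 *
 *   static void on_close(uv_handle_t* handle) {
 *     free(handle);
 *   }
 *
 *   uv_close((uv_handle_t*) tcp, on_close);
 */
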
void uv__make_close_pending(uv_handle_t* handle) {
  assert(handle->flags & UV_CLOSING);
  assert(!(handle->flags & UV_CLOSED));
  handle->next_closing = handle->loop->closing_handles;
  handle->loop->closing_handles = handle;
}

static void uv__finish_close(uv_handle_t* handle) {
  assert(!uv__is_active(handle));
  assert(handle->flags & UV_CLOSING);
  assert(!(handle->flags & UV_CLOSED));
  handle->flags |= UV_CLOSED;

  switch (handle->type) {
  case UV_NAMED_PIPE:
  case UV_TCP:
  case UV_TTY:
    uv__stream_destroy((uv_stream_t*)handle);
    break;
  case UV_UDP:
    uv__udp_finish_close((uv_udp_t*)handle);
    break;
  default:
    /* The remaining handle types need no per-type teardown here. */
    break;
  }

  uv__handle_unref(handle);
  QUEUE_REMOVE(&handle->handle_queue);

  if (handle->close_cb) {
    handle->close_cb(handle);
  }
}

static void uv__run_closing_handles(uv_loop_t* loop) {
  uv_handle_t* p;
  uv_handle_t* q;

  p = loop->closing_handles;
  loop->closing_handles = NULL;

  while (p) {
    q = p->next_closing;
    uv__finish_close(p);
    p = q;
  }
}

int uv_is_closing(const uv_handle_t* handle) {
  return handle->flags & (UV_CLOSING | UV_CLOSED);
}

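/* Guarding against a double uv_close() is the common use of this predicate;
 * an illustrative sketch only (`handle` is a placeholder):
 *
 *   if (!uv_is_closing(handle))
 *     uv_close(handle, on_close);
 */
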
uv_loop_t* uv_default_loop(void) {
  if (default_loop_ptr)
    return default_loop_ptr;

  if (uv__loop_init(&default_loop_struct, /* default_loop? */ 1))
    return NULL;

  return (default_loop_ptr = &default_loop_struct);
}

uv_loop_t* uv_loop_new(void) {
  uv_loop_t* loop;

  if ((loop = malloc(sizeof(*loop))) == NULL)
    return NULL;

  if (uv__loop_init(loop, /* default_loop? */ 0)) {
    free(loop);
    return NULL;
  }

  return loop;
}

void uv_loop_delete(uv_loop_t* loop) {
  uv__loop_delete(loop);
#ifndef NDEBUG
  memset(loop, -1, sizeof *loop);
#endif
  if (loop == default_loop_ptr)
    default_loop_ptr = NULL;
  else
    free(loop);
}

int uv_backend_fd(const uv_loop_t* loop) {
  return loop->backend_fd;
}

int uv_backend_timeout(const uv_loop_t* loop) {
  if (loop->stop_flag != 0)
    return 0;

  if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop))
    return 0;

  if (!QUEUE_EMPTY(&loop->idle_handles))
    return 0;

  if (loop->closing_handles)
    return 0;

  return uv__next_timeout(loop);
}

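/* Together with uv_backend_fd(), this is what lets an application embed the
 * loop inside another poller. A minimal sketch of that pattern, assuming a
 * POSIX poll()-based outer loop (illustrative only):
 *
 *   struct pollfd pfd;
 *   pfd.fd = uv_backend_fd(loop);
 *   pfd.events = POLLIN;
 *   while (uv_run(loop, UV_RUN_NOWAIT) != 0)
 *     poll(&pfd, 1, uv_backend_timeout(loop));
 */
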
static int uv__loop_alive(uv_loop_t* loop) {
  return uv__has_active_handles(loop) ||
         uv__has_active_reqs(loop) ||
         loop->closing_handles != NULL;
}

int uv_run(uv_loop_t* loop, uv_run_mode mode) {
  int timeout;
  int r;

  r = uv__loop_alive(loop);
  while (r != 0 && loop->stop_flag == 0) {
    uv__update_time(loop);
    uv__run_timers(loop);
    uv__run_idle(loop);
    uv__run_prepare(loop);
    uv__run_pending(loop);

    timeout = 0;
    if ((mode & UV_RUN_NOWAIT) == 0)
      timeout = uv_backend_timeout(loop);

    uv__io_poll(loop, timeout);
    uv__run_check(loop);
    uv__run_closing_handles(loop);
    r = uv__loop_alive(loop);

    if (mode & (UV_RUN_ONCE | UV_RUN_NOWAIT))
      break;
  }

  /* The if statement lets gcc compile it down to a conditional store, which
   * avoids dirtying a cache line when the flag is already clear.
   */
  if (loop->stop_flag != 0)
    loop->stop_flag = 0;

  return r;
}

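/* Typical embedder usage (illustrative sketch only; the timer API lives in
 * other translation units and on_timeout is user-supplied code):
 *
 *   uv_loop_t* loop = uv_default_loop();
 *   uv_timer_t timer;
 *
 *   uv_timer_init(loop, &timer);
 *   uv_timer_start(&timer, on_timeout, 1000, 0);
 *   uv_run(loop, UV_RUN_DEFAULT);   // returns once no active handles remain
 */
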
void uv_update_time(uv_loop_t* loop) {
  uv__update_time(loop);
}

uint64_t uv_now(uv_loop_t* loop) {
  return loop->time;
}

int uv_is_active(const uv_handle_t* handle) {
  return uv__is_active(handle);
}

/* Open a socket in non-blocking close-on-exec mode, atomically if possible. */
int uv__socket(int domain, int type, int protocol) {
  int sockfd;

#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
  sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);

  if (sockfd != -1)
    goto out;

  /* An older kernel may reject the flags at run time; fall back on EINVAL. */
  if (errno != EINVAL)
    goto out;
#endif

  sockfd = socket(domain, type, protocol);

  if (sockfd == -1)
    goto out;

  if (uv__nonblock(sockfd, 1) || uv__cloexec(sockfd, 1)) {
    close(sockfd);
    sockfd = -1;
  }

#if defined(SO_NOSIGPIPE)
  {
    int on = 1;
    setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
  }
#endif

out:
  return sockfd;
}

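/* Internal callers such as the TCP code use this helper roughly as follows
 * (illustrative sketch only):
 *
 *   int fd = uv__socket(AF_INET, SOCK_STREAM, 0);
 *   if (fd < 0)
 *     return -1;   // caller reports errno to the user
 */
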
int uv__accept(int sockfd) {
  int peerfd;

  assert(sockfd >= 0);

  while (1) {
#if defined(__linux__)
    static int no_accept4;

    if (no_accept4)
      goto skip;

    peerfd = uv__accept4(sockfd, NULL, NULL,
                         UV__SOCK_NONBLOCK|UV__SOCK_CLOEXEC);
    if (peerfd != -1)
      break;
    if (errno == EINTR)
      continue;
    if (errno != ENOSYS)
      break;
    no_accept4 = 1;
skip:
#endif

    peerfd = accept(sockfd, NULL, NULL);
    if (peerfd == -1) {
      if (errno == EINTR)
        continue;
      break;
    }

    if (uv__cloexec(peerfd, 1) || uv__nonblock(peerfd, 1)) {
      close(peerfd);
      peerfd = -1;
    }

    break;
  }

  return peerfd;
}

#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)

int uv__nonblock(int fd, int set) {
  int r;

  do
    r = ioctl(fd, FIONBIO, &set);
  while (r == -1 && errno == EINTR);

  return r;
}


int uv__cloexec(int fd, int set) {
  int r;

  do
    r = ioctl(fd, set ? FIOCLEX : FIONCLEX);
  while (r == -1 && errno == EINTR);

  return r;
}

#else /* !(defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)) */

int uv__nonblock(int fd, int set) {
  int flags;
  int r;

  do
    r = fcntl(fd, F_GETFL);
  while (r == -1 && errno == EINTR);

  if (r == -1)
    return -1;

  /* Bail out now if already set/clear. */
  if (!!(r & O_NONBLOCK) == !!set)
    return 0;

  if (set)
    flags = r | O_NONBLOCK;
  else
    flags = r & ~O_NONBLOCK;

  do
    r = fcntl(fd, F_SETFL, flags);
  while (r == -1 && errno == EINTR);

  return r;
}


int uv__cloexec(int fd, int set) {
  int flags;
  int r;

  do
    r = fcntl(fd, F_GETFD);
  while (r == -1 && errno == EINTR);

  if (r == -1)
    return -1;

  /* Bail out now if already set/clear. */
  if (!!(r & FD_CLOEXEC) == !!set)
    return 0;

  if (set)
    flags = r | FD_CLOEXEC;
  else
    flags = r & ~FD_CLOEXEC;

  do
    r = fcntl(fd, F_SETFD, flags);
  while (r == -1 && errno == EINTR);

  return r;
}

#endif /* defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) */

/* This function is not execve-safe: there is a race window between the call
 * to dup() and the fcntl() call that sets FD_CLOEXEC.
 */
int uv__dup(int fd) {
  fd = dup(fd);

  if (fd == -1)
    return -1;

  if (uv__cloexec(fd, 1)) {
    SAVE_ERRNO(close(fd));
    return -1;
  }

  return fd;
}

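/* On platforms that have it, F_DUPFD_CLOEXEC closes that race by making the
 * duplicate close-on-exec atomically. A hedged sketch of that alternative
 * (not what this file does; availability must be probed at build time):
 *
 *   #if defined(F_DUPFD_CLOEXEC)
 *     newfd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
 *   #endif
 */
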
uv_err_t uv_cwd(char* buffer, size_t size) {
  if (!buffer || !size) {
    return uv__new_artificial_error(UV_EINVAL);
  }

  if (getcwd(buffer, size)) {
    return uv_ok_;
  } else {
    return uv__new_sys_error(errno);
  }
}

uv_err_t uv_chdir(const char* dir) {
  if (chdir(dir) == 0) {
    return uv_ok_;
  } else {
    return uv__new_sys_error(errno);
  }
}

void uv_disable_stdio_inheritance(void) {
  int fd;

  /* Set the CLOEXEC flag on all open descriptors. Unconditionally try the
   * first 16 file descriptors. After that, bail out after the first error.
   */
  for (fd = 0; ; fd++)
    if (uv__cloexec(fd, 1) && fd > 15)
      break;
}

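/* Intended to be called once, as early as possible in the program, before
 * the application opens descriptors that should not leak into spawned
 * children (illustrative sketch only):
 *
 *   int main(int argc, char** argv) {
 *     uv_disable_stdio_inheritance();
 *     ...
 *   }
 */
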
static void uv__run_pending(uv_loop_t* loop) {
  QUEUE* q;
  uv__io_t* w;

  while (!QUEUE_EMPTY(&loop->pending_queue)) {
    q = QUEUE_HEAD(&loop->pending_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, pending_queue);
    w->cb(loop, w, UV__POLLOUT);
  }
}

static unsigned int next_power_of_two(unsigned int val) {
  val -= 1;
  val |= val >> 1;
  val |= val >> 2;
  val |= val >> 4;
  val |= val >> 8;
  val |= val >> 16;
  val += 1;
  return val;
}

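/* For example, next_power_of_two(33) returns 64 and next_power_of_two(64)
 * returns 64, so maybe_resize() below grows the watchers array geometrically
 * rather than one slot at a time.
 */
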
static void maybe_resize(uv_loop_t* loop, unsigned int len) {
  uv__io_t** watchers;
  unsigned int nwatchers;
  unsigned int i;

  if (len <= loop->nwatchers)
    return;

  nwatchers = next_power_of_two(len);
  watchers = realloc(loop->watchers, nwatchers * sizeof(loop->watchers[0]));

  if (watchers == NULL)
    abort();

  for (i = loop->nwatchers; i < nwatchers; i++)
    watchers[i] = NULL;

  loop->watchers = watchers;
  loop->nwatchers = nwatchers;
}

void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
  assert(cb != NULL);
  assert(fd >= -1);
  QUEUE_INIT(&w->pending_queue);
  QUEUE_INIT(&w->watcher_queue);
  w->cb = cb;
  w->fd = fd;
  w->events = 0;
  w->pevents = 0;

#if defined(UV_HAVE_KQUEUE)
  w->rcount = 0;
  w->wcount = 0;
#endif /* defined(UV_HAVE_KQUEUE) */
}

void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(UV__POLLIN | UV__POLLOUT)));
  assert(0 != events);
  assert(w->fd >= 0);
  assert(w->fd < INT_MAX);

  w->pevents |= events;
  maybe_resize(loop, w->fd + 1);

#if !defined(__sun)
  /* The event ports backend needs to rearm all file descriptors on each and
   * every tick of the event loop, but the other backends allow us to
   * short-circuit here if the event mask is unchanged.
   */
  if (w->events == w->pevents) {
    if (w->events == 0 && !QUEUE_EMPTY(&w->watcher_queue)) {
      QUEUE_REMOVE(&w->watcher_queue);
      QUEUE_INIT(&w->watcher_queue);
    }
    return;
  }
#endif

  if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);

  if (loop->watchers[w->fd] == NULL) {
    loop->watchers[w->fd] = w;
    loop->nfds++;
  }
}

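/* How the rest of the tree drives this watcher API, as a hedged sketch
 * (simplified; real callers embed the uv__io_t inside a handle struct):
 *
 *   static void my_io_cb(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
 *     if (events & UV__POLLIN)
 *       ...;   // fd is readable
 *   }
 *
 *   uv__io_init(&w, my_io_cb, fd);
 *   uv__io_start(loop, &w, UV__POLLIN);
 *   // later: uv__io_stop(loop, &w, UV__POLLIN);
 */
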
void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(UV__POLLIN | UV__POLLOUT)));
  assert(0 != events);

  if (w->fd == -1)
    return;

  assert(w->fd >= 0);

  /* Happens when uv__io_stop() is called on a handle that was never started. */
  if ((unsigned) w->fd >= loop->nwatchers)
    return;

  w->pevents &= ~events;

  if (w->pevents == 0) {
    QUEUE_REMOVE(&w->watcher_queue);
    QUEUE_INIT(&w->watcher_queue);

    if (loop->watchers[w->fd] != NULL) {
      assert(loop->watchers[w->fd] == w);
      assert(loop->nfds > 0);
      loop->watchers[w->fd] = NULL;
      loop->nfds--;
      w->events = 0;
    }
  }
  else if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
}

void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
  uv__io_stop(loop, w, UV__POLLIN | UV__POLLOUT);
  QUEUE_REMOVE(&w->pending_queue);
}

void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
  if (QUEUE_EMPTY(&w->pending_queue))
    QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue);
}

int uv__io_active(const uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(UV__POLLIN | UV__POLLOUT)));
  assert(0 != events);
  return 0 != (w->pevents & events);
}