/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

#include <stddef.h> /* NULL */
#include <stdio.h> /* printf */
#include <stdlib.h>
#include <string.h> /* strerror */
#include <errno.h>
#include <assert.h>

#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <limits.h> /* INT_MAX, PATH_MAX */
#include <sys/uio.h> /* writev */

#ifdef __linux__
# include <sys/ioctl.h>
#endif

#ifdef __sun
# include <sys/types.h>
# include <sys/wait.h>
#endif

#ifdef __APPLE__
# include <mach-o/dyld.h> /* _NSGetExecutablePath */
# include <sys/filio.h>
# include <sys/ioctl.h>
#endif

#ifdef __FreeBSD__
# include <sys/sysctl.h>
# include <sys/filio.h>
# include <sys/ioctl.h>
# include <sys/wait.h>
#endif

static void uv__run_pending(uv_loop_t* loop);

/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->base) ==
              sizeof(((struct iovec*) 0)->iov_base));
STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->len) ==
              sizeof(((struct iovec*) 0)->iov_len));
STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));


uint64_t uv_hrtime(void) {
  return uv__hrtime(UV_CLOCK_PRECISE);
}
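

/* Request that the handle be closed. The handle is flagged as closing right
 * away, but close_cb is deferred: it runs from uv__run_closing_handles()
 * after the handle's type-specific resources have been released.
 */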
void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  assert(!(handle->flags & (UV_CLOSING | UV_CLOSED)));

  handle->flags |= UV_CLOSING;
  handle->close_cb = close_cb;

  switch (handle->type) {
  case UV_NAMED_PIPE: uv__pipe_close((uv_pipe_t*)handle); break;
  case UV_TTY: uv__stream_close((uv_stream_t*)handle); break;
  case UV_TCP: uv__tcp_close((uv_tcp_t*)handle); break;
  case UV_UDP: uv__udp_close((uv_udp_t*)handle); break;
  case UV_PREPARE: uv__prepare_close((uv_prepare_t*)handle); break;
  case UV_CHECK: uv__check_close((uv_check_t*)handle); break;
  case UV_IDLE: uv__idle_close((uv_idle_t*)handle); break;
  case UV_ASYNC: uv__async_close((uv_async_t*)handle); break;
  case UV_TIMER: uv__timer_close((uv_timer_t*)handle); break;
  case UV_PROCESS: uv__process_close((uv_process_t*)handle); break;
  case UV_FS_EVENT: uv__fs_event_close((uv_fs_event_t*)handle); break;
  case UV_POLL: uv__poll_close((uv_poll_t*)handle); break;
  case UV_FS_POLL: uv__fs_poll_close((uv_fs_poll_t*)handle); break;

  case UV_SIGNAL:
    uv__signal_close((uv_signal_t*) handle);
    /* Signal handles may not be closed immediately. The signal code will
     * itself call uv__make_close_pending() whenever appropriate.
     */
    return;

  default:
    assert(0);
  }

  uv__make_close_pending(handle);
}
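

/* Link the handle into the loop's closing_handles list; the close is then
 * finished by uv__run_closing_handles() on the current loop iteration.
 */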
void uv__make_close_pending(uv_handle_t* handle) {
  assert(handle->flags & UV_CLOSING);
  assert(!(handle->flags & UV_CLOSED));
  handle->next_closing = handle->loop->closing_handles;
  handle->loop->closing_handles = handle;
}


static void uv__finish_close(uv_handle_t* handle) {
  /* Note: while the handle is in the UV_CLOSING state now, it's still possible
   * for it to be active in the sense that uv__is_active() returns true.
   * A good example is when the user calls uv_shutdown(), immediately followed
   * by uv_close(). The handle is considered active at this point because the
   * completion of the shutdown req is still pending.
   */
  assert(handle->flags & UV_CLOSING);
  assert(!(handle->flags & UV_CLOSED));
  handle->flags |= UV_CLOSED;

  switch (handle->type) {
    case UV_PREPARE: case UV_CHECK: case UV_IDLE: case UV_ASYNC:
    case UV_TIMER: case UV_PROCESS: case UV_FS_EVENT: case UV_FS_POLL:
    case UV_POLL: case UV_SIGNAL:
      break;

    case UV_NAMED_PIPE: case UV_TCP: case UV_TTY:
      uv__stream_destroy((uv_stream_t*)handle);
      break;

    case UV_UDP:
      uv__udp_finish_close((uv_udp_t*)handle);
      break;

    default:
      assert(0);
      break;
  }

  uv__handle_unref(handle);
  QUEUE_REMOVE(&handle->handle_queue);

  if (handle->close_cb) {
    handle->close_cb(handle);
  }
}
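

/* Finish every handle queued by uv__make_close_pending(), invoking each
 * handle's close callback.
 */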
static void uv__run_closing_handles(uv_loop_t* loop) {
  uv_handle_t* p;
  uv_handle_t* q;

  p = loop->closing_handles;
  loop->closing_handles = NULL;

  while (p) {
    q = p->next_closing;
    uv__finish_close(p);
    p = q;
  }
}


int uv_is_closing(const uv_handle_t* handle) {
  return uv__is_closing(handle);
}


int uv_backend_fd(const uv_loop_t* loop) {
  return loop->backend_fd;
}
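

/* Timeout, in milliseconds, that uv__io_poll() may block for: 0 when the
 * loop must not block (stopping, nothing active, or idle/closing handles
 * pending), otherwise the time until the next timer expires.
 */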
int uv_backend_timeout(const uv_loop_t* loop) {
  if (loop->stop_flag != 0)
    return 0;

  if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop))
    return 0;

  if (!QUEUE_EMPTY(&loop->idle_handles))
    return 0;

  if (loop->closing_handles)
    return 0;

  return uv__next_timeout(loop);
}


static int uv__loop_alive(const uv_loop_t* loop) {
  return uv__has_active_handles(loop) ||
         uv__has_active_reqs(loop) ||
         loop->closing_handles != NULL;
}


int uv_loop_alive(const uv_loop_t* loop) {
  return uv__loop_alive(loop);
}
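

/* Drive the event loop. Each iteration updates the cached time, runs timers,
 * idle, prepare and pending callbacks, polls for I/O, then runs check
 * callbacks and closing handles. A typical embedder call is
 * uv_run(loop, UV_RUN_DEFAULT), which only returns once no handles or
 * requests are left alive (or after uv_stop()).
 */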
int uv_run(uv_loop_t* loop, uv_run_mode mode) {
  int timeout;
  int r;

  r = uv__loop_alive(loop);
  if (!r)
    uv__update_time(loop);

  while (r != 0 && loop->stop_flag == 0) {
    UV_TICK_START(loop, mode);

    uv__update_time(loop);
    uv__run_timers(loop);
    uv__run_idle(loop);
    uv__run_prepare(loop);
    uv__run_pending(loop);

    timeout = 0;
    if ((mode & UV_RUN_NOWAIT) == 0)
      timeout = uv_backend_timeout(loop);

    uv__io_poll(loop, timeout);
    uv__run_check(loop);
    uv__run_closing_handles(loop);

    if (mode == UV_RUN_ONCE) {
      /* UV_RUN_ONCE implies forward progress: at least one callback must have
       * been invoked when it returns. uv__io_poll() can return without doing
       * I/O (meaning: no callbacks) when its timeout expires - which means we
       * have pending timers that satisfy the forward progress constraint.
       *
       * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
       * the check.
       */
      uv__update_time(loop);
      uv__run_timers(loop);
    }

    r = uv__loop_alive(loop);
    UV_TICK_STOP(loop, mode);

    if (mode & (UV_RUN_ONCE | UV_RUN_NOWAIT))
      break;
  }

  /* The if statement lets gcc compile it to a conditional store. Avoids
   * dirtying a cache line.
   */
  if (loop->stop_flag != 0)
    loop->stop_flag = 0;

  return r;
}


void uv_update_time(uv_loop_t* loop) {
  uv__update_time(loop);
}


int uv_is_active(const uv_handle_t* handle) {
  return uv__is_active(handle);
}


/* Open a socket in non-blocking close-on-exec mode, atomically if possible. */
int uv__socket(int domain, int type, int protocol) {
  int sockfd;
  int err;

#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
  sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);
  if (sockfd != -1)
    return sockfd;

  if (errno != EINVAL)
    return -errno;
#endif

  sockfd = socket(domain, type, protocol);
  if (sockfd == -1)
    return -errno;

  err = uv__nonblock(sockfd, 1);
  if (err == 0)
    err = uv__cloexec(sockfd, 1);

  if (err) {
    uv__close(sockfd);
    return err;
  }

#if defined(SO_NOSIGPIPE)
  {
    int on = 1;
    setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
  }
#endif

  return sockfd;
}
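

/* Accept a pending connection and return a descriptor that already has the
 * non-blocking and close-on-exec flags set; a negated errno value is
 * returned on failure.
 */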
int uv__accept(int sockfd) {
  int peerfd;
  int err;

  assert(sockfd >= 0);

  while (1) {
#if defined(__linux__)
    static int no_accept4;

    if (no_accept4)
      goto skip;

    /* accept4() sets the non-blocking and close-on-exec flags atomically. */
    peerfd = uv__accept4(sockfd,
                         NULL,
                         NULL,
                         UV__SOCK_NONBLOCK|UV__SOCK_CLOEXEC);
    if (peerfd != -1)
      return peerfd;

    if (errno == EINTR)
      continue;

    if (errno != ENOSYS)
      return -errno;

    /* The kernel lacks accept4(); remember that and fall back to accept(). */
    no_accept4 = 1;
skip:
#endif

    peerfd = accept(sockfd, NULL, NULL);
    if (peerfd == -1) {
      if (errno == EINTR)
        continue;
      return -errno;
    }

    err = uv__cloexec(peerfd, 1);
    if (err == 0)
      err = uv__nonblock(peerfd, 1);

    if (err) {
      uv__close(peerfd);
      return err;
    }

    return peerfd;
  }
}
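

/* close() wrapper that preserves errno on failure and reports EINTR as
 * EINPROGRESS for cross-platform consistency.
 */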
int uv__close(int fd) {
  int saved_errno;
  int rc;

  assert(fd > -1);  /* Catch uninitialized io_watcher.fd bugs. */
  assert(fd > STDERR_FILENO);  /* Catch stdio close bugs. */

  saved_errno = errno;
  rc = close(fd);
  if (rc == -1) {
    rc = -errno;
    if (rc == -EINTR)
      rc = -EINPROGRESS;  /* For platform/libc consistency. */
    errno = saved_errno;
  }

  return rc;
}
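

/* On Linux, FreeBSD and OS X a single FIONBIO/FIOCLEX ioctl() toggles the
 * flag; other platforms fall back to the two-syscall fcntl() sequence below.
 */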
#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)

int uv__nonblock(int fd, int set) {
  int r;

  do
    r = ioctl(fd, FIONBIO, &set);
  while (r == -1 && errno == EINTR);

  if (r)
    return -errno;

  return 0;
}


int uv__cloexec(int fd, int set) {
  int r;

  do
    r = ioctl(fd, set ? FIOCLEX : FIONCLEX);
  while (r == -1 && errno == EINTR);

  if (r)
    return -errno;

  return 0;
}

#else /* !(defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)) */

int uv__nonblock(int fd, int set) {
  int flags;
  int r;

  do
    r = fcntl(fd, F_GETFL);
  while (r == -1 && errno == EINTR);

  if (r == -1)
    return -errno;

  /* Bail out now if already set/clear. */
  if (!!(r & O_NONBLOCK) == !!set)
    return 0;

  if (set)
    flags = r | O_NONBLOCK;
  else
    flags = r & ~O_NONBLOCK;

  do
    r = fcntl(fd, F_SETFL, flags);
  while (r == -1 && errno == EINTR);

  if (r)
    return -errno;

  return 0;
}


int uv__cloexec(int fd, int set) {
  int flags;
  int r;

  do
    r = fcntl(fd, F_GETFD);
  while (r == -1 && errno == EINTR);

  if (r == -1)
    return -errno;

  /* Bail out now if already set/clear. */
  if (!!(r & FD_CLOEXEC) == !!set)
    return 0;

  if (set)
    flags = r | FD_CLOEXEC;
  else
    flags = r & ~FD_CLOEXEC;

  do
    r = fcntl(fd, F_SETFD, flags);
  while (r == -1 && errno == EINTR);

  if (r)
    return -errno;

  return 0;
}

#endif /* defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) */


/* This function is not execve-safe, there is a race window
 * between the call to dup() and fcntl(FD_CLOEXEC).
 */
int uv__dup(int fd) {
  int err;

  fd = dup(fd);

  if (fd == -1)
    return -errno;

  err = uv__cloexec(fd, 1);
  if (err) {
    uv__close(fd);
    return err;
  }

  return fd;
}
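

/* recvmsg() wrapper that marks any file descriptors received as SCM_RIGHTS
 * ancillary data close-on-exec, using MSG_CMSG_CLOEXEC on Linux kernels that
 * support it and falling back to uv__cloexec() otherwise.
 */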
ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
  struct cmsghdr* cmsg;
  ssize_t rc;
  int* pfd;
  int* end;
#if defined(__linux__)
  static int no_msg_cmsg_cloexec;
  if (no_msg_cmsg_cloexec == 0) {
    rc = recvmsg(fd, msg, flags | 0x40000000);  /* MSG_CMSG_CLOEXEC */
    if (rc != -1)
      return rc;
    if (errno != EINVAL)
      return -errno;
    rc = recvmsg(fd, msg, flags);
    if (rc == -1)
      return -errno;
    no_msg_cmsg_cloexec = 1;
  } else {
    rc = recvmsg(fd, msg, flags);
  }
#else
  rc = recvmsg(fd, msg, flags);
#endif
  if (rc == -1)
    return -errno;
  if (msg->msg_controllen == 0)
    return rc;
  for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg))
    if (cmsg->cmsg_type == SCM_RIGHTS)
      for (pfd = (int*) CMSG_DATA(cmsg),
           end = (int*) ((char*) cmsg + cmsg->cmsg_len);
           pfd < end;
           pfd += 1)
        uv__cloexec(*pfd, 1);
  return rc;
}


int uv_cwd(char* buffer, size_t size) {
  if (buffer == NULL)
    return -EINVAL;

  if (size == 0)
    return -EINVAL;

  if (getcwd(buffer, size) == NULL)
    return -errno;

  return 0;
}


int uv_chdir(const char* dir) {
  if (chdir(dir))
    return -errno;

  return 0;
}


void uv_disable_stdio_inheritance(void) {
  int fd;

  /* Set the CLOEXEC flag on all open descriptors. Unconditionally try the
   * first 16 file descriptors. After that, bail out after the first error.
   */
  for (fd = 0; ; fd++)
    if (uv__cloexec(fd, 1) && fd > 15)
      break;
}
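

/* Invoke the callback of every watcher queued with uv__io_feed(); callbacks
 * are invoked with UV__POLLOUT and the queue is drained completely.
 */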
static void uv__run_pending(uv_loop_t* loop) {
  QUEUE* q;
  uv__io_t* w;

  while (!QUEUE_EMPTY(&loop->pending_queue)) {
    q = QUEUE_HEAD(&loop->pending_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, pending_queue);
    w->cb(loop, w, UV__POLLOUT);
  }
}
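

/* Round val up to the next power of two: smear the highest set bit into all
 * lower bits, then add one. Powers of two map to themselves, e.g.
 * next_power_of_two(1000) == 1024 and next_power_of_two(512) == 512.
 */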
static unsigned int next_power_of_two(unsigned int val) {
  val -= 1;
  val |= val >> 1;
  val |= val >> 2;
  val |= val >> 4;
  val |= val >> 8;
  val |= val >> 16;
  val += 1;
  return val;
}
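

/* Grow loop->watchers so it can be indexed up to fd len - 1. Two extra slots
 * past the end preserve the "fake" watcher list and count that the platform
 * poll backends stash there.
 */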
static void maybe_resize(uv_loop_t* loop, unsigned int len) {
  uv__io_t** watchers;
  void* fake_watcher_list;
  void* fake_watcher_count;
  unsigned int nwatchers;
  unsigned int i;

  if (len <= loop->nwatchers)
    return;

  /* Preserve fake watcher list and count at the end of the watchers */
  if (loop->watchers != NULL) {
    fake_watcher_list = loop->watchers[loop->nwatchers];
    fake_watcher_count = loop->watchers[loop->nwatchers + 1];
  } else {
    fake_watcher_list = NULL;
    fake_watcher_count = NULL;
  }

  nwatchers = next_power_of_two(len + 2) - 2;
  watchers = realloc(loop->watchers,
                     (nwatchers + 2) * sizeof(loop->watchers[0]));

  if (watchers == NULL)
    abort();
  for (i = loop->nwatchers; i < nwatchers; i++)
    watchers[i] = NULL;
  watchers[nwatchers] = fake_watcher_list;
  watchers[nwatchers + 1] = fake_watcher_count;

  loop->watchers = watchers;
  loop->nwatchers = nwatchers;
}
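

/* Initialize an I/O watcher: remember the callback and file descriptor and
 * clear the current and pending event masks.
 */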
void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
  assert(cb != NULL);
  assert(fd >= -1);
  QUEUE_INIT(&w->pending_queue);
  QUEUE_INIT(&w->watcher_queue);
  w->cb = cb;
  w->fd = fd;
  w->events = 0;
  w->pevents = 0;

#if defined(UV_HAVE_KQUEUE)
  w->rcount = 0;
  w->wcount = 0;
#endif /* defined(UV_HAVE_KQUEUE) */
}
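

/* Add events to the watcher's pending event mask and queue the watcher so
 * that the next uv__io_poll() registers the new mask with the kernel.
 */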
void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(UV__POLLIN | UV__POLLOUT)));
  assert(0 != events);
  assert(w->fd >= 0);
  assert(w->fd < INT_MAX);

  w->pevents |= events;
  maybe_resize(loop, w->fd + 1);

#if !defined(__sun)
  /* The event ports backend needs to rearm all file descriptors on each and
   * every tick of the event loop but the other backends allow us to
   * short-circuit here if the event mask is unchanged.
   */
  if (w->events == w->pevents) {
    if (w->events == 0 && !QUEUE_EMPTY(&w->watcher_queue)) {
      QUEUE_REMOVE(&w->watcher_queue);
      QUEUE_INIT(&w->watcher_queue);
    }
    return;
  }
#endif

  if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);

  if (loop->watchers[w->fd] == NULL) {
    loop->watchers[w->fd] = w;
    loop->nfds++;
  }
}
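

/* Remove events from the watcher's pending mask; when no events remain, the
 * watcher is dequeued and its slot in loop->watchers is cleared.
 */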
void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(UV__POLLIN | UV__POLLOUT)));
  assert(0 != events);

  if (w->fd == -1)
    return;

  assert(w->fd >= 0);

  /* Happens when uv__io_stop() is called on a handle that was never started. */
  if ((unsigned) w->fd >= loop->nwatchers)
    return;

  w->pevents &= ~events;

  if (w->pevents == 0) {
    QUEUE_REMOVE(&w->watcher_queue);
    QUEUE_INIT(&w->watcher_queue);

    if (loop->watchers[w->fd] != NULL) {
      assert(loop->watchers[w->fd] == w);
      assert(loop->nfds > 0);
      loop->watchers[w->fd] = NULL;
      loop->nfds--;
      w->events = 0;
    }
  }
  else if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
}


void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
  uv__io_stop(loop, w, UV__POLLIN | UV__POLLOUT);
  QUEUE_REMOVE(&w->pending_queue);

  /* Remove stale events for this file descriptor */
  uv__platform_invalidate_fd(loop, w->fd);
}


void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
  if (QUEUE_EMPTY(&w->pending_queue))
    QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue);
}


int uv__io_active(const uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(UV__POLLIN | UV__POLLOUT)));
  assert(0 != events);
  return 0 != (w->pevents & events);
}