/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stddef.h> /* NULL */
#include <stdio.h> /* printf */
#include <string.h> /* strerror */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <limits.h> /* INT_MAX, PATH_MAX, IOV_MAX */
#include <sys/uio.h> /* writev */
#include <sys/resource.h> /* getrusage */

# include <sys/ioctl.h>

# include <sys/types.h>
# include <sys/wait.h>

# include <mach-o/dyld.h> /* _NSGetExecutablePath */
# include <sys/filio.h>
# include <sys/ioctl.h>

#if defined(__FreeBSD__) || defined(__DragonFly__)
# include <sys/sysctl.h>
# include <sys/filio.h>
# include <sys/ioctl.h>
# include <sys/wait.h>
# define UV__O_CLOEXEC O_CLOEXEC
# if defined(__FreeBSD__) && __FreeBSD__ >= 10
#  define uv__accept4 accept4
#  define UV__SOCK_NONBLOCK SOCK_NONBLOCK
#  define UV__SOCK_CLOEXEC SOCK_CLOEXEC
# endif
# if !defined(F_DUP2FD_CLOEXEC) && defined(_F_DUP2FD_CLOEXEC)
#  define F_DUP2FD_CLOEXEC _F_DUP2FD_CLOEXEC
# endif
#endif

#include <sys/ioctl.h>

#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
# include <dlfcn.h> /* for dlsym */
#endif

static int uv__run_pending(uv_loop_t* loop);

/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->base) ==
              sizeof(((struct iovec*) 0)->iov_base));
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->len) ==
              sizeof(((struct iovec*) 0)->iov_len));
STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));

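/* Illustrative sketch, not part of libuv: the asserts above are what make it
 * legal to hand an array of uv_buf_t straight to writev() with a cast, which
 * is how the stream code submits gathered writes.  The helper name and the
 * file descriptor below are made up for the example. */
#if 0
static ssize_t write_bufs_sketch(int fd, uv_buf_t* bufs, unsigned int nbufs) {
  /* Valid only because uv_buf_t and struct iovec share size, field sizes and
   * field offsets, as the STATIC_ASSERTs above verify. */
  return writev(fd, (struct iovec*) bufs, nbufs);
}
#endif
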
uint64_t uv_hrtime(void) {
  return uv__hrtime(UV_CLOCK_PRECISE);
}

void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  assert(!(handle->flags & (UV_CLOSING | UV_CLOSED)));

  handle->flags |= UV_CLOSING;
  handle->close_cb = close_cb;

  switch (handle->type) {
  case UV_NAMED_PIPE: uv__pipe_close((uv_pipe_t*)handle); break;
  case UV_TTY: uv__stream_close((uv_stream_t*)handle); break;
  case UV_TCP: uv__tcp_close((uv_tcp_t*)handle); break;
  case UV_UDP: uv__udp_close((uv_udp_t*)handle); break;
  case UV_PREPARE: uv__prepare_close((uv_prepare_t*)handle); break;
  case UV_CHECK: uv__check_close((uv_check_t*)handle); break;
  case UV_IDLE: uv__idle_close((uv_idle_t*)handle); break;
  case UV_ASYNC: uv__async_close((uv_async_t*)handle); break;
  case UV_TIMER: uv__timer_close((uv_timer_t*)handle); break;
  case UV_PROCESS: uv__process_close((uv_process_t*)handle); break;
  case UV_FS_EVENT: uv__fs_event_close((uv_fs_event_t*)handle); break;
  case UV_POLL: uv__poll_close((uv_poll_t*)handle); break;
  case UV_FS_POLL: uv__fs_poll_close((uv_fs_poll_t*)handle); break;
  case UV_SIGNAL:
    uv__signal_close((uv_signal_t*) handle);
    /* Signal handles may not be closed immediately. The signal code will
     * itself call uv__make_close_pending() when appropriate. */
    return;
  default:
    assert(0);
  }

  uv__make_close_pending(handle);
}

int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
  if (handle == NULL || value == NULL)
    return -EINVAL;

  if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE)
    fd = uv__stream_fd((uv_stream_t*) handle);
  else if (handle->type == UV_UDP)
    fd = ((uv_udp_t *) handle)->io_watcher.fd;
  else
    return -ENOTSUP;

  len = sizeof(*value);
  if (*value == 0)
    r = getsockopt(fd, SOL_SOCKET, optname, value, &len);
  else
    r = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, len);

void uv__make_close_pending(uv_handle_t* handle) {
  assert(handle->flags & UV_CLOSING);
  assert(!(handle->flags & UV_CLOSED));
  handle->next_closing = handle->loop->closing_handles;
  handle->loop->closing_handles = handle;
}

int uv__getiovmax(void) {
#if defined(IOV_MAX)
  return IOV_MAX;
#elif defined(_SC_IOV_MAX)
  static int iovmax = -1;
  if (iovmax == -1) {
    iovmax = sysconf(_SC_IOV_MAX);
    /* On some embedded devices (arm-linux-uclibc based ip camera),
     * sysconf(_SC_IOV_MAX) cannot return the correct value. The return
     * value is -1 and the errno is EINPROGRESS. Degrade the value to 1.
     */
    if (iovmax == -1) iovmax = 1;
  }
  return iovmax;
#else
  return 1024;
#endif
}

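/* Illustrative sketch, not part of libuv: writev() rejects vectors with more
 * than IOV_MAX entries, so a caller is expected to clamp its count to
 * uv__getiovmax() and retry later with the remainder.  Names are made up. */
#if 0
static ssize_t write_clamped_sketch(int fd, uv_buf_t* bufs, unsigned int nbufs) {
  unsigned int iovmax;

  iovmax = (unsigned int) uv__getiovmax();
  if (nbufs > iovmax)
    nbufs = iovmax;  /* Write what fits now; the caller retries with the rest. */

  return writev(fd, (struct iovec*) bufs, nbufs);
}
#endif
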
static void uv__finish_close(uv_handle_t* handle) {
  /* Note: while the handle is in the UV_CLOSING state now, it's still possible
   * for it to be active in the sense that uv__is_active() returns true.
   * A good example is when the user calls uv_shutdown(), immediately followed
   * by uv_close(). The handle is considered active at this point because the
   * completion of the shutdown req is still pending.
   */
  assert(handle->flags & UV_CLOSING);
  assert(!(handle->flags & UV_CLOSED));
  handle->flags |= UV_CLOSED;

  switch (handle->type) {
    case UV_NAMED_PIPE:
    case UV_TCP:
    case UV_TTY:
      uv__stream_destroy((uv_stream_t*)handle);
      break;

    case UV_UDP:
      uv__udp_finish_close((uv_udp_t*)handle);
      break;
  }

  uv__handle_unref(handle);
  QUEUE_REMOVE(&handle->handle_queue);

  if (handle->close_cb) {
    handle->close_cb(handle);
  }
}

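/* Illustrative sketch, not part of libuv: the scenario described in the note
 * above, uv_shutdown() immediately followed by uv_close(), looks like this
 * from the caller's side.  The callback names are made up. */
#if 0
static void on_close_sketch(uv_handle_t* handle) {
  /* Runs only after the handle has fully closed. */
}

static void on_shutdown_sketch(uv_shutdown_t* req, int status) {
  /* The pending shutdown request kept the handle "active" until now. */
}

static void shutdown_then_close_sketch(uv_tcp_t* tcp, uv_shutdown_t* req) {
  uv_shutdown(req, (uv_stream_t*) tcp, on_shutdown_sketch);
  uv_close((uv_handle_t*) tcp, on_close_sketch);
  /* uv_is_active() still reports the handle as active here; the close
   * callback fires only after the shutdown request completes. */
}
#endif
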
static void uv__run_closing_handles(uv_loop_t* loop) {
  p = loop->closing_handles;
  loop->closing_handles = NULL;

int uv_is_closing(const uv_handle_t* handle) {
  return uv__is_closing(handle);
}


int uv_backend_fd(const uv_loop_t* loop) {
  return loop->backend_fd;
}

int uv_backend_timeout(const uv_loop_t* loop) {
  if (loop->stop_flag != 0)
    return 0;
  if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop))
    return 0;
  if (!QUEUE_EMPTY(&loop->idle_handles))
    return 0;
  if (!QUEUE_EMPTY(&loop->pending_queue))
    return 0;
  if (loop->closing_handles)
    return 0;
  return uv__next_timeout(loop);
}

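/* Illustrative sketch, not part of libuv: uv_backend_fd() and
 * uv_backend_timeout() exist so an embedder can drive the loop from its own
 * poller.  This is a minimal sketch assuming a poll(2)-based outer loop. */
#if 0
#include <poll.h>

static void embed_loop_sketch(uv_loop_t* loop) {
  struct pollfd pfd;

  while (uv_loop_alive(loop)) {
    pfd.fd = uv_backend_fd(loop);  /* epoll/kqueue fd, depending on platform */
    pfd.events = POLLIN;
    pfd.revents = 0;

    /* Block until libuv's backend fd is readable or its timeout expires. */
    poll(&pfd, 1, uv_backend_timeout(loop));

    /* Let libuv process whatever became ready, without blocking again. */
    uv_run(loop, UV_RUN_NOWAIT);
  }
}
#endif
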
static int uv__loop_alive(const uv_loop_t* loop) {
  return uv__has_active_handles(loop) ||
         uv__has_active_reqs(loop) ||
         loop->closing_handles != NULL;
}


int uv_loop_alive(const uv_loop_t* loop) {
  return uv__loop_alive(loop);
}

int uv_run(uv_loop_t* loop, uv_run_mode mode) {
  int timeout;
  int r;
  int ran_pending;

  r = uv__loop_alive(loop);
  if (!r)
    uv__update_time(loop);

  while (r != 0 && loop->stop_flag == 0) {
    uv__update_time(loop);
    uv__run_timers(loop);
    ran_pending = uv__run_pending(loop);
    uv__run_idle(loop);
    uv__run_prepare(loop);

    timeout = 0;
    if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT)
      timeout = uv_backend_timeout(loop);

    uv__io_poll(loop, timeout);
    uv__run_check(loop);
    uv__run_closing_handles(loop);

    if (mode == UV_RUN_ONCE) {
      /* UV_RUN_ONCE implies forward progress: at least one callback must have
       * been invoked when it returns. uv__io_poll() can return without doing
       * I/O (meaning: no callbacks) when its timeout expires - which means we
       * have pending timers that satisfy the forward progress constraint.
       *
       * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
       * the check.
       */
      uv__update_time(loop);
      uv__run_timers(loop);
    }

    r = uv__loop_alive(loop);
    if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
      break;
  }

  /* The if statement lets gcc compile it to a conditional store. Avoids
   * dirtying a cache line.
   */
  if (loop->stop_flag != 0)
    loop->stop_flag = 0;

  return r;
}

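/* Illustrative sketch, not part of libuv: the three run modes from the
 * caller's perspective.  Handle setup is omitted; only the uv_run() calls
 * and the meaning of their return value are shown. */
#if 0
static void run_modes_sketch(uv_loop_t* loop) {
  int alive;

  /* Block until there are no more active handles or requests. */
  uv_run(loop, UV_RUN_DEFAULT);

  /* Poll once, blocking if necessary, and guarantee at least one callback. */
  alive = uv_run(loop, UV_RUN_ONCE);

  /* Poll once without blocking; may return without running any callbacks. */
  alive = uv_run(loop, UV_RUN_NOWAIT);

  (void) alive;  /* Non-zero means there is still work pending. */
}
#endif
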
void uv_update_time(uv_loop_t* loop) {
  uv__update_time(loop);
}


int uv_is_active(const uv_handle_t* handle) {
  return uv__is_active(handle);
}

/* Open a socket in non-blocking close-on-exec mode, atomically if possible. */
int uv__socket(int domain, int type, int protocol) {
#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
  sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);

  sockfd = socket(domain, type, protocol);

  err = uv__nonblock(sockfd, 1);
  if (err == 0)
    err = uv__cloexec(sockfd, 1);

#if defined(SO_NOSIGPIPE)
  {
    int on = 1;
    setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
  }
#endif

int uv__accept(int sockfd) {
#if defined(__linux__) || __FreeBSD__ >= 10
  static int no_accept4;

  peerfd = uv__accept4(sockfd,
                       NULL,
                       NULL,
                       UV__SOCK_NONBLOCK|UV__SOCK_CLOEXEC);

  peerfd = accept(sockfd, NULL, NULL);

  err = uv__cloexec(peerfd, 1);
  if (err == 0)
    err = uv__nonblock(peerfd, 1);

int uv__close(int fd) {
  assert(fd > -1);  /* Catch uninitialized io_watcher.fd bugs. */
  assert(fd > STDERR_FILENO);  /* Catch stdio close bugs. */

  rc = close(fd);
  if (rc == -1) {
    rc = -errno;
    if (rc == -EINTR)
      rc = -EINPROGRESS;  /* For platform/libc consistency. */

#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) || \
    defined(_AIX) || defined(__DragonFly__)

int uv__nonblock(int fd, int set) {
  do
    r = ioctl(fd, FIONBIO, &set);
  while (r == -1 && errno == EINTR);


int uv__cloexec(int fd, int set) {
  do
    r = ioctl(fd, set ? FIOCLEX : FIONCLEX);
  while (r == -1 && errno == EINTR);

#else /* !(defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) || \
          defined(_AIX) || defined(__DragonFly__)) */

int uv__nonblock(int fd, int set) {
  do
    r = fcntl(fd, F_GETFL);
  while (r == -1 && errno == EINTR);

  /* Bail out now if already set/clear. */
  if (!!(r & O_NONBLOCK) == !!set)
    return 0;

  if (set)
    flags = r | O_NONBLOCK;
  else
    flags = r & ~O_NONBLOCK;

  do
    r = fcntl(fd, F_SETFL, flags);
  while (r == -1 && errno == EINTR);

int uv__cloexec(int fd, int set) {
  do
    r = fcntl(fd, F_GETFD);
  while (r == -1 && errno == EINTR);

  /* Bail out now if already set/clear. */
  if (!!(r & FD_CLOEXEC) == !!set)
    return 0;

  if (set)
    flags = r | FD_CLOEXEC;
  else
    flags = r & ~FD_CLOEXEC;

  do
    r = fcntl(fd, F_SETFD, flags);
  while (r == -1 && errno == EINTR);

#endif /* defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) || \
          defined(_AIX) || defined(__DragonFly__) */

/* This function is not execve-safe; there is a race window
 * between the call to dup() and fcntl(FD_CLOEXEC).
 */
int uv__dup(int fd) {
  err = uv__cloexec(fd, 1);

ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
  struct cmsghdr* cmsg;
  ssize_t rc;
  int* pfd;
  int* end;
#if defined(__linux__)
  static int no_msg_cmsg_cloexec;
  if (no_msg_cmsg_cloexec == 0) {
    rc = recvmsg(fd, msg, flags | 0x40000000);  /* MSG_CMSG_CLOEXEC */
    rc = recvmsg(fd, msg, flags);
    no_msg_cmsg_cloexec = 1;
  } else {
    rc = recvmsg(fd, msg, flags);
  }
#else
  rc = recvmsg(fd, msg, flags);
#endif

  if (msg->msg_controllen == 0)
    return rc;

  for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg))
    if (cmsg->cmsg_type == SCM_RIGHTS)
      for (pfd = (int*) CMSG_DATA(cmsg),
           end = (int*) ((char*) cmsg + cmsg->cmsg_len);
           pfd < end;
           pfd += 1)
        uv__cloexec(*pfd, 1);

  return rc;
}

int uv_cwd(char* buffer, size_t* size) {
  if (buffer == NULL || size == NULL)
    return -EINVAL;
  if (getcwd(buffer, *size) == NULL)
    return -errno;

  *size = strlen(buffer);
  if (*size > 1 && buffer[*size - 1] == '/') {
    buffer[*size - 1] = '\0';
    *size -= 1;
  }

  return 0;
}

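/* Illustrative sketch, not part of libuv: uv_cwd() takes the buffer size by
 * pointer and rewrites it with the length of the resulting string.  The
 * buffer size and output formatting below are made up for the example. */
#if 0
static void cwd_sketch(void) {
  char path[1024];
  size_t len;

  len = sizeof(path);
  if (uv_cwd(path, &len) == 0)
    printf("cwd (%zu bytes): %s\n", len, path);
}
#endif
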
int uv_chdir(const char* dir) {

void uv_disable_stdio_inheritance(void) {
  int fd;

  /* Set the CLOEXEC flag on all open descriptors. Unconditionally try the
   * first 16 file descriptors. After that, bail out after the first error.
   */
  for (fd = 0; ; fd++)
    if (uv__cloexec(fd, 1) && fd > 15)
      break;
}

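/* Illustrative sketch, not part of libuv: uv_disable_stdio_inheritance() is
 * meant to be called early, before the program opens descriptors that child
 * processes could otherwise inherit by accident. */
#if 0
int main(void) {
  uv_disable_stdio_inheritance();

  /* ... open files and sockets, spawn children with uv_spawn(), etc. ... */

  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}
#endif
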
int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
  switch (handle->type) {
    fd_out = uv__stream_fd((uv_stream_t*) handle);
    fd_out = ((uv_udp_t *) handle)->io_watcher.fd;
    fd_out = ((uv_poll_t *) handle)->io_watcher.fd;

  if (uv__is_closing(handle) || fd_out == -1)

static int uv__run_pending(uv_loop_t* loop) {
  if (QUEUE_EMPTY(&loop->pending_queue))
    return 0;

  QUEUE_MOVE(&loop->pending_queue, &pq);

  while (!QUEUE_EMPTY(&pq)) {
    w = QUEUE_DATA(q, uv__io_t, pending_queue);
    w->cb(loop, w, UV__POLLOUT);
  }

  return 1;
}

static unsigned int next_power_of_two(unsigned int val) {

static void maybe_resize(uv_loop_t* loop, unsigned int len) {
  uv__io_t** watchers;
  void* fake_watcher_list;
  void* fake_watcher_count;
  unsigned int nwatchers;
  unsigned int i;

  if (len <= loop->nwatchers)
    return;

  /* Preserve fake watcher list and count at the end of the watchers */
  if (loop->watchers != NULL) {
    fake_watcher_list = loop->watchers[loop->nwatchers];
    fake_watcher_count = loop->watchers[loop->nwatchers + 1];
  } else {
    fake_watcher_list = NULL;
    fake_watcher_count = NULL;
  }

  nwatchers = next_power_of_two(len + 2) - 2;
  watchers = uv__realloc(loop->watchers,
                         (nwatchers + 2) * sizeof(loop->watchers[0]));

  if (watchers == NULL)
    abort();
  for (i = loop->nwatchers; i < nwatchers; i++)
    watchers[i] = NULL;
  watchers[nwatchers] = fake_watcher_list;
  watchers[nwatchers + 1] = fake_watcher_count;

  loop->watchers = watchers;
  loop->nwatchers = nwatchers;
}

void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
  QUEUE_INIT(&w->pending_queue);
  QUEUE_INIT(&w->watcher_queue);

#if defined(UV_HAVE_KQUEUE)
  w->rcount = 0;
  w->wcount = 0;
#endif /* defined(UV_HAVE_KQUEUE) */
}

void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(UV__POLLIN | UV__POLLOUT)));
  assert(0 != events);
  assert(w->fd >= 0);
  assert(w->fd < INT_MAX);

  w->pevents |= events;
  maybe_resize(loop, w->fd + 1);

#if !defined(__sun)
  /* The event ports backend needs to rearm all file descriptors on each and
   * every tick of the event loop but the other backends allow us to
   * short-circuit here if the event mask is unchanged.
   */
  if (w->events == w->pevents) {
    if (w->events == 0 && !QUEUE_EMPTY(&w->watcher_queue)) {
      QUEUE_REMOVE(&w->watcher_queue);
      QUEUE_INIT(&w->watcher_queue);
    }
    return;
  }
#endif

  if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);

  if (loop->watchers[w->fd] == NULL) {
    loop->watchers[w->fd] = w;
    loop->nfds++;
  }
}

void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(UV__POLLIN | UV__POLLOUT)));

  /* Happens when uv__io_stop() is called on a handle that was never started. */
  if ((unsigned) w->fd >= loop->nwatchers)
    return;

  w->pevents &= ~events;

  if (w->pevents == 0) {
    QUEUE_REMOVE(&w->watcher_queue);
    QUEUE_INIT(&w->watcher_queue);

    if (loop->watchers[w->fd] != NULL) {
      assert(loop->watchers[w->fd] == w);
      assert(loop->nfds > 0);
      loop->watchers[w->fd] = NULL;
      loop->nfds--;
      w->events = 0;
    }
  }
  else if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
}

void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
  uv__io_stop(loop, w, UV__POLLIN | UV__POLLOUT);
  QUEUE_REMOVE(&w->pending_queue);

  /* Remove stale events for this file descriptor */
  uv__platform_invalidate_fd(loop, w->fd);
}

void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
  if (QUEUE_EMPTY(&w->pending_queue))
    QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue);
}

int uv__io_active(const uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(UV__POLLIN | UV__POLLOUT)));
  assert(0 != events);
  return 0 != (w->pevents & events);
}

int uv_getrusage(uv_rusage_t* rusage) {
  struct rusage usage;

  if (getrusage(RUSAGE_SELF, &usage))
    return -errno;

  rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec;
  rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec;

  rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec;
  rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec;

  rusage->ru_maxrss = usage.ru_maxrss;
  rusage->ru_ixrss = usage.ru_ixrss;
  rusage->ru_idrss = usage.ru_idrss;
  rusage->ru_isrss = usage.ru_isrss;
  rusage->ru_minflt = usage.ru_minflt;
  rusage->ru_majflt = usage.ru_majflt;
  rusage->ru_nswap = usage.ru_nswap;
  rusage->ru_inblock = usage.ru_inblock;
  rusage->ru_oublock = usage.ru_oublock;
  rusage->ru_msgsnd = usage.ru_msgsnd;
  rusage->ru_msgrcv = usage.ru_msgrcv;
  rusage->ru_nsignals = usage.ru_nsignals;
  rusage->ru_nvcsw = usage.ru_nvcsw;
  rusage->ru_nivcsw = usage.ru_nivcsw;

  return 0;
}

int uv__open_cloexec(const char* path, int flags) {
#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD__ >= 9) || \
    defined(__DragonFly__)
  static int no_cloexec;

  fd = open(path, flags | UV__O_CLOEXEC);

  /* O_CLOEXEC not supported. */
  no_cloexec = 1;
#endif

  fd = open(path, flags);

  err = uv__cloexec(fd, 1);

int uv__dup2_cloexec(int oldfd, int newfd) {
  int r;
#if defined(__FreeBSD__) && __FreeBSD__ >= 10
  r = dup3(oldfd, newfd, O_CLOEXEC);

#elif defined(__FreeBSD__) && defined(F_DUP2FD_CLOEXEC)
  r = fcntl(oldfd, F_DUP2FD_CLOEXEC, newfd);

#elif defined(__linux__)
  do
    r = uv__dup3(oldfd, newfd, UV__O_CLOEXEC);
  while (r == -1 && errno == EBUSY);

  do
    r = dup2(oldfd, newfd);
#if defined(__linux__)
  while (r == -1 && errno == EBUSY);
#else
  while (0); /* Never retry. */
#endif

  err = uv__cloexec(newfd, 1);

int uv_os_homedir(char* buffer, size_t* size) {
  struct passwd* result;
#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
  int (*getpwuid_r)(uid_t, struct passwd*, char*, size_t, struct passwd**);
#endif

  if (buffer == NULL || size == NULL || *size == 0)
    return -EINVAL;

  /* Check if the HOME environment variable is set first */
  buf = getenv("HOME");

  memcpy(buffer, buf, len + 1);

#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
  getpwuid_r = dlsym(RTLD_DEFAULT, "getpwuid_r");
  if (getpwuid_r == NULL)
    return -ENOSYS;
#endif

  /* HOME is not set, so call getpwuid() */
  initsize = sysconf(_SC_GETPW_R_SIZE_MAX);

  bufsize = (size_t) initsize;

  buf = uv__malloc(bufsize);

  r = getpwuid_r(uid, &pw, buf, bufsize, &result);

  if (result == NULL) {

  len = strlen(pw.pw_dir);

  memcpy(buffer, pw.pw_dir, len + 1);

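/* Illustrative sketch, not part of libuv: uv_os_homedir() follows the same
 * buffer/size convention as uv_cwd() above, whether the result comes from
 * HOME or from the password database.  The buffer size is arbitrary. */
#if 0
static void homedir_sketch(void) {
  char home[1024];
  size_t len;

  len = sizeof(home);
  if (uv_os_homedir(home, &len) == 0)
    printf("home (%zu bytes): %s\n", len, home);
}
#endif
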