1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23 #include "uv-common.h"
28 #include <stddef.h> /* NULL */
30 #include <stdlib.h> /* malloc */
31 #include <string.h> /* memset */
34 # include <malloc.h> /* malloc */
36 # include <net/if.h> /* if_nametoindex */
37 # include <sys/un.h> /* AF_UNIX, sockaddr_un */
42 uv_malloc_func local_malloc;
43 uv_realloc_func local_realloc;
44 uv_calloc_func local_calloc;
45 uv_free_func local_free;
48 static uv__allocator_t uv__allocator = {
/* strdup() replacement that allocates through the replaceable allocator.
 * Returns NULL on allocation failure; caller frees with uv__free(). */
char* uv__strdup(const char* s) {
  size_t len = strlen(s) + 1;
  char* m = uv__malloc(len);
  if (m == NULL)
    return NULL;
  return memcpy(m, s, len);
}
/* strndup() replacement: copy at most n bytes of s, always NUL-terminate.
 * Returns NULL on allocation failure; caller frees with uv__free(). */
char* uv__strndup(const char* s, size_t n) {
  char* m;
  size_t len = strlen(s);

  if (n < len)
    len = n;

  m = uv__malloc(len + 1);
  if (m == NULL)
    return NULL;

  m[len] = '\0';
  return memcpy(m, s, len);
}
75 void* uv__malloc(size_t size) {
77 return uv__allocator.local_malloc(size);
81 void uv__free(void* ptr) {
84 /* Libuv expects that free() does not clobber errno. The system allocator
85 * honors that assumption but custom allocators may not be so careful.
88 uv__allocator.local_free(ptr);
92 void* uv__calloc(size_t count, size_t size) {
93 return uv__allocator.local_calloc(count, size);
96 void* uv__realloc(void* ptr, size_t size) {
98 return uv__allocator.local_realloc(ptr, size);
/* reallocf() semantics: like uv__realloc() but frees the original block
 * when the resize fails, so the caller cannot leak it. */
void* uv__reallocf(void* ptr, size_t size) {
  void* newptr;

  newptr = uv__realloc(ptr, size);
  if (newptr == NULL)
    if (size > 0)
      uv__free(ptr);

  return newptr;
}
114 int uv_replace_allocator(uv_malloc_func malloc_func,
115 uv_realloc_func realloc_func,
116 uv_calloc_func calloc_func,
117 uv_free_func free_func) {
118 if (malloc_func == NULL || realloc_func == NULL ||
119 calloc_func == NULL || free_func == NULL) {
123 uv__allocator.local_malloc = malloc_func;
124 uv__allocator.local_realloc = realloc_func;
125 uv__allocator.local_calloc = calloc_func;
126 uv__allocator.local_free = free_func;
131 #define XX(uc, lc) case UV_##uc: return sizeof(uv_##lc##_t);
133 size_t uv_handle_size(uv_handle_type type) {
135 UV_HANDLE_TYPE_MAP(XX)
141 size_t uv_req_size(uv_req_type type) {
152 size_t uv_loop_size(void) {
153 return sizeof(uv_loop_t);
157 uv_buf_t uv_buf_init(char* base, unsigned int len) {
/* Format a fallback string for an unrecognized error code. The strdup'd
 * copy is intentionally leaked (error strings live for the process); on
 * allocation failure a static string without the code is returned. */
static const char* uv__unknown_err_code(int err) {
  char buf[32];
  char* copy;

  snprintf(buf, sizeof(buf), "Unknown system error %d", err);
  copy = uv__strdup(buf);

  return copy != NULL ? copy : "Unknown system error";
}
175 #define UV_ERR_NAME_GEN_R(name, _) \
177 uv__strscpy(buf, #name, buflen); break;
178 char* uv_err_name_r(int err, char* buf, size_t buflen) {
180 UV_ERRNO_MAP(UV_ERR_NAME_GEN_R)
181 default: snprintf(buf, buflen, "Unknown system error %d", err);
185 #undef UV_ERR_NAME_GEN_R
188 #define UV_ERR_NAME_GEN(name, _) case UV_ ## name: return #name;
189 const char* uv_err_name(int err) {
191 UV_ERRNO_MAP(UV_ERR_NAME_GEN)
193 return uv__unknown_err_code(err);
195 #undef UV_ERR_NAME_GEN
198 #define UV_STRERROR_GEN_R(name, msg) \
200 snprintf(buf, buflen, "%s", msg); break;
201 char* uv_strerror_r(int err, char* buf, size_t buflen) {
203 UV_ERRNO_MAP(UV_STRERROR_GEN_R)
204 default: snprintf(buf, buflen, "Unknown system error %d", err);
208 #undef UV_STRERROR_GEN_R
211 #define UV_STRERROR_GEN(name, msg) case UV_ ## name: return msg;
212 const char* uv_strerror(int err) {
214 UV_ERRNO_MAP(UV_STRERROR_GEN)
216 return uv__unknown_err_code(err);
218 #undef UV_STRERROR_GEN
220 #if !defined(CMAKE_BOOTSTRAP) || defined(_WIN32)
222 int uv_ip4_addr(const char* ip, int port, struct sockaddr_in* addr) {
223 memset(addr, 0, sizeof(*addr));
224 addr->sin_family = AF_INET;
225 addr->sin_port = htons(port);
227 addr->sin_len = sizeof(*addr);
229 return uv_inet_pton(AF_INET, ip, &(addr->sin_addr.s_addr));
233 int uv_ip6_addr(const char* ip, int port, struct sockaddr_in6* addr) {
234 char address_part[40];
235 size_t address_part_size;
236 const char* zone_index;
238 memset(addr, 0, sizeof(*addr));
239 addr->sin6_family = AF_INET6;
240 addr->sin6_port = htons(port);
242 addr->sin6_len = sizeof(*addr);
245 zone_index = strchr(ip, '%');
246 if (zone_index != NULL) {
247 address_part_size = zone_index - ip;
248 if (address_part_size >= sizeof(address_part))
249 address_part_size = sizeof(address_part) - 1;
251 memcpy(address_part, ip, address_part_size);
252 address_part[address_part_size] = '\0';
255 zone_index++; /* skip '%' */
256 /* NOTE: unknown interface (id=0) is silently ignored */
258 addr->sin6_scope_id = atoi(zone_index);
260 addr->sin6_scope_id = if_nametoindex(zone_index);
264 return uv_inet_pton(AF_INET6, ip, &addr->sin6_addr);
268 int uv_ip4_name(const struct sockaddr_in* src, char* dst, size_t size) {
269 return uv_inet_ntop(AF_INET, &src->sin_addr, dst, size);
273 int uv_ip6_name(const struct sockaddr_in6* src, char* dst, size_t size) {
274 return uv_inet_ntop(AF_INET6, &src->sin6_addr, dst, size);
278 int uv_ip_name(const struct sockaddr *src, char *dst, size_t size) {
279 switch (src->sa_family) {
281 return uv_inet_ntop(AF_INET, &((struct sockaddr_in *)src)->sin_addr,
284 return uv_inet_ntop(AF_INET6, &((struct sockaddr_in6 *)src)->sin6_addr,
287 return UV_EAFNOSUPPORT;
292 int uv_tcp_bind(uv_tcp_t* handle,
293 const struct sockaddr* addr,
294 unsigned int flags) {
295 unsigned int addrlen;
297 if (handle->type != UV_TCP)
300 if (addr->sa_family == AF_INET)
301 addrlen = sizeof(struct sockaddr_in);
302 else if (addr->sa_family == AF_INET6)
303 addrlen = sizeof(struct sockaddr_in6);
307 return uv__tcp_bind(handle, addr, addrlen, flags);
311 int uv_udp_init_ex(uv_loop_t* loop, uv_udp_t* handle, unsigned flags) {
312 unsigned extra_flags;
316 /* Use the lower 8 bits for the domain. */
317 domain = flags & 0xFF;
318 if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC)
321 /* Use the higher bits for extra flags. */
322 extra_flags = flags & ~0xFF;
323 if (extra_flags & ~UV_UDP_RECVMMSG)
326 rc = uv__udp_init_ex(loop, handle, flags, domain);
329 if (extra_flags & UV_UDP_RECVMMSG)
330 handle->flags |= UV_HANDLE_UDP_RECVMMSG;
336 int uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) {
337 return uv_udp_init_ex(loop, handle, AF_UNSPEC);
341 int uv_udp_bind(uv_udp_t* handle,
342 const struct sockaddr* addr,
343 unsigned int flags) {
344 unsigned int addrlen;
346 if (handle->type != UV_UDP)
349 if (addr->sa_family == AF_INET)
350 addrlen = sizeof(struct sockaddr_in);
351 else if (addr->sa_family == AF_INET6)
352 addrlen = sizeof(struct sockaddr_in6);
356 return uv__udp_bind(handle, addr, addrlen, flags);
360 int uv_tcp_connect(uv_connect_t* req,
362 const struct sockaddr* addr,
364 unsigned int addrlen;
366 if (handle->type != UV_TCP)
369 if (addr->sa_family == AF_INET)
370 addrlen = sizeof(struct sockaddr_in);
371 else if (addr->sa_family == AF_INET6)
372 addrlen = sizeof(struct sockaddr_in6);
376 return uv__tcp_connect(req, handle, addr, addrlen, cb);
380 int uv_udp_connect(uv_udp_t* handle, const struct sockaddr* addr) {
381 unsigned int addrlen;
383 if (handle->type != UV_UDP)
386 /* Disconnect the handle */
388 if (!(handle->flags & UV_HANDLE_UDP_CONNECTED))
391 return uv__udp_disconnect(handle);
394 if (addr->sa_family == AF_INET)
395 addrlen = sizeof(struct sockaddr_in);
396 else if (addr->sa_family == AF_INET6)
397 addrlen = sizeof(struct sockaddr_in6);
401 if (handle->flags & UV_HANDLE_UDP_CONNECTED)
404 return uv__udp_connect(handle, addr, addrlen);
408 int uv__udp_is_connected(uv_udp_t* handle) {
409 struct sockaddr_storage addr;
411 if (handle->type != UV_UDP)
414 addrlen = sizeof(addr);
415 if (uv_udp_getpeername(handle, (struct sockaddr*) &addr, &addrlen) != 0)
422 int uv__udp_check_before_send(uv_udp_t* handle, const struct sockaddr* addr) {
423 unsigned int addrlen;
425 if (handle->type != UV_UDP)
428 if (addr != NULL && (handle->flags & UV_HANDLE_UDP_CONNECTED))
431 if (addr == NULL && !(handle->flags & UV_HANDLE_UDP_CONNECTED))
432 return UV_EDESTADDRREQ;
435 if (addr->sa_family == AF_INET)
436 addrlen = sizeof(struct sockaddr_in);
437 else if (addr->sa_family == AF_INET6)
438 addrlen = sizeof(struct sockaddr_in6);
439 #if defined(AF_UNIX) && !defined(_WIN32)
440 else if (addr->sa_family == AF_UNIX)
441 addrlen = sizeof(struct sockaddr_un);
453 int uv_udp_send(uv_udp_send_t* req,
455 const uv_buf_t bufs[],
457 const struct sockaddr* addr,
458 uv_udp_send_cb send_cb) {
461 addrlen = uv__udp_check_before_send(handle, addr);
465 return uv__udp_send(req, handle, bufs, nbufs, addr, addrlen, send_cb);
469 int uv_udp_try_send(uv_udp_t* handle,
470 const uv_buf_t bufs[],
472 const struct sockaddr* addr) {
475 addrlen = uv__udp_check_before_send(handle, addr);
479 return uv__udp_try_send(handle, bufs, nbufs, addr, addrlen);
483 int uv_udp_recv_start(uv_udp_t* handle,
484 uv_alloc_cb alloc_cb,
485 uv_udp_recv_cb recv_cb) {
486 if (handle->type != UV_UDP || alloc_cb == NULL || recv_cb == NULL)
489 return uv__udp_recv_start(handle, alloc_cb, recv_cb);
493 int uv_udp_recv_stop(uv_udp_t* handle) {
494 if (handle->type != UV_UDP)
497 return uv__udp_recv_stop(handle);
502 void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) {
507 QUEUE_MOVE(&loop->handle_queue, &queue);
508 while (!QUEUE_EMPTY(&queue)) {
509 q = QUEUE_HEAD(&queue);
510 h = QUEUE_DATA(q, uv_handle_t, handle_queue);
513 QUEUE_INSERT_TAIL(&loop->handle_queue, q);
515 if (h->flags & UV_HANDLE_INTERNAL) continue;
521 static void uv__print_handles(uv_loop_t* loop, int only_active, FILE* stream) {
527 loop = uv_default_loop();
529 QUEUE_FOREACH(q, &loop->handle_queue) {
530 h = QUEUE_DATA(q, uv_handle_t, handle_queue);
532 if (only_active && !uv__is_active(h))
536 #define X(uc, lc) case UV_##uc: type = #lc; break;
537 UV_HANDLE_TYPE_MAP(X)
539 default: type = "<unknown>";
543 "[%c%c%c] %-8s %p\n",
544 "R-"[!(h->flags & UV_HANDLE_REF)],
545 "A-"[!(h->flags & UV_HANDLE_ACTIVE)],
546 "I-"[!(h->flags & UV_HANDLE_INTERNAL)],
553 void uv_print_all_handles(uv_loop_t* loop, FILE* stream) {
554 uv__print_handles(loop, 0, stream);
558 void uv_print_active_handles(uv_loop_t* loop, FILE* stream) {
559 uv__print_handles(loop, 1, stream);
563 void uv_ref(uv_handle_t* handle) {
564 uv__handle_ref(handle);
568 void uv_unref(uv_handle_t* handle) {
569 uv__handle_unref(handle);
573 int uv_has_ref(const uv_handle_t* handle) {
574 return uv__has_ref(handle);
578 void uv_stop(uv_loop_t* loop) {
583 uint64_t uv_now(const uv_loop_t* loop) {
589 size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs) {
594 for (i = 0; i < nbufs; i++)
595 bytes += (size_t) bufs[i].len;
600 int uv_recv_buffer_size(uv_handle_t* handle, int* value) {
601 return uv__socket_sockopt(handle, SO_RCVBUF, value);
604 int uv_send_buffer_size(uv_handle_t* handle, int *value) {
605 return uv__socket_sockopt(handle, SO_SNDBUF, value);
608 int uv_fs_event_getpath(uv_fs_event_t* handle, char* buffer, size_t* size) {
611 if (!uv__is_active(handle)) {
616 required_len = strlen(handle->path);
617 if (required_len >= *size) {
618 *size = required_len + 1;
622 memcpy(buffer, handle->path, required_len);
623 *size = required_len;
624 buffer[required_len] = '\0';
629 /* The windows implementation does not have the same structure layout as
630 * the unix implementation (nbufs is not directly inside req but is
631 * contained in a nested union/struct) so this function locates it.
633 static unsigned int* uv__get_nbufs(uv_fs_t* req) {
635 return &req->fs.info.nbufs;
/* uv_fs_scandir() uses the system allocator to allocate memory on non-Windows
 * systems. So, the memory should be released using free(). On Windows,
 * uv__malloc() is used, so use uv__free() to free memory.
*/
#ifdef _WIN32
# define uv__fs_scandir_free uv__free
#else
# define uv__fs_scandir_free free
#endif
651 void uv__fs_scandir_cleanup(uv_fs_t* req) {
652 uv__dirent_t** dents;
654 unsigned int* nbufs = uv__get_nbufs(req);
657 if (*nbufs > 0 && *nbufs != (unsigned int) req->result)
659 for (; *nbufs < (unsigned int) req->result; (*nbufs)++)
660 uv__fs_scandir_free(dents[*nbufs]);
662 uv__fs_scandir_free(req->ptr);
667 int uv_fs_scandir_next(uv_fs_t* req, uv_dirent_t* ent) {
668 uv__dirent_t** dents;
672 /* Check to see if req passed */
676 /* Ptr will be null if req was canceled or no files found */
680 nbufs = uv__get_nbufs(req);
685 /* Free previous entity */
687 uv__fs_scandir_free(dents[*nbufs - 1]);
689 /* End was already reached */
690 if (*nbufs == (unsigned int) req->result) {
691 uv__fs_scandir_free(dents);
696 dent = dents[(*nbufs)++];
698 ent->name = dent->d_name;
699 ent->type = uv__fs_get_dirent_type(dent);
704 uv_dirent_type_t uv__fs_get_dirent_type(uv__dirent_t* dent) {
705 uv_dirent_type_t type;
707 #ifdef HAVE_DIRENT_TYPES
708 switch (dent->d_type) {
710 type = UV_DIRENT_DIR;
713 type = UV_DIRENT_FILE;
716 type = UV_DIRENT_LINK;
719 type = UV_DIRENT_FIFO;
722 type = UV_DIRENT_SOCKET;
725 type = UV_DIRENT_CHAR;
728 type = UV_DIRENT_BLOCK;
731 type = UV_DIRENT_UNKNOWN;
734 type = UV_DIRENT_UNKNOWN;
740 void uv__fs_readdir_cleanup(uv_fs_t* req) {
742 uv_dirent_t* dirents;
745 if (req->ptr == NULL)
749 dirents = dir->dirents;
755 for (i = 0; i < req->result; ++i) {
756 uv__free((char*) dirents[i].name);
757 dirents[i].name = NULL;
762 int uv_loop_configure(uv_loop_t* loop, uv_loop_option option, ...) {
766 va_start(ap, option);
767 /* Any platform-agnostic options should be handled here. */
768 err = uv__loop_configure(loop, option, ap);
775 static uv_loop_t default_loop_struct;
776 static uv_loop_t* default_loop_ptr;
779 uv_loop_t* uv_default_loop(void) {
780 if (default_loop_ptr != NULL)
781 return default_loop_ptr;
783 if (uv_loop_init(&default_loop_struct))
786 default_loop_ptr = &default_loop_struct;
787 return default_loop_ptr;
791 uv_loop_t* uv_loop_new(void) {
794 loop = uv__malloc(sizeof(*loop));
798 if (uv_loop_init(loop)) {
807 int uv_loop_close(uv_loop_t* loop) {
814 if (uv__has_active_reqs(loop))
817 QUEUE_FOREACH(q, &loop->handle_queue) {
818 h = QUEUE_DATA(q, uv_handle_t, handle_queue);
819 if (!(h->flags & UV_HANDLE_INTERNAL))
823 uv__loop_close(loop);
826 saved_data = loop->data;
827 memset(loop, -1, sizeof(*loop));
828 loop->data = saved_data;
830 if (loop == default_loop_ptr)
831 default_loop_ptr = NULL;
837 void uv_loop_delete(uv_loop_t* loop) {
838 uv_loop_t* default_loop;
841 default_loop = default_loop_ptr;
843 err = uv_loop_close(loop);
844 (void) err; /* Squelch compiler warnings. */
846 if (loop != default_loop)
851 int uv_read_start(uv_stream_t* stream,
852 uv_alloc_cb alloc_cb,
853 uv_read_cb read_cb) {
854 if (stream == NULL || alloc_cb == NULL || read_cb == NULL)
857 if (stream->flags & UV_HANDLE_CLOSING)
860 if (stream->flags & UV_HANDLE_READING)
863 if (!(stream->flags & UV_HANDLE_READABLE))
866 return uv__read_start(stream, alloc_cb, read_cb);
870 void uv_os_free_environ(uv_env_item_t* envitems, int count) {
873 for (i = 0; i < count; i++) {
874 uv__free(envitems[i].name);
881 void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
884 for (i = 0; i < count; i++)
885 uv__free(cpu_infos[i].model);
/* Also covers __clang__ and __INTEL_COMPILER. Disabled on Windows because
 * threads have already been forcibly terminated by the operating system
 * by the time destructors run, ergo, it's not safe to try to clean them up.
 */
#if defined(__GNUC__) && !defined(_WIN32)
__attribute__((destructor))
#endif
void uv_library_shutdown(void) {
  static int was_shutdown;

  /* Idempotent: later calls (or a second destructor run) are no-ops. */
  if (uv__load_relaxed(&was_shutdown))
    return;

  uv__process_title_cleanup();
  uv__signal_cleanup();
#ifdef __MVS__
  /* TODO(itodorov) - zos: revisit when Woz compiler is available. */
#else
  uv__threadpool_cleanup();
#endif
  uv__store_relaxed(&was_shutdown, 1);
}
916 void uv__metrics_update_idle_time(uv_loop_t* loop) {
917 uv__loop_metrics_t* loop_metrics;
921 if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
924 loop_metrics = uv__get_loop_metrics(loop);
926 /* The thread running uv__metrics_update_idle_time() is always the same
927 * thread that sets provider_entry_time. So it's unnecessary to lock before
928 * retrieving this value.
930 if (loop_metrics->provider_entry_time == 0)
933 exit_time = uv_hrtime();
935 uv_mutex_lock(&loop_metrics->lock);
936 entry_time = loop_metrics->provider_entry_time;
937 loop_metrics->provider_entry_time = 0;
938 loop_metrics->provider_idle_time += exit_time - entry_time;
939 uv_mutex_unlock(&loop_metrics->lock);
943 void uv__metrics_set_provider_entry_time(uv_loop_t* loop) {
944 uv__loop_metrics_t* loop_metrics;
947 if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
951 loop_metrics = uv__get_loop_metrics(loop);
952 uv_mutex_lock(&loop_metrics->lock);
953 loop_metrics->provider_entry_time = now;
954 uv_mutex_unlock(&loop_metrics->lock);
958 uint64_t uv_metrics_idle_time(uv_loop_t* loop) {
959 uv__loop_metrics_t* loop_metrics;
963 loop_metrics = uv__get_loop_metrics(loop);
964 uv_mutex_lock(&loop_metrics->lock);
965 idle_time = loop_metrics->provider_idle_time;
966 entry_time = loop_metrics->provider_entry_time;
967 uv_mutex_unlock(&loop_metrics->lock);
970 idle_time += uv_hrtime() - entry_time;