1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "uv.h"
#include "uv-common.h"

#include <assert.h>
#include <errno.h>
#include <stdarg.h>
#include <stddef.h> /* NULL */
#include <stdio.h>
#include <stdlib.h> /* malloc */
#include <string.h> /* memset */

#ifdef _WIN32
# include <malloc.h> /* malloc */
#else
# include <net/if.h> /* if_nametoindex */
# include <sys/un.h> /* AF_UNIX, sockaddr_un */
#endif
42 uv_malloc_func local_malloc;
43 uv_realloc_func local_realloc;
44 uv_calloc_func local_calloc;
45 uv_free_func local_free;
48 static uv__allocator_t uv__allocator = {
/* Duplicate the NUL-terminated string s using the libuv allocator.
 * Returns NULL on allocation failure. Caller frees with uv__free(). */
char* uv__strdup(const char* s) {
  size_t len = strlen(s) + 1;
  char* m = uv__malloc(len);
  if (m == NULL)
    return NULL;
  return memcpy(m, s, len);
}

/* Duplicate at most n bytes of s, always NUL-terminating the copy.
 * Returns NULL on allocation failure. Caller frees with uv__free(). */
char* uv__strndup(const char* s, size_t n) {
  char* m;
  size_t len = strlen(s);

  if (n < len)
    len = n;

  m = uv__malloc(len + 1);
  if (m == NULL)
    return NULL;

  m[len] = '\0';
  return memcpy(m, s, len);
}
75 void* uv__malloc(size_t size) {
77 return uv__allocator.local_malloc(size);
81 void uv__free(void* ptr) {
84 /* Libuv expects that free() does not clobber errno. The system allocator
85 * honors that assumption but custom allocators may not be so careful.
88 uv__allocator.local_free(ptr);
92 void* uv__calloc(size_t count, size_t size) {
93 return uv__allocator.local_calloc(count, size);
96 void* uv__realloc(void* ptr, size_t size) {
98 return uv__allocator.local_realloc(ptr, size);
103 void* uv__reallocf(void* ptr, size_t size) {
106 newptr = uv__realloc(ptr, size);
114 int uv_replace_allocator(uv_malloc_func malloc_func,
115 uv_realloc_func realloc_func,
116 uv_calloc_func calloc_func,
117 uv_free_func free_func) {
118 if (malloc_func == NULL || realloc_func == NULL ||
119 calloc_func == NULL || free_func == NULL) {
123 uv__allocator.local_malloc = malloc_func;
124 uv__allocator.local_realloc = realloc_func;
125 uv__allocator.local_calloc = calloc_func;
126 uv__allocator.local_free = free_func;
131 #define XX(uc, lc) case UV_##uc: return sizeof(uv_##lc##_t);
133 size_t uv_handle_size(uv_handle_type type) {
135 UV_HANDLE_TYPE_MAP(XX)
141 size_t uv_req_size(uv_req_type type) {
152 size_t uv_loop_size(void) {
153 return sizeof(uv_loop_t);
157 uv_buf_t uv_buf_init(char* base, unsigned int len) {
165 static const char* uv__unknown_err_code(int err) {
169 snprintf(buf, sizeof(buf), "Unknown system error %d", err);
170 copy = uv__strdup(buf);
172 return copy != NULL ? copy : "Unknown system error";
175 #define UV_ERR_NAME_GEN_R(name, _) \
177 uv__strscpy(buf, #name, buflen); break;
178 char* uv_err_name_r(int err, char* buf, size_t buflen) {
180 UV_ERRNO_MAP(UV_ERR_NAME_GEN_R)
181 default: snprintf(buf, buflen, "Unknown system error %d", err);
185 #undef UV_ERR_NAME_GEN_R
188 #define UV_ERR_NAME_GEN(name, _) case UV_ ## name: return #name;
189 const char* uv_err_name(int err) {
191 UV_ERRNO_MAP(UV_ERR_NAME_GEN)
193 return uv__unknown_err_code(err);
195 #undef UV_ERR_NAME_GEN
198 #define UV_STRERROR_GEN_R(name, msg) \
200 snprintf(buf, buflen, "%s", msg); break;
201 char* uv_strerror_r(int err, char* buf, size_t buflen) {
203 UV_ERRNO_MAP(UV_STRERROR_GEN_R)
204 default: snprintf(buf, buflen, "Unknown system error %d", err);
208 #undef UV_STRERROR_GEN_R
211 #define UV_STRERROR_GEN(name, msg) case UV_ ## name: return msg;
212 const char* uv_strerror(int err) {
214 UV_ERRNO_MAP(UV_STRERROR_GEN)
216 return uv__unknown_err_code(err);
218 #undef UV_STRERROR_GEN
220 #if !defined(CMAKE_BOOTSTRAP) || defined(_WIN32)
222 int uv_ip4_addr(const char* ip, int port, struct sockaddr_in* addr) {
223 memset(addr, 0, sizeof(*addr));
224 addr->sin_family = AF_INET;
225 addr->sin_port = htons(port);
227 addr->sin_len = sizeof(*addr);
229 return uv_inet_pton(AF_INET, ip, &(addr->sin_addr.s_addr));
233 int uv_ip6_addr(const char* ip, int port, struct sockaddr_in6* addr) {
234 char address_part[40];
235 size_t address_part_size;
236 const char* zone_index;
238 memset(addr, 0, sizeof(*addr));
239 addr->sin6_family = AF_INET6;
240 addr->sin6_port = htons(port);
242 addr->sin6_len = sizeof(*addr);
245 zone_index = strchr(ip, '%');
246 if (zone_index != NULL) {
247 address_part_size = zone_index - ip;
248 if (address_part_size >= sizeof(address_part))
249 address_part_size = sizeof(address_part) - 1;
251 memcpy(address_part, ip, address_part_size);
252 address_part[address_part_size] = '\0';
255 zone_index++; /* skip '%' */
256 /* NOTE: unknown interface (id=0) is silently ignored */
258 addr->sin6_scope_id = atoi(zone_index);
260 addr->sin6_scope_id = if_nametoindex(zone_index);
264 return uv_inet_pton(AF_INET6, ip, &addr->sin6_addr);
268 int uv_ip4_name(const struct sockaddr_in* src, char* dst, size_t size) {
269 return uv_inet_ntop(AF_INET, &src->sin_addr, dst, size);
273 int uv_ip6_name(const struct sockaddr_in6* src, char* dst, size_t size) {
274 return uv_inet_ntop(AF_INET6, &src->sin6_addr, dst, size);
278 int uv_ip_name(const struct sockaddr *src, char *dst, size_t size) {
279 switch (src->sa_family) {
281 return uv_inet_ntop(AF_INET, &((struct sockaddr_in *)src)->sin_addr,
284 return uv_inet_ntop(AF_INET6, &((struct sockaddr_in6 *)src)->sin6_addr,
287 return UV_EAFNOSUPPORT;
292 int uv_tcp_bind(uv_tcp_t* handle,
293 const struct sockaddr* addr,
294 unsigned int flags) {
295 unsigned int addrlen;
297 if (handle->type != UV_TCP)
299 if (uv__is_closing(handle)) {
302 if (addr->sa_family == AF_INET)
303 addrlen = sizeof(struct sockaddr_in);
304 else if (addr->sa_family == AF_INET6)
305 addrlen = sizeof(struct sockaddr_in6);
309 return uv__tcp_bind(handle, addr, addrlen, flags);
313 int uv_udp_init_ex(uv_loop_t* loop, uv_udp_t* handle, unsigned flags) {
314 unsigned extra_flags;
318 /* Use the lower 8 bits for the domain. */
319 domain = flags & 0xFF;
320 if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC)
323 /* Use the higher bits for extra flags. */
324 extra_flags = flags & ~0xFF;
325 if (extra_flags & ~UV_UDP_RECVMMSG)
328 rc = uv__udp_init_ex(loop, handle, flags, domain);
331 if (extra_flags & UV_UDP_RECVMMSG)
332 handle->flags |= UV_HANDLE_UDP_RECVMMSG;
338 int uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) {
339 return uv_udp_init_ex(loop, handle, AF_UNSPEC);
343 int uv_udp_bind(uv_udp_t* handle,
344 const struct sockaddr* addr,
345 unsigned int flags) {
346 unsigned int addrlen;
348 if (handle->type != UV_UDP)
351 if (addr->sa_family == AF_INET)
352 addrlen = sizeof(struct sockaddr_in);
353 else if (addr->sa_family == AF_INET6)
354 addrlen = sizeof(struct sockaddr_in6);
358 return uv__udp_bind(handle, addr, addrlen, flags);
362 int uv_tcp_connect(uv_connect_t* req,
364 const struct sockaddr* addr,
366 unsigned int addrlen;
368 if (handle->type != UV_TCP)
371 if (addr->sa_family == AF_INET)
372 addrlen = sizeof(struct sockaddr_in);
373 else if (addr->sa_family == AF_INET6)
374 addrlen = sizeof(struct sockaddr_in6);
378 return uv__tcp_connect(req, handle, addr, addrlen, cb);
/* Connect (or, with addr == NULL, disconnect) a UDP handle.
 * Returns 0, UV_EINVAL, UV_ENOTCONN, or UV_EISCONN. */
int uv_udp_connect(uv_udp_t* handle, const struct sockaddr* addr) {
  unsigned int addrlen;

  if (handle->type != UV_UDP)
    return UV_EINVAL;

  /* Disconnect the handle */
  if (addr == NULL) {
    if (!(handle->flags & UV_HANDLE_UDP_CONNECTED))
      return UV_ENOTCONN;

    return uv__udp_disconnect(handle);
  }

  if (addr->sa_family == AF_INET)
    addrlen = sizeof(struct sockaddr_in);
  else if (addr->sa_family == AF_INET6)
    addrlen = sizeof(struct sockaddr_in6);
  else
    return UV_EINVAL;

  if (handle->flags & UV_HANDLE_UDP_CONNECTED)
    return UV_EISCONN;

  return uv__udp_connect(handle, addr, addrlen);
}
410 int uv__udp_is_connected(uv_udp_t* handle) {
411 struct sockaddr_storage addr;
413 if (handle->type != UV_UDP)
416 addrlen = sizeof(addr);
417 if (uv_udp_getpeername(handle, (struct sockaddr*) &addr, &addrlen) != 0)
424 int uv__udp_check_before_send(uv_udp_t* handle, const struct sockaddr* addr) {
425 unsigned int addrlen;
427 if (handle->type != UV_UDP)
430 if (addr != NULL && (handle->flags & UV_HANDLE_UDP_CONNECTED))
433 if (addr == NULL && !(handle->flags & UV_HANDLE_UDP_CONNECTED))
434 return UV_EDESTADDRREQ;
437 if (addr->sa_family == AF_INET)
438 addrlen = sizeof(struct sockaddr_in);
439 else if (addr->sa_family == AF_INET6)
440 addrlen = sizeof(struct sockaddr_in6);
441 #if defined(AF_UNIX) && !defined(_WIN32)
442 else if (addr->sa_family == AF_UNIX)
443 addrlen = sizeof(struct sockaddr_un);
455 int uv_udp_send(uv_udp_send_t* req,
457 const uv_buf_t bufs[],
459 const struct sockaddr* addr,
460 uv_udp_send_cb send_cb) {
463 addrlen = uv__udp_check_before_send(handle, addr);
467 return uv__udp_send(req, handle, bufs, nbufs, addr, addrlen, send_cb);
471 int uv_udp_try_send(uv_udp_t* handle,
472 const uv_buf_t bufs[],
474 const struct sockaddr* addr) {
477 addrlen = uv__udp_check_before_send(handle, addr);
481 return uv__udp_try_send(handle, bufs, nbufs, addr, addrlen);
485 int uv_udp_recv_start(uv_udp_t* handle,
486 uv_alloc_cb alloc_cb,
487 uv_udp_recv_cb recv_cb) {
488 if (handle->type != UV_UDP || alloc_cb == NULL || recv_cb == NULL)
491 return uv__udp_recv_start(handle, alloc_cb, recv_cb);
495 int uv_udp_recv_stop(uv_udp_t* handle) {
496 if (handle->type != UV_UDP)
499 return uv__udp_recv_stop(handle);
504 void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) {
509 QUEUE_MOVE(&loop->handle_queue, &queue);
510 while (!QUEUE_EMPTY(&queue)) {
511 q = QUEUE_HEAD(&queue);
512 h = QUEUE_DATA(q, uv_handle_t, handle_queue);
515 QUEUE_INSERT_TAIL(&loop->handle_queue, q);
517 if (h->flags & UV_HANDLE_INTERNAL) continue;
523 static void uv__print_handles(uv_loop_t* loop, int only_active, FILE* stream) {
529 loop = uv_default_loop();
531 QUEUE_FOREACH(q, &loop->handle_queue) {
532 h = QUEUE_DATA(q, uv_handle_t, handle_queue);
534 if (only_active && !uv__is_active(h))
538 #define X(uc, lc) case UV_##uc: type = #lc; break;
539 UV_HANDLE_TYPE_MAP(X)
541 default: type = "<unknown>";
545 "[%c%c%c] %-8s %p\n",
546 "R-"[!(h->flags & UV_HANDLE_REF)],
547 "A-"[!(h->flags & UV_HANDLE_ACTIVE)],
548 "I-"[!(h->flags & UV_HANDLE_INTERNAL)],
555 void uv_print_all_handles(uv_loop_t* loop, FILE* stream) {
556 uv__print_handles(loop, 0, stream);
560 void uv_print_active_handles(uv_loop_t* loop, FILE* stream) {
561 uv__print_handles(loop, 1, stream);
565 void uv_ref(uv_handle_t* handle) {
566 uv__handle_ref(handle);
570 void uv_unref(uv_handle_t* handle) {
571 uv__handle_unref(handle);
575 int uv_has_ref(const uv_handle_t* handle) {
576 return uv__has_ref(handle);
580 void uv_stop(uv_loop_t* loop) {
585 uint64_t uv_now(const uv_loop_t* loop) {
591 size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs) {
596 for (i = 0; i < nbufs; i++)
597 bytes += (size_t) bufs[i].len;
602 int uv_recv_buffer_size(uv_handle_t* handle, int* value) {
603 return uv__socket_sockopt(handle, SO_RCVBUF, value);
606 int uv_send_buffer_size(uv_handle_t* handle, int *value) {
607 return uv__socket_sockopt(handle, SO_SNDBUF, value);
610 int uv_fs_event_getpath(uv_fs_event_t* handle, char* buffer, size_t* size) {
613 if (!uv__is_active(handle)) {
618 required_len = strlen(handle->path);
619 if (required_len >= *size) {
620 *size = required_len + 1;
624 memcpy(buffer, handle->path, required_len);
625 *size = required_len;
626 buffer[required_len] = '\0';
631 /* The windows implementation does not have the same structure layout as
632 * the unix implementation (nbufs is not directly inside req but is
633 * contained in a nested union/struct) so this function locates it.
635 static unsigned int* uv__get_nbufs(uv_fs_t* req) {
637 return &req->fs.info.nbufs;
643 /* uv_fs_scandir() uses the system allocator to allocate memory on non-Windows
644 * systems. So, the memory should be released using free(). On Windows,
645 * uv__malloc() is used, so use uv__free() to free memory.
648 # define uv__fs_scandir_free uv__free
650 # define uv__fs_scandir_free free
653 void uv__fs_scandir_cleanup(uv_fs_t* req) {
654 uv__dirent_t** dents;
656 unsigned int* nbufs = uv__get_nbufs(req);
659 if (*nbufs > 0 && *nbufs != (unsigned int) req->result)
661 for (; *nbufs < (unsigned int) req->result; (*nbufs)++)
662 uv__fs_scandir_free(dents[*nbufs]);
664 uv__fs_scandir_free(req->ptr);
669 int uv_fs_scandir_next(uv_fs_t* req, uv_dirent_t* ent) {
670 uv__dirent_t** dents;
674 /* Check to see if req passed */
678 /* Ptr will be null if req was canceled or no files found */
682 nbufs = uv__get_nbufs(req);
687 /* Free previous entity */
689 uv__fs_scandir_free(dents[*nbufs - 1]);
691 /* End was already reached */
692 if (*nbufs == (unsigned int) req->result) {
693 uv__fs_scandir_free(dents);
698 dent = dents[(*nbufs)++];
700 ent->name = dent->d_name;
701 ent->type = uv__fs_get_dirent_type(dent);
706 uv_dirent_type_t uv__fs_get_dirent_type(uv__dirent_t* dent) {
707 uv_dirent_type_t type;
709 #ifdef HAVE_DIRENT_TYPES
710 switch (dent->d_type) {
712 type = UV_DIRENT_DIR;
715 type = UV_DIRENT_FILE;
718 type = UV_DIRENT_LINK;
721 type = UV_DIRENT_FIFO;
724 type = UV_DIRENT_SOCKET;
727 type = UV_DIRENT_CHAR;
730 type = UV_DIRENT_BLOCK;
733 type = UV_DIRENT_UNKNOWN;
736 type = UV_DIRENT_UNKNOWN;
742 void uv__fs_readdir_cleanup(uv_fs_t* req) {
744 uv_dirent_t* dirents;
747 if (req->ptr == NULL)
751 dirents = dir->dirents;
757 for (i = 0; i < req->result; ++i) {
758 uv__free((char*) dirents[i].name);
759 dirents[i].name = NULL;
764 int uv_loop_configure(uv_loop_t* loop, uv_loop_option option, ...) {
768 va_start(ap, option);
769 /* Any platform-agnostic options should be handled here. */
770 err = uv__loop_configure(loop, option, ap);
777 static uv_loop_t default_loop_struct;
778 static uv_loop_t* default_loop_ptr;
781 uv_loop_t* uv_default_loop(void) {
782 if (default_loop_ptr != NULL)
783 return default_loop_ptr;
785 if (uv_loop_init(&default_loop_struct))
788 default_loop_ptr = &default_loop_struct;
789 return default_loop_ptr;
793 uv_loop_t* uv_loop_new(void) {
796 loop = uv__malloc(sizeof(*loop));
800 if (uv_loop_init(loop)) {
809 int uv_loop_close(uv_loop_t* loop) {
816 if (uv__has_active_reqs(loop))
819 QUEUE_FOREACH(q, &loop->handle_queue) {
820 h = QUEUE_DATA(q, uv_handle_t, handle_queue);
821 if (!(h->flags & UV_HANDLE_INTERNAL))
825 uv__loop_close(loop);
828 saved_data = loop->data;
829 memset(loop, -1, sizeof(*loop));
830 loop->data = saved_data;
832 if (loop == default_loop_ptr)
833 default_loop_ptr = NULL;
839 void uv_loop_delete(uv_loop_t* loop) {
840 uv_loop_t* default_loop;
843 default_loop = default_loop_ptr;
845 err = uv_loop_close(loop);
846 (void) err; /* Squelch compiler warnings. */
848 if (loop != default_loop)
853 int uv_read_start(uv_stream_t* stream,
854 uv_alloc_cb alloc_cb,
855 uv_read_cb read_cb) {
856 if (stream == NULL || alloc_cb == NULL || read_cb == NULL)
859 if (stream->flags & UV_HANDLE_CLOSING)
862 if (stream->flags & UV_HANDLE_READING)
865 if (!(stream->flags & UV_HANDLE_READABLE))
868 return uv__read_start(stream, alloc_cb, read_cb);
872 void uv_os_free_environ(uv_env_item_t* envitems, int count) {
875 for (i = 0; i < count; i++) {
876 uv__free(envitems[i].name);
883 void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
886 for (i = 0; i < count; i++)
887 uv__free(cpu_infos[i].model);
/* Also covers __clang__ and __INTEL_COMPILER. Disabled on Windows because
 * threads have already been forcibly terminated by the operating system
 * by the time destructors run, ergo, it's not safe to try to clean them up.
 */
#if defined(__GNUC__) && !defined(_WIN32)
__attribute__((destructor))
#endif
void uv_library_shutdown(void) {
  static int was_shutdown;

  /* Idempotent: subsequent calls (or the destructor after an explicit
   * call) are no-ops. */
  if (uv__load_relaxed(&was_shutdown))
    return;

  uv__process_title_cleanup();
  uv__signal_cleanup();
#ifdef __MVS__
  /* TODO(itodorov) - zos: revisit when Woz compiler is available. */
#else
  uv__threadpool_cleanup();
#endif
  uv__store_relaxed(&was_shutdown, 1);
}
918 void uv__metrics_update_idle_time(uv_loop_t* loop) {
919 uv__loop_metrics_t* loop_metrics;
923 if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
926 loop_metrics = uv__get_loop_metrics(loop);
928 /* The thread running uv__metrics_update_idle_time() is always the same
929 * thread that sets provider_entry_time. So it's unnecessary to lock before
930 * retrieving this value.
932 if (loop_metrics->provider_entry_time == 0)
935 exit_time = uv_hrtime();
937 uv_mutex_lock(&loop_metrics->lock);
938 entry_time = loop_metrics->provider_entry_time;
939 loop_metrics->provider_entry_time = 0;
940 loop_metrics->provider_idle_time += exit_time - entry_time;
941 uv_mutex_unlock(&loop_metrics->lock);
945 void uv__metrics_set_provider_entry_time(uv_loop_t* loop) {
946 uv__loop_metrics_t* loop_metrics;
949 if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
953 loop_metrics = uv__get_loop_metrics(loop);
954 uv_mutex_lock(&loop_metrics->lock);
955 loop_metrics->provider_entry_time = now;
956 uv_mutex_unlock(&loop_metrics->lock);
960 uint64_t uv_metrics_idle_time(uv_loop_t* loop) {
961 uv__loop_metrics_t* loop_metrics;
965 loop_metrics = uv__get_loop_metrics(loop);
966 uv_mutex_lock(&loop_metrics->lock);
967 idle_time = loop_metrics->provider_idle_time;
968 entry_time = loop_metrics->provider_entry_time;
969 uv_mutex_unlock(&loop_metrics->lock);
972 idle_time += uv_hrtime() - entry_time;