/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010-2016 Andy Green <andy@warmcat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation:
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA  02110-1301  USA
 */
22 #include "private-libwebsockets.h"
25 lws_feature_status_libuv(struct lws_context_creation_info *info)
27 if (lws_check_opt(info->options, LWS_SERVER_OPTION_LIBUV))
28 lwsl_notice("libuv support compiled in and enabled\n");
30 lwsl_notice("libuv support compiled in but disabled\n");
34 lws_uv_idle(uv_idle_t *handle
35 #if UV_VERSION_MAJOR == 0
40 struct lws_context_per_thread *pt = lws_container_of(handle,
41 struct lws_context_per_thread, uv_idle);
43 lwsl_debug("%s\n", __func__);
46 * is there anybody with pending stuff that needs service forcing?
48 if (!lws_service_adjust_timeout(pt->context, 1, pt->tid)) {
49 /* -1 timeout means just do forced service */
50 _lws_plat_service_tsi(pt->context, -1, pt->tid);
51 /* still somebody left who wants forced service? */
52 if (!lws_service_adjust_timeout(pt->context, 1, pt->tid))
53 /* yes... come back again later */
54 lwsl_debug("%s: done again\n", __func__);
58 /* there is nobody who needs service forcing, shut down idle */
61 lwsl_debug("%s: done stop\n", __func__);
65 lws_io_cb(uv_poll_t *watcher, int status, int revents)
67 struct lws_io_watcher *lws_io = lws_container_of(watcher,
68 struct lws_io_watcher, uv_watcher);
69 struct lws *wsi = lws_container_of(lws_io, struct lws, w_read);
70 struct lws_context *context = wsi->context;
71 struct lws_pollfd eventfd;
73 #if defined(WIN32) || defined(_WIN32)
74 eventfd.fd = watcher->socket;
76 eventfd.fd = watcher->io_watcher.fd;
82 /* at this point status will be an UV error, like UV_EBADF,
83 we treat all errors as LWS_POLLHUP */
85 /* you might want to return; instead of servicing the fd in some cases */
86 if (status == UV_EAGAIN)
89 eventfd.events |= LWS_POLLHUP;
90 eventfd.revents |= LWS_POLLHUP;
92 if (revents & UV_READABLE) {
93 eventfd.events |= LWS_POLLIN;
94 eventfd.revents |= LWS_POLLIN;
96 if (revents & UV_WRITABLE) {
97 eventfd.events |= LWS_POLLOUT;
98 eventfd.revents |= LWS_POLLOUT;
101 lws_service_fd(context, &eventfd);
103 uv_idle_start(&context->pt[(int)wsi->tsi].uv_idle, lws_uv_idle);
107 lws_uv_sigint_cb(uv_signal_t *watcher, int signum)
109 lwsl_err("internal signal handler caught signal %d\n", signum);
110 lws_libuv_stop(watcher->data);
114 lws_uv_sigint_cfg(struct lws_context *context, int use_uv_sigint,
117 context->use_ev_sigint = use_uv_sigint;
119 context->lws_uv_sigint_cb = cb;
121 context->lws_uv_sigint_cb = &lws_uv_sigint_cb;
127 lws_uv_timeout_cb(uv_timer_t *timer
128 #if UV_VERSION_MAJOR == 0
133 struct lws_context_per_thread *pt = lws_container_of(timer,
134 struct lws_context_per_thread, uv_timeout_watcher);
136 if (pt->context->requested_kill)
139 lwsl_debug("%s\n", __func__);
141 lws_service_fd_tsi(pt->context, NULL, pt->tid);
144 static const int sigs[] = { SIGINT, SIGTERM, SIGSEGV, SIGFPE, SIGHUP };
147 lws_uv_initvhost(struct lws_vhost* vh, struct lws* wsi)
149 struct lws_context_per_thread *pt;
152 if (!LWS_LIBUV_ENABLED(vh->context))
158 if (wsi->w_read.context)
161 pt = &vh->context->pt[(int)wsi->tsi];
165 wsi->w_read.context = vh->context;
166 n = uv_poll_init_socket(pt->io_loop_uv,
167 &wsi->w_read.uv_watcher, wsi->sock);
169 lwsl_err("uv_poll_init failed %d, sockfd=%p\n",
170 n, (void *)(long)wsi->sock);
174 lws_libuv_io(wsi, LWS_EV_START | LWS_EV_READ);
/*
 * This needs to be called after vhosts have been defined.
 *
 * If later, after server start, another vhost is added, this must be
 * called again to bind the vhost.
 */
187 lws_uv_initloop(struct lws_context *context, uv_loop_t *loop, int tsi)
189 struct lws_context_per_thread *pt = &context->pt[tsi];
190 struct lws_vhost *vh = context->vhost_list;
191 int status = 0, n, ns, first = 1;
193 if (!pt->io_loop_uv) {
195 loop = lws_malloc(sizeof(*loop));
200 #if UV_VERSION_MAJOR > 0
203 lwsl_err("This libuv is too old to work...\n");
206 pt->ev_loop_foreign = 0;
208 lwsl_notice(" Using foreign event loop...\n");
209 pt->ev_loop_foreign = 1;
212 pt->io_loop_uv = loop;
213 uv_idle_init(loop, &pt->uv_idle);
215 ns = ARRAY_SIZE(sigs);
216 if (lws_check_opt(context->options,
217 LWS_SERVER_OPTION_UV_NO_SIGSEGV_SIGFPE_SPIN))
220 if (pt->context->use_ev_sigint) {
221 assert(ns <= ARRAY_SIZE(pt->signals));
222 for (n = 0; n < ns; n++) {
223 uv_signal_init(loop, &pt->signals[n]);
224 pt->signals[n].data = pt->context;
225 uv_signal_start(&pt->signals[n],
226 context->lws_uv_sigint_cb, sigs[n]);
233 * Initialize the accept wsi read watcher with all the listening sockets
234 * and register a callback for read operations
236 * We have to do it here because the uv loop(s) are not
237 * initialized until after context creation.
240 if (lws_uv_initvhost(vh, vh->lserv_wsi) == -1)
246 uv_timer_init(pt->io_loop_uv, &pt->uv_timeout_watcher);
247 uv_timer_start(&pt->uv_timeout_watcher, lws_uv_timeout_cb,
254 static void lws_uv_close_cb(uv_handle_t *handle)
256 //lwsl_err("%s: handle %p\n", __func__, handle);
259 static void lws_uv_walk_cb(uv_handle_t *handle, void *arg)
261 uv_close(handle, lws_uv_close_cb);
265 lws_libuv_destroyloop(struct lws_context *context, int tsi)
267 struct lws_context_per_thread *pt = &context->pt[tsi];
268 // struct lws_context *ctx;
269 int m, budget = 100, ns;
271 if (!lws_check_opt(context->options, LWS_SERVER_OPTION_LIBUV))
277 lwsl_notice("%s: closing signals + timers context %p\n", __func__, context);
279 if (context->use_ev_sigint) {
280 uv_signal_stop(&pt->w_sigint.uv_watcher);
282 ns = ARRAY_SIZE(sigs);
283 if (lws_check_opt(context->options, LWS_SERVER_OPTION_UV_NO_SIGSEGV_SIGFPE_SPIN))
286 for (m = 0; m < ns; m++) {
287 uv_signal_stop(&pt->signals[m]);
288 uv_close((uv_handle_t *)&pt->signals[m], lws_uv_close_cb);
292 uv_timer_stop(&pt->uv_timeout_watcher);
293 uv_close((uv_handle_t *)&pt->uv_timeout_watcher, lws_uv_close_cb);
295 uv_idle_stop(&pt->uv_idle);
296 uv_close((uv_handle_t *)&pt->uv_idle, lws_uv_close_cb);
298 if (pt->ev_loop_foreign)
301 while (budget-- && uv_run(pt->io_loop_uv, UV_RUN_NOWAIT))
304 lwsl_notice("%s: closing all loop handles context %p\n", __func__, context);
306 uv_stop(pt->io_loop_uv);
308 uv_walk(pt->io_loop_uv, lws_uv_walk_cb, NULL);
310 while (uv_run(pt->io_loop_uv, UV_RUN_NOWAIT))
312 #if UV_VERSION_MAJOR > 0
313 m = uv_loop_close(pt->io_loop_uv);
315 lwsl_err("%s: uv_loop_close: UV_EBUSY\n", __func__);
317 lws_free(pt->io_loop_uv);
321 lws_libuv_accept(struct lws *wsi, lws_sockfd_type accept_fd)
323 struct lws_context *context = lws_get_context(wsi);
324 struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
326 if (!LWS_LIBUV_ENABLED(context))
329 lwsl_debug("%s: new wsi %p\n", __func__, wsi);
331 wsi->w_read.context = context;
333 uv_poll_init_socket(pt->io_loop_uv, &wsi->w_read.uv_watcher, accept_fd);
337 lws_libuv_io(struct lws *wsi, int flags)
339 struct lws_context *context = lws_get_context(wsi);
340 struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
341 #if defined(WIN32) || defined(_WIN32)
342 int current_events = wsi->w_read.uv_watcher.events &
343 (UV_READABLE | UV_WRITABLE);
345 int current_events = wsi->w_read.uv_watcher.io_watcher.pevents &
346 (UV_READABLE | UV_WRITABLE);
348 struct lws_io_watcher *w = &wsi->w_read;
350 if (!LWS_LIBUV_ENABLED(context))
353 // lwsl_notice("%s: wsi: %p, flags:0x%x\n", __func__, wsi, flags);
355 // w->context is set after the loop is initialized
357 if (!pt->io_loop_uv || !w->context) {
358 lwsl_info("%s: no io loop yet\n", __func__);
362 if (!((flags & (LWS_EV_START | LWS_EV_STOP)) &&
363 (flags & (LWS_EV_READ | LWS_EV_WRITE)))) {
364 lwsl_err("%s: assert: flags %d", __func__, flags);
368 if (flags & LWS_EV_START) {
369 if (flags & LWS_EV_WRITE)
370 current_events |= UV_WRITABLE;
372 if (flags & LWS_EV_READ)
373 current_events |= UV_READABLE;
375 uv_poll_start(&w->uv_watcher, current_events, lws_io_cb);
377 if (flags & LWS_EV_WRITE)
378 current_events &= ~UV_WRITABLE;
380 if (flags & LWS_EV_READ)
381 current_events &= ~UV_READABLE;
383 if (!(current_events & (UV_READABLE | UV_WRITABLE)))
384 uv_poll_stop(&w->uv_watcher);
386 uv_poll_start(&w->uv_watcher, current_events,
392 lws_libuv_init_fd_table(struct lws_context *context)
396 if (!LWS_LIBUV_ENABLED(context))
399 for (n = 0; n < context->count_threads; n++)
400 context->pt[n].w_sigint.context = context;
406 lws_libuv_run(const struct lws_context *context, int tsi)
408 if (context->pt[tsi].io_loop_uv && LWS_LIBUV_ENABLED(context))
409 uv_run(context->pt[tsi].io_loop_uv, 0);
413 lws_libuv_stop_without_kill(const struct lws_context *context, int tsi)
415 if (context->pt[tsi].io_loop_uv && LWS_LIBUV_ENABLED(context))
416 uv_stop(context->pt[tsi].io_loop_uv);
420 lws_libuv_kill(const struct lws_context *context)
424 lwsl_notice("%s\n", __func__);
426 for (n = 0; n < context->count_threads; n++)
427 if (context->pt[n].io_loop_uv &&
428 LWS_LIBUV_ENABLED(context) )//&&
429 //!context->pt[n].ev_loop_foreign)
430 uv_stop(context->pt[n].io_loop_uv);
/*
 * This does not actually stop the event loop.  The reason is we have to
 * pass libuv handle closures through its event loop.  So this tries to
 * close all wsi, and sets a flag; when all the wsi closures are finalized
 * then we actually stop the libuv event loops.
 */
441 lws_libuv_stop(struct lws_context *context)
443 struct lws_context_per_thread *pt;
446 if (context->requested_kill)
449 context->requested_kill = 1;
451 m = context->count_threads;
452 context->being_destroyed = 1;
455 pt = &context->pt[m];
457 for (n = 0; (unsigned int)n < context->pt[m].fds_count; n++) {
458 struct lws *wsi = wsi_from_fd(context, pt->fds[n].fd);
462 lws_close_free_wsi(wsi,
463 LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY
464 /* no protocol close */);
469 lwsl_info("%s: feels everything closed\n", __func__);
470 if (context->count_wsi_allocated == 0)
471 lws_libuv_kill(context);
474 LWS_VISIBLE uv_loop_t *
475 lws_uv_getloop(struct lws_context *context, int tsi)
477 if (context->pt[tsi].io_loop_uv && LWS_LIBUV_ENABLED(context))
478 return context->pt[tsi].io_loop_uv;
484 lws_libuv_closewsi(uv_handle_t* handle)
486 struct lws *n = NULL, *wsi = (struct lws *)(((char *)handle) -
487 (char *)(&n->w_read.uv_watcher));
488 struct lws_context *context = lws_get_context(wsi);
491 if (wsi->mode == LWSCM_SERVER_LISTENER &&
492 wsi->context->deprecated) {
494 context->deprecation_pending_listen_close_count--;
495 if (!context->deprecation_pending_listen_close_count)
499 lws_close_free_wsi_final(wsi);
501 if (lspd == 2 && context->deprecation_cb) {
502 lwsl_notice("calling deprecation callback\n");
503 context->deprecation_cb();
506 //lwsl_notice("%s: ctx %p: wsi left %d\n", __func__, context, context->count_wsi_allocated);
508 if (context->requested_kill && context->count_wsi_allocated == 0)
509 lws_libuv_kill(context);
513 lws_libuv_closehandle(struct lws *wsi)
515 struct lws_context *context = lws_get_context(wsi);
517 /* required to defer actual deletion until libuv has processed it */
519 uv_close((uv_handle_t*)&wsi->w_read.uv_watcher, lws_libuv_closewsi);
521 if (context->requested_kill && context->count_wsi_allocated == 0)
522 lws_libuv_kill(context);
525 #if defined(LWS_WITH_PLUGINS) && (UV_VERSION_MAJOR > 0)
528 lws_plat_plugins_init(struct lws_context *context, const char * const *d)
530 struct lws_plugin_capability lcaps;
531 struct lws_plugin *plugin;
532 lws_plugin_init_func initfunc;
546 lwsl_notice(" Plugins:\n");
550 lwsl_notice(" Scanning %s\n", *d);
551 m =uv_fs_scandir(&loop, &req, *d, 0, NULL);
553 lwsl_err("Scandir on %s failed\n", *d);
557 while (uv_fs_scandir_next(&req, &dent) != UV_EOF) {
558 if (strlen(dent.name) < 7)
561 lwsl_notice(" %s\n", dent.name);
563 lws_snprintf(path, sizeof(path) - 1, "%s/%s", *d, dent.name);
564 if (uv_dlopen(path, &lib)) {
566 lwsl_err("Error loading DSO: %s\n", lib.errmsg);
569 /* we could open it, can we get his init function? */
571 m = lws_snprintf(path, sizeof(path) - 1, "init_%s",
572 dent.name + 3 /* snip lib... */);
573 path[m - 3] = '\0'; /* snip the .so */
575 m = lws_snprintf(path, sizeof(path) - 1, "init_%s",
577 path[m - 4] = '\0'; /* snip the .dll */
579 if (uv_dlsym(&lib, path, &v)) {
581 lwsl_err("Failed to get init on %s: %s",
582 dent.name, lib.errmsg);
585 initfunc = (lws_plugin_init_func)v;
586 lcaps.api_magic = LWS_PLUGIN_API_MAGIC;
587 m = initfunc(context, &lcaps);
589 lwsl_err("Initializing %s failed %d\n", dent.name, m);
593 plugin = lws_malloc(sizeof(*plugin));
598 plugin->list = context->plugin_list;
599 context->plugin_list = plugin;
600 strncpy(plugin->name, dent.name, sizeof(plugin->name) - 1);
601 plugin->name[sizeof(plugin->name) - 1] = '\0';
603 plugin->caps = lcaps;
604 context->plugin_protocol_count += lcaps.count_protocols;
605 context->plugin_extension_count += lcaps.count_extensions;
613 uv_fs_req_cleanup(&req);
617 uv_run(&loop, UV_RUN_NOWAIT);
618 uv_loop_close(&loop);
625 lws_plat_plugins_destroy(struct lws_context *context)
627 struct lws_plugin *plugin = context->plugin_list, *p;
628 lws_plugin_destroy_func func;
636 // lwsl_notice("%s\n", __func__);
641 m = lws_snprintf(path, sizeof(path) - 1, "destroy_%s", plugin->name + 3);
644 m = lws_snprintf(path, sizeof(path) - 1, "destroy_%s", plugin->name);
648 if (uv_dlsym(&plugin->lib, path, &v)) {
649 uv_dlerror(&plugin->lib);
650 lwsl_err("Failed to get init on %s: %s",
651 plugin->name, plugin->lib.errmsg);
653 func = (lws_plugin_destroy_func)v;
656 lwsl_err("Destroying %s failed %d\n",
666 context->plugin_list = NULL;