/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010-2016 Andy Green <andy@warmcat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation:
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA  02110-1301  USA
 */
22 #include "private-libwebsockets.h"
25 lws_feature_status_libuv(struct lws_context_creation_info *info)
27 if (lws_check_opt(info->options, LWS_SERVER_OPTION_LIBUV))
28 lwsl_notice("libuv support compiled in and enabled\n");
30 lwsl_notice("libuv support compiled in but disabled\n");
34 lws_uv_idle(uv_idle_t *handle
35 #if UV_VERSION_MAJOR == 0
40 struct lws_context_per_thread *pt = lws_container_of(handle,
41 struct lws_context_per_thread, uv_idle);
43 lwsl_debug("%s\n", __func__);
46 * is there anybody with pending stuff that needs service forcing?
48 if (!lws_service_adjust_timeout(pt->context, 1, pt->tid)) {
49 /* -1 timeout means just do forced service */
50 lws_plat_service_tsi(pt->context, -1, pt->tid);
51 /* still somebody left who wants forced service? */
52 if (!lws_service_adjust_timeout(pt->context, 1, pt->tid))
53 /* yes... come back again later */
54 lwsl_debug("%s: done again\n", __func__);
58 /* there is nobody who needs service forcing, shut down idle */
61 lwsl_debug("%s: done stop\n", __func__);
65 lws_io_cb(uv_poll_t *watcher, int status, int revents)
67 struct lws_io_watcher *lws_io = lws_container_of(watcher,
68 struct lws_io_watcher, uv_watcher);
69 struct lws *wsi = lws_container_of(lws_io, struct lws, w_read);
70 struct lws_context *context = lws_io->context;
71 struct lws_pollfd eventfd;
73 #if defined(WIN32) || defined(_WIN32)
74 eventfd.fd = watcher->socket;
76 eventfd.fd = watcher->io_watcher.fd;
82 /* at this point status will be an UV error, like UV_EBADF,
83 we treat all errors as LWS_POLLHUP */
85 /* you might want to return; instead of servicing the fd in some cases */
86 if (status == UV_EAGAIN)
89 eventfd.events |= LWS_POLLHUP;
90 eventfd.revents |= LWS_POLLHUP;
92 if (revents & UV_READABLE) {
93 eventfd.events |= LWS_POLLIN;
94 eventfd.revents |= LWS_POLLIN;
96 if (revents & UV_WRITABLE) {
97 eventfd.events |= LWS_POLLOUT;
98 eventfd.revents |= LWS_POLLOUT;
101 lws_service_fd(context, &eventfd);
103 uv_idle_start(&context->pt[(int)wsi->tsi].uv_idle, lws_uv_idle);
107 lws_uv_sigint_cb(uv_signal_t *watcher, int signum)
109 lwsl_info("internal signal handler caught signal %d\n", signum);
110 lws_libuv_stop(watcher->data);
114 lws_uv_sigint_cfg(struct lws_context *context, int use_uv_sigint,
117 context->use_ev_sigint = use_uv_sigint;
119 context->lws_uv_sigint_cb = cb;
121 context->lws_uv_sigint_cb = &lws_uv_sigint_cb;
127 lws_uv_timeout_cb(uv_timer_t *timer
128 #if UV_VERSION_MAJOR == 0
133 struct lws_context_per_thread *pt = lws_container_of(timer,
134 struct lws_context_per_thread, uv_timeout_watcher);
136 lwsl_debug("%s\n", __func__);
138 lws_service_fd_tsi(pt->context, NULL, pt->tid);
/* signals lws traps internally when use_ev_sigint is enabled */
static const int sigs[] = { SIGINT, SIGTERM, SIGSEGV, SIGFPE };
144 lws_uv_initloop(struct lws_context *context, uv_loop_t *loop, int tsi)
146 struct lws_context_per_thread *pt = &context->pt[tsi];
147 struct lws_vhost *vh = context->vhost_list;
151 loop = lws_malloc(sizeof(*loop));
152 #if UV_VERSION_MAJOR > 0
155 lwsl_err("This libuv is too old to work...\n");
158 pt->ev_loop_foreign = 0;
160 pt->ev_loop_foreign = 1;
162 pt->io_loop_uv = loop;
163 uv_idle_init(loop, &pt->uv_idle);
165 if (pt->context->use_ev_sigint) {
166 assert(ARRAY_SIZE(sigs) <= ARRAY_SIZE(pt->signals));
167 for (n = 0; n < ARRAY_SIZE(sigs); n++) {
168 uv_signal_init(loop, &pt->signals[n]);
169 pt->signals[n].data = pt->context;
170 uv_signal_start(&pt->signals[n],
171 context->lws_uv_sigint_cb, sigs[n]);
176 * Initialize the accept wsi read watcher with all the listening sockets
177 * and register a callback for read operations
179 * We have to do it here because the uv loop(s) are not
180 * initialized until after context creation.
184 vh->lserv_wsi->w_read.context = context;
185 n = uv_poll_init_socket(pt->io_loop_uv,
186 &vh->lserv_wsi->w_read.uv_watcher,
187 vh->lserv_wsi->sock);
189 lwsl_err("uv_poll_init failed %d, sockfd=%p\n",
190 n, (void *)(long)vh->lserv_wsi->sock);
194 uv_poll_start(&vh->lserv_wsi->w_read.uv_watcher,
195 UV_READABLE, lws_io_cb);
200 uv_timer_init(pt->io_loop_uv, &pt->uv_timeout_watcher);
201 uv_timer_start(&pt->uv_timeout_watcher, lws_uv_timeout_cb, 1000, 1000);
206 void lws_uv_close_cb(uv_handle_t *handle)
211 void lws_uv_walk_cb(uv_handle_t *handle, void *arg)
213 uv_close(handle, lws_uv_close_cb);
217 lws_libuv_destroyloop(struct lws_context *context, int tsi)
219 struct lws_context_per_thread *pt = &context->pt[tsi];
222 if (!lws_check_opt(context->options, LWS_SERVER_OPTION_LIBUV))
228 if (context->use_ev_sigint)
229 uv_signal_stop(&pt->w_sigint.uv_watcher);
230 for (m = 0; m < ARRAY_SIZE(sigs); m++)
231 uv_signal_stop(&pt->signals[m]);
232 if (!pt->ev_loop_foreign) {
233 uv_stop(pt->io_loop_uv);
234 uv_walk(pt->io_loop_uv, lws_uv_walk_cb, NULL);
235 while (uv_run(pt->io_loop_uv, UV_RUN_NOWAIT));
236 #if UV_VERSION_MAJOR > 0
237 m = uv_loop_close(pt->io_loop_uv);
239 lwsl_debug("%s: uv_loop_close: UV_EBUSY\n", __func__);
241 lws_free(pt->io_loop_uv);
246 lws_libuv_accept(struct lws *wsi, int accept_fd)
248 struct lws_context *context = lws_get_context(wsi);
249 struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
251 if (!LWS_LIBUV_ENABLED(context))
254 lwsl_debug("%s: new wsi %p\n", __func__, wsi);
256 wsi->w_read.context = context;
258 uv_poll_init(pt->io_loop_uv, &wsi->w_read.uv_watcher, accept_fd);
262 lws_libuv_io(struct lws *wsi, int flags)
264 struct lws_context *context = lws_get_context(wsi);
265 struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
266 #if defined(WIN32) || defined(_WIN32)
267 int current_events = wsi->w_read.uv_watcher.events &
268 (UV_READABLE | UV_WRITABLE);
270 int current_events = wsi->w_read.uv_watcher.io_watcher.pevents &
271 (UV_READABLE | UV_WRITABLE);
273 struct lws_io_watcher *w = &wsi->w_read;
275 if (!LWS_LIBUV_ENABLED(context))
278 lwsl_debug("%s: wsi: %p, flags:0x%x\n", __func__, wsi, flags);
280 if (!pt->io_loop_uv) {
281 lwsl_info("%s: no io loop yet\n", __func__);
285 assert((flags & (LWS_EV_START | LWS_EV_STOP)) &&
286 (flags & (LWS_EV_READ | LWS_EV_WRITE)));
288 if (flags & LWS_EV_START) {
289 if (flags & LWS_EV_WRITE)
290 current_events |= UV_WRITABLE;
292 if (flags & LWS_EV_READ)
293 current_events |= UV_READABLE;
295 uv_poll_start(&w->uv_watcher, current_events, lws_io_cb);
297 if (flags & LWS_EV_WRITE)
298 current_events &= ~UV_WRITABLE;
300 if (flags & LWS_EV_READ)
301 current_events &= ~UV_READABLE;
303 if (!(current_events & (UV_READABLE | UV_WRITABLE)))
304 uv_poll_stop(&w->uv_watcher);
306 uv_poll_start(&w->uv_watcher, current_events,
312 lws_libuv_init_fd_table(struct lws_context *context)
316 if (!LWS_LIBUV_ENABLED(context))
319 for (n = 0; n < context->count_threads; n++)
320 context->pt[n].w_sigint.context = context;
326 lws_libuv_run(const struct lws_context *context, int tsi)
328 if (context->pt[tsi].io_loop_uv && LWS_LIBUV_ENABLED(context))
329 uv_run(context->pt[tsi].io_loop_uv, 0);
333 lws_libuv_kill(const struct lws_context *context)
337 for (n = 0; n < context->count_threads; n++)
338 if (context->pt[n].io_loop_uv && LWS_LIBUV_ENABLED(context))
339 uv_stop(context->pt[n].io_loop_uv);
340 // TODO uv_stop check foreign loop? or not?
/*
 * This does not actually stop the event loop.  The reason is we have to
 * pass libuv handle closures through its event loop.  So this tries to
 * close all wsi, and sets a flag; when all the wsi closures are finalized
 * then we actually stop the libuv event loops.
 */
351 lws_libuv_stop(struct lws_context *context)
353 struct lws_context_per_thread *pt;
356 context->requested_kill = 1;
358 m = context->count_threads;
359 context->being_destroyed = 1;
362 pt = &context->pt[m];
364 for (n = 0; (unsigned int)n < context->pt[m].fds_count; n++) {
365 struct lws *wsi = wsi_from_fd(context, pt->fds[n].fd);
369 lws_close_free_wsi(wsi,
370 LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY
371 /* no protocol close */);
376 lwsl_info("%s: feels everything closed\n", __func__);
377 if (context->count_wsi_allocated == 0)
378 lws_libuv_kill(context);
381 LWS_VISIBLE uv_loop_t *
382 lws_uv_getloop(struct lws_context *context, int tsi)
384 if (context->pt[tsi].io_loop_uv && LWS_LIBUV_ENABLED(context))
385 return context->pt[tsi].io_loop_uv;
391 lws_libuv_closewsi(uv_handle_t* handle)
393 struct lws *n = NULL, *wsi = (struct lws *)(((char *)handle) -
394 (char *)(&n->w_read.uv_watcher));
395 struct lws_context *context = lws_get_context(wsi);
397 lws_close_free_wsi_final(wsi);
399 if (context->requested_kill && context->count_wsi_allocated == 0)
400 lws_libuv_kill(context);
404 lws_libuv_closehandle(struct lws *wsi)
406 struct lws_context *context = lws_get_context(wsi);
408 /* required to defer actual deletion until libuv has processed it */
410 uv_close((uv_handle_t*)&wsi->w_read.uv_watcher, lws_libuv_closewsi);
412 if (context->requested_kill && context->count_wsi_allocated == 0)
413 lws_libuv_kill(context);
416 #if defined(LWS_WITH_PLUGINS) && (UV_VERSION_MAJOR > 0)
419 lws_plat_plugins_init(struct lws_context * context, const char *d)
421 struct lws_plugin_capability lcaps;
422 struct lws_plugin *plugin;
423 lws_plugin_init_func initfunc;
437 if (!uv_fs_scandir(&loop, &req, d, 0, NULL)) {
438 lwsl_err("Scandir on %s failed\n", d);
442 lwsl_notice(" Plugins:\n");
444 while (uv_fs_scandir_next(&req, &dent) != UV_EOF) {
445 if (strlen(dent.name) < 7)
448 lwsl_notice(" %s\n", dent.name);
450 snprintf(path, sizeof(path) - 1, "%s/%s", d, dent.name);
451 if (uv_dlopen(path, &lib)) {
453 lwsl_err("Error loading DSO: %s\n", lib.errmsg);
456 /* we could open it, can we get his init function? */
457 m = snprintf(path, sizeof(path) - 1, "init_%s",
458 dent.name + 3 /* snip lib... */);
459 path[m - 3] = '\0'; /* snip the .so */
460 if (uv_dlsym(&lib, path, &v)) {
462 lwsl_err("Failed to get init on %s: %s",
463 dent.name, lib.errmsg);
466 initfunc = (lws_plugin_init_func)v;
467 lcaps.api_magic = LWS_PLUGIN_API_MAGIC;
468 m = initfunc(context, &lcaps);
470 lwsl_err("Initializing %s failed %d\n", dent.name, m);
474 plugin = lws_malloc(sizeof(*plugin));
479 plugin->list = context->plugin_list;
480 context->plugin_list = plugin;
481 strncpy(plugin->name, dent.name, sizeof(plugin->name) - 1);
482 plugin->name[sizeof(plugin->name) - 1] = '\0';
484 plugin->caps = lcaps;
485 context->plugin_protocol_count += lcaps.count_protocols;
486 context->plugin_extension_count += lcaps.count_extensions;
495 uv_fs_req_cleanup(&req);
496 uv_loop_close(&loop);
503 lws_plat_plugins_destroy(struct lws_context * context)
505 struct lws_plugin *plugin = context->plugin_list, *p;
506 lws_plugin_destroy_func func;
514 lwsl_notice("%s\n", __func__);
518 m = snprintf(path, sizeof(path) - 1, "destroy_%s", plugin->name + 3);
521 if (uv_dlsym(&plugin->lib, path, &v)) {
522 uv_dlerror(&plugin->lib);
523 lwsl_err("Failed to get init on %s: %s",
524 plugin->name, plugin->lib.errmsg);
526 func = (lws_plugin_destroy_func)v;
529 lwsl_err("Destroying %s failed %d\n",
539 context->plugin_list = NULL;