2 * libwebsockets - small server side websockets and web server implementation
4 * Copyright (C) 2010-2016 Andy Green <andy@warmcat.com>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation:
9 * version 2.1 of the License.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA  02110-1301  USA
22 #include "private-libwebsockets.h"
25 lws_feature_status_libuv(struct lws_context_creation_info *info)
27 if (info->options & LWS_SERVER_OPTION_LIBUV)
28 lwsl_notice("libuv support compiled in and enabled\n");
30 lwsl_notice("libuv support compiled in but disabled\n");
34 lws_io_cb(uv_poll_t *watcher, int status, int revents)
36 struct lws_io_watcher *lws_io = container_of(watcher,
37 struct lws_io_watcher, uv_watcher);
38 struct lws_context *context = lws_io->context;
39 struct lws_pollfd eventfd;
41 eventfd.fd = watcher->io_watcher.fd;
46 /* at this point status will be an UV error, like UV_EBADF,
47 we treat all errors as LWS_POLLHUP */
49 /* you might want to return; instead of servicing the fd in some cases */
50 if (status == UV_EAGAIN)
53 eventfd.events |= LWS_POLLHUP;
54 eventfd.revents |= LWS_POLLHUP;
56 if (revents & UV_READABLE) {
57 eventfd.events |= LWS_POLLIN;
58 eventfd.revents |= LWS_POLLIN;
60 if (revents & UV_WRITABLE) {
61 eventfd.events |= LWS_POLLOUT;
62 eventfd.revents |= LWS_POLLOUT;
65 lws_service_fd(context, &eventfd);
69 lws_uv_sigint_cb(uv_loop_t *loop, uv_signal_t *watcher, int signum)
71 lwsl_info("internal signal handler caught signal %d\n", signum);
72 lws_libuv_stop(watcher->data);
76 lws_uv_sigint_cfg(struct lws_context *context, int use_uv_sigint,
77 lws_uv_signal_cb_t *cb)
79 context->use_ev_sigint = use_uv_sigint;
81 context->lws_uv_sigint_cb = cb;
83 context->lws_uv_sigint_cb = &lws_uv_sigint_cb;
89 lws_uv_timeout_cb(uv_timer_t *timer)
91 struct lws_context_per_thread *pt = container_of(timer,
92 struct lws_context_per_thread, uv_timeout_watcher);
94 lwsl_debug("%s\n", __func__);
95 /* do timeout check only */
96 lws_service_fd_tsi(pt->context, NULL, pt->tid);
99 static const int sigs[] = { SIGINT, SIGTERM, SIGSEGV, SIGFPE };
101 struct lws_uv_sigint_ctx {
102 struct lws_context *context;
103 lws_uv_signal_cb_t *cb;
106 static void lws_uv_sigint_cb_wrapper(uv_signal_t *signal, int signum)
108 struct lws_uv_sigint_ctx *p = signal->data;
109 uv_signal_t signal_fwd = *signal;
110 signal_fwd.data = p->context;
111 p->cb(signal->loop, &signal_fwd, signum);
115 lws_uv_initloop(struct lws_context *context, uv_loop_t *loop, uv_signal_cb cb,
118 struct lws_context_per_thread *pt = &context->pt[tsi];
119 struct lws *wsi = wsi_from_fd(context, pt->lserv_fd);
123 loop = lws_malloc(sizeof(*loop));
125 pt->ev_loop_foreign = 0;
127 pt->ev_loop_foreign = 1;
129 pt->io_loop_uv = loop;
131 if (pt->context->use_ev_sigint) {
132 assert(ARRAY_SIZE(sigs) <= ARRAY_SIZE(pt->signals));
133 for (n = 0; n < ARRAY_SIZE(sigs); n++) {
134 struct lws_uv_sigint_ctx *wrap_ctx = lws_malloc(sizeof(*wrap_ctx));
135 wrap_ctx->context = pt->context;
136 wrap_ctx->cb = context->lws_uv_sigint_cb;
137 uv_signal_init(loop, &pt->signals[n]);
138 pt->signals[n].data = wrap_ctx;
139 uv_signal_start(&pt->signals[n], lws_uv_sigint_cb_wrapper, sigs[n]);
144 * Initialize the accept wsi read watcher with the listening socket
145 * and register a callback for read operations
147 * We have to do it here because the uv loop(s) are not
148 * initialized until after context creation.
151 wsi->w_read.context = context;
152 uv_poll_init(pt->io_loop_uv, &wsi->w_read.uv_watcher,
154 uv_poll_start(&wsi->w_read.uv_watcher, UV_READABLE,
158 uv_timer_init(pt->io_loop_uv, &pt->uv_timeout_watcher);
159 uv_timer_start(&pt->uv_timeout_watcher, lws_uv_timeout_cb, 1000, 1000);
164 void lws_uv_close_cb(uv_handle_t *handle)
169 void lws_uv_walk_cb(uv_handle_t *handle, void *arg)
171 uv_close(handle, lws_uv_close_cb);
175 lws_libuv_destroyloop(struct lws_context *context, int tsi)
177 struct lws_context_per_thread *pt = &context->pt[tsi];
180 if (!(context->options & LWS_SERVER_OPTION_LIBUV))
186 if (context->use_ev_sigint)
187 uv_signal_stop(&pt->w_sigint.uv_watcher);
188 for (m = 0; m < ARRAY_SIZE(sigs); m++) {
189 uv_signal_stop(&pt->signals[m]);
190 lws_free(pt->signals[m].data);
192 if (!pt->ev_loop_foreign) {
193 uv_stop(pt->io_loop_uv);
194 uv_walk(pt->io_loop_uv, lws_uv_walk_cb, NULL);
195 while (uv_run(pt->io_loop_uv, UV_RUN_NOWAIT));
196 m = uv_loop_close(pt->io_loop_uv);
198 lwsl_debug("%s: uv_loop_close: UV_EBUSY\n", __func__);
199 lws_free(pt->io_loop_uv);
204 lws_libuv_accept(struct lws *wsi, int accept_fd)
206 struct lws_context *context = lws_get_context(wsi);
207 struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
209 if (!LWS_LIBUV_ENABLED(context))
212 lwsl_debug("%s: new wsi %p\n", __func__, wsi);
214 wsi->w_read.context = context;
216 uv_poll_init(pt->io_loop_uv, &wsi->w_read.uv_watcher, accept_fd);
220 lws_libuv_io(struct lws *wsi, int flags)
222 struct lws_context *context = lws_get_context(wsi);
223 struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
224 int current_events = wsi->w_read.uv_watcher.io_watcher.pevents &
225 (UV_READABLE | UV_WRITABLE);
226 struct lws_io_watcher *w = &wsi->w_read;
228 if (!LWS_LIBUV_ENABLED(context))
231 lwsl_debug("%s: wsi: %p, flags:%d\n", __func__, wsi, flags);
233 if (!pt->io_loop_uv) {
234 lwsl_info("%s: no io loop yet\n", __func__);
238 assert((flags & (LWS_EV_START | LWS_EV_STOP)) &&
239 (flags & (LWS_EV_READ | LWS_EV_WRITE)));
241 if (flags & LWS_EV_START) {
242 if (flags & LWS_EV_WRITE)
243 current_events |= UV_WRITABLE;
245 if (flags & LWS_EV_READ)
246 current_events |= UV_READABLE;
248 uv_poll_start(&w->uv_watcher, current_events, lws_io_cb);
250 if (flags & LWS_EV_WRITE)
251 current_events &= ~UV_WRITABLE;
253 if (flags & LWS_EV_READ)
254 current_events &= ~UV_READABLE;
256 if (!(current_events & (UV_READABLE | UV_WRITABLE)))
257 uv_poll_stop(&w->uv_watcher);
259 uv_poll_start(&w->uv_watcher, current_events,
265 lws_libuv_init_fd_table(struct lws_context *context)
269 if (!LWS_LIBUV_ENABLED(context))
272 for (n = 0; n < context->count_threads; n++) {
273 context->pt[n].w_sigint.context = context;
280 lws_libuv_run(const struct lws_context *context, int tsi)
282 if (context->pt[tsi].io_loop_uv && LWS_LIBUV_ENABLED(context))
283 uv_run(context->pt[tsi].io_loop_uv, 0);
287 lws_libuv_kill(const struct lws_context *context)
291 for (n = 0; n < context->count_threads; n++)
292 if (context->pt[n].io_loop_uv && LWS_LIBUV_ENABLED(context))
293 uv_stop(context->pt[n].io_loop_uv);
294 // TODO uv_stop check foreign loop? or not?
298 * This does not actually stop the event loop. The reason is we have to pass
299 * libuv handle closures through its event loop. So this tries to close all
300 * wsi, and set a flag; when all the wsi closures are finalized then we
301 * actually stop the libuv event loops.
305 lws_libuv_stop(struct lws_context *context)
307 struct lws_context_per_thread *pt;
310 context->requested_kill = 1;
312 m = context->count_threads;
313 context->being_destroyed = 1;
316 pt = &context->pt[m];
318 for (n = 0; (unsigned int)n < context->pt[m].fds_count; n++) {
319 struct lws *wsi = wsi_from_fd(context, pt->fds[n].fd);
323 lws_close_free_wsi(wsi,
324 LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY
325 /* no protocol close */);
330 if (context->count_wsi_allocated == 0)
331 lws_libuv_kill(context);
334 LWS_VISIBLE uv_loop_t *
335 lws_uv_getloop(struct lws_context *context, int tsi)
337 if (context->pt[tsi].io_loop_uv && LWS_LIBUV_ENABLED(context))
338 return context->pt[tsi].io_loop_uv;
344 lws_libuv_closewsi(uv_handle_t* handle)
346 struct lws *n = NULL, *wsi = (struct lws *)(((void *)handle) -
347 (void *)(&n->w_read.uv_watcher));
348 struct lws_context *context = lws_get_context(wsi);
350 lws_close_free_wsi_final(wsi);
352 if (context->requested_kill && context->count_wsi_allocated == 0)
353 lws_libuv_kill(context);
357 lws_libuv_closehandle(struct lws *wsi)
359 struct lws_context *context = lws_get_context(wsi);
361 /* required to defer actual deletion until libuv has processed it */
363 uv_close((uv_handle_t*)&wsi->w_read.uv_watcher, lws_libuv_closewsi);
365 if (context->requested_kill && context->count_wsi_allocated == 0)
366 lws_libuv_kill(context);