2 * libwebsockets - small server side websockets and web server implementation
4 * Copyright (C) 2010-2015 Andy Green <andy@warmcat.com>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation:
9 * version 2.1 of the License.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
22 #include "private-libwebsockets.h"
/*
 * _lws_change_pollfd() - core helper: apply an AND/OR mask to wsi's pollfd
 * events.
 *
 * New events = (old_events & ~_and) | _or.  The change is reported to the
 * external-poll hook (protocols[0] LWS_CALLBACK_CHANGE_MODE_POLL_FD),
 * mirrored into the libev/libuv/libevent adapters, pushed to the platform
 * fds table, and -- when made from a thread other than the service thread --
 * the service wait is cancelled so the new events take effect immediately.
 *
 * NOTE(review): this extract is missing interstitial lines (opening brace,
 * returns, some #if guards) lost in extraction; comments below describe
 * only the logic that is visible here.
 */
25 _lws_change_pollfd(struct lws *wsi, int _and, int _or, struct lws_pollargs *pa)
27 	struct lws_context_per_thread *pt;
28 	struct lws_context *context;
29 	int ret = 0, pa_events = 1;
30 	struct lws_pollfd *pfd;
/* nothing to do for a wsi that is not (or no longer) in the fds table */
33 	if (!wsi || wsi->position_in_fds_table < 0)
/*
 * Special case: another thread asks for POLLOUT-only while the service
 * thread is currently servicing POLLOUT for this wsi.  Mutating the pollfd
 * now would be undone when the service thread clears POLLOUT on exit, so
 * instead set a flag asking it to leave POLLOUT enabled.
 */
36 	if (wsi->handling_pollout && !_and && _or == LWS_POLLOUT) {
38 		 * Happening alongside service thread handling POLLOUT.
39 		 * The danger is when he is finished, he will disable POLLOUT,
40 		 * countermanding what we changed here.
42 		 * Instead of changing the fds, inform the service thread
43 		 * what happened, and ask it to leave POLLOUT active on exit
45 		wsi->leave_pollout_active = 1;
47 		 * by definition service thread is not in poll wait, so no need
51 		lwsl_debug("%s: using leave_pollout_active\n", __func__);
56 	context = wsi->context;
57 	pt = &context->pt[(int)wsi->tsi];
58 	assert(wsi->position_in_fds_table >= 0 &&
59 	       wsi->position_in_fds_table < pt->fds_count);
/* compute the new event mask and record old/new in *pa for the callbacks */
61 	pfd = &pt->fds[wsi->position_in_fds_table];
62 	pa->fd = wsi->desc.sockfd;
63 	pa->prev_events = pfd->events;
64 	pa->events = pfd->events = (pfd->events & ~_and) | _or;
66 	//lwsl_notice("%s: wsi %p, posin %d. from %d -> %d\n", __func__, wsi, wsi->position_in_fds_table, pa->prev_events, pa->events);
/* h2 substreams share the network wsi's fd; presumably they skip the raw
 * fd manipulation below -- the branch body is lost in extraction, confirm
 * against upstream */
69 	if (wsi->http2_substream)
/* external poll support: let the user code track the mode change */
72 	if (wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_CHANGE_MODE_POLL_FD,
73 					      wsi->user_space, (void *)pa, 0)) {
/* mirror the change into whichever event library adapter is active
 * (each call is a no-op when that adapter is not compiled in / used) */
78 	if (_and & LWS_POLLIN) {
79 		lws_libev_io(wsi, LWS_EV_STOP | LWS_EV_READ);
80 		lws_libuv_io(wsi, LWS_EV_STOP | LWS_EV_READ);
81 		lws_libevent_io(wsi, LWS_EV_STOP | LWS_EV_READ);
83 	if (_or & LWS_POLLIN) {
84 		lws_libev_io(wsi, LWS_EV_START | LWS_EV_READ);
85 		lws_libuv_io(wsi, LWS_EV_START | LWS_EV_READ);
86 		lws_libevent_io(wsi, LWS_EV_START | LWS_EV_READ);
88 	if (_and & LWS_POLLOUT) {
89 		lws_libev_io(wsi, LWS_EV_STOP | LWS_EV_WRITE);
90 		lws_libuv_io(wsi, LWS_EV_STOP | LWS_EV_WRITE);
91 		lws_libevent_io(wsi, LWS_EV_STOP | LWS_EV_WRITE);
93 	if (_or & LWS_POLLOUT) {
94 		lws_libev_io(wsi, LWS_EV_START | LWS_EV_WRITE);
95 		lws_libuv_io(wsi, LWS_EV_START | LWS_EV_WRITE);
96 		lws_libevent_io(wsi, LWS_EV_START | LWS_EV_WRITE);
100 	 * if we changed something in this pollfd...
101 	 * ... and we're running in a different thread context
102 	 * than the service thread...
103 	 * ... and the service thread is waiting ...
104 	 * then cancel it to force a restart with our changed events
107 	pa_events = pa->prev_events != pa->events;
/* push the new mask to the platform poll backend */
112 	if (lws_plat_change_pollfd(context, wsi, pfd)) {
113 		lwsl_info("%s failed\n", __func__);
/* compare our thread id against the service thread's: if they differ,
 * wake the per-thread service loop so it re-polls with the new events */
118 	sampled_tid = context->service_tid;
120 	tid = wsi->vhost->protocols[0].callback(wsi,
121 				     LWS_CALLBACK_GET_THREAD_ID, NULL, NULL, 0);
126 	if (tid != sampled_tid)
127 		lws_cancel_service_pt(wsi);
#ifndef LWS_NO_SERVER
/*
 * lws_accept_modulation() - gate new connection accepts for one service
 * thread.
 *
 * allow != 0: re-enable POLLIN on the thread's listen socket so accept()
 * can fire again; allow == 0: mask POLLIN off to stop accepting (used when
 * the fds table is full).
 *
 * NOTE(review): the body references context->vhost_list but only pt is in
 * scope here -- a "struct lws_context *context = ..." line (and the
 * allow/deny branching) appears lost in extraction; confirm against
 * upstream before relying on these comments.
 */
136 lws_accept_modulation(struct lws_context_per_thread *pt, int allow)
138 	// multithread listen seems broken
140 	struct lws_vhost *vh = context->vhost_list;
141 	struct lws_pollargs pa1;
/* allow: OR POLLIN into the listen socket's events */
145 		_lws_change_pollfd(pt->wsi_listening,
146 				   0, LWS_POLLIN, &pa1);
/* deny: AND POLLIN out of the listen socket's events */
148 		_lws_change_pollfd(pt->wsi_listening,
149 				   LWS_POLLIN, 0, &pa1);
/*
 * insert_wsi_socket_into_fds() - register wsi's socket in its service
 * thread's pollfd table.
 *
 * Appends the fd at pt->fds[pt->fds_count] with POLLIN (or 0 in one build
 * variant), records the slot index in wsi->position_in_fds_table, notifies
 * the external-poll hook (LOCK_POLL / ADD_POLL_FD / UNLOCK_POLL), and stops
 * accepting new connections on this thread if the table just became full.
 *
 * NOTE(review): returns and some braces are lost in extraction; error
 * paths presumably return nonzero -- confirm against upstream.
 */
157 insert_wsi_socket_into_fds(struct lws_context *context, struct lws *wsi)
159 	struct lws_pollargs pa = { wsi->desc.sockfd, LWS_POLLIN, 0 };
160 	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
164 	lwsl_debug("%s: %p: tsi=%d, sock=%d, pos-in-fds=%d\n",
165 		  __func__, wsi, wsi->tsi, wsi->desc.sockfd, pt->fds_count);
/* refuse when this service thread's share of the fd budget is exhausted */
167 	if ((unsigned int)pt->fds_count >= context->fd_limit_per_thread) {
168 		lwsl_err("Too many fds (%d vs %d)\n", context->max_fds,
169 				context->fd_limit_per_thread	);
/* on platforms using fd-indexed lookup tables, the fd value itself must
 * fit below max_fds */
173 #if !defined(_WIN32) && !defined(LWS_WITH_ESP8266)
174 	if (wsi->desc.sockfd >= context->max_fds) {
175 		lwsl_err("Socket fd %d is too high (%d)\n",
176 			 wsi->desc.sockfd, context->max_fds);
183 	assert(lws_socket_is_valid(wsi->desc.sockfd));
/* external poll support: take the poll lock around the table mutation */
185 	if (wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
186 					   wsi->user_space, (void *) &pa, 1))
191 	insert_wsi(context, wsi);
192 #if defined(LWS_WITH_ESP8266)
193 	if (wsi->position_in_fds_table == -1)
/* wsi takes the next free slot at the end of the table */
195 	wsi->position_in_fds_table = pt->fds_count;
197 	// lwsl_notice("%s: %p: setting posinfds %d\n", __func__, wsi, wsi->position_in_fds_table);
199 	pt->fds[wsi->position_in_fds_table].fd = wsi->desc.sockfd;
/* two build variants: start with POLLIN, or with no events -- the #if/#else
 * pair selecting between these lines is lost in extraction */
201 	pt->fds[wsi->position_in_fds_table].events = LWS_POLLIN;
203 	pt->fds[wsi->position_in_fds_table].events = 0; // LWS_POLLIN;
205 	pa.events = pt->fds[pt->fds_count].events;
/* platform hook bumps pt->fds_count and does any OS-specific bookkeeping */
207 	lws_plat_insert_socket_into_fds(context, wsi);
209 	/* external POLL support via protocol 0 */
210 	if (wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_ADD_POLL_FD,
211 					   wsi->user_space, (void *) &pa, 0))
213 #ifndef LWS_NO_SERVER
214 	/* if no more room, defeat accepts on this thread */
215 	if ((unsigned int)pt->fds_count == context->fd_limit_per_thread - 1)
216 		lws_accept_modulation(pt, 0);
/* release the external poll lock */
220 	if (wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
221 					   wsi->user_space, (void *)&pa, 1))
/*
 * remove_wsi_socket_from_fds() - unregister wsi's socket from its service
 * thread's pollfd table.
 *
 * Uses swap-with-last deletion: the final pollfd in pt->fds is moved into
 * the vacated slot (O(1)), and that moved wsi's position_in_fds_table is
 * patched to point at its new slot.  Also detaches wsi from the per-vhost
 * protocol list, tells the event-lib adapters and the external-poll hook,
 * and re-enables accepts on this thread if room was freed.
 *
 * NOTE(review): declarations of m / v / end_wsi, some braces and returns
 * are lost in extraction; comments describe the visible logic only.
 */
228 remove_wsi_socket_from_fds(struct lws *wsi)
230 	struct lws_context *context = wsi->context;
231 	struct lws_pollargs pa = { wsi->desc.sockfd, 0, 0 };
232 #if !defined(LWS_WITH_ESP8266)
233 	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
/* sanity: fd-indexed lookup tables cannot hold fds above max_fds */
239 #if !defined(_WIN32) && !defined(LWS_WITH_ESP8266)
240 	if (wsi->desc.sockfd > context->max_fds) {
241 		lwsl_err("fd %d too high (%d)\n", wsi->desc.sockfd, context->max_fds);
/* external poll support: take the poll lock around the table mutation */
246 	if (wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
247 					   wsi->user_space, (void *)&pa, 1))
250 	lws_same_vh_protocol_remove(wsi);
252 	/* the guy who is to be deleted's slot index in pt->fds */
253 	m = wsi->position_in_fds_table;
/* stop all event-lib watchers for this wsi before the slot goes away */
255 #if !defined(LWS_WITH_ESP8266)
256 	lws_libev_io(wsi, LWS_EV_STOP | LWS_EV_READ | LWS_EV_WRITE | LWS_EV_PREPARE_DELETION);
257 	lws_libuv_io(wsi, LWS_EV_STOP | LWS_EV_READ | LWS_EV_WRITE | LWS_EV_PREPARE_DELETION);
261 	lwsl_debug("%s: wsi=%p, sock=%d, fds pos=%d, end guy pos=%d, endfd=%d\n",
262 		  __func__, wsi, wsi->desc.sockfd, wsi->position_in_fds_table,
263 		  pt->fds_count, pt->fds[pt->fds_count].fd);
265 	/* have the last guy take up the now vacant slot */
266 	pt->fds[m] = pt->fds[pt->fds_count - 1];
268 	/* this decrements pt->fds_count */
269 	lws_plat_delete_socket_from_fds(context, wsi, m);
270 #if !defined(LWS_WITH_ESP8266)
271 	v = (int) pt->fds[m].fd;
272 	/* end guy's "position in fds table" is now the deletion guy's old one */
273 	end_wsi = wsi_from_fd(context, v);
/* the lookup can legitimately fail if the moved fd has no wsi; the branch
 * testing end_wsi is lost in extraction */
275 		lwsl_err("no wsi found for sock fd %d at pos %d, pt->fds_count=%d\n", (int)pt->fds[m].fd, m, pt->fds_count);
278 		end_wsi->position_in_fds_table = m;
280 	/* deletion guy's lws_lookup entry needs nuking */
281 	delete_from_fd(context, wsi->desc.sockfd);
282 	/* removed wsi has no position any more */
283 	wsi->position_in_fds_table = -1;
285 	/* remove also from external POLL support via protocol 0 */
286 	if (lws_socket_is_valid(wsi->desc.sockfd))
287 		if (wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_DEL_POLL_FD,
288 						   wsi->user_space, (void *) &pa, 0))
290 #ifndef LWS_NO_SERVER
291 	if (!context->being_destroyed)
292 		/* if this made some room, accept connects on this thread */
293 		if ((unsigned int)pt->fds_count < context->fd_limit_per_thread - 1)
294 			lws_accept_modulation(pt, 1);
/* release the external poll lock */
298 	if (wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
299 					   wsi->user_space, (void *) &pa, 1))
/*
 * lws_change_pollfd() - public wrapper around _lws_change_pollfd().
 *
 * Validates the wsi, then brackets the core event-mask change with the
 * external-poll LOCK_POLL / UNLOCK_POLL callbacks so user poll
 * implementations can serialize access to their fd bookkeeping.
 *
 * NOTE(review): the ret declaration, returns and closing braces are lost
 * in extraction.
 */
306 lws_change_pollfd(struct lws *wsi, int _and, int _or)
308 	struct lws_context_per_thread *pt;
309 	struct lws_context *context;
310 	struct lws_pollargs pa;
/* nothing to do for a wsi with no protocol or not in the fds table */
313 	if (!wsi || !wsi->protocol || wsi->position_in_fds_table < 0)
316 	context = lws_get_context(wsi);
320 	if (wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
321 					   wsi->user_space, (void *) &pa, 0))
324 	pt = &context->pt[(int)wsi->tsi];
/* delegate the actual mask change to the core helper */
327 	ret = _lws_change_pollfd(wsi, _and, _or, &pa);
329 	if (wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
330 					   wsi->user_space, (void *) &pa, 0))
/*
 * lws_callback_on_writable() - ask for a LWS_CALLBACK_*_WRITEABLE callback
 * when wsi's socket next becomes writable.
 *
 * For plain connections this enables POLLOUT on the wsi's pollfd.  For
 * http2 streams it instead marks the stream (and its parents up to the
 * network wsi) as requesting POLLOUT, defers if the peer's tx window is
 * exhausted, and only touches the pollfd of the underlying network wsi.
 *
 * NOTE(review): braces, returns and the #if guards around the http2 and
 * stats sections are lost in extraction; comments describe visible logic.
 */
337 lws_callback_on_writable(struct lws *wsi)
339 	struct lws_context_per_thread *pt;
341 	struct lws *network_wsi, *wsi2;
/* no point requesting writability on a dying or dead connection */
345 	if (wsi->state == LWSS_SHUTDOWN)
348 	if (wsi->socket_is_permanently_unusable)
351 	pt = &wsi->context->pt[(int)wsi->tsi];
352 	lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_WRITEABLE_CB_REQ, 1);
353 #if defined(LWS_WITH_STATS)
/* stats: time from first outstanding writable request to its delivery */
354 	if (!wsi->active_writable_req_us) {
355 		wsi->active_writable_req_us = time_in_microseconds();
356 		lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_WRITEABLE_CB_EFF_REQ, 1);
361 	lwsl_info("%s: %p\n", __func__, wsi);
/* http2 streams multiplex over one network wsi and need special handling */
363 	if (wsi->mode != LWSCM_HTTP2_SERVING)
366 	if (wsi->u.http2.requested_POLLOUT) {
367 		lwsl_info("already pending writable\n");
/* peer's flow-control window is empty: defer until it sends tx credit */
371 	if (wsi->u.http2.tx_credit <= 0) {
373 		 * other side is not able to cope with us sending
374 		 * anything so no matter if we have POLLOUT on our side.
376 		 * Delay waiting for our POLLOUT until peer indicates he has
377 		 * space for more using tx window command in http2 layer
379 		lwsl_info("%s: %p: waiting_tx_credit (%d)\n", __func__, wsi,
380 			  wsi->u.http2.tx_credit);
381 		wsi->u.http2.waiting_tx_credit = 1;
385 	network_wsi = lws_http2_get_network_wsi(wsi);
386 	already = network_wsi->u.http2.requested_POLLOUT;
388 	/* mark everybody above him as requesting pollout */
/* walk parent chain toward the network wsi; loop header lost in extraction */
392 		wsi2->u.http2.requested_POLLOUT = 1;
393 		lwsl_info("mark %p pending writable\n", wsi2);
394 		wsi2 = wsi2->u.http2.parent_wsi;
397 	/* for network action, act only on the network wsi */
/* extensions may veto / take over the writability request */
405 	if (lws_ext_cb_active(wsi, LWS_EXT_CB_REQUEST_ON_WRITEABLE, NULL, 0))
408 	if (wsi->position_in_fds_table < 0) {
409 		lwsl_err("%s: failed to find socket %d\n", __func__, wsi->desc.sockfd);
/* finally: enable POLLOUT on the pollfd */
413 	if (lws_change_pollfd(wsi, 0, LWS_POLLOUT))
/*
 * lws_same_vh_protocol_insert() - push wsi onto the head of its vhost's
 * per-protocol wsi list (slot n = protocol index on the vhost).
 *
 * The list uses a "pointer to the pointer that points at us" prev link
 * (same_vh_protocol_prev), so head insertion and mid-list removal are both
 * O(1) without a dedicated head sentinel node.
 */
420  * stitch protocol choice into the vh protocol linked list
421  * We always insert ourselves at the start of the list
426  * Illegal to attach more than once without detach inbetween
429 lws_same_vh_protocol_insert(struct lws *wsi, int n)
431 	//lwsl_err("%s: pre insert vhost start wsi %p, that wsi prev == %p\n",
433 	//		wsi->vhost->same_vh_protocol_list[n],
434 	//		wsi->same_vh_protocol_prev);
/* double-attach would corrupt the list: both links must be clear */
436 	if (wsi->same_vh_protocol_prev || wsi->same_vh_protocol_next) {
437 		lwsl_err("Attempted to attach wsi twice to same vh prot\n");
441 	wsi->same_vh_protocol_prev = /* guy who points to us */
442 		&wsi->vhost->same_vh_protocol_list[n];
443 	wsi->same_vh_protocol_next = /* old first guy is our next */
444 		wsi->vhost->same_vh_protocol_list[n];
445 	/* we become the new first guy */
446 	wsi->vhost->same_vh_protocol_list[n] = wsi;
448 	if (wsi->same_vh_protocol_next)
449 		/* old first guy points back to us now */
450 		wsi->same_vh_protocol_next->same_vh_protocol_prev =
451 				&wsi->same_vh_protocol_next;
/*
 * lws_same_vh_protocol_remove() - unlink wsi from its vhost's per-protocol
 * wsi list, if attached.
 *
 * Safe to call on an already-detached wsi (prev link NULL -> no-op apart
 * from clearing both links).  Because prev is a pointer-to-pointer, no
 * special case is needed for removing the list head.
 */
455 lws_same_vh_protocol_remove(struct lws *wsi)
458 	 * detach ourselves from vh protocol list if we're on one
460 	 * A -> C , or, B -> C, or A -> B
462 	 * OK to call on already-detached wsi
464 	lwsl_info("%s: removing same prot wsi %p\n", __func__, wsi);
466 	if (wsi->same_vh_protocol_prev) {
/* invariant: whoever prev points through must currently point at us */
467 		assert (*(wsi->same_vh_protocol_prev) == wsi);
468 		lwsl_info("have prev %p, setting him to our next %p\n",
469 			  wsi->same_vh_protocol_prev,
470 			  wsi->same_vh_protocol_next);
472 		/* guy who pointed to us should point to our next */
473 		*(wsi->same_vh_protocol_prev) = wsi->same_vh_protocol_next;
476 		/* our next should point back to our prev */
477 		if (wsi->same_vh_protocol_next) {
478 			wsi->same_vh_protocol_next->same_vh_protocol_prev =
479 					wsi->same_vh_protocol_prev;
/* always leave both links cleared so a future insert is legal */
482 	wsi->same_vh_protocol_prev = NULL;
483 	wsi->same_vh_protocol_next = NULL;
/*
 * lws_callback_on_writable_all_protocol_vhost() - request a writable
 * callback for every connection on `vhost` using `protocol`.
 *
 * Validates that protocol actually belongs to this vhost's protocols
 * array (by pointer range), then walks the per-protocol wsi list calling
 * lws_callback_on_writable() on each member.
 *
 * NOTE(review): the wsi declaration, the loop header over the list and the
 * return are lost in extraction.
 */
488 lws_callback_on_writable_all_protocol_vhost(const struct lws_vhost *vhost,
489 				      const struct lws_protocols *protocol)
/* protocol must be one of vhost->protocols[0..count_protocols) */
493 	if (protocol < vhost->protocols ||
494 	    protocol >= (vhost->protocols + vhost->count_protocols)) {
496 		lwsl_err("%s: protocol %p is not from vhost %p (%p - %p)\n",
497 			 __func__, protocol, vhost->protocols, vhost,
498 			 (vhost->protocols + vhost->count_protocols));
/* list head for this protocol: index = offset into the protocols array */
503 	wsi = vhost->same_vh_protocol_list[protocol - vhost->protocols];
504 	//lwsl_notice("%s: protocol %p, start wsi %p\n", __func__, protocol, wsi);
506 		//lwsl_notice("%s: protocol %p, this wsi %p (wsi->protocol=%p)\n",
507 		//	  __func__, protocol, wsi, wsi->protocol);
/* list-consistency checks: each member uses this protocol, and the
 * prev/next back-pointers agree */
508 		assert(wsi->protocol == protocol);
509 		assert(*wsi->same_vh_protocol_prev == wsi);
510 		if (wsi->same_vh_protocol_next) {
511 		//	lwsl_err("my next says %p\n", wsi->same_vh_protocol_next);
512 		//	lwsl_err("my next's prev says %p\n",
513 		//		 wsi->same_vh_protocol_next->same_vh_protocol_prev);
514 			assert(wsi->same_vh_protocol_next->same_vh_protocol_prev == &wsi->same_vh_protocol_next);
516 		//lwsl_notice("  apv: %p\n", wsi);
517 		lws_callback_on_writable(wsi);
518 		wsi = wsi->same_vh_protocol_next;
525 lws_callback_on_writable_all_protocol(const struct lws_context *context,
526 const struct lws_protocols *protocol)
528 struct lws_vhost *vhost = context->vhost_list;
532 for (n = 0; n < vhost->count_protocols; n++)
533 if (protocol->callback ==
534 vhost->protocols[n].callback &&
535 !strcmp(protocol->name, vhost->protocols[n].name))
537 if (n != vhost->count_protocols)
538 lws_callback_on_writable_all_protocol_vhost(
539 vhost, &vhost->protocols[n]);
541 vhost = vhost->vhost_next;