/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010-2015 Andy Green <andy@warmcat.com>
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation:
 *  version 2.1 of the License.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 *  MA  02110-1301  USA
 */

#include "private-libwebsockets.h"
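
/*
 * Deliver a "writeable" notification to user code: map the connection mode
 * (raw fd, ws client / server, client http body, plain http) onto the
 * matching *_WRITEABLE callback reason and hand it to the protocol callback
 * via the rxflow-aware wrapper.
 */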
lws_calllback_as_writeable(struct lws *wsi)
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

	lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_WRITEABLE_CB, 1);
#if defined(LWS_WITH_STATS)
		uint64_t ul = time_in_microseconds() - wsi->active_writable_req_us;

		lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_MS_WRITABLE_DELAY, ul);
		lws_stats_atomic_max(wsi->context, pt, LWSSTATS_MS_WORST_WRITABLE_DELAY, ul);
		wsi->active_writable_req_us = 0;

		n = LWS_CALLBACK_RAW_WRITEABLE;
	case LWSCM_RAW_FILEDESC:
		n = LWS_CALLBACK_RAW_WRITEABLE_FILE;
		n = LWS_CALLBACK_CLIENT_WRITEABLE;
	case LWSCM_WSCL_ISSUE_HTTP_BODY:
		n = LWS_CALLBACK_CLIENT_HTTP_WRITEABLE;
	case LWSCM_WS_SERVING:
		n = LWS_CALLBACK_SERVER_WRITEABLE;
		n = LWS_CALLBACK_HTTP_WRITEABLE;

	return user_callback_handle_rxflow(wsi->protocol->callback,
					   wsi, (enum lws_callback_reasons) n,
					   wsi->user_space, NULL, 0);
lws_handle_POLLOUT_event(struct lws *wsi, struct lws_pollfd *pollfd)
	int write_type = LWS_WRITE_PONG;
	struct lws_tokens eff_buf;

	// lwsl_err("%s: %p\n", __func__, wsi);

	wsi->leave_pollout_active = 0;
	wsi->handling_pollout = 1;
	/*
	 * if another thread wants POLLOUT on us, from here on while
	 * handling_pollout is set, he will only set leave_pollout_active.
	 * If we are going to disable POLLOUT, we will check that first.
	 */
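	/*
	 * In other words: a cross-thread lws_callback_on_writable() arriving
	 * while we are in here does not touch the pollfd itself, it just
	 * flags leave_pollout_active.  Before returning (see the bail_ok /
	 * bail_die cleanup at the end of this function) we sample that flag
	 * and either keep POLLOUT armed or re-issue the writeable callback
	 * ourselves, so the request cannot be lost.
	 */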
	/*
	 * the user callback is actually the lowest priority to get these
	 * notifications, since the other pending things cannot be sent out
	 * of order
	 */

	/* Priority 1: pending truncated sends are incomplete ws fragments
	 *	       If anything else sent first the protocol would be
	 *	       corrupted.
	 */
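	/* (wsi->trunc_alloc holds the tail of an earlier lws_write() that the
	 * socket would not accept in one go; trunc_offset / trunc_len track
	 * how much of that stash is still unsent, and it must drain before
	 * anything else is allowed onto the wire)
	 */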
		if (lws_issue_raw(wsi, wsi->trunc_alloc + wsi->trunc_offset,
				  wsi->trunc_len) < 0) {
			lwsl_info("%s signalling to close\n", __func__);
		/* leave POLLOUT active either way */
		if (wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE) {
			wsi->socket_is_permanently_unusable = 1;
			goto bail_die; /* retry closing now */

	if (wsi->mode == LWSCM_WSCL_ISSUE_HTTP_BODY)
	/* Priority 2: protocol packets
	 */
		lwsl_info("servicing pps %d\n", wsi->pps);
		case LWS_PPS_HTTP2_MY_SETTINGS:
		case LWS_PPS_HTTP2_ACK_SETTINGS:
			lws_http2_do_pps_send(lws_get_context(wsi), wsi);
		wsi->pps = LWS_PPS_NONE;
		lws_rx_flow_control(wsi, 1);

		goto bail_ok; /* leave POLLOUT active */

			if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
				lwsl_info("failed at set pollfd\n");
		goto user_service_go_again;
	/* Priority 3: pending control packets (pong or close)
	 *
	 * 3a: close notification packet requested from close api
	 */

	if (wsi->state == LWSS_WAITING_TO_SEND_CLOSE_NOTIFICATION) {
		lwsl_debug("sending close packet\n");
		wsi->waiting_to_send_close_frame = 0;
		n = lws_write(wsi, &wsi->u.ws.ping_payload_buf[LWS_PRE],
			      wsi->u.ws.close_in_ping_buffer_len,
			wsi->state = LWSS_AWAITING_CLOSE_ACK;
			lws_set_timeout(wsi, PENDING_TIMEOUT_CLOSE_ACK, 1);
			lwsl_debug("sent close indication, awaiting ack\n");

	/* else, the send failed and we should just hang up */

	if ((wsi->state == LWSS_ESTABLISHED &&
	     wsi->u.ws.ping_pending_flag) ||
	    (wsi->state == LWSS_RETURNED_CLOSE_ALREADY &&
	     wsi->u.ws.payload_is_close)) {

		if (wsi->u.ws.payload_is_close)
			write_type = LWS_WRITE_CLOSE;

		n = lws_write(wsi, &wsi->u.ws.ping_payload_buf[LWS_PRE],
			      wsi->u.ws.ping_payload_len, write_type);

		/* well he is sent, mark him done */
		wsi->u.ws.ping_pending_flag = 0;
		if (wsi->u.ws.payload_is_close)
			/* oh... a close frame was it... then we are done */

		/* otherwise for PING, leave POLLOUT active either way */
	if (wsi->state == LWSS_ESTABLISHED &&
	    !wsi->socket_is_permanently_unusable &&
	    wsi->u.ws.send_check_ping) {

		lwsl_info("issuing ping on wsi %p\n", wsi);
		wsi->u.ws.send_check_ping = 0;
		n = lws_write(wsi, &wsi->u.ws.ping_payload_buf[LWS_PRE],

		/*
		 * we apparently were able to send the PING in a reasonable
		 * time; now reset the clock on our peer to be able to send
		 * the PONG in a reasonable time.
		 */

		lws_set_timeout(wsi, PENDING_TIMEOUT_WS_PONG_CHECK_GET_PONG,
				wsi->context->timeout_secs);
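		/*
		 * (send_check_ping is set by the once-per-second ping-pong
		 * scan in lws_service_fd_tsi() below, which also armed
		 * PENDING_TIMEOUT_WS_PONG_CHECK_SEND_PING; once the PING has
		 * gone out we switch to waiting for the peer's PONG instead)
		 */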
	/* Priority 4: if we are closing, not allowed to send more data frags
	 *	       which means user callback or tx ext flush banned now
	 */
	if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY)

	/* Priority 5: Tx path extension with more to send
	 *
	 *	       These are handled as new fragments each time around.
	 *	       So while we must block the new writeable callback to
	 *	       enforce payload ordering, control packets can still
	 *	       interleave OK, since these are always complete
	 *	       fragments.
	 */
	if (wsi->state == LWSS_ESTABLISHED && wsi->u.ws.tx_draining_ext) {
		lwsl_ext("SERVICING TX EXT DRAINING\n");
		if (lws_write(wsi, NULL, 0, LWS_WRITE_CONTINUATION) < 0)
		/* leave POLLOUT active */
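		/*
		 * (the zero-length LWS_WRITE_CONTINUATION above supplies no
		 * new payload; it just prods lws_write() to re-enter the tx
		 * extension drain path so the extension can emit its next
		 * buffered fragment)
		 */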
	/* Priority 6: user can get the callback
	 */
	m = lws_ext_cb_active(wsi, LWS_EXT_CB_IS_WRITEABLE, NULL, 0);

#ifndef LWS_NO_EXTENSIONS
	if (!wsi->extension_data_pending)

	/*
	 * check in on the active extensions, see if they had pending stuff
	 * to spill... they need to get the first look-in, otherwise the
	 * sequence will be disordered
	 *
	 * NULL, zero-length eff_buf means just spill pending
	 */
	if (wsi->mode == LWSCM_RAW || wsi->mode == LWSCM_RAW_FILEDESC)

		/* default to nobody has more to spill */

		eff_buf.token = NULL;
		eff_buf.token_len = 0;

		/* give every extension a chance to spill */

		m = lws_ext_cb_active(wsi,
				      LWS_EXT_CB_PACKET_TX_PRESEND,
			lwsl_err("ext reports fatal error\n");
			/*
			 * at least one extension told us he has more
			 * to spill, so we will go around again after
			 */

		/* assuming they gave us something to send, send it */

		if (eff_buf.token_len) {
			n = lws_issue_raw(wsi, (unsigned char *)eff_buf.token,
				lwsl_info("closing from POLLOUT spill\n");
			/*
			 * Keep amount spilled small to minimize chance of this
			 */
			if (n != eff_buf.token_len) {
				lwsl_err("Unable to spill ext %d vs %d\n",
					 eff_buf.token_len, n);

		/* no extension has more to spill */

		/*
		 * There's more to spill from an extension, but we just sent
		 * something... did that leave the pipe choked?
		 */

		if (!lws_send_pipe_choked(wsi))
			/* no we could add more */

		lwsl_info("choked in POLLOUT service\n");

		/*
		 * Yes, he's choked.  Leave the POLLOUT masked on so we will
		 * come back here when he is unchoked.  Don't call the user
		 * callback to enforce ordering of spilling, he'll get called
		 * when we come back here and there's nothing more to spill.
		 */

#ifndef LWS_NO_EXTENSIONS
	wsi->extension_data_pending = 0;
	if (wsi->parent_carries_io) {
		wsi->handling_pollout = 0;
		wsi->leave_pollout_active = 0;

		return lws_calllback_as_writeable(wsi);

		int eff = wsi->leave_pollout_active;

			if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
				lwsl_info("failed at set pollfd\n");

		wsi->handling_pollout = 0;

		/* cannot get leave_pollout_active set after the above */
		if (!eff && wsi->leave_pollout_active)
			/*
			 * got set in between sampling eff and clearing
			 * handling_pollout, force POLLOUT on
			 */
			lws_calllback_as_writeable(wsi);

		wsi->leave_pollout_active = 0;

	if (wsi->mode != LWSCM_WSCL_ISSUE_HTTP_BODY &&
	    !wsi->hdr_parsing_completed)

user_service_go_again:

	/*
	 * we are the 'network wsi' for potentially many muxed child wsi with
	 * no network connection of their own, who have to use us for all their
	 * network actions.  So we use a round-robin scheme to share out the
	 * POLLOUT notifications to our children.
	 *
	 * But because any child could exhaust the socket's ability to take
	 * writes, we can only let one child get notified each time.
	 *
	 * In addition children may be closed / deleted / added between POLLOUT
	 * notifications, so we can't hold pointers.
	 */
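	/*
	 * (hence the walk below: each pass starts again from the parent's
	 * child list, only children that asked for POLLOUT get the callback,
	 * and we stop as soon as lws_send_pipe_choked() says the shared
	 * network socket cannot take any more)
	 */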
	if (wsi->mode != LWSCM_HTTP2_SERVING) {
		lwsl_info("%s: non http2\n", __func__);

	wsi->u.http2.requested_POLLOUT = 0;
	if (!wsi->u.http2.initialized) {
		lwsl_info("pollout on uninitialized http2 conn\n");

	lwsl_info("%s: doing children\n", __func__);

		wsi2 = wsi2->u.http2.next_child_wsi;
		lwsl_info("%s: child %p\n", __func__, wsi2);
		if (!wsi2->u.http2.requested_POLLOUT)
		wsi2->u.http2.requested_POLLOUT = 0;
		if (lws_calllback_as_writeable(wsi2)) {
			lwsl_debug("Closing POLLOUT child\n");
			lws_close_free_wsi(wsi2, LWS_CLOSE_STATUS_NOSTATUS);
	} while (wsi2 != NULL && !lws_send_pipe_choked(wsi));

	lwsl_info("%s: completed\n", __func__);

	wsi->handling_pollout = 0;
	wsi->leave_pollout_active = 0;

	return lws_calllback_as_writeable(wsi);

	/*
	 * since these don't disable the POLLOUT, they are always doing the
	 * right thing for leave_pollout_active whether it was set or not.
	 */

	wsi->handling_pollout = 0;
	wsi->leave_pollout_active = 0;

	wsi->handling_pollout = 0;
	wsi->leave_pollout_active = 0;
lws_service_timeout_check(struct lws *wsi, unsigned int sec)
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

	/*
	 * if extensions want in on it (eg, we are a mux parent)
	 * give them a chance to service child timeouts
	 */
	if (lws_ext_cb_active(wsi, LWS_EXT_CB_1HZ, NULL, sec) < 0)

	if (!wsi->pending_timeout)

	/*
	 * if we went beyond the allowed time, kill the connection
	 */
	if ((time_t)sec > wsi->pending_timeout_limit) {

		if (wsi->desc.sockfd != LWS_SOCK_INVALID &&
		    wsi->position_in_fds_table >= 0)
			n = pt->fds[wsi->position_in_fds_table].events;

		lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_TIMEOUTS, 1);

		/* no need to log normal idle keepalive timeout */
		if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
			lwsl_notice("wsi %p: TIMEDOUT WAITING on %d (did hdr %d, ah %p, wl %d, pfd events %d) %llu vs %llu\n",
				    (void *)wsi, wsi->pending_timeout,
				    wsi->hdr_parsing_completed, wsi->u.hdr.ah,
				    pt->ah_wait_list_length, n,
				    (unsigned long long)sec,
				    (unsigned long long)wsi->pending_timeout_limit);

		/*
		 * Since he failed a timeout, he already had a chance to do
		 * something and was unable to... that includes situations like
		 * half closed connections.  So process this "failed timeout"
		 * close as a violent death and don't try to do protocol
		 * cleanup like flush partials.
		 */
		wsi->socket_is_permanently_unusable = 1;
		if (wsi->mode == LWSCM_WSCL_WAITING_SSL)
			wsi->vhost->protocols[0].callback(wsi,
				LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
				wsi->user_space, (void *)"Timed out waiting SSL", 21);

		lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS);
int lws_rxflow_cache(struct lws *wsi, unsigned char *buf, int n, int len)
	/* his RX is flowcontrolled, don't send remaining now */
	if (wsi->rxflow_buffer) {
		/* rxflow while we were spilling prev rxflow */
		lwsl_info("stalling in existing rxflow buf\n");

	/* a new rxflow, buffer it and warn caller */
	lwsl_info("new rxflow input buffer len %d\n", len - n);
	wsi->rxflow_buffer = lws_malloc(len - n);
	if (!wsi->rxflow_buffer)
	wsi->rxflow_len = len - n;

	memcpy(wsi->rxflow_buffer, buf + n, len - n);
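	/*
	 * (the cached tail is replayed later by the "rxflow buffer needs to
	 * be drained" path in lws_service_fd_tsi(), once user code re-allows
	 * rx flow on this wsi; until then we must not hand the protocol any
	 * more rx)
	 */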
/* this is used by the platform service code to stop us waiting for network
 * activity in poll() when we have something that already needs service
 */
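/*
 * Typical use from a user-managed poll() loop (sketch; "fds", "count" and
 * "ms" here are the user's own variables, not lws API):
 *
 *	ms = lws_service_adjust_timeout(context, 1000, 0);
 *	n = poll(fds, count, ms);	// ms is 0 if something already pends
 *	...then feed each returned pollfd to lws_service_fd()...
 */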
LWS_VISIBLE LWS_EXTERN int
lws_service_adjust_timeout(struct lws_context *context, int timeout_ms, int tsi)
	struct lws_context_per_thread *pt = &context->pt[tsi];

	/*
	 * Figure out if we really want to wait in poll(): we only need to
	 * wait if there is really nothing to do already, and we have to
	 * wait for something from the network.
	 */

	/* 1) if we know we are draining rx ext, do not wait in poll */
	if (pt->rx_draining_ext_list)

#ifdef LWS_OPENSSL_SUPPORT
	/* 2) if we know we have non-network pending data, do not wait in poll */
	if (lws_ssl_anybody_has_buffered_read_tsi(context, tsi)) {
		lwsl_info("ssl buffered read\n");

	/* 3) if any ah has pending rx, do not wait in poll */
	for (n = 0; n < context->max_http_header_pool; n++)
		if (pt->ah_pool[n].rxpos != pt->ah_pool[n].rxlen) {
			/* any ah with pending rx must be attached to someone */
			if (!pt->ah_pool[n].wsi) {
				lwsl_err("%s: assert: no wsi attached to ah\n",
					 __func__);
/*
 * guys that need POLLIN service again without waiting for network action
 * can force POLLIN here if not flowcontrolled, so they will get service.
 *
 * Return nonzero if anybody got their POLLIN faked
 */
lws_service_flag_pending(struct lws_context *context, int tsi)
	struct lws_context_per_thread *pt = &context->pt[tsi];
#ifdef LWS_OPENSSL_SUPPORT
	struct lws *wsi_next;

	/*
	 * 1) For all guys with already-available ext data to drain, if they
	 * are not flowcontrolled, fake their POLLIN status
	 */
	wsi = pt->rx_draining_ext_list;
		pt->fds[wsi->position_in_fds_table].revents |=
			pt->fds[wsi->position_in_fds_table].events & LWS_POLLIN;
		if (pt->fds[wsi->position_in_fds_table].revents & LWS_POLLIN) {

		wsi = wsi->u.ws.rx_draining_ext_list;

#ifdef LWS_OPENSSL_SUPPORT
	/*
	 * 2) For all guys with buffered SSL read data already saved up, if
	 * they are not flowcontrolled, fake their POLLIN status so they'll
	 * get service to use up the buffered incoming data, even though their
	 * network socket may have nothing
	 */
	wsi = pt->pending_read_list;
		wsi_next = wsi->pending_read_list_next;
		pt->fds[wsi->position_in_fds_table].revents |=
			pt->fds[wsi->position_in_fds_table].events & LWS_POLLIN;
		if (pt->fds[wsi->position_in_fds_table].revents & LWS_POLLIN) {
			/*
			 * he's going to get serviced now, take him off the
			 * list of guys with buffered SSL.  If he still has some
			 * at the end of the service, he'll get put back on the
			 * list then.
			 */
			lws_ssl_remove_wsi_from_buffered_list(wsi);
	/*
	 * 3) For any wsi who have an ah with pending RX who did not
	 * complete their current headers, and are not flowcontrolled,
	 * fake their POLLIN status so they will be able to drain the
	 * rx buffered in the ah
	 */
	for (n = 0; n < context->max_http_header_pool; n++)
		if (pt->ah_pool[n].rxpos != pt->ah_pool[n].rxlen &&
		    !pt->ah_pool[n].wsi->hdr_parsing_completed) {
			pt->fds[pt->ah_pool[n].wsi->position_in_fds_table].revents |=
				pt->fds[pt->ah_pool[n].wsi->position_in_fds_table].events &
			if (pt->fds[pt->ah_pool[n].wsi->position_in_fds_table].revents &

#ifndef LWS_NO_CLIENT
lws_http_client_read(struct lws *wsi, char **buf, int *len)
	rlen = lws_ssl_capable_read(wsi, (unsigned char *)*buf, *len);

	/* allow the source to signal he has data again next time */
	lws_change_pollfd(wsi, 0, LWS_POLLIN);

	if (rlen == LWS_SSL_CAPABLE_ERROR) {
		lwsl_notice("%s: SSL capable error\n", __func__);

	wsi->client_rx_avail = 0;

	/*
	 * server may insist on transfer-encoding: chunked,
	 * so http client must deal with it
	 */
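	/*
	 * A chunked body arrives as a sequence of <hex length> CRLF <payload>
	 * CRLF blocks, ended by a zero-length chunk, e.g.
	 *
	 *	7\r\nMozilla\r\n
	 *	9\r\nDeveloper\r\n
	 *	0\r\n\r\n
	 *
	 * The small state machine below walks that framing: ELCP_HEX
	 * accumulates the length nibble by nibble, ELCP_CR expects the LF
	 * after it, ELCP_CONTENT passes payload through, and
	 * ELCP_POST_CR / ELCP_POST_LF eat the CRLF ending each chunk.
	 */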
	while (wsi->chunked && (wsi->chunk_parser != ELCP_CONTENT) && *len) {
		switch (wsi->chunk_parser) {
			if ((*buf)[0] == '\x0d') {
				wsi->chunk_parser = ELCP_CR;
			n = char_to_hex((*buf)[0]);
				lwsl_debug("chunking failure\n");
			wsi->chunk_remaining <<= 4;
			wsi->chunk_remaining |= n;
			if ((*buf)[0] != '\x0a') {
				lwsl_debug("chunking failure\n");
			wsi->chunk_parser = ELCP_CONTENT;
			lwsl_info("chunk %d\n", wsi->chunk_remaining);
			if (wsi->chunk_remaining)
				lwsl_info("final chunk\n");

			if ((*buf)[0] != '\x0d') {
				lwsl_debug("chunking failure\n");

			wsi->chunk_parser = ELCP_POST_LF;

			if ((*buf)[0] != '\x0a')

			wsi->chunk_parser = ELCP_HEX;
			wsi->chunk_remaining = 0;

	if (wsi->chunked && !wsi->chunk_remaining)

	if (wsi->u.http.content_remain &&
	    wsi->u.http.content_remain < *len)
		n = (int)wsi->u.http.content_remain;

	if (wsi->chunked && wsi->chunk_remaining &&
	    wsi->chunk_remaining < n)
		n = wsi->chunk_remaining;

#ifdef LWS_WITH_HTTP_PROXY
	if (wsi->perform_rewrite)
		lws_rewrite_parse(wsi->rw, (unsigned char *)*buf, n);

	if (user_callback_handle_rxflow(wsi->protocol->callback,
			wsi, LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ,
			wsi->user_space, *buf, n)) {
		lwsl_debug("%s: LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ returned -1\n",
			   __func__);

	if (wsi->chunked && wsi->chunk_remaining) {
		wsi->chunk_remaining -= n;

	if (wsi->chunked && !wsi->chunk_remaining)
		wsi->chunk_parser = ELCP_POST_CR;

	if (wsi->chunked && *len)

	/* if we know the content length, decrement the content remaining */
	if (wsi->u.http.content_length > 0)
		wsi->u.http.content_remain -= n;

	if (wsi->u.http.content_remain || !wsi->u.http.content_length)

	if (user_callback_handle_rxflow(wsi->protocol->callback,
			wsi, LWS_CALLBACK_COMPLETED_CLIENT_HTTP,
			wsi->user_space, NULL, 0)) {
		lwsl_debug("Completed call returned -1\n");

	if (lws_http_transaction_completed_client(wsi)) {
		lwsl_notice("%s: transaction completed says -1\n", __func__);
lws_is_ws_with_ext(struct lws *wsi)
#if defined(LWS_NO_EXTENSIONS)
	return wsi->state == LWSS_ESTABLISHED &&
	       !!wsi->count_act_ext;
lws_service_fd_tsi(struct lws_context *context, struct lws_pollfd *pollfd, int tsi)
	struct lws_context_per_thread *pt = &context->pt[tsi];
	lws_sockfd_type our_fd = 0, tmp_fd;
	struct lws_tokens eff_buf;
	unsigned int pending = 0;
	struct lws *wsi, *wsi1;
	char draining_flow = 0;

	if (!context->protocol_init_done)
		lws_protocol_init(context);

	/*
	 * handle case that system time was uninitialized when lws started
	 * at boot, and got initialized a little later
	 */
	if (context->time_up < 1464083026 && now > 1464083026)
		context->time_up = now;
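	/*
	 * (1464083026 is just an arbitrary "the clock is sane now" threshold;
	 * it corresponds to a date in May 2016, presumably close to when this
	 * check was written, so any earlier time is treated as an unset RTC)
	 */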
	/* TODO: if using libev, we should probably use timeout watchers... */
	if (context->last_timeout_check_s != now) {
		context->last_timeout_check_s = now;

#if defined(LWS_WITH_STATS)
		if (!tsi && now - context->last_dump > 10) {
			lws_stats_log_dump(context);
			context->last_dump = now;

		lws_plat_service_periodic(context);

		lws_check_deferred_free(context, 0);

		/* retire unused deprecated context */
#if !defined(LWS_PLAT_OPTEE) && !defined(LWS_WITH_ESP32)
#if LWS_POSIX && !defined(_WIN32)
		if (context->deprecated && !context->count_wsi_allocated) {
			lwsl_notice("%s: ending deprecated context\n", __func__);
			kill(getpid(), SIGINT);

		/* global timeout check once per second */

		wsi = context->pt[tsi].timeout_list;
			/* we have to take copies, because he may be deleted */
			wsi1 = wsi->timeout_list;
			tmp_fd = wsi->desc.sockfd;
			if (lws_service_timeout_check(wsi, (unsigned int)now)) {
				/* he did time out... */
				if (tmp_fd == our_fd)
					/* it was the guy we came to service! */
					/* he's gone, no need to mark as handled */

		lws_cgi_kill_terminated(pt);

		for (n = 0; n < context->count_threads; n++)
			p += sprintf(p, " %7lu (%5d), ",
				     context->pt[n].count_conns,
				     context->pt[n].fds_count);

		lwsl_notice("load: %s\n", s);
	/*
	 * at intervals, check for ws connections needing ping-pong checks
	 */

	if (context->ws_ping_pong_interval &&
	    context->last_ws_ping_pong_check_s < now + 10) {
		struct lws_vhost *vh = context->vhost_list;
		context->last_ws_ping_pong_check_s = now;

			for (n = 0; n < vh->count_protocols; n++) {
				wsi = vh->same_vh_protocol_list[n];

					if (wsi->state == LWSS_ESTABLISHED &&
					    !wsi->socket_is_permanently_unusable &&
					    !wsi->u.ws.send_check_ping &&
					    wsi->u.ws.time_next_ping_check &&
					    wsi->u.ws.time_next_ping_check < now) {

						lwsl_info("requesting ping-pong on wsi %p\n", wsi);
						wsi->u.ws.send_check_ping = 1;
						lws_set_timeout(wsi,
							PENDING_TIMEOUT_WS_PONG_CHECK_SEND_PING,
							context->timeout_secs);
						lws_callback_on_writable(wsi);
						wsi->u.ws.time_next_ping_check = now +
							wsi->context->ws_ping_pong_interval;
					wsi = wsi->same_vh_protocol_next;
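	/*
	 * (requesting the writeable callback is what actually triggers the
	 * PING: the send_check_ping branch in lws_handle_POLLOUT_event()
	 * above writes it out and then re-arms the timeout to wait for the
	 * peer's PONG)
	 */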
	/* the socket we came to service timed out, nothing to do */

	/* just here for timeout management? */

	/* no, here to service a socket descriptor */
	wsi = wsi_from_fd(context, pollfd->fd);
		/* not lws connection ... leave revents alone and return */

	/*
	 * so that caller can tell we handled, past here we need to
	 * zero down pollfd->revents after handling
	 */

	/* handle session socket closed */

	if ((!(pollfd->revents & pollfd->events & LWS_POLLIN)) &&
	    (pollfd->revents & LWS_POLLHUP)) {
		wsi->socket_is_permanently_unusable = 1;
		lwsl_debug("Session Socket %p (fd=%d) dead\n",
			   (void *)wsi, pollfd->fd);

		goto close_and_handled;

	if (pollfd->revents & LWS_POLLOUT)
		wsi->sock_send_blocking = FALSE;

	// lwsl_debug("fd=%d, revents=%d, mode=%d, state=%d\n", pollfd->fd, pollfd->revents, (int)wsi->mode, (int)wsi->state);

	if (pollfd->revents & LWS_POLLHUP) {
		lwsl_debug("pollhup\n");
		wsi->socket_is_permanently_unusable = 1;
		goto close_and_handled;

#ifdef LWS_OPENSSL_SUPPORT
	if ((wsi->state == LWSS_SHUTDOWN) && lws_is_ssl(wsi) && wsi->ssl)
		n = SSL_shutdown(wsi->ssl);
		lwsl_debug("SSL_shutdown=%d for fd %d\n", n, wsi->desc.sockfd);
			n = shutdown(wsi->desc.sockfd, SHUT_WR);
			goto close_and_handled;

			lws_change_pollfd(wsi, LWS_POLLOUT, LWS_POLLIN);

			int shutdown_error = SSL_get_error(wsi->ssl, n);
			lwsl_debug("SSL_shutdown returned %d, SSL_get_error: %d\n",
				   n, shutdown_error);
			if (shutdown_error == SSL_ERROR_WANT_READ) {
				lws_change_pollfd(wsi, LWS_POLLOUT, LWS_POLLIN);
			} else if (shutdown_error == SSL_ERROR_WANT_WRITE) {
				lws_change_pollfd(wsi, LWS_POLLOUT, LWS_POLLOUT);

			// actual error occurred, just close the connection
			n = shutdown(wsi->desc.sockfd, SHUT_WR);
			goto close_and_handled;
	/* okay, what we came here to do... */

	switch (wsi->mode) {
	case LWSCM_HTTP_SERVING:
	case LWSCM_HTTP_CLIENT:
	case LWSCM_HTTP_SERVING_ACCEPTED:
	case LWSCM_SERVER_LISTENER:
	case LWSCM_SSL_ACK_PENDING:
	case LWSCM_SSL_ACK_PENDING_RAW:
		if (wsi->state == LWSS_CLIENT_HTTP_ESTABLISHED)

		if (wsi->cgi && (pollfd->revents & LWS_POLLOUT)) {
			n = lws_handle_POLLOUT_event(wsi, pollfd);
				goto close_and_handled;

		n = lws_server_socket_service(context, wsi, pollfd);
		if (n) /* closed by above */

	case LWSCM_RAW_FILEDESC:

		if (pollfd->revents & LWS_POLLOUT) {
			n = lws_calllback_as_writeable(wsi);
			if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
				lwsl_info("failed at set pollfd\n");

				goto close_and_handled;

		n = LWS_CALLBACK_RAW_RX;
		if (wsi->mode == LWSCM_RAW_FILEDESC)
			n = LWS_CALLBACK_RAW_RX_FILE;

		if (pollfd->revents & LWS_POLLIN) {
			if (user_callback_handle_rxflow(
					wsi->protocol->callback,
					wsi->user_space, NULL, 0)) {
				lwsl_debug("raw rx callback closed it\n");
				goto close_and_handled;

		if (pollfd->revents & LWS_POLLHUP)
			goto close_and_handled;

	case LWSCM_WS_SERVING:
	case LWSCM_WS_CLIENT:
	case LWSCM_HTTP2_SERVING:
	case LWSCM_HTTP_CLIENT_ACCEPTED:

		/* 1: something requested a callback when it was OK to write */

		if (wsi->state == LWSS_WAITING_TO_SEND_CLOSE_NOTIFICATION)
			lwsl_notice("xxx\n");

		if ((pollfd->revents & LWS_POLLOUT) &&
		    ((wsi->state == LWSS_ESTABLISHED ||
		      wsi->state == LWSS_HTTP2_ESTABLISHED ||
		      wsi->state == LWSS_HTTP2_ESTABLISHED_PRE_SETTINGS ||
		      wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
		      wsi->state == LWSS_WAITING_TO_SEND_CLOSE_NOTIFICATION ||
		      wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE)) &&
		    lws_handle_POLLOUT_event(wsi, pollfd)) {
			if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY)
				wsi->state = LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE;
			lwsl_info("lws_service_fd: closing\n");
			goto close_and_handled;
		if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
		    wsi->state == LWSS_WAITING_TO_SEND_CLOSE_NOTIFICATION ||
		    wsi->state == LWSS_AWAITING_CLOSE_ACK) {
			/*
			 * we stopped caring about anything except control
			 * packets.  Force flow control off, defeat tx
			 * draining.
			 */
			lws_rx_flow_control(wsi, 1);
			wsi->u.ws.tx_draining_ext = 0;

		if (wsi->u.ws.tx_draining_ext)
			/*
			 * we cannot deal with new RX until the TX ext
			 * path has been drained.  It's because new
			 * rx will, eg, crap on the wsi rx buf that
			 * may be needed to retain state.
			 *
			 * TX ext drain path MUST go through event loop
			 * to avoid blocking.
			 */

		if (!(wsi->rxflow_change_to & LWS_RXFLOW_ALLOW))
			/*
			 * We cannot deal with any kind of new RX
			 * because we are RX-flowcontrolled.
			 */
		/* 2: RX Extension needs to be drained
		 */

		if (wsi->state == LWSS_ESTABLISHED &&
		    wsi->u.ws.rx_draining_ext) {

			lwsl_ext("%s: RX EXT DRAINING: Service\n", __func__);
#ifndef LWS_NO_CLIENT
			if (wsi->mode == LWSCM_WS_CLIENT) {
				n = lws_client_rx_sm(wsi, 0);
				n = lws_rx_sm(wsi, 0);

		if (wsi->u.ws.rx_draining_ext)
			/*
			 * We have RX EXT content to drain, but can't do it
			 * right now.  That means we cannot do anything lower
			 * priority either.
			 */

		/* 3: RX Flowcontrol buffer needs to be drained
		 */

		if (wsi->rxflow_buffer) {
			lwsl_info("draining rxflow (len %d)\n",
				  wsi->rxflow_len - wsi->rxflow_pos
			/* well, drain it */
			eff_buf.token = (char *)wsi->rxflow_buffer +
			eff_buf.token_len = wsi->rxflow_len - wsi->rxflow_pos;
		/* 4: any incoming (or ah-stashed incoming rx) data ready?
		 * notice if rx flow going off raced poll(), rx flow wins
		 */
		if (!(pollfd->revents & pollfd->events & LWS_POLLIN))

		/* all the union members start with hdr, so even in ws mode
		 * we can deal with the ah via u.hdr
		 */
		if (wsi->u.hdr.ah) {
			lwsl_info("%s: %p: inherited ah rx\n", __func__, wsi);
			eff_buf.token_len = wsi->u.hdr.ah->rxlen -
					    wsi->u.hdr.ah->rxpos;
			eff_buf.token = (char *)wsi->u.hdr.ah->rx +
					wsi->u.hdr.ah->rxpos;

			if (wsi->mode != LWSCM_HTTP_CLIENT_ACCEPTED) {
				/*
				 * the extension may not consume everything
				 * (eg, pmd may be constrained as to what it
				 * can output...), so what it does not consume
				 * has to go in the per-wsi rx buf area.
				 * Otherwise it goes in the large temp
				 * serv_buf area.
				 */
				eff_buf.token = (char *)pt->serv_buf;
				if (lws_is_ws_with_ext(wsi)) {
					eff_buf.token_len = wsi->u.ws.rx_ubuf_alloc;
					eff_buf.token_len = context->pt_serv_buf_size;

				if ((unsigned int)eff_buf.token_len > context->pt_serv_buf_size)
					eff_buf.token_len = context->pt_serv_buf_size;

				eff_buf.token_len = lws_ssl_capable_read(wsi,
					(unsigned char *)eff_buf.token, pending ? pending :

				switch (eff_buf.token_len) {
					lwsl_info("%s: zero length read\n", __func__);
					goto close_and_handled;
				case LWS_SSL_CAPABLE_MORE_SERVICE:
					lwsl_info("SSL Capable more service\n");
				case LWS_SSL_CAPABLE_ERROR:
					lwsl_info("Closing when error\n");
					goto close_and_handled;

				// lwsl_notice("Actual RX %d\n", eff_buf.token_len);
#ifndef LWS_NO_CLIENT
		if (wsi->mode == LWSCM_HTTP_CLIENT_ACCEPTED &&
		    !wsi->told_user_closed) {

			/*
			 * In SSL mode we get POLLIN notification about
			 * encrypted data in.
			 *
			 * But that is not necessarily related to decrypted
			 * data out becoming available; it may need to perform
			 * other in or out before that happens.
			 *
			 * simply mark ourselves as having readable data
			 * and turn off our POLLIN
			 */
			wsi->client_rx_avail = 1;
			lws_change_pollfd(wsi, LWS_POLLIN, 0);

			/* let user code know, he'll usually ask for writeable
			 * callback and drain / re-enable it there
			 */
			if (user_callback_handle_rxflow(
					wsi->protocol->callback,
					wsi, LWS_CALLBACK_RECEIVE_CLIENT_HTTP,
					wsi->user_space, NULL, 0)) {
				lwsl_notice("LWS_CALLBACK_RECEIVE_CLIENT_HTTP closed it\n");
				goto close_and_handled;
		/*
		 * give any active extensions a chance to munge the buffer
		 * before parse.  We pass in a pointer to an lws_tokens struct
		 * prepared with the default buffer and content length that's
		 * in there.  Rather than rewrite the default buffer,
		 * extensions that expect to grow the buffer can adapt .token
		 * to point to their own per-connection buffer in the
		 * extension user allocation.  By default with no extensions
		 * or no extension callback handling, just the normal input
		 * buffer is used then so it is efficient.
		 */
			m = lws_ext_cb_active(wsi, LWS_EXT_CB_PACKET_RX_PREPARSE,
				goto close_and_handled;

			/* service incoming data */

			if (eff_buf.token_len) {
				/*
				 * if draining from the rxflow buffer, it is
				 * not critical to track what was used, since
				 * at the point of use it bumps
				 * wsi->rxflow_pos.  If we come around again
				 * it will pick up from where it left off.
				 */
				// lwsl_notice("doing lws_read from pt->serv_buf %p %p for len %d\n",
				//	       pt->serv_buf, eff_buf.token, (int)eff_buf.token_len);
				n = lws_read(wsi, (unsigned char *)eff_buf.token,

			eff_buf.token = NULL;
			eff_buf.token_len = 0;

		if (wsi->u.hdr.ah) {
			lwsl_notice("%s: %p: detaching\n",
			lws_header_table_force_to_detachable_state(wsi);
			/* we can run the normal ah detach flow despite
			 * being in ws union mode, since all union members
			 * start with hdr
			 */
			lws_header_table_detach(wsi, 0);

		pending = lws_ssl_pending(wsi);

			if (lws_is_ws_with_ext(wsi))
				pending = pending > wsi->u.ws.rx_ubuf_alloc ?
					wsi->u.ws.rx_ubuf_alloc : pending;
				pending = pending > context->pt_serv_buf_size ?
					context->pt_serv_buf_size : pending;
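		/*
		 * (lws_ssl_pending() reports decrypted bytes OpenSSL already
		 * holds for us; since buffered data will never raise another
		 * POLLIN, a nonzero "pending" makes us go around for another
		 * read pass right away, clamped here to the size of the
		 * buffer we will read into)
		 */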
		if (draining_flow && wsi->rxflow_buffer &&
		    wsi->rxflow_pos == wsi->rxflow_len) {
			lwsl_info("flow buffer: drained\n");
			lws_free_set_NULL(wsi->rxflow_buffer);
			/* having drained the rxflow buffer, can rearm POLLIN */
#ifdef LWS_NO_SERVER
			_lws_rx_flow_control(wsi);
			/* n ignored, needed for NO_SERVER case */

	case LWSCM_CGI: /* we exist to handle a cgi's stdin/out/err data...
			 * do the callback on our master wsi
			 */
			struct lws_cgi_args args;

			if (wsi->cgi_channel >= LWS_STDOUT &&
			    !(pollfd->revents & pollfd->events & LWS_POLLIN))
			if (wsi->cgi_channel == LWS_STDIN &&
			    !(pollfd->revents & pollfd->events & LWS_POLLOUT))

			if (wsi->cgi_channel == LWS_STDIN)
				if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
					lwsl_info("failed at set pollfd\n");

			args.ch = wsi->cgi_channel;
			args.stdwsi = &wsi->parent->cgi->stdwsi[0];
			args.hdr_state = wsi->hdr_state;

			//lwsl_err("CGI LWS_STDOUT waiting wsi %p mode %d state %d\n",
			//	 wsi->parent, wsi->parent->mode, wsi->parent->state);

			if (user_callback_handle_rxflow(
					wsi->parent->protocol->callback,
					wsi->parent, LWS_CALLBACK_CGI,
					wsi->parent->user_space,

#ifdef LWS_NO_CLIENT

		if ((pollfd->revents & LWS_POLLOUT) &&
		    lws_handle_POLLOUT_event(wsi, pollfd)) {
			lwsl_debug("POLLOUT event closed it\n");
			goto close_and_handled;

		n = lws_client_socket_service(context, wsi, pollfd);

	lwsl_debug("%p: Close and handled\n", wsi);
	lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS);
	/*
	 * pollfd may point to something else after the close
	 * due to pollfd swapping scheme on delete on some platforms
	 * we can't clear revents now because it'd be the wrong guy's revents
	 */
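	/*
	 * (when an fd is removed from the service table, the last pollfd can
	 * be swapped into the freed slot so the array stays dense; after
	 * lws_close_free_wsi() our pollfd pointer may therefore describe a
	 * different, still-live connection, which is why only the "handled"
	 * path below clears revents)
	 */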
	pollfd->revents = 0;

lws_service_fd(struct lws_context *context, struct lws_pollfd *pollfd)
	return lws_service_fd_tsi(context, pollfd, 0);

lws_service(struct lws_context *context, int timeout_ms)
	return lws_plat_service(context, timeout_ms);

lws_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
	return _lws_plat_service_tsi(context, timeout_ms, tsi);