avoid illegal sockfd on timeout
[platform/upstream/libwebsockets.git] / lib / service.c
1 /*
2  * libwebsockets - small server side websockets and web server implementation
3  *
4  * Copyright (C) 2010-2015 Andy Green <andy@warmcat.com>
5  *
6  *  This library is free software; you can redistribute it and/or
7  *  modify it under the terms of the GNU Lesser General Public
8  *  License as published by the Free Software Foundation:
9  *  version 2.1 of the License.
10  *
11  *  This library is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  *  Lesser General Public License for more details.
15  *
16  *  You should have received a copy of the GNU Lesser General Public
17  *  License along with this library; if not, write to the Free Software
18  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
19  *  MA  02110-1301  USA
20  */
21
22 #include "private-libwebsockets.h"
23
24 static int
25 lws_calllback_as_writeable(struct lws *wsi)
26 {
27         int n;
28
29         switch (wsi->mode) {
30         case LWSCM_WS_CLIENT:
31                 n = LWS_CALLBACK_CLIENT_WRITEABLE;
32                 break;
33         case LWSCM_WS_SERVING:
34                 n = LWS_CALLBACK_SERVER_WRITEABLE;
35                 break;
36         default:
37                 n = LWS_CALLBACK_HTTP_WRITEABLE;
38                 break;
39         }
40         lwsl_debug("%s: %p (user=%p)\n", __func__, wsi, wsi->user_space);
41         return user_callback_handle_rxflow(wsi->protocol->callback,
42                                            wsi, (enum lws_callback_reasons) n,
43                                            wsi->user_space, NULL, 0);
44 }
45
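/*
 * Illustrative sketch only, not part of this file: the user-side counterpart
 * of the reason mapping above.  A protocol callback typically asks for a
 * writeable callback with lws_callback_on_writable() and does its lws_write()
 * when the corresponding *_WRITEABLE reason arrives.  The callback name and
 * payload below are hypothetical.
 */
#if 0
static int
callback_minimal(struct lws *wsi, enum lws_callback_reasons reason,
                 void *user, void *in, size_t len)
{
        unsigned char buf[LWS_PRE + 16];
        const char *msg = "hello";
        int n = (int)strlen(msg);

        switch (reason) {
        case LWS_CALLBACK_RECEIVE:
                /* defer the reply until the socket is writeable again */
                lws_callback_on_writable(wsi);
                break;

        case LWS_CALLBACK_SERVER_WRITEABLE:
                memcpy(&buf[LWS_PRE], msg, n);
                /* send one complete frame per writeable notification */
                if (lws_write(wsi, &buf[LWS_PRE], n, LWS_WRITE_TEXT) < n)
                        return -1;
                break;

        default:
                break;
        }

        return 0;
}
#endif
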
46 int
47 lws_handle_POLLOUT_event(struct lws *wsi, struct lws_pollfd *pollfd)
48 {
49         int write_type = LWS_WRITE_PONG;
50         struct lws_tokens eff_buf;
51 #ifdef LWS_USE_HTTP2
52         struct lws *wsi2;
53 #endif
54         int ret, m, n;
55
56         //lwsl_err("%s: %p\n", __func__, wsi);
57
58         /*
 59          * the user callback is lowest priority to get these notifications
 60          * since the other pending things must not be reordered
61          */
62
63         /* Priority 1: pending truncated sends are incomplete ws fragments
 64          *             If anything else were sent first, the protocol would be
65          *             corrupted.
66          */
67         if (wsi->trunc_len) {
68                 if (lws_issue_raw(wsi, wsi->trunc_alloc + wsi->trunc_offset,
69                                   wsi->trunc_len) < 0) {
70                         lwsl_info("%s signalling to close\n", __func__);
71                         return -1;
72                 }
73                 /* leave POLLOUT active either way */
74                 return 0;
75         } else
76                 if (wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE)
77                         return -1; /* retry closing now */
78
79
80 #ifdef LWS_USE_HTTP2
81         /* Priority 2: protocol packets
82          */
83         if (wsi->pps) {
84                 lwsl_info("servicing pps %d\n", wsi->pps);
85                 switch (wsi->pps) {
86                 case LWS_PPS_HTTP2_MY_SETTINGS:
87                 case LWS_PPS_HTTP2_ACK_SETTINGS:
88                         lws_http2_do_pps_send(lws_get_context(wsi), wsi);
89                         break;
90                 default:
91                         break;
92                 }
93                 wsi->pps = LWS_PPS_NONE;
94                 lws_rx_flow_control(wsi, 1);
95
96                 return 0; /* leave POLLOUT active */
97         }
98 #endif
99
100 #ifdef LWS_WITH_CGI
101         if (wsi->cgi)
102                 goto user_service_go_again;
103 #endif
104
105         /* Priority 3: pending control packets (pong or close)
106          */
107         if ((wsi->state == LWSS_ESTABLISHED &&
108              wsi->u.ws.ping_pending_flag) ||
109             (wsi->state == LWSS_RETURNED_CLOSE_ALREADY &&
110              wsi->u.ws.payload_is_close)) {
111
112                 if (wsi->u.ws.payload_is_close)
113                         write_type = LWS_WRITE_CLOSE;
114
115                 n = lws_write(wsi, &wsi->u.ws.ping_payload_buf[LWS_PRE],
116                               wsi->u.ws.ping_payload_len, write_type);
117                 if (n < 0)
118                         return -1;
119
120                 /* well he is sent, mark him done */
121                 wsi->u.ws.ping_pending_flag = 0;
122                 if (wsi->u.ws.payload_is_close)
123                         /* oh... a close frame was it... then we are done */
124                         return -1;
125
126                 /* otherwise for PING, leave POLLOUT active either way */
127                 return 0;
128         }
129
130         /* Priority 4: if we are closing, not allowed to send more data frags
131          *             which means user callback or tx ext flush banned now
132          */
133         if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY)
134                 goto user_service;
135
136         /* Priority 5: Tx path extension with more to send
137          *
138          *             These are handled as new fragments each time around
 139          *             We must block new writeable callbacks to enforce
 140          *             payload ordering, but since they are always complete
 141          *             fragments, control packets can interleave OK.
142          */
143         if (wsi->state == LWSS_ESTABLISHED && wsi->u.ws.tx_draining_ext) {
144                 lwsl_ext("SERVICING TX EXT DRAINING\n");
145                 if (lws_write(wsi, NULL, 0, LWS_WRITE_CONTINUATION) < 0)
146                         return -1;
147                 /* leave POLLOUT active */
148                 return 0;
149         }
150
151         /* Priority 6: user can get the callback
152          */
153         m = lws_ext_cb_active(wsi, LWS_EXT_CB_IS_WRITEABLE, NULL, 0);
154         if (m)
155                 return -1;
156 #ifndef LWS_NO_EXTENSIONS
157         if (!wsi->extension_data_pending)
158                 goto user_service;
159 #endif
160         /*
161          * check in on the active extensions, see if they
162          * had pending stuff to spill... they need to get the
163          * first look-in otherwise sequence will be disordered
164          *
165          * NULL, zero-length eff_buf means just spill pending
166          */
167
168         ret = 1;
169         while (ret == 1) {
170
171                 /* default to nobody has more to spill */
172
173                 ret = 0;
174                 eff_buf.token = NULL;
175                 eff_buf.token_len = 0;
176
177                 /* give every extension a chance to spill */
178
179                 m = lws_ext_cb_active(wsi,
180                                         LWS_EXT_CB_PACKET_TX_PRESEND,
181                                                &eff_buf, 0);
182                 if (m < 0) {
183                         lwsl_err("ext reports fatal error\n");
184                         return -1;
185                 }
186                 if (m)
187                         /*
188                          * at least one extension told us he has more
189                          * to spill, so we will go around again after
190                          */
191                         ret = 1;
192
193                 /* assuming they gave us something to send, send it */
194
195                 if (eff_buf.token_len) {
196                         n = lws_issue_raw(wsi, (unsigned char *)eff_buf.token,
197                                           eff_buf.token_len);
198                         if (n < 0) {
199                                 lwsl_info("closing from POLLOUT spill\n");
200                                 return -1;
201                         }
202                         /*
203                          * Keep amount spilled small to minimize chance of this
204                          */
205                         if (n != eff_buf.token_len) {
 206                                 lwsl_err("Unable to spill ext %d vs %d\n",
207                                                           eff_buf.token_len, n);
208                                 return -1;
209                         }
210                 } else
211                         continue;
212
213                 /* no extension has more to spill */
214
215                 if (!ret)
216                         continue;
217
218                 /*
219                  * There's more to spill from an extension, but we just sent
220                  * something... did that leave the pipe choked?
221                  */
222
223                 if (!lws_send_pipe_choked(wsi))
224                         /* no we could add more */
225                         continue;
226
227                 lwsl_info("choked in POLLOUT service\n");
228
229                 /*
230                  * Yes, he's choked.  Leave the POLLOUT masked on so we will
231                  * come back here when he is unchoked.  Don't call the user
232                  * callback to enforce ordering of spilling, he'll get called
233                  * when we come back here and there's nothing more to spill.
234                  */
235
236                 return 0;
237         }
238 #ifndef LWS_NO_EXTENSIONS
239         wsi->extension_data_pending = 0;
240 #endif
241 user_service:
242         /* one shot */
243
244         if (pollfd)
245                 if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
246                         lwsl_info("failed at set pollfd\n");
247                         return 1;
248                 }
249
250
251         if (!wsi->hdr_parsing_completed)
252                 return 0;
253
254 #ifdef LWS_WITH_CGI
255 user_service_go_again:
256 #endif
257
258 #ifdef LWS_USE_HTTP2
259         /*
260          * we are the 'network wsi' for potentially many muxed child wsi with
261          * no network connection of their own, who have to use us for all their
262          * network actions.  So we use a round-robin scheme to share out the
263          * POLLOUT notifications to our children.
264          *
265          * But because any child could exhaust the socket's ability to take
266          * writes, we can only let one child get notified each time.
267          *
268          * In addition children may be closed / deleted / added between POLLOUT
269          * notifications, so we can't hold pointers
270          */
271
272         if (wsi->mode != LWSCM_HTTP2_SERVING) {
273                 lwsl_info("%s: non http2\n", __func__);
274                 goto notify;
275         }
276
277         wsi->u.http2.requested_POLLOUT = 0;
278         if (!wsi->u.http2.initialized) {
279                 lwsl_info("pollout on uninitialized http2 conn\n");
280                 return 0;
281         }
282
283         lwsl_info("%s: doing children\n", __func__);
284
285         wsi2 = wsi;
286         do {
287                 wsi2 = wsi2->u.http2.next_child_wsi;
288                 lwsl_info("%s: child %p\n", __func__, wsi2);
289                 if (!wsi2)
290                         continue;
291                 if (!wsi2->u.http2.requested_POLLOUT)
292                         continue;
293                 wsi2->u.http2.requested_POLLOUT = 0;
294                 if (lws_calllback_as_writeable(wsi2)) {
295                         lwsl_debug("Closing POLLOUT child\n");
296                         lws_close_free_wsi(wsi2, LWS_CLOSE_STATUS_NOSTATUS);
297                 }
298                 wsi2 = wsi;
299         } while (wsi2 != NULL && !lws_send_pipe_choked(wsi));
300
301         lwsl_info("%s: completed\n", __func__);
302
303         return 0;
304 notify:
305 #endif
306         return lws_calllback_as_writeable(wsi);
307 }
308
309 int
310 lws_service_timeout_check(struct lws *wsi, unsigned int sec)
311 {
312         struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
313         int n = 0;
314
315         /*
316          * if extensions want in on it (eg, we are a mux parent)
317          * give them a chance to service child timeouts
318          */
319         if (lws_ext_cb_active(wsi, LWS_EXT_CB_1HZ, NULL, sec) < 0)
320                 return 0;
321
322         if (!wsi->pending_timeout)
323                 return 0;
324
325         /*
326          * if we went beyond the allowed time, kill the
327          * connection
328          */
329         if ((time_t)sec > wsi->pending_timeout_limit) {
330 #if LWS_POSIX
331                 if (wsi->sock != LWS_SOCK_INVALID)
332                         n = pt->fds[wsi->sock].events;
333
334                 /* no need to log normal idle keepalive timeout */
335                 if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
336                         lwsl_notice("wsi %p: TIMEDOUT WAITING on %d (did hdr %d, ah %p, wl %d, pfd events %d)\n",
337                             (void *)wsi, wsi->pending_timeout,
338                             wsi->hdr_parsing_completed, wsi->u.hdr.ah,
339                             pt->ah_wait_list_length, n);
340 #endif
341                 /*
342                  * Since he failed a timeout, he already had a chance to do
343                  * something and was unable to... that includes situations like
344                  * half closed connections.  So process this "failed timeout"
345                  * close as a violent death and don't try to do protocol
346                  * cleanup like flush partials.
347                  */
348                 wsi->socket_is_permanently_unusable = 1;
349                 if (wsi->mode == LWSCM_WSCL_WAITING_SSL)
350                         wsi->vhost->protocols[0].callback(wsi,
351                                 LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
352                                 wsi->user_space, NULL, 0);
353
354                 lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS);
355
356                 return 1;
357         }
358
359         return 0;
360 }
361
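/*
 * Illustrative sketch only, not part of this file: the timeouts checked above
 * are armed with lws_set_timeout().  For example, a connection can be given
 * 30 seconds to deliver body content, and the timeout disarmed again once the
 * transfer completes.  The reason constants are existing lws enums; the
 * surrounding flow is hypothetical.
 */
#if 0
        /* give the peer 30s before lws_service_timeout_check() closes the wsi */
        lws_set_timeout(wsi, PENDING_TIMEOUT_HTTP_CONTENT, 30);

        /* ... transfer completes ... */

        /* disarm the pending timeout */
        lws_set_timeout(wsi, NO_PENDING_TIMEOUT, 0);
#endif
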
362 int lws_rxflow_cache(struct lws *wsi, unsigned char *buf, int n, int len)
363 {
364         /* his RX is flowcontrolled, don't send remaining now */
365         if (wsi->rxflow_buffer) {
366                 /* rxflow while we were spilling prev rxflow */
367                 lwsl_info("stalling in existing rxflow buf\n");
368                 return 1;
369         }
370
371         /* a new rxflow, buffer it and warn caller */
372         lwsl_info("new rxflow input buffer len %d\n", len - n);
373         wsi->rxflow_buffer = lws_malloc(len - n);
374         if (!wsi->rxflow_buffer)
375                 return -1;
376         wsi->rxflow_len = len - n;
377         wsi->rxflow_pos = 0;
378         memcpy(wsi->rxflow_buffer, buf + n, len - n);
379
380         return 0;
381 }
382
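/*
 * Illustrative sketch only, not part of this file: the rxflow buffer filled
 * by lws_rxflow_cache() above comes into play when user code throttles a
 * connection with lws_rx_flow_control().  A typical pattern pauses RX while a
 * downstream sink is busy and re-enables it later, at which point the
 * buffered remainder is drained by the service loop.  The trigger condition
 * below is hypothetical.
 */
#if 0
        /* inside a protocol callback: stop accepting RX while congested */
        if (downstream_is_busy)
                lws_rx_flow_control(wsi, 0);

        /* later, when congestion clears, allow RX again; anything cached by
         * lws_rxflow_cache() is then replayed through the normal read path */
        lws_rx_flow_control(wsi, 1);
#endif
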
383 /* this is used by the platform service code to stop us waiting for network
384  * activity in poll() when we have something that already needs service
385  */
386
387 int
388 lws_service_adjust_timeout(struct lws_context *context, int timeout_ms, int tsi)
389 {
390         struct lws_context_per_thread *pt = &context->pt[tsi];
391         int n;
392
393         /* Figure out if we really want to wait in poll()
 394          * We only need to wait if there is really nothing to do already and
 395          * we have to wait for something from the network
396          */
397
398         /* 1) if we know we are draining rx ext, do not wait in poll */
399         if (pt->rx_draining_ext_list)
400                 return 0;
401
402 #ifdef LWS_OPENSSL_SUPPORT
403         /* 2) if we know we have non-network pending data, do not wait in poll */
404         if (lws_ssl_anybody_has_buffered_read_tsi(context, tsi)) {
405                 lwsl_info("ssl buffered read\n");
406                 return 0;
407         }
408 #endif
409
410         /* 3) if any ah has pending rx, do not wait in poll */
411         for (n = 0; n < context->max_http_header_pool; n++)
412                 if (pt->ah_pool[n].rxpos != pt->ah_pool[n].rxlen) {
413                         /* any ah with pending rx must be attached to someone */
414                         if (!pt->ah_pool[n].wsi) {
415                                 lwsl_err("%s: assert: no wsi attached to ah\n", __func__);
416                                 assert(0);
417                         }
418                         return 0;
419                 }
420
421         return timeout_ms;
422 }
423
424 /*
425  * guys that need POLLIN service again without waiting for network action
426  * can force POLLIN here if not flowcontrolled, so they will get service.
427  *
428  * Return nonzero if anybody got their POLLIN faked
429  */
430 int
431 lws_service_flag_pending(struct lws_context *context, int tsi)
432 {
433         struct lws_context_per_thread *pt = &context->pt[tsi];
434 #ifdef LWS_OPENSSL_SUPPORT
435         struct lws *wsi_next;
436 #endif
437         struct lws *wsi;
438         int forced = 0;
439         int n;
440
441         /* POLLIN faking */
442
443         /*
444          * 1) For all guys with already-available ext data to drain, if they are
445          * not flowcontrolled, fake their POLLIN status
446          */
447         wsi = pt->rx_draining_ext_list;
448         while (wsi) {
449                 pt->fds[wsi->position_in_fds_table].revents |=
450                         pt->fds[wsi->position_in_fds_table].events & LWS_POLLIN;
451                 if (pt->fds[wsi->position_in_fds_table].revents &
452                     LWS_POLLIN)
453                         forced = 1;
454                 wsi = wsi->u.ws.rx_draining_ext_list;
455         }
456
457 #ifdef LWS_OPENSSL_SUPPORT
458         /*
459          * 2) For all guys with buffered SSL read data already saved up, if they
460          * are not flowcontrolled, fake their POLLIN status so they'll get
461          * service to use up the buffered incoming data, even though their
462          * network socket may have nothing
463          */
464         wsi = pt->pending_read_list;
465         while (wsi) {
466                 wsi_next = wsi->pending_read_list_next;
467                 pt->fds[wsi->position_in_fds_table].revents |=
468                         pt->fds[wsi->position_in_fds_table].events & LWS_POLLIN;
469                 if (pt->fds[wsi->position_in_fds_table].revents & LWS_POLLIN) {
470                         forced = 1;
471                         /*
472                          * he's going to get serviced now, take him off the
473                          * list of guys with buffered SSL.  If he still has some
474                          * at the end of the service, he'll get put back on the
475                          * list then.
476                          */
477                         lws_ssl_remove_wsi_from_buffered_list(wsi);
478                 }
479
480                 wsi = wsi_next;
481         }
482 #endif
483         /*
484          * 3) For any wsi who have an ah with pending RX who did not
485          * complete their current headers, and are not flowcontrolled,
486          * fake their POLLIN status so they will be able to drain the
487          * rx buffered in the ah
488          */
489         for (n = 0; n < context->max_http_header_pool; n++)
490                 if (pt->ah_pool[n].rxpos != pt->ah_pool[n].rxlen &&
491                     !pt->ah_pool[n].wsi->hdr_parsing_completed) {
492                         pt->fds[pt->ah_pool[n].wsi->position_in_fds_table].revents |=
493                                 pt->fds[pt->ah_pool[n].wsi->position_in_fds_table].events &
494                                         LWS_POLLIN;
495                         if (pt->fds[pt->ah_pool[n].wsi->position_in_fds_table].revents &
496                             LWS_POLLIN)
497                                 forced = 1;
498                 }
499
500         return forced;
501 }
502
503 #ifndef LWS_NO_CLIENT
504
505 LWS_VISIBLE int
506 lws_http_client_read(struct lws *wsi, char **buf, int *len)
507 {
508         int rlen, n;
509
510
511
512         rlen = lws_ssl_capable_read(wsi, (unsigned char *)*buf, *len);
513         if (rlen < 0)
514                 return -1;
515
516         *len = rlen;
517         if (rlen == 0)
518                 return 0;
519
520 //      lwsl_err("%s: read %d\n", __func__, rlen);
521
522         /* allow the source to signal he has data again next time */
523         wsi->client_rx_avail = 0;
524         lws_change_pollfd(wsi, 0, LWS_POLLIN);
525
526         /*
527          * server may insist on transfer-encoding: chunked,
528          * so http client must deal with it
529          */
530 spin_chunks:
531         while (wsi->chunked && (wsi->chunk_parser != ELCP_CONTENT) && *len) {
532                 switch (wsi->chunk_parser) {
533                 case ELCP_HEX:
534                         if ((*buf)[0] == '\x0d') {
535                                 wsi->chunk_parser = ELCP_CR;
536                                 break;
537                         }
538                         n = char_to_hex((*buf)[0]);
539                         if (n < 0)
540                                 return -1;
541                         wsi->chunk_remaining <<= 4;
542                         wsi->chunk_remaining |= n;
543                         break;
544                 case ELCP_CR:
545                         if ((*buf)[0] != '\x0a')
546                                 return -1;
547                         wsi->chunk_parser = ELCP_CONTENT;
548                         lwsl_info("chunk %d\n", wsi->chunk_remaining);
549                         if (wsi->chunk_remaining)
550                                 break;
551                         lwsl_info("final chunk\n");
552                         goto completed;
553
554                 case ELCP_CONTENT:
555                         break;
556
557                 case ELCP_POST_CR:
558                         if ((*buf)[0] != '\x0d')
559                                 return -1;
560
561                         wsi->chunk_parser = ELCP_POST_LF;
562                         break;
563
564                 case ELCP_POST_LF:
565                         if ((*buf)[0] != '\x0a')
566                                 return -1;
567
568                         wsi->chunk_parser = ELCP_HEX;
569                         wsi->chunk_remaining = 0;
570                         break;
571                 }
572                 (*buf)++;
573                 (*len)--;
574         }
575
576         if (wsi->chunked && !wsi->chunk_remaining)
577                 return 0;
578
579         if (wsi->u.http.content_remain &&
580             (int)wsi->u.http.content_remain < *len)
581                 n = wsi->u.http.content_remain;
582         else
583                 n = *len;
584
585         if (wsi->chunked && wsi->chunk_remaining &&
586             wsi->chunk_remaining < n)
587                 n = wsi->chunk_remaining;
588
589 #ifdef LWS_WITH_HTTP_PROXY
590         /* hubbub */
591         if (wsi->perform_rewrite)
592                 lws_rewrite_parse(wsi->rw, (unsigned char *)*buf, n);
593         else
594 #endif
595                 if (user_callback_handle_rxflow(wsi->protocol->callback,
596                                 wsi, LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ,
597                                 wsi->user_space, *buf, n))
598                         return -1;
599
600         if (wsi->chunked && wsi->chunk_remaining) {
601                 (*buf) += n;
602                 wsi->chunk_remaining -= n;
603                 *len -= n;
604         }
605
606         if (wsi->chunked && !wsi->chunk_remaining)
607                 wsi->chunk_parser = ELCP_POST_CR;
608
609         if (wsi->chunked && *len) {
610                 goto spin_chunks;
611         }
612
613         if (wsi->chunked)
614                 return 0;
615
616         wsi->u.http.content_remain -= n;
617         if (wsi->u.http.content_remain || !wsi->u.http.content_length)
618                 return 0;
619
620 completed:
621         if (user_callback_handle_rxflow(wsi->protocol->callback,
622                         wsi, LWS_CALLBACK_COMPLETED_CLIENT_HTTP,
623                         wsi->user_space, NULL, 0))
624                 return -1;
625
626         if (lws_http_transaction_completed_client(wsi))
627                 return -1;
628
629         return 0;
630 }
631 #endif
632
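/*
 * Illustrative sketch only, not part of this file: lws_http_client_read() is
 * normally called from the LWS_CALLBACK_RECEIVE_CLIENT_HTTP handler with a
 * caller-provided buffer; the chunked / content-length handling above then
 * delivers the body via LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ and signals
 * LWS_CALLBACK_COMPLETED_CLIENT_HTTP at the end.  The buffer size is an
 * arbitrary choice for the sketch.
 */
#if 0
        case LWS_CALLBACK_RECEIVE_CLIENT_HTTP: {
                char buffer[1024 + LWS_PRE];
                char *px = buffer + LWS_PRE;
                int lenx = sizeof(buffer) - LWS_PRE;

                /* lws_http_client_read() adjusts px / lenx as it consumes */
                if (lws_http_client_read(wsi, &px, &lenx) < 0)
                        return -1;
                break;
        }
#endif
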
633 /**
634  * lws_service_fd() - Service polled socket with something waiting
635  * @context:    Websocket context
636  * @pollfd:     The pollfd entry describing the socket fd and which events
637  *              happened.
638  *
639  *      This function takes a pollfd that has POLLIN or POLLOUT activity and
640  *      services it according to the state of the associated
641  *      struct lws.
642  *
643  *      The one call deals with all "service" that might happen on a socket
 644  *      including listen accepts, HTTP file serving and websocket protocol traffic.
645  *
646  *      If a pollfd says it has something, you can just pass it to
647  *      lws_service_fd() whether it is a socket handled by lws or not.
 648  *      If it sees it is an lws socket, the traffic will be handled and
 649  *      pollfd->revents will be zeroed before it returns.
650  *
651  *      If the socket is foreign to lws, it leaves revents alone.  So you can
652  *      see if you should service yourself by checking the pollfd revents
653  *      after letting lws try to service it.
654  */
655
656 LWS_VISIBLE int
657 lws_service_fd_tsi(struct lws_context *context, struct lws_pollfd *pollfd, int tsi)
658 {
659         struct lws_context_per_thread *pt = &context->pt[tsi];
660         lws_sockfd_type our_fd = 0, tmp_fd;
661         struct lws_tokens eff_buf;
662         unsigned int pending = 0;
663         struct lws *wsi, *wsi1;
664         char draining_flow = 0;
665         int timed_out = 0;
666         time_t now;
667         int n = 0, m;
668         int more;
669
670         if (!context->protocol_init_done)
671                 lws_protocol_init(context);
672
673         /*
674          * you can call us with pollfd = NULL to just allow the once-per-second
 675          * global timeout checks; if less than a second has passed since the
 676          * last check, it returns immediately.
677          */
678
679         time(&now);
680
681         /*
682          * handle case that system time was uninitialized when lws started
683          * at boot, and got initialized a little later
684          */
685         if (context->time_up < 1464083026 && now > 1464083026)
686                 context->time_up = now;
687
688         /* TODO: if using libev, we should probably use timeout watchers... */
689         if (context->last_timeout_check_s != now) {
690                 context->last_timeout_check_s = now;
691
692                 lws_plat_service_periodic(context);
693
694                 /* global timeout check once per second */
695
696                 if (pollfd)
697                         our_fd = pollfd->fd;
698
699                 wsi = context->pt[tsi].timeout_list;
700                 while (wsi) {
701                         /* we have to take copies, because he may be deleted */
702                         wsi1 = wsi->timeout_list;
703                         tmp_fd = wsi->sock;
704                         if (lws_service_timeout_check(wsi, (unsigned int)now)) {
705                                 /* he did time out... */
706                                 if (tmp_fd == our_fd)
707                                         /* it was the guy we came to service! */
708                                         timed_out = 1;
709                                         /* he's gone, no need to mark as handled */
710                         }
711                         wsi = wsi1;
712                 }
713 #ifdef LWS_WITH_CGI
714                 lws_cgi_kill_terminated(pt);
715 #endif
716 #if 0
717                 {
718                         char s[300], *p = s;
719
720                         for (n = 0; n < context->count_threads; n++)
721                                 p += sprintf(p, " %7lu (%5d), ",
722                                              context->pt[n].count_conns,
723                                              context->pt[n].fds_count);
724
725                         lwsl_notice("load: %s\n", s);
726                 }
727 #endif
728         }
729
730         /* the socket we came to service timed out, nothing to do */
731         if (timed_out)
732                 return 0;
733
734         /* just here for timeout management? */
735         if (!pollfd)
736                 return 0;
737
738         /* no, here to service a socket descriptor */
739         wsi = wsi_from_fd(context, pollfd->fd);
740         if (!wsi)
741                 /* not lws connection ... leave revents alone and return */
742                 return 0;
743
744         /*
745          * so that caller can tell we handled, past here we need to
746          * zero down pollfd->revents after handling
747          */
748
749 #if LWS_POSIX
750         /* handle session socket closed */
751
752         if ((!(pollfd->revents & pollfd->events & LWS_POLLIN)) &&
753             (pollfd->revents & LWS_POLLHUP)) {
754                 wsi->socket_is_permanently_unusable = 1;
755                 lwsl_debug("Session Socket %p (fd=%d) dead\n",
756                                                        (void *)wsi, pollfd->fd);
757
758                 goto close_and_handled;
759         }
760
761 #ifdef _WIN32
762         if (pollfd->revents & LWS_POLLOUT)
763                 wsi->sock_send_blocking = FALSE;
764 #endif
765
766 #endif
767
768         lwsl_debug("fd=%d, revents=%d\n", pollfd->fd, pollfd->revents);
769
770         /* okay, what we came here to do... */
771
772         switch (wsi->mode) {
773         case LWSCM_HTTP_SERVING:
774         case LWSCM_HTTP_CLIENT:
775         case LWSCM_HTTP_SERVING_ACCEPTED:
776         case LWSCM_SERVER_LISTENER:
777         case LWSCM_SSL_ACK_PENDING:
778                 if (wsi->state == LWSS_CLIENT_HTTP_ESTABLISHED)
779                         goto handled;
780
781 #ifdef LWS_WITH_CGI
782                 if (wsi->cgi && (pollfd->revents & LWS_POLLOUT)) {
783                         n = lws_handle_POLLOUT_event(wsi, pollfd);
784                         if (n)
785                                 goto close_and_handled;
786                         goto handled;
787                 }
788 #endif
789                 n = lws_server_socket_service(context, wsi, pollfd);
790                 if (n) /* closed by above */
791                         return 1;
792                 goto handled;
793
794         case LWSCM_WS_SERVING:
795         case LWSCM_WS_CLIENT:
796         case LWSCM_HTTP2_SERVING:
797         case LWSCM_HTTP_CLIENT_ACCEPTED:
798
799                 /* 1: something requested a callback when it was OK to write */
800
801                 if ((pollfd->revents & LWS_POLLOUT) &&
802                     (wsi->state == LWSS_ESTABLISHED ||
803                      wsi->state == LWSS_HTTP2_ESTABLISHED ||
804                      wsi->state == LWSS_HTTP2_ESTABLISHED_PRE_SETTINGS ||
805                      wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
806                      wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE) &&
807                     lws_handle_POLLOUT_event(wsi, pollfd)) {
808                         if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY)
809                                 wsi->state = LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE;
810                         lwsl_info("lws_service_fd: closing\n");
811                         goto close_and_handled;
812                 }
813
814                 if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
815                     wsi->state == LWSS_AWAITING_CLOSE_ACK) {
816                         /*
817                          * we stopped caring about anything except control
818                          * packets.  Force flow control off, defeat tx
819                          * draining.
820                          */
821                         lws_rx_flow_control(wsi, 1);
822                         wsi->u.ws.tx_draining_ext = 0;
823                 }
824
825                 if (wsi->u.ws.tx_draining_ext)
826                         /* we cannot deal with new RX until the TX ext
827                          * path has been drained.  It's because new
828                          * rx will, eg, crap on the wsi rx buf that
829                          * may be needed to retain state.
830                          *
831                          * TX ext drain path MUST go through event loop
832                          * to avoid blocking.
833                          */
834                         break;
835
836                 if (!(wsi->rxflow_change_to & LWS_RXFLOW_ALLOW))
837                         /* We cannot deal with any kind of new RX
838                          * because we are RX-flowcontrolled.
839                          */
840                         break;
841
842                 /* 2: RX Extension needs to be drained
843                  */
844
845                 if (wsi->state == LWSS_ESTABLISHED &&
846                     wsi->u.ws.rx_draining_ext) {
847
848                         lwsl_ext("%s: RX EXT DRAINING: Service\n", __func__);
849 #ifndef LWS_NO_CLIENT
850                         if (wsi->mode == LWSCM_WS_CLIENT) {
851                                 n = lws_client_rx_sm(wsi, 0);
852                                 if (n < 0)
853                                         /* we closed wsi */
854                                         n = 0;
855                         } else
856 #endif
857                                 n = lws_rx_sm(wsi, 0);
858
859                         goto handled;
860                 }
861
862                 if (wsi->u.ws.rx_draining_ext)
863                         /*
864                          * We have RX EXT content to drain, but can't do it
865                          * right now.  That means we cannot do anything lower
866                          * priority either.
867                          */
868                         break;
869
870                 /* 3: RX Flowcontrol buffer needs to be drained
871                  */
872
873                 if (wsi->rxflow_buffer) {
874                         lwsl_info("draining rxflow (len %d)\n",
875                                 wsi->rxflow_len - wsi->rxflow_pos
876                         );
877                         /* well, drain it */
878                         eff_buf.token = (char *)wsi->rxflow_buffer +
879                                                 wsi->rxflow_pos;
880                         eff_buf.token_len = wsi->rxflow_len - wsi->rxflow_pos;
881                         draining_flow = 1;
882                         goto drain;
883                 }
884
885                 /* 4: any incoming (or ah-stashed incoming rx) data ready?
886                  * notice if rx flow going off raced poll(), rx flow wins
887                  */
888
889                 if (!(pollfd->revents & pollfd->events & LWS_POLLIN))
890                         break;
891
892 read:
893                 /* all the union members start with hdr, so even in ws mode
894                  * we can deal with the ah via u.hdr
895                  */
896                 if (wsi->u.hdr.ah) {
897                         lwsl_info("%s: %p: inherited ah rx\n", __func__, wsi);
898                         eff_buf.token_len = wsi->u.hdr.ah->rxlen -
899                                             wsi->u.hdr.ah->rxpos;
900                         eff_buf.token = (char *)wsi->u.hdr.ah->rx +
901                                         wsi->u.hdr.ah->rxpos;
902                 } else {
903                         if (wsi->mode != LWSCM_HTTP_CLIENT_ACCEPTED) {
904                                 eff_buf.token_len = lws_ssl_capable_read(wsi,
905                                         pt->serv_buf, pending ? pending :
906                                         context->pt_serv_buf_size);
907                                 switch (eff_buf.token_len) {
908                                 case 0:
909                                         lwsl_info("%s: zero length read\n", __func__);
910                                         goto close_and_handled;
911                                 case LWS_SSL_CAPABLE_MORE_SERVICE:
912                                         lwsl_info("SSL Capable more service\n");
913                                         n = 0;
914                                         goto handled;
915                                 case LWS_SSL_CAPABLE_ERROR:
916                                         lwsl_info("Closing when error\n");
917                                         goto close_and_handled;
918                                 }
919
920                                 eff_buf.token = (char *)pt->serv_buf;
921                         }
922                 }
923
924 drain:
925 #ifndef LWS_NO_CLIENT
926                 if (wsi->mode == LWSCM_HTTP_CLIENT_ACCEPTED) {
927
928                         /*
929                          * simply mark ourselves as having readable data
930                          * and turn off our POLLIN
931                          */
932                         wsi->client_rx_avail = 1;
933                         lws_change_pollfd(wsi, LWS_POLLIN, 0);
934
935                         /* let user code know, he'll usually ask for writeable
936                          * callback and drain / reenable it there
937                          */
938                         if (user_callback_handle_rxflow(
939                                         wsi->protocol->callback,
940                                         wsi, LWS_CALLBACK_RECEIVE_CLIENT_HTTP,
941                                         wsi->user_space, NULL, 0))
942                                 goto close_and_handled;
943                 }
944 #endif
945                 /*
946                  * give any active extensions a chance to munge the buffer
947                  * before parse.  We pass in a pointer to an lws_tokens struct
948                  * prepared with the default buffer and content length that's in
949                  * there.  Rather than rewrite the default buffer, extensions
950                  * that expect to grow the buffer can adapt .token to
951                  * point to their own per-connection buffer in the extension
952                  * user allocation.  By default with no extensions or no
953                  * extension callback handling, just the normal input buffer is
954                  * used then so it is efficient.
955                  */
956                 do {
957                         more = 0;
958
959                         m = lws_ext_cb_active(wsi, LWS_EXT_CB_PACKET_RX_PREPARSE,
960                                               &eff_buf, 0);
961                         if (m < 0)
962                                 goto close_and_handled;
963                         if (m)
964                                 more = 1;
965
966                         /* service incoming data */
967
968                         if (eff_buf.token_len) {
969                                 /*
 970                                  * if draining from the rxflow buffer, it is not
 971                                  * critical to track what was used, since using
 972                                  * it bumps wsi->rxflow_pos.  If we come
973                                  * around again it will pick up from where it
974                                  * left off.
975                                  */
976                                 n = lws_read(wsi, (unsigned char *)eff_buf.token,
977                                              eff_buf.token_len);
978                                 if (n < 0) {
979                                         /* we closed wsi */
980                                         n = 0;
981                                         goto handled;
982                                 }
983                         }
984
985                         eff_buf.token = NULL;
986                         eff_buf.token_len = 0;
987                 } while (more);
988
989                 if (wsi->u.hdr.ah) {
990                         lwsl_info("%s: %p: detaching inherited used ah\n",
991                                  __func__, wsi);
992                         /* show we used all the pending rx up */
993                         wsi->u.hdr.ah->rxpos = wsi->u.hdr.ah->rxlen;
994                         /* we can run the normal ah detach flow despite
995                          * being in ws union mode, since all union members
996                          * start with hdr */
997                         lws_header_table_detach(wsi, 0);
998                 }
999
1000                 pending = lws_ssl_pending(wsi);
1001                 if (pending) {
1002                         pending = pending > context->pt_serv_buf_size ?
1003                                         context->pt_serv_buf_size : pending;
1004                         goto read;
1005                 }
1006
1007                 if (draining_flow && wsi->rxflow_buffer &&
1008                     wsi->rxflow_pos == wsi->rxflow_len) {
1009                         lwsl_info("flow buffer: drained\n");
1010                         lws_free_set_NULL(wsi->rxflow_buffer);
1011                         /* having drained the rxflow buffer, can rearm POLLIN */
1012 #ifdef LWS_NO_SERVER
1013                         n =
1014 #endif
1015                         _lws_rx_flow_control(wsi);
1016                         /* n ignored, needed for NO_SERVER case */
1017                 }
1018
1019                 break;
1020 #ifdef LWS_WITH_CGI
1021         case LWSCM_CGI: /* we exist to handle a cgi's stdin/out/err data...
1022                          * do the callback on our master wsi
1023                          */
1024                 {
1025                         struct lws_cgi_args args;
1026
1027                         if (wsi->cgi_channel >= LWS_STDOUT &&
1028                             !(pollfd->revents & pollfd->events & LWS_POLLIN))
1029                                 break;
1030                         if (wsi->cgi_channel == LWS_STDIN &&
1031                             !(pollfd->revents & pollfd->events & LWS_POLLOUT))
1032                                 break;
1033
1034                         if (wsi->cgi_channel == LWS_STDIN)
1035                                 if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
1036                                         lwsl_info("failed at set pollfd\n");
1037                                         return 1;
1038                                 }
1039
1040                         args.ch = wsi->cgi_channel;
1041                         args.stdwsi = &wsi->parent->cgi->stdwsi[0];
1042                         args.hdr_state = wsi->hdr_state;
1043
1044                         //lwsl_err("CGI LWS_STDOUT waiting wsi %p mode %d state %d\n",
1045                         //       wsi->parent, wsi->parent->mode, wsi->parent->state);
1046
1047                         if (user_callback_handle_rxflow(
1048                                         wsi->parent->protocol->callback,
1049                                         wsi->parent, LWS_CALLBACK_CGI,
1050                                         wsi->parent->user_space,
1051                                         (void *)&args, 0))
1052                                 return 1;
1053
1054                         break;
1055                 }
1056 #endif
1057         default:
1058 #ifdef LWS_NO_CLIENT
1059                 break;
1060 #else
1061                 n = lws_client_socket_service(context, wsi, pollfd);
1062                 if (n)
1063                         return 1;
1064                 goto handled;
1065 #endif
1066         }
1067
1068         n = 0;
1069         goto handled;
1070
1071 close_and_handled:
1072         lwsl_debug("Close and handled\n");
1073         lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS);
1074         /*
1075          * pollfd may point to something else after the close
1076          * due to pollfd swapping scheme on delete on some platforms
1077          * we can't clear revents now because it'd be the wrong guy's revents
1078          */
1079         return 1;
1080
1081 handled:
1082         pollfd->revents = 0;
1083         return n;
1084 }
1085
1086 LWS_VISIBLE int
1087 lws_service_fd(struct lws_context *context, struct lws_pollfd *pollfd)
1088 {
1089         return lws_service_fd_tsi(context, pollfd, 0);
1090 }
1091
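/*
 * Illustrative sketch only, not part of this file: feeding lws_service_fd()
 * from an application-owned poll() loop.  As documented above, lws zeroes
 * revents for descriptors it owns, so anything still set afterwards belongs
 * to the application.  The fds array, count and handle_own_fd() below are
 * hypothetical application state.
 */
#if 0
        struct lws_pollfd fds[MAX_FDS];   /* struct pollfd on POSIX */
        int n, i;

        n = poll(fds, count_fds, 1000);
        for (i = 0; i < count_fds && n > 0; i++) {
                if (!fds[i].revents)
                        continue;

                lws_service_fd(context, &fds[i]);

                /* still set?  then it was not an lws socket */
                if (fds[i].revents)
                        handle_own_fd(&fds[i]);
        }
#endif
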
1092 /**
1093  * lws_service() - Service any pending websocket activity
1094  * @context:    Websocket context
 1095  * @timeout_ms: Timeout for poll; 0 means return immediately if nothing needs
 1096  *              service; otherwise block and service immediately, returning
 1097  *              after the timeout if nothing needed service.
1098  *
 1099  *      This function deals with any pending websocket traffic, for two
1100  *      kinds of event.  It handles these events on both server and client
1101  *      types of connection the same.
1102  *
1103  *      1) Accept new connections to our context's server
1104  *
1105  *      2) Call the receive callback for incoming frame data received by
1106  *          server or client connections.
1107  *
 1108  *      You need to call this service function periodically for all the above
1109  *      functions to happen; if your application is single-threaded you can
1110  *      just call it in your main event loop.
1111  *
1112  *      Alternatively you can fork a new process that asynchronously handles
1113  *      calling this service in a loop.  In that case you are happy if this
1114  *      call blocks your thread until it needs to take care of something and
1115  *      would call it with a large nonzero timeout.  Your loop then takes no
1116  *      CPU while there is nothing happening.
1117  *
1118  *      If you are calling it in a single-threaded app, you don't want it to
1119  *      wait around blocking other things in your loop from happening, so you
1120  *      would call it with a timeout_ms of 0, so it returns immediately if
1121  *      nothing is pending, or as soon as it services whatever was pending.
1122  */
1123
1124 LWS_VISIBLE int
1125 lws_service(struct lws_context *context, int timeout_ms)
1126 {
1127         return lws_plat_service(context, timeout_ms);
1128 }
1129
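/*
 * Illustrative sketch only, not part of this file: the simplest way to drive
 * the service machinery in this file is a single-threaded loop around
 * lws_service().  The creation info fields, protocols table and "interrupted"
 * flag are hypothetical application details.
 */
#if 0
        struct lws_context_creation_info info;
        struct lws_context *context;

        memset(&info, 0, sizeof(info));
        info.port = 7681;
        info.protocols = protocols;     /* application-defined protocol table */

        context = lws_create_context(&info);
        if (!context)
                return 1;

        while (!interrupted)
                /* 50ms poll timeout; returns sooner when there is work to do */
                lws_service(context, 50);

        lws_context_destroy(context);
#endif
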
1130 LWS_VISIBLE int
1131 lws_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
1132 {
1133         return lws_plat_service_tsi(context, timeout_ms, tsi);
1134 }
1135