publicly document lws_service_fd timeout servicing
lib/service.c
1 /*
2  * libwebsockets - small server side websockets and web server implementation
3  *
4  * Copyright (C) 2010-2015 Andy Green <andy@warmcat.com>
5  *
6  *  This library is free software; you can redistribute it and/or
7  *  modify it under the terms of the GNU Lesser General Public
8  *  License as published by the Free Software Foundation:
9  *  version 2.1 of the License.
10  *
11  *  This library is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  *  Lesser General Public License for more details.
15  *
16  *  You should have received a copy of the GNU Lesser General Public
17  *  License along with this library; if not, write to the Free Software
18  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
19  *  MA  02110-1301  USA
20  */
21
22 #include "private-libwebsockets.h"
23
24 static int
25 lws_callback_as_writeable(struct lws *wsi)
26 {
27         int n;
28
29         switch (wsi->mode) {
30         case LWSCM_WS_CLIENT:
31                 n = LWS_CALLBACK_CLIENT_WRITEABLE;
32                 break;
33         case LWSCM_WSCL_ISSUE_HTTP_BODY:
34                 n = LWS_CALLBACK_CLIENT_HTTP_WRITEABLE;
35                 break;
36         case LWSCM_WS_SERVING:
37                 n = LWS_CALLBACK_SERVER_WRITEABLE;
38                 break;
39         default:
40                 n = LWS_CALLBACK_HTTP_WRITEABLE;
41                 break;
42         }
43         lwsl_debug("%s: %p (user=%p)\n", __func__, wsi, wsi->user_space);
44         return user_callback_handle_rxflow(wsi->protocol->callback,
45                                            wsi, (enum lws_callback_reasons) n,
46                                            wsi->user_space, NULL, 0);
47 }
48
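/*
 * Illustrative sketch (kept out of the build with #if 0, not part of lws
 * itself): what a user protocol callback typically does when it gets one of
 * the writeable reasons issued above.  The function name and the "hello"
 * payload are hypothetical; the fixed requirements are the LWS_PRE headroom
 * before the payload and sending with lws_write().
 */
#if 0
static int
example_writeable_callback(struct lws *wsi, enum lws_callback_reasons reason,
			   void *user, void *in, size_t len)
{
	/* reserve LWS_PRE bytes before the payload for protocol framing */
	unsigned char buf[LWS_PRE + 64], *p = &buf[LWS_PRE];
	int n;

	switch (reason) {
	case LWS_CALLBACK_SERVER_WRITEABLE:
	case LWS_CALLBACK_CLIENT_WRITEABLE:
		memcpy(p, "hello", 5);
		n = lws_write(wsi, p, 5, LWS_WRITE_TEXT);
		if (n < 5)
			return -1; /* fatal: close the connection */
		break;
	default:
		break;
	}

	return 0;
}
#endif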
49 int
50 lws_handle_POLLOUT_event(struct lws *wsi, struct lws_pollfd *pollfd)
51 {
52         int write_type = LWS_WRITE_PONG;
53         struct lws_tokens eff_buf;
54 #ifdef LWS_USE_HTTP2
55         struct lws *wsi2;
56 #endif
57         int ret, m, n;
58
59         //lwsl_err("%s: %p\n", __func__, wsi);
60
61         /*
62          * the user callback is the lowest priority to get these notifications,
63          * since other pending things must be sent first to avoid disordering
64          */
65
66         /* Priority 1: pending truncated sends are incomplete ws fragments.
67          *             If anything else were sent first, the protocol would be
68          *             corrupted.
69          */
70         if (wsi->trunc_len) {
71                 if (lws_issue_raw(wsi, wsi->trunc_alloc + wsi->trunc_offset,
72                                   wsi->trunc_len) < 0) {
73                         lwsl_info("%s signalling to close\n", __func__);
74                         return -1;
75                 }
76                 /* leave POLLOUT active either way */
77                 return 0;
78         } else
79                 if (wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE)
80                         return -1; /* retry closing now */
81
82         if (wsi->mode == LWSCM_WSCL_ISSUE_HTTP_BODY)
83                 goto user_service;
84
85
86 #ifdef LWS_USE_HTTP2
87         /* Priority 2: protocol packets
88          */
89         if (wsi->pps) {
90                 lwsl_info("servicing pps %d\n", wsi->pps);
91                 switch (wsi->pps) {
92                 case LWS_PPS_HTTP2_MY_SETTINGS:
93                 case LWS_PPS_HTTP2_ACK_SETTINGS:
94                         lws_http2_do_pps_send(lws_get_context(wsi), wsi);
95                         break;
96                 default:
97                         break;
98                 }
99                 wsi->pps = LWS_PPS_NONE;
100                 lws_rx_flow_control(wsi, 1);
101
102                 return 0; /* leave POLLOUT active */
103         }
104 #endif
105
106 #ifdef LWS_WITH_CGI
107         if (wsi->cgi)
108                 goto user_service_go_again;
109 #endif
110
111         /* Priority 3: pending control packets (pong or close)
112          */
113         if ((wsi->state == LWSS_ESTABLISHED &&
114              wsi->u.ws.ping_pending_flag) ||
115             (wsi->state == LWSS_RETURNED_CLOSE_ALREADY &&
116              wsi->u.ws.payload_is_close)) {
117
118                 if (wsi->u.ws.payload_is_close)
119                         write_type = LWS_WRITE_CLOSE;
120
121                 n = lws_write(wsi, &wsi->u.ws.ping_payload_buf[LWS_PRE],
122                               wsi->u.ws.ping_payload_len, write_type);
123                 if (n < 0)
124                         return -1;
125
126                 /* well, it has been sent, mark it done */
127                 wsi->u.ws.ping_pending_flag = 0;
128                 if (wsi->u.ws.payload_is_close)
129                         /* oh... a close frame was it... then we are done */
130                         return -1;
131
132                 /* otherwise for PING, leave POLLOUT active either way */
133                 return 0;
134         }
135
136         if (wsi->state == LWSS_ESTABLISHED &&
137             !wsi->socket_is_permanently_unusable &&
138             wsi->u.ws.send_check_ping) {
139
140                 lwsl_info("issuing ping on wsi %p\n", wsi);
141                 wsi->u.ws.send_check_ping = 0;
142                 n = lws_write(wsi, &wsi->u.ws.ping_payload_buf[LWS_PRE],
143                               0, LWS_WRITE_PING);
144                 if (n < 0)
145                         return -1;
146
147                 /*
148                  * we were apparently able to send the PING in a reasonable
149                  * time; now start the clock on the peer, who must send the
150                  * PONG back in a reasonable time.
151                  */
152
153                 lws_set_timeout(wsi, PENDING_TIMEOUT_WS_PONG_CHECK_GET_PONG,
154                                 wsi->context->timeout_secs);
155
156                 return 0;
157         }
158
159         /* Priority 4: if we are closing, we are not allowed to send more data
160          *             frags, so the user callback and tx ext flush are banned now
161          */
162         if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY)
163                 goto user_service;
164
165         /* Priority 5: Tx path extension with more to send
166          *
167          *             These are handled as new fragments each time around,
168          *             so we must block the new writeable callback to enforce
169          *             payload ordering; but since they are always complete
170          *             fragments, control packets can interleave OK.
171          */
172         if (wsi->state == LWSS_ESTABLISHED && wsi->u.ws.tx_draining_ext) {
173                 lwsl_ext("SERVICING TX EXT DRAINING\n");
174                 if (lws_write(wsi, NULL, 0, LWS_WRITE_CONTINUATION) < 0)
175                         return -1;
176                 /* leave POLLOUT active */
177                 return 0;
178         }
179
180         /* Priority 6: user can get the callback
181          */
182         m = lws_ext_cb_active(wsi, LWS_EXT_CB_IS_WRITEABLE, NULL, 0);
183         if (m)
184                 return -1;
185 #ifndef LWS_NO_EXTENSIONS
186         if (!wsi->extension_data_pending)
187                 goto user_service;
188 #endif
189         /*
190          * check in on the active extensions, see if they
191          * had pending stuff to spill... they need to get the
192          * first look-in, otherwise the sequence will be disordered
193          *
194          * NULL, zero-length eff_buf means just spill pending
195          */
196
197         ret = 1;
198         while (ret == 1) {
199
200                 /* default to nobody has more to spill */
201
202                 ret = 0;
203                 eff_buf.token = NULL;
204                 eff_buf.token_len = 0;
205
206                 /* give every extension a chance to spill */
207
208                 m = lws_ext_cb_active(wsi,
209                                         LWS_EXT_CB_PACKET_TX_PRESEND,
210                                                &eff_buf, 0);
211                 if (m < 0) {
212                         lwsl_err("ext reports fatal error\n");
213                         return -1;
214                 }
215                 if (m)
216                         /*
217                          * at least one extension told us it has more
218                          * to spill, so we will go around again afterwards
219                          */
220                         ret = 1;
221
222                 /* assuming they gave us something to send, send it */
223
224                 if (eff_buf.token_len) {
225                         n = lws_issue_raw(wsi, (unsigned char *)eff_buf.token,
226                                           eff_buf.token_len);
227                         if (n < 0) {
228                                 lwsl_info("closing from POLLOUT spill\n");
229                                 return -1;
230                         }
231                         /*
232                          * Keep the amount spilled small to minimize the chance of a partial send
233                          */
234                         if (n != eff_buf.token_len) {
235                                 lwsl_err("Unable to spill ext %d vs %d\n",
236                                                           eff_buf.token_len, n);
237                                 return -1;
238                         }
239                 } else
240                         continue;
241
242                 /* no extension has more to spill */
243
244                 if (!ret)
245                         continue;
246
247                 /*
248                  * There's more to spill from an extension, but we just sent
249                  * something... did that leave the pipe choked?
250                  */
251
252                 if (!lws_send_pipe_choked(wsi))
253                         /* no we could add more */
254                         continue;
255
256                 lwsl_info("choked in POLLOUT service\n");
257
258                 /*
259                  * Yes, it's choked.  Leave the POLLOUT masked on so we will
260                  * come back here when it is unchoked.  Don't call the user
261                  * callback yet, to enforce the ordering of spilling; it gets
262                  * called when we come back here and there's nothing more to spill.
263                  */
264
265                 return 0;
266         }
267 #ifndef LWS_NO_EXTENSIONS
268         wsi->extension_data_pending = 0;
269 #endif
270 user_service:
271         /* one shot */
272
273         if (pollfd)
274                 if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
275                         lwsl_info("failed at set pollfd\n");
276                         return 1;
277                 }
278
279         if (wsi->mode != LWSCM_WSCL_ISSUE_HTTP_BODY &&
280             !wsi->hdr_parsing_completed)
281                 return 0;
282
283 #ifdef LWS_WITH_CGI
284 user_service_go_again:
285 #endif
286
287 #ifdef LWS_USE_HTTP2
288         /*
289          * we are the 'network wsi' for potentially many muxed child wsi with
290          * no network connection of their own, who have to use us for all their
291          * network actions.  So we use a round-robin scheme to share out the
292          * POLLOUT notifications to our children.
293          *
294          * But because any child could exhaust the socket's ability to take
295          * writes, we can only let one child get notified each time.
296          *
297          * In addition children may be closed / deleted / added between POLLOUT
298          * notifications, so we can't hold pointers across them.
299          */
300
301         if (wsi->mode != LWSCM_HTTP2_SERVING) {
302                 lwsl_info("%s: non http2\n", __func__);
303                 goto notify;
304         }
305
306         wsi->u.http2.requested_POLLOUT = 0;
307         if (!wsi->u.http2.initialized) {
308                 lwsl_info("pollout on uninitialized http2 conn\n");
309                 return 0;
310         }
311
312         lwsl_info("%s: doing children\n", __func__);
313
314         wsi2 = wsi;
315         do {
316                 wsi2 = wsi2->u.http2.next_child_wsi;
317                 lwsl_info("%s: child %p\n", __func__, wsi2);
318                 if (!wsi2)
319                         continue;
320                 if (!wsi2->u.http2.requested_POLLOUT)
321                         continue;
322                 wsi2->u.http2.requested_POLLOUT = 0;
323                 if (lws_callback_as_writeable(wsi2)) {
324                         lwsl_debug("Closing POLLOUT child\n");
325                         lws_close_free_wsi(wsi2, LWS_CLOSE_STATUS_NOSTATUS);
326                 }
327                 wsi2 = wsi;
328         } while (wsi2 != NULL && !lws_send_pipe_choked(wsi));
329
330         lwsl_info("%s: completed\n", __func__);
331
332         return 0;
333 notify:
334 #endif
335         return lws_callback_as_writeable(wsi);
336 }
337
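/*
 * Illustrative sketch (#if 0, not part of lws): user code should not call
 * lws_write() whenever it likes; it asks for a POLLOUT-driven callback with
 * lws_callback_on_writable() and does the write inside the resulting
 * ..._WRITEABLE callback, so the priority ordering enforced above is
 * respected.  The receive handler shown is hypothetical.
 */
#if 0
static int
example_rx_callback(struct lws *wsi, enum lws_callback_reasons reason,
		    void *user, void *in, size_t len)
{
	switch (reason) {
	case LWS_CALLBACK_RECEIVE:
		/* don't lws_write() here; note the pending work, then... */
		lws_callback_on_writable(wsi);
		break;
	default:
		break;
	}

	return 0;
}
#endif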
338 int
339 lws_service_timeout_check(struct lws *wsi, unsigned int sec)
340 {
341 //#if LWS_POSIX
342         struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
343         int n = 0;
344 //#endif
345
346         (void)n;
347
348         /*
349          * if extensions want in on it (eg, we are a mux parent)
350          * give them a chance to service child timeouts
351          */
352         if (lws_ext_cb_active(wsi, LWS_EXT_CB_1HZ, NULL, sec) < 0)
353                 return 0;
354
355         if (!wsi->pending_timeout)
356                 return 0;
357
358         /*
359          * if we went beyond the allowed time, kill the
360          * connection
361          */
362         if ((time_t)sec > wsi->pending_timeout_limit) {
363 //#if LWS_POSIX
364                 if (wsi->sock != LWS_SOCK_INVALID && wsi->position_in_fds_table >= 0)
365                         n = pt->fds[wsi->position_in_fds_table].events;
366
367                 /* no need to log normal idle keepalive timeout */
368                 if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
369                         lwsl_notice("wsi %p: TIMEDOUT WAITING on %d (did hdr %d, ah %p, wl %d, pfd events %d)\n",
370                             (void *)wsi, wsi->pending_timeout,
371                             wsi->hdr_parsing_completed, wsi->u.hdr.ah,
372                             pt->ah_wait_list_length, n);
373 //#endif
374                 /*
375                  * Since he failed a timeout, he already had a chance to do
376                  * something and was unable to... that includes situations like
377                  * half closed connections.  So process this "failed timeout"
378                  * close as a violent death and don't try to do protocol
379                  * cleanup like flush partials.
380                  */
381                 wsi->socket_is_permanently_unusable = 1;
382                 if (wsi->mode == LWSCM_WSCL_WAITING_SSL)
383                         wsi->vhost->protocols[0].callback(wsi,
384                                 LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
385                                 wsi->user_space, (void *)"Timed out waiting SSL", 21);
386
387                 lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS);
388
389                 return 1;
390         }
391
392         return 0;
393 }
394
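/*
 * Illustrative sketch (#if 0, not part of lws): how a pending timeout gets
 * armed and cleared.  lws_service_timeout_check() above only fires for wsi
 * with a nonzero wsi->pending_timeout, set via lws_set_timeout(); the helper
 * name and the 30s figure below are hypothetical.
 */
#if 0
static void
example_arm_and_clear_timeout(struct lws *wsi)
{
	/* close the wsi if the HTTP content has not arrived within 30s... */
	lws_set_timeout(wsi, PENDING_TIMEOUT_HTTP_CONTENT, 30);

	/* ...and cancel the pending timeout once the content has arrived */
	lws_set_timeout(wsi, NO_PENDING_TIMEOUT, 0);
}
#endif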
395 int lws_rxflow_cache(struct lws *wsi, unsigned char *buf, int n, int len)
396 {
397         /* rx is flow-controlled, don't process the remaining input now */
398         if (wsi->rxflow_buffer) {
399                 /* rxflow while we were spilling prev rxflow */
400                 lwsl_info("stalling in existing rxflow buf\n");
401                 return 1;
402         }
403
404         /* a new rxflow, buffer it and warn caller */
405         lwsl_info("new rxflow input buffer len %d\n", len - n);
406         wsi->rxflow_buffer = lws_malloc(len - n);
407         if (!wsi->rxflow_buffer)
408                 return -1;
409         wsi->rxflow_len = len - n;
410         wsi->rxflow_pos = 0;
411         memcpy(wsi->rxflow_buffer, buf + n, len - n);
412
413         return 0;
414 }
415
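/*
 * Illustrative sketch (#if 0, not part of lws): the rxflow buffer above is
 * what fills up while user code has throttled receive with
 * lws_rx_flow_control(); re-enabling flow lets the buffered rx drain through
 * the normal service path.  The helper name is hypothetical.
 */
#if 0
static void
example_throttle_rx(struct lws *wsi, int busy)
{
	if (busy)
		/* stop delivering RX to the callback; new rx gets buffered */
		lws_rx_flow_control(wsi, 0);
	else
		/* allow RX again; buffered rx is drained first by the service */
		lws_rx_flow_control(wsi, 1);
}
#endif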
416 /* this is used by the platform service code to stop us waiting for network
417  * activity in poll() when we have something that already needs service
418  */
419
420 LWS_VISIBLE LWS_EXTERN int
421 lws_service_adjust_timeout(struct lws_context *context, int timeout_ms, int tsi)
422 {
423         struct lws_context_per_thread *pt = &context->pt[tsi];
424         int n;
425
426         /* Figure out if we really want to wait in poll(); we only need to
427          * wait if there is really nothing already to do and we have to
428          * wait for something from the network
429          */
430
431         /* 1) if we know we are draining rx ext, do not wait in poll */
432         if (pt->rx_draining_ext_list)
433                 return 0;
434
435 #ifdef LWS_OPENSSL_SUPPORT
436         /* 2) if we know we have non-network pending data, do not wait in poll */
437         if (lws_ssl_anybody_has_buffered_read_tsi(context, tsi)) {
438                 lwsl_info("ssl buffered read\n");
439                 return 0;
440         }
441 #endif
442
443         /* 3) if any ah has pending rx, do not wait in poll */
444         for (n = 0; n < context->max_http_header_pool; n++)
445                 if (pt->ah_pool[n].rxpos != pt->ah_pool[n].rxlen) {
446                         /* any ah with pending rx must be attached to someone */
447                         if (!pt->ah_pool[n].wsi) {
448                                 lwsl_err("%s: assert: no wsi attached to ah\n", __func__);
449                                 assert(0);
450                         }
451                         return 0;
452                 }
453
454         return timeout_ms;
455 }
456
457 /*
458  * wsi that need POLLIN service again without waiting for network action
459  * can have POLLIN forced here if not flow-controlled, so they will get service.
460  *
461  * Return nonzero if anybody got their POLLIN faked
462  */
463 int
464 lws_service_flag_pending(struct lws_context *context, int tsi)
465 {
466         struct lws_context_per_thread *pt = &context->pt[tsi];
467 #ifdef LWS_OPENSSL_SUPPORT
468         struct lws *wsi_next;
469 #endif
470         struct lws *wsi;
471         int forced = 0;
472         int n;
473
474         /* POLLIN faking */
475
476         /*
477          * 1) For all guys with already-available ext data to drain, if they are
478          * not flowcontrolled, fake their POLLIN status
479          */
480         wsi = pt->rx_draining_ext_list;
481         while (wsi) {
482                 pt->fds[wsi->position_in_fds_table].revents |=
483                         pt->fds[wsi->position_in_fds_table].events & LWS_POLLIN;
484                 if (pt->fds[wsi->position_in_fds_table].revents &
485                     LWS_POLLIN)
486                         forced = 1;
487                 wsi = wsi->u.ws.rx_draining_ext_list;
488         }
489
490 #ifdef LWS_OPENSSL_SUPPORT
491         /*
492          * 2) For all guys with buffered SSL read data already saved up, if they
493          * are not flowcontrolled, fake their POLLIN status so they'll get
494          * service to use up the buffered incoming data, even though their
495          * network socket may have nothing
496          */
497         wsi = pt->pending_read_list;
498         while (wsi) {
499                 wsi_next = wsi->pending_read_list_next;
500                 pt->fds[wsi->position_in_fds_table].revents |=
501                         pt->fds[wsi->position_in_fds_table].events & LWS_POLLIN;
502                 if (pt->fds[wsi->position_in_fds_table].revents & LWS_POLLIN) {
503                         forced = 1;
504                         /*
505                          * he's going to get serviced now, take him off the
506                          * list of guys with buffered SSL.  If he still has some
507                          * at the end of the service, he'll get put back on the
508                          * list then.
509                          */
510                         lws_ssl_remove_wsi_from_buffered_list(wsi);
511                 }
512
513                 wsi = wsi_next;
514         }
515 #endif
516         /*
517          * 3) For any wsi who have an ah with pending RX who did not
518          * complete their current headers, and are not flowcontrolled,
519          * fake their POLLIN status so they will be able to drain the
520          * rx buffered in the ah
521          */
522         for (n = 0; n < context->max_http_header_pool; n++)
523                 if (pt->ah_pool[n].rxpos != pt->ah_pool[n].rxlen &&
524                     !pt->ah_pool[n].wsi->hdr_parsing_completed) {
525                         pt->fds[pt->ah_pool[n].wsi->position_in_fds_table].revents |=
526                                 pt->fds[pt->ah_pool[n].wsi->position_in_fds_table].events &
527                                         LWS_POLLIN;
528                         if (pt->fds[pt->ah_pool[n].wsi->position_in_fds_table].revents &
529                             LWS_POLLIN)
530                                 forced = 1;
531                 }
532
533         return forced;
534 }
535
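/*
 * Illustrative sketch (#if 0, not part of lws): roughly how a platform
 * service implementation ties lws_service_adjust_timeout() and
 * lws_service_flag_pending() together around poll().  The real code lives in
 * the per-platform lws_plat_service_tsi() implementations; this only shows
 * the shape of it and the function name is hypothetical.
 */
#if 0
static int
example_service_pass(struct lws_context *context, int timeout_ms, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	int n;

	/* don't sleep in poll() if something already needs service */
	timeout_ms = lws_service_adjust_timeout(context, timeout_ms, tsi);

	n = poll(pt->fds, pt->fds_count, timeout_ms);
	if (n < 0)
		return -1;

	/* fake POLLIN for fds holding buffered ext / SSL / ah data */
	lws_service_flag_pending(context, tsi);

	/* service every fd with revents set (non-lws fds are left alone) */
	for (n = 0; n < (int)pt->fds_count; n++)
		if (pt->fds[n].revents)
			if (lws_service_fd_tsi(context, &pt->fds[n], tsi) < 0)
				return -1;

	return 0;
}
#endif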
536 #ifndef LWS_NO_CLIENT
537
538 LWS_VISIBLE int
539 lws_http_client_read(struct lws *wsi, char **buf, int *len)
540 {
541         int rlen, n;
542
543
544
545         rlen = lws_ssl_capable_read(wsi, (unsigned char *)*buf, *len);
546         if (rlen < 0)
547                 return -1;
548
549         *len = rlen;
550         if (rlen == 0)
551                 return 0;
552
553 //      lwsl_err("%s: read %d\n", __func__, rlen);
554
555         /* allow the source to signal he has data again next time */
556         wsi->client_rx_avail = 0;
557         lws_change_pollfd(wsi, 0, LWS_POLLIN);
558
559         /*
560          * server may insist on transfer-encoding: chunked,
561          * so http client must deal with it
562          */
563 spin_chunks:
564         while (wsi->chunked && (wsi->chunk_parser != ELCP_CONTENT) && *len) {
565                 switch (wsi->chunk_parser) {
566                 case ELCP_HEX:
567                         if ((*buf)[0] == '\x0d') {
568                                 wsi->chunk_parser = ELCP_CR;
569                                 break;
570                         }
571                         n = char_to_hex((*buf)[0]);
572                         if (n < 0)
573                                 return -1;
574                         wsi->chunk_remaining <<= 4;
575                         wsi->chunk_remaining |= n;
576                         break;
577                 case ELCP_CR:
578                         if ((*buf)[0] != '\x0a')
579                                 return -1;
580                         wsi->chunk_parser = ELCP_CONTENT;
581                         lwsl_info("chunk %d\n", wsi->chunk_remaining);
582                         if (wsi->chunk_remaining)
583                                 break;
584                         lwsl_info("final chunk\n");
585                         goto completed;
586
587                 case ELCP_CONTENT:
588                         break;
589
590                 case ELCP_POST_CR:
591                         if ((*buf)[0] != '\x0d')
592                                 return -1;
593
594                         wsi->chunk_parser = ELCP_POST_LF;
595                         break;
596
597                 case ELCP_POST_LF:
598                         if ((*buf)[0] != '\x0a')
599                                 return -1;
600
601                         wsi->chunk_parser = ELCP_HEX;
602                         wsi->chunk_remaining = 0;
603                         break;
604                 }
605                 (*buf)++;
606                 (*len)--;
607         }
608
609         if (wsi->chunked && !wsi->chunk_remaining)
610                 return 0;
611
612         if (wsi->u.http.content_remain &&
613             (int)wsi->u.http.content_remain < *len)
614                 n = wsi->u.http.content_remain;
615         else
616                 n = *len;
617
618         if (wsi->chunked && wsi->chunk_remaining &&
619             wsi->chunk_remaining < n)
620                 n = wsi->chunk_remaining;
621
622 #ifdef LWS_WITH_HTTP_PROXY
623         /* hubbub */
624         if (wsi->perform_rewrite)
625                 lws_rewrite_parse(wsi->rw, (unsigned char *)*buf, n);
626         else
627 #endif
628                 if (user_callback_handle_rxflow(wsi->protocol->callback,
629                                 wsi, LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ,
630                                 wsi->user_space, *buf, n))
631                         return -1;
632
633         if (wsi->chunked && wsi->chunk_remaining) {
634                 (*buf) += n;
635                 wsi->chunk_remaining -= n;
636                 *len -= n;
637         }
638
639         if (wsi->chunked && !wsi->chunk_remaining)
640                 wsi->chunk_parser = ELCP_POST_CR;
641
642         if (wsi->chunked && *len) {
643                 goto spin_chunks;
644         }
645
646         if (wsi->chunked)
647                 return 0;
648
649         wsi->u.http.content_remain -= n;
650         if (wsi->u.http.content_remain || !wsi->u.http.content_length)
651                 return 0;
652
653 completed:
654         if (user_callback_handle_rxflow(wsi->protocol->callback,
655                         wsi, LWS_CALLBACK_COMPLETED_CLIENT_HTTP,
656                         wsi->user_space, NULL, 0))
657                 return -1;
658
659         if (lws_http_transaction_completed_client(wsi))
660                 return -1;
661
662         return 0;
663 }
664 #endif
665
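/*
 * Illustrative sketch (#if 0, not part of lws): how client code is expected
 * to drive lws_http_client_read() from its LWS_CALLBACK_RECEIVE_CLIENT_HTTP
 * handler, giving it a buffer with LWS_PRE headroom.  The buffer size and
 * function name are hypothetical; the consumed (dechunked) data is delivered
 * back to the same callback as LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ, as the
 * code above shows.
 */
#if 0
static int
example_client_http_callback(struct lws *wsi, enum lws_callback_reasons reason,
			     void *user, void *in, size_t len)
{
	char buf[LWS_PRE + 1024], *px = &buf[LWS_PRE];
	int lenx = sizeof(buf) - LWS_PRE;

	switch (reason) {
	case LWS_CALLBACK_RECEIVE_CLIENT_HTTP:
		if (lws_http_client_read(wsi, &px, &lenx) < 0)
			return -1;
		break;
	case LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ:
		/* 'in' / 'len' carry the body payload */
		break;
	default:
		break;
	}

	return 0;
}
#endif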
666 LWS_VISIBLE int
667 lws_service_fd_tsi(struct lws_context *context, struct lws_pollfd *pollfd, int tsi)
668 {
669         struct lws_context_per_thread *pt = &context->pt[tsi];
670         lws_sockfd_type our_fd = 0, tmp_fd;
671         struct lws_tokens eff_buf;
672         unsigned int pending = 0;
673         struct lws *wsi, *wsi1;
674         char draining_flow = 0;
675         int timed_out = 0;
676         time_t now;
677         int n = 0, m;
678         int more;
679
680         if (!context->protocol_init_done)
681                 lws_protocol_init(context);
682
683         time(&now);
684
685         /*
686          * handle case that system time was uninitialized when lws started
687          * at boot, and got initialized a little later
688          */
689         if (context->time_up < 1464083026 && now > 1464083026)
690                 context->time_up = now;
691
692         /* TODO: if using libev, we should probably use timeout watchers... */
693         if (context->last_timeout_check_s != now) {
694                 context->last_timeout_check_s = now;
695
696                 lws_plat_service_periodic(context);
697
698                 /* global timeout check once per second */
699
700                 if (pollfd)
701                         our_fd = pollfd->fd;
702
703                 wsi = context->pt[tsi].timeout_list;
704                 while (wsi) {
705                         /* we have to take copies, because he may be deleted */
706                         wsi1 = wsi->timeout_list;
707                         tmp_fd = wsi->sock;
708                         if (lws_service_timeout_check(wsi, (unsigned int)now)) {
709                                 /* he did time out... */
710                                 if (tmp_fd == our_fd)
711                                         /* it was the guy we came to service! */
712                                         timed_out = 1;
713                                         /* he's gone, no need to mark as handled */
714                         }
715                         wsi = wsi1;
716                 }
717 #ifdef LWS_WITH_CGI
718                 lws_cgi_kill_terminated(pt);
719 #endif
720 #if 0
721                 {
722                         char s[300], *p = s;
723
724                         for (n = 0; n < context->count_threads; n++)
725                                 p += sprintf(p, " %7lu (%5d), ",
726                                              context->pt[n].count_conns,
727                                              context->pt[n].fds_count);
728
729                         lwsl_notice("load: %s\n", s);
730                 }
731 #endif
732         }
733
734         /*
735          * at intervals, check for ws connections needing ping-pong checks
736          */
737
738         if (context->ws_ping_pong_interval &&
739             context->last_ws_ping_pong_check_s < now + 10) {
740                 context->last_ws_ping_pong_check_s = now;
741
742                 struct lws_vhost *vh = context->vhost_list;
743                 while (vh) {
744                         for (n = 0; n < vh->count_protocols; n++) {
745                                 wsi = vh->same_vh_protocol_list[n];
746
747                                 while (wsi) {
748                                         if (wsi->state == LWSS_ESTABLISHED &&
749                                             !wsi->socket_is_permanently_unusable &&
750                                             !wsi->u.ws.send_check_ping &&
751                                             wsi->u.ws.time_next_ping_check &&
752                                             wsi->u.ws.time_next_ping_check < now) {
753
754                                                 lwsl_info("requesting ping-pong on wsi %p\n", wsi);
755                                                 wsi->u.ws.send_check_ping = 1;
756                                                 lws_set_timeout(wsi, PENDING_TIMEOUT_WS_PONG_CHECK_SEND_PING,
757                                                                 context->timeout_secs);
758                                                 lws_callback_on_writable(wsi);
759                                                 wsi->u.ws.time_next_ping_check = now +
760                                                                 wsi->context->ws_ping_pong_interval;
761                                         }
762                                         wsi = wsi->same_vh_protocol_next;
763                                 }
764                         }
765                         vh = vh->vhost_next;
766                 }
767         }
768
769         /* the socket we came to service timed out, nothing to do */
770         if (timed_out)
771                 return 0;
772
773         /* just here for timeout management? */
774         if (!pollfd)
775                 return 0;
776
777         /* no, here to service a socket descriptor */
778         wsi = wsi_from_fd(context, pollfd->fd);
779         if (!wsi)
780                 /* not lws connection ... leave revents alone and return */
781                 return 0;
782
783         /*
784          * so that the caller can tell we handled it, past here we need to
785          * zero down pollfd->revents after handling
786          */
787
788 #if LWS_POSIX
789         /* handle session socket closed */
790
791         if ((!(pollfd->revents & pollfd->events & LWS_POLLIN)) &&
792             (pollfd->revents & LWS_POLLHUP)) {
793                 wsi->socket_is_permanently_unusable = 1;
794                 lwsl_debug("Session Socket %p (fd=%d) dead\n",
795                                                        (void *)wsi, pollfd->fd);
796
797                 goto close_and_handled;
798         }
799
800 #ifdef _WIN32
801         if (pollfd->revents & LWS_POLLOUT)
802                 wsi->sock_send_blocking = FALSE;
803 #endif
804
805 #endif
806
807         lwsl_debug("fd=%d, revents=%d\n", pollfd->fd, pollfd->revents);
808
809         /* okay, what we came here to do... */
810
811         switch (wsi->mode) {
812         case LWSCM_HTTP_SERVING:
813         case LWSCM_HTTP_CLIENT:
814         case LWSCM_HTTP_SERVING_ACCEPTED:
815         case LWSCM_SERVER_LISTENER:
816         case LWSCM_SSL_ACK_PENDING:
817                 if (wsi->state == LWSS_CLIENT_HTTP_ESTABLISHED)
818                         goto handled;
819
820 #ifdef LWS_WITH_CGI
821                 if (wsi->cgi && (pollfd->revents & LWS_POLLOUT)) {
822                         n = lws_handle_POLLOUT_event(wsi, pollfd);
823                         if (n)
824                                 goto close_and_handled;
825                         goto handled;
826                 }
827 #endif
828                 n = lws_server_socket_service(context, wsi, pollfd);
829                 if (n) /* closed by above */
830                         return 1;
831                 goto handled;
832
833         case LWSCM_WS_SERVING:
834         case LWSCM_WS_CLIENT:
835         case LWSCM_HTTP2_SERVING:
836         case LWSCM_HTTP_CLIENT_ACCEPTED:
837
838                 /* 1: something requested a callback when it was OK to write */
839
840                 if ((pollfd->revents & LWS_POLLOUT) &&
841                     (wsi->state == LWSS_ESTABLISHED ||
842                      wsi->state == LWSS_HTTP2_ESTABLISHED ||
843                      wsi->state == LWSS_HTTP2_ESTABLISHED_PRE_SETTINGS ||
844                      wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
845                      wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE) &&
846                     lws_handle_POLLOUT_event(wsi, pollfd)) {
847                         if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY)
848                                 wsi->state = LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE;
849                         lwsl_info("lws_service_fd: closing\n");
850                         goto close_and_handled;
851                 }
852
853                 if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
854                     wsi->state == LWSS_AWAITING_CLOSE_ACK) {
855                         /*
856                          * we stopped caring about anything except control
857                          * packets.  Force flow control off, defeat tx
858                          * draining.
859                          */
860                         lws_rx_flow_control(wsi, 1);
861                         wsi->u.ws.tx_draining_ext = 0;
862                 }
863
864                 if (wsi->u.ws.tx_draining_ext)
865                         /* we cannot deal with new RX until the TX ext
866                          * path has been drained.  This is because new
867                          * rx would, eg, overwrite the wsi rx buf that
868                          * may be needed to retain state.
869                          *
870                          * TX ext drain path MUST go through event loop
871                          * to avoid blocking.
872                          */
873                         break;
874
875                 if (!(wsi->rxflow_change_to & LWS_RXFLOW_ALLOW))
876                         /* We cannot deal with any kind of new RX
877                          * because we are RX-flowcontrolled.
878                          */
879                         break;
880
881                 /* 2: RX Extension needs to be drained
882                  */
883
884                 if (wsi->state == LWSS_ESTABLISHED &&
885                     wsi->u.ws.rx_draining_ext) {
886
887                         lwsl_ext("%s: RX EXT DRAINING: Service\n", __func__);
888 #ifndef LWS_NO_CLIENT
889                         if (wsi->mode == LWSCM_WS_CLIENT) {
890                                 n = lws_client_rx_sm(wsi, 0);
891                                 if (n < 0)
892                                         /* we closed wsi */
893                                         n = 0;
894                         } else
895 #endif
896                                 n = lws_rx_sm(wsi, 0);
897
898                         goto handled;
899                 }
900
901                 if (wsi->u.ws.rx_draining_ext)
902                         /*
903                          * We have RX EXT content to drain, but can't do it
904                          * right now.  That means we cannot do anything lower
905                          * priority either.
906                          */
907                         break;
908
909                 /* 3: RX Flowcontrol buffer needs to be drained
910                  */
911
912                 if (wsi->rxflow_buffer) {
913                         lwsl_info("draining rxflow (len %d)\n",
914                                 wsi->rxflow_len - wsi->rxflow_pos
915                         );
916                         /* well, drain it */
917                         eff_buf.token = (char *)wsi->rxflow_buffer +
918                                                 wsi->rxflow_pos;
919                         eff_buf.token_len = wsi->rxflow_len - wsi->rxflow_pos;
920                         draining_flow = 1;
921                         goto drain;
922                 }
923
924                 /* 4: any incoming (or ah-stashed incoming rx) data ready?
925                  * notice if rx flow going off raced poll(), rx flow wins
926                  */
927
928                 if (!(pollfd->revents & pollfd->events & LWS_POLLIN))
929                         break;
930
931 read:
932                 /* all the union members start with hdr, so even in ws mode
933                  * we can deal with the ah via u.hdr
934                  */
935                 if (wsi->u.hdr.ah) {
936                         lwsl_info("%s: %p: inherited ah rx\n", __func__, wsi);
937                         eff_buf.token_len = wsi->u.hdr.ah->rxlen -
938                                             wsi->u.hdr.ah->rxpos;
939                         eff_buf.token = (char *)wsi->u.hdr.ah->rx +
940                                         wsi->u.hdr.ah->rxpos;
941                 } else {
942                         if (wsi->mode != LWSCM_HTTP_CLIENT_ACCEPTED) {
943                                 eff_buf.token_len = lws_ssl_capable_read(wsi,
944                                         pt->serv_buf, pending ? pending :
945                                         context->pt_serv_buf_size);
946                                 switch (eff_buf.token_len) {
947                                 case 0:
948                                         lwsl_info("%s: zero length read\n", __func__);
949                                         goto close_and_handled;
950                                 case LWS_SSL_CAPABLE_MORE_SERVICE:
951                                         lwsl_info("SSL Capable more service\n");
952                                         n = 0;
953                                         goto handled;
954                                 case LWS_SSL_CAPABLE_ERROR:
955                                         lwsl_info("Closing when error\n");
956                                         goto close_and_handled;
957                                 }
958
959                                 eff_buf.token = (char *)pt->serv_buf;
960                         }
961                 }
962
963 drain:
964 #ifndef LWS_NO_CLIENT
965                 if (wsi->mode == LWSCM_HTTP_CLIENT_ACCEPTED &&
966                     !wsi->told_user_closed) {
967
968                         /*
969                          * simply mark ourselves as having readable data
970                          * and turn off our POLLIN
971                          */
972                         wsi->client_rx_avail = 1;
973                         lws_change_pollfd(wsi, LWS_POLLIN, 0);
974
975                         /* let user code know, he'll usually ask for writeable
976                          * callback and drain / re-enable it there
977                          */
978                         if (user_callback_handle_rxflow(
979                                         wsi->protocol->callback,
980                                         wsi, LWS_CALLBACK_RECEIVE_CLIENT_HTTP,
981                                         wsi->user_space, NULL, 0))
982                                 goto close_and_handled;
983                 }
984 #endif
985                 /*
986                  * give any active extensions a chance to munge the buffer
987                  * before parse.  We pass in a pointer to an lws_tokens struct
988                  * prepared with the default buffer and content length that's in
989                  * there.  Rather than rewrite the default buffer, extensions
990                  * that expect to grow the buffer can adapt .token to
991                  * point to their own per-connection buffer in the extension
992                  * user allocation.  By default with no extensions or no
993                  * extension callback handling, just the normal input buffer is
994                  * used then so it is efficient.
995                  */
996                 do {
997                         more = 0;
998
999                         m = lws_ext_cb_active(wsi, LWS_EXT_CB_PACKET_RX_PREPARSE,
1000                                               &eff_buf, 0);
1001                         if (m < 0)
1002                                 goto close_and_handled;
1003                         if (m)
1004                                 more = 1;
1005
1006                         /* service incoming data */
1007
1008                         if (eff_buf.token_len) {
1009                                 /*
1010                                  * if draining from the rxflow buffer, it is not
1011                                  * critical to track what was used, since the
1012                                  * use bumps wsi->rxflow_pos.  If we come
1013                                  * around again it will pick up from where it
1014                                  * left off.
1015                                  */
1016                                 n = lws_read(wsi, (unsigned char *)eff_buf.token,
1017                                              eff_buf.token_len);
1018                                 if (n < 0) {
1019                                         /* we closed wsi */
1020                                         n = 0;
1021                                         goto handled;
1022                                 }
1023                         }
1024
1025                         eff_buf.token = NULL;
1026                         eff_buf.token_len = 0;
1027                 } while (more);
1028
1029                 if (wsi->u.hdr.ah) {
1030                         lwsl_notice("%s: %p: detaching\n",
1031                                  __func__, wsi);
1032                         /* show we used all the pending rx up */
1033                         wsi->u.hdr.ah->rxpos = wsi->u.hdr.ah->rxlen;
1034                         /* we can run the normal ah detach flow despite
1035                          * being in ws union mode, since all union members
1036                          * start with hdr */
1037                         lws_header_table_detach(wsi, 0);
1038                 }
1039
1040                 pending = lws_ssl_pending(wsi);
1041                 if (pending) {
1042                         pending = pending > context->pt_serv_buf_size ?
1043                                         context->pt_serv_buf_size : pending;
1044                         goto read;
1045                 }
1046
1047                 if (draining_flow && wsi->rxflow_buffer &&
1048                     wsi->rxflow_pos == wsi->rxflow_len) {
1049                         lwsl_info("flow buffer: drained\n");
1050                         lws_free_set_NULL(wsi->rxflow_buffer);
1051                         /* having drained the rxflow buffer, can rearm POLLIN */
1052 #ifdef LWS_NO_SERVER
1053                         n =
1054 #endif
1055                         _lws_rx_flow_control(wsi);
1056                         /* n ignored, needed for NO_SERVER case */
1057                 }
1058
1059                 break;
1060 #ifdef LWS_WITH_CGI
1061         case LWSCM_CGI: /* we exist to handle a cgi's stdin/out/err data...
1062                          * do the callback on our master wsi
1063                          */
1064                 {
1065                         struct lws_cgi_args args;
1066
1067                         if (wsi->cgi_channel >= LWS_STDOUT &&
1068                             !(pollfd->revents & pollfd->events & LWS_POLLIN))
1069                                 break;
1070                         if (wsi->cgi_channel == LWS_STDIN &&
1071                             !(pollfd->revents & pollfd->events & LWS_POLLOUT))
1072                                 break;
1073
1074                         if (wsi->cgi_channel == LWS_STDIN)
1075                                 if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
1076                                         lwsl_info("failed at set pollfd\n");
1077                                         return 1;
1078                                 }
1079
1080                         args.ch = wsi->cgi_channel;
1081                         args.stdwsi = &wsi->parent->cgi->stdwsi[0];
1082                         args.hdr_state = wsi->hdr_state;
1083
1084                         //lwsl_err("CGI LWS_STDOUT waiting wsi %p mode %d state %d\n",
1085                         //       wsi->parent, wsi->parent->mode, wsi->parent->state);
1086
1087                         if (user_callback_handle_rxflow(
1088                                         wsi->parent->protocol->callback,
1089                                         wsi->parent, LWS_CALLBACK_CGI,
1090                                         wsi->parent->user_space,
1091                                         (void *)&args, 0))
1092                                 return 1;
1093
1094                         break;
1095                 }
1096 #endif
1097         default:
1098 #ifdef LWS_NO_CLIENT
1099                 break;
1100 #else
1101                 if ((pollfd->revents & LWS_POLLOUT) &&
1102                     lws_handle_POLLOUT_event(wsi, pollfd))
1103                         goto close_and_handled;
1104
1105                 n = lws_client_socket_service(context, wsi, pollfd);
1106                 if (n)
1107                         return 1;
1108                 goto handled;
1109 #endif
1110         }
1111
1112         n = 0;
1113         goto handled;
1114
1115 close_and_handled:
1116         lwsl_debug("Close and handled\n");
1117         lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS);
1118         /*
1119          * pollfd may point to something else after the close, due to the
1120          * pollfd swapping scheme on delete used on some platforms; we
1121          * can't clear revents now because it'd be the wrong guy's revents
1122          */
1123         return 1;
1124
1125 handled:
1126         pollfd->revents = 0;
1127         return n;
1128 }
1129
1130 LWS_VISIBLE int
1131 lws_service_fd(struct lws_context *context, struct lws_pollfd *pollfd)
1132 {
1133         return lws_service_fd_tsi(context, pollfd, 0);
1134 }
1135
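/*
 * Illustrative sketch (#if 0, not part of lws): timeout servicing when the
 * application runs its own external poll loop instead of lws_service().  As
 * lws_service_fd_tsi() above shows, lws_service_fd() accepts a NULL pollfd
 * and then only performs the once-per-second timeout checks, so an external
 * loop should call it that way at least once per second, as well as for
 * every fd poll() reports, so stuck connections still get timed out and
 * closed.  pollfds, count_pollfds, finished and the loop variables below are
 * the application's own.
 */
#if 0
	while (!finished) {
		n = poll(pollfds, count_pollfds, 1000);
		if (n < 0)
			break;

		if (!n) {
			/* poll() timed out: let lws run its timeout checks */
			lws_service_fd(context, NULL);
			continue;
		}

		for (i = 0; i < count_pollfds; i++)
			if (pollfds[i].revents)
				/* clears revents if the fd belonged to lws */
				lws_service_fd(context, &pollfds[i]);
	}
#endif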
1136 LWS_VISIBLE int
1137 lws_service(struct lws_context *context, int timeout_ms)
1138 {
1139         return lws_plat_service(context, timeout_ms);
1140 }
1141
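/*
 * Illustrative sketch (#if 0, not part of lws): when not using an external
 * poll loop, the usual way to drive the library is simply to call
 * lws_service() repeatedly; it waits up to timeout_ms for network activity,
 * services it, and performs the timeout checks described above.  The 50ms
 * figure is arbitrary and 'finished' is an application flag.
 */
#if 0
	int n = 0;

	while (n >= 0 && !finished)
		n = lws_service(context, 50);

	lws_context_destroy(context);
#endif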
1142 LWS_VISIBLE int
1143 lws_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
1144 {
1145         return lws_plat_service_tsi(context, timeout_ms, tsi);
1146 }
1147