2 * libwebsockets - small server side websockets and web server implementation
4 * Copyright (C) 2010-2013 Andy Green <andy@warmcat.com>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation:
9 * version 2.1 of the License.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
22 #include "private-libwebsockets.h"
/*
 * Precomputed state-machine table used by lextable_decode() to match known
 * HTTP header names byte-by-byte.  NOTE(review): the initializer contents
 * are not visible in this chunk (original lines are missing); upstream this
 * is generated and #include'd — confirm against the build.
 */
24 const unsigned char lextable[] = {
/* sentinel byte inside lextable marking a failed / terminal transition */
28 #define FAIL_CHAR 0x08
/*
 * Advance one step through the precomputed header-name state machine.
 * pos is the current offset into lextable[], c the next input byte;
 * based on the visible code it case-folds A-Z, fails 1-byte states on
 * mismatch, treats values below FAIL_CHAR as terminal markers, and follows
 * 3-byte "goto" entries via a little-endian 16-bit jump offset.
 * NOTE(review): interior lines (returns for the fail/terminal paths, loop
 * structure) are missing from this dump — original numbering skips.
 */
30 int LWS_WARN_UNUSED_RESULT
31 lextable_decode(int pos, char c)
33 if (c >= 'A' && c <= 'Z')
37 if (lextable[pos] & (1 << 7)) { /* 1-byte, fail on mismatch */
38 if ((lextable[pos] & 0x7f) != c)
42 if (lextable[pos] == FAIL_CHAR)
47 if (lextable[pos] == FAIL_CHAR)
50 /* b7 = 0, end or 3-byte */
51 if (lextable[pos] < FAIL_CHAR) /* terminal marker */
54 if (lextable[pos] == c) /* goto */
/* jump target: next-state offset stored as lo byte then hi byte */
55 return pos + (lextable[pos + 1]) +
56 (lextable[pos + 2] << 8);
/*
 * Reset an allocated_headers struct to "no headers seen yet": clears the
 * per-token fragment index and the stored HTTP response code.  Deliberately
 * does NOT touch the rx buffer contents — the caller owns that decision.
 */
64 _lws_header_table_reset(struct allocated_headers *ah)
66 /* init the ah to reflect no headers or data have appeared yet */
67 memset(ah->frag_index, 0, sizeof(ah->frag_index));
70 ah->http_response = 0;
73 // doesn't scrub the ah rxbuffer by default, parent must do if needed
/*
 * Reset the header-parsing state of a wsi that is bound to an ah:
 * clears the ah, rewinds the token parser, and — if rx arrived before the
 * ah was attached (deferred socket adoption) — copies that preamble into
 * the ah and, when autoservice is requested, synthesizes a POLLIN service
 * pass so the buffered bytes get parsed.
 * NOTE(review): this dump is missing interior lines (braces/guards), so
 * only the visible statements are documented.
 */
76 lws_header_table_reset(struct lws *wsi, int autoservice)
78 struct allocated_headers *ah = wsi->u.hdr.ah;
79 struct lws_context_per_thread *pt;
80 struct lws_pollfd *pfd;
82 /* if we have the idea we're resetting 'our' ah, must be bound to one */
84 /* ah also concurs with ownership */
85 assert(ah->wsi == wsi);
87 _lws_header_table_reset(ah);
/* restart token parsing from scratch */
89 wsi->u.hdr.parser_state = WSI_TOKEN_NAME_PART;
90 wsi->u.hdr.lextable_pos = 0;
92 /* since we will restart the ah, our new headers are not completed */
93 wsi->hdr_parsing_completed = 0;
96 * if we inherited pending rx (from socket adoption deferred
97 * processing), apply and free it.
99 if (wsi->u.hdr.preamble_rx) {
100 memcpy(ah->rx, wsi->u.hdr.preamble_rx,
101 wsi->u.hdr.preamble_rx_len);
102 ah->rxlen = wsi->u.hdr.preamble_rx_len;
103 lws_free_set_NULL(wsi->u.hdr.preamble_rx);
106 lwsl_notice("%s: calling service on readbuf ah\n", __func__);
108 pt = &wsi->context->pt[(int)wsi->tsi];
110 /* unlike a normal connect, we have the headers already
111 * (or the first part of them anyway)
/* fake a POLLIN event so the service loop consumes the buffered rx */
113 pfd = &pt->fds[wsi->position_in_fds_table];
114 pfd->revents |= LWS_POLLIN;
115 lwsl_err("%s: calling service\n", __func__);
116 lws_service_fd_tsi(wsi->context, pfd, wsi->tsi);
/*
 * Try to bind a free allocated_headers (ah) from the per-thread pool to
 * this wsi.  If the pool is exhausted the wsi is queued on the pt's
 * ah_wait_list (and POLLIN is disabled so no input arrives it cannot
 * store); otherwise the ah is claimed, its rx state reset, a hold
 * timestamp recorded, and — for a not-yet-connected client wsi — the
 * deferred client connect is progressed.
 * NOTE(review): this dump is missing interior lines (lock/unlock, returns,
 * loop headers); comments describe only visible statements.
 */
121 int LWS_WARN_UNUSED_RESULT
122 lws_header_table_attach(struct lws *wsi, int autoservice)
124 struct lws_context *context = wsi->context;
125 struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
126 struct lws_pollargs pa;
130 lwsl_info("%s: wsi %p: ah %p (tsi %d, count = %d) in\n", __func__, (void *)wsi,
131 (void *)wsi->u.hdr.ah, wsi->tsi, pt->ah_count_in_use);
133 /* if we are already bound to one, just clear it down */
135 lwsl_info("cleardown\n");
/* walk the wait list looking for ourselves */
140 pwsi = &pt->ah_wait_list;
143 /* if already waiting on list, if no new ah just ret */
144 if (pt->ah_count_in_use ==
145 context->max_http_header_pool) {
146 lwsl_notice("%s: no free ah to attach\n", __func__);
149 /* new ah.... remove ourselves from waiting list */
150 *pwsi = wsi->u.hdr.ah_wait_list; /* set our prev to our next */
151 wsi->u.hdr.ah_wait_list = NULL; /* no next any more */
152 pt->ah_wait_list_length--;
155 pwsi = &(*pwsi)->u.hdr.ah_wait_list;
158 * pool is all busy... add us to waiting list and return that we
159 * weren't able to deliver it right now
161 if (pt->ah_count_in_use == context->max_http_header_pool) {
162 lwsl_info("%s: adding %p to ah waiting list\n", __func__, wsi);
163 wsi->u.hdr.ah_wait_list = pt->ah_wait_list;
164 pt->ah_wait_list = wsi;
165 pt->ah_wait_list_length++;
167 /* we cannot accept input then */
169 _lws_change_pollfd(wsi, LWS_POLLIN, 0, &pa);
/* find a free pool slot; in-use count promised there is one */
173 for (n = 0; n < context->max_http_header_pool; n++)
174 if (!pt->ah_pool[n].in_use)
177 /* if the count of in use said something free... */
178 assert(n != context->max_http_header_pool);
180 wsi->u.hdr.ah = &pt->ah_pool[n];
181 wsi->u.hdr.ah->in_use = 1;
182 pt->ah_pool[n].wsi = wsi; /* mark our owner */
183 pt->ah_count_in_use++;
/* we can accept input again now we have somewhere to put it */
185 _lws_change_pollfd(wsi, 0, LWS_POLLIN, &pa);
187 lwsl_info("%s: did attach wsi %p: ah %p: count %d (on exit)\n", __func__,
188 (void *)wsi, (void *)wsi->u.hdr.ah, pt->ah_count_in_use);
194 /* and reset the rx state */
195 wsi->u.hdr.ah->rxpos = 0;
196 wsi->u.hdr.ah->rxlen = 0;
198 lws_header_table_reset(wsi, autoservice);
/* start the "held too long" timer for this ah */
199 time(&wsi->u.hdr.ah->assigned);
201 #ifndef LWS_NO_CLIENT
202 if (wsi->state == LWSS_CLIENT_UNCONNECTED)
203 if (!lws_client_connect_via_info2(wsi))
204 /* our client connect has failed, the wsi
/*
 * Release this wsi's ah back to the per-thread pool.  Refuses (asserts)
 * while the ah still holds unconsumed rx.  If other wsi are queued on the
 * ah_wait_list the ah is handed straight to the oldest waiter instead of
 * being freed: that waiter gets its rx state reset, POLLIN re-enabled,
 * and (for a pending client connect) the connect progressed.
 * NOTE(review): interior lines (locking, early returns, wait-list loop
 * bodies) are missing from this dump; only visible statements documented.
 */
218 int lws_header_table_detach(struct lws *wsi, int autoservice)
220 struct lws_context *context = wsi->context;
221 struct allocated_headers *ah = wsi->u.hdr.ah;
222 struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
223 struct lws_pollargs pa;
227 lwsl_info("%s: wsi %p: ah %p (tsi=%d, count = %d)\n", __func__,
228 (void *)wsi, (void *)ah, wsi->tsi,
229 pt->ah_count_in_use);
/* any stashed preamble is no longer needed */
231 if (wsi->u.hdr.preamble_rx)
232 lws_free_set_NULL(wsi->u.hdr.preamble_rx);
234 /* may not be detached while he still has unprocessed rx */
235 if (ah && ah->rxpos != ah->rxlen) {
236 lwsl_err("%s: %p: CANNOT DETACH rxpos:%d, rxlen:%d\n", __func__, wsi,
237 ah->rxpos, ah->rxlen);
238 assert(ah->rxpos == ah->rxlen);
245 pwsi = &pt->ah_wait_list;
246 if (!ah) { /* remove from wait list if none attached */
249 lwsl_info("%s: wsi %p, remv wait\n",
251 *pwsi = wsi->u.hdr.ah_wait_list;
252 wsi->u.hdr.ah_wait_list = NULL;
253 pt->ah_wait_list_length--;
256 pwsi = &(*pwsi)->u.hdr.ah_wait_list;
258 /* no ah, not on list... no more business here */
261 /* we did have an ah attached */
263 if (ah->assigned && now - ah->assigned > 3) {
265 * we're detaching the ah, but it was held an
266 * unreasonably long time
268 lwsl_notice("%s: wsi %p: ah held %ds, "
269 "ah.rxpos %d, ah.rxlen %d, mode/state %d %d,"
270 "wsi->more_rx_waiting %d\n", __func__, wsi,
271 (int)(now - ah->assigned),
272 ah->rxpos, ah->rxlen, wsi->mode, wsi->state,
273 wsi->more_rx_waiting);
278 /* if we think we're detaching one, there should be one in use */
279 assert(pt->ah_count_in_use > 0);
280 /* and this specific one should have been in use */
/* break the two-way wsi <-> ah binding */
282 wsi->u.hdr.ah = NULL;
283 ah->wsi = NULL; /* no owner */
285 /* oh there is nobody on the waiting list... leave it at that then */
288 pt->ah_count_in_use--;
293 /* somebody else on same tsi is waiting, give it to oldest guy */
295 lwsl_info("pt wait list %p\n", *pwsi);
296 while ((*pwsi)->u.hdr.ah_wait_list)
297 pwsi = &(*pwsi)->u.hdr.ah_wait_list;
300 lwsl_info("last wsi in wait list %p\n", wsi);
303 ah->wsi = wsi; /* new owner */
304 /* and reset the rx state */
307 lws_header_table_reset(wsi, autoservice);
308 time(&wsi->u.hdr.ah->assigned);
310 /* clients acquire the ah and then insert themselves in fds table... */
311 if (wsi->position_in_fds_table != -1) {
312 lwsl_info("%s: Enabling %p POLLIN\n", __func__, wsi);
314 /* he has been stuck waiting for an ah, but now his wait is over,
317 _lws_change_pollfd(wsi, 0, LWS_POLLIN, &pa);
320 /* point prev guy to next guy in list instead */
321 *pwsi = wsi->u.hdr.ah_wait_list;
322 /* the guy who got one is out of the list */
323 wsi->u.hdr.ah_wait_list = NULL;
324 pt->ah_wait_list_length--;
326 #ifndef LWS_NO_CLIENT
327 if (wsi->state == LWSS_CLIENT_UNCONNECTED) {
330 if (!lws_client_connect_via_info2(wsi)) {
331 /* our client connect has failed, the wsi
/* sanity: list length bookkeeping must agree with list head */
341 assert(!!pt->ah_wait_list_length == !!(int)(long)pt->ah_wait_list);
343 lwsl_info("%s: wsi %p: ah %p (tsi=%d, count = %d)\n", __func__,
344 (void *)wsi, (void *)ah, wsi->tsi,
345 pt->ah_count_in_use);
/*
 * Length of the frag_idx'th stored fragment of header token h, walking the
 * nfrag chain from the token's first fragment.  NOTE(review): the guard
 * for a missing first fragment and the final return are not visible here.
 */
352 lws_hdr_fragment_length(struct lws *wsi, enum lws_token_indexes h, int frag_idx)
356 n = wsi->u.hdr.ah->frag_index[h];
361 return wsi->u.hdr.ah->frags[n].len;
362 n = wsi->u.hdr.ah->frags[n].nfrag;
363 } while (frag_idx-- && n);
/*
 * Sum the lengths of every stored fragment of header token h (the
 * fragments form a singly-linked chain via .nfrag).  NOTE(review): the
 * accumulator init and final return are outside the visible lines.
 */
368 LWS_VISIBLE int lws_hdr_total_length(struct lws *wsi, enum lws_token_indexes h)
373 n = wsi->u.hdr.ah->frag_index[h];
377 len += wsi->u.hdr.ah->frags[n].len;
378 n = wsi->u.hdr.ah->frags[n].nfrag;
/*
 * Copy exactly one fragment (frag_idx) of header token h into dst,
 * NUL-terminating it, and return the fragment length.  Visible checks:
 * the walk to the requested fragment, and a bounds test that the fragment
 * plus NUL fits in len.  NOTE(review): early-exit returns for "no such
 * fragment"/"too small" are among the missing lines.
 */
384 LWS_VISIBLE int lws_hdr_copy_fragment(struct lws *wsi, char *dst, int len,
385 enum lws_token_indexes h, int frag_idx)
388 int f = wsi->u.hdr.ah->frag_index[h];
393 while (n < frag_idx) {
394 f = wsi->u.hdr.ah->frags[f].nfrag;
/* need room for the fragment plus the terminating NUL */
400 if (wsi->u.hdr.ah->frags[f].len >= len)
403 memcpy(dst, wsi->u.hdr.ah->data + wsi->u.hdr.ah->frags[f].offset,
404 wsi->u.hdr.ah->frags[f].len);
405 dst[wsi->u.hdr.ah->frags[f].len] = '\0';
407 return wsi->u.hdr.ah->frags[f].len;
/*
 * Concatenate all fragments of header token h into dst.  Uses
 * lws_hdr_total_length() first (presumably to check dst capacity — the
 * guard lines are missing from this dump), then walks the fragment chain
 * appending each stored, NUL-terminated piece.
 */
410 LWS_VISIBLE int lws_hdr_copy(struct lws *wsi, char *dst, int len,
411 enum lws_token_indexes h)
413 int toklen = lws_hdr_total_length(wsi, h);
419 n = wsi->u.hdr.ah->frag_index[h];
424 strcpy(dst, &wsi->u.hdr.ah->data[wsi->u.hdr.ah->frags[n].offset]);
425 dst += wsi->u.hdr.ah->frags[n].len;
426 n = wsi->u.hdr.ah->frags[n].nfrag;
/*
 * Return a pointer directly into the ah data buffer at the first fragment
 * of token h.  NOTE(review): the NULL-return path for an absent token is
 * not visible in this dump — confirm before relying on it.
 */
432 char *lws_hdr_simple_ptr(struct lws *wsi, enum lws_token_indexes h)
436 n = wsi->u.hdr.ah->frag_index[h];
440 return wsi->u.hdr.ah->data + wsi->u.hdr.ah->frags[n].offset;
/*
 * Check the ah write cursor against max_http_header_data: in-bounds is the
 * common fast path; exactly at the limit logs "out of space"; anything
 * past the limit should be impossible given callers test before every
 * write, so it is logged loudly (assert/kill lines are not visible here).
 */
443 int LWS_WARN_UNUSED_RESULT
444 lws_pos_in_bounds(struct lws *wsi)
446 if (wsi->u.hdr.ah->pos < (unsigned int)wsi->context->max_http_header_data)
449 if (wsi->u.hdr.ah->pos == wsi->context->max_http_header_data) {
450 lwsl_err("Ran out of header data space\n")
455 * with these tests everywhere, it should never be able to exceed
456 * the limit, only meet the limit
459 lwsl_err("%s: pos %d, limit %d\n", __func__, wsi->u.hdr.ah->pos,
460 wsi->context->max_http_header_data);
/*
 * Store the NUL-terminated string s as a brand-new single fragment for
 * token h: claims the next fragment slot (dropping the header if the
 * fragment table is full), points the token's frag_index at it, and copies
 * s byte-by-byte with a bounds check per byte.  NOTE(review): the copy
 * loop header and returns are among the lines missing from this dump.
 */
466 int LWS_WARN_UNUSED_RESULT
467 lws_hdr_simple_create(struct lws *wsi, enum lws_token_indexes h, const char *s)
469 wsi->u.hdr.ah->nfrag++;
470 if (wsi->u.hdr.ah->nfrag == ARRAY_SIZE(wsi->u.hdr.ah->frags)) {
471 lwsl_warn("More hdr frags than we can deal with, dropping\n");
475 wsi->u.hdr.ah->frag_index[h] = wsi->u.hdr.ah->nfrag;
/* fresh fragment starts at the current write cursor, empty, unchained */
477 wsi->u.hdr.ah->frags[wsi->u.hdr.ah->nfrag].offset = wsi->u.hdr.ah->pos;
478 wsi->u.hdr.ah->frags[wsi->u.hdr.ah->nfrag].len = 0;
479 wsi->u.hdr.ah->frags[wsi->u.hdr.ah->nfrag].nfrag = 0;
482 if (lws_pos_in_bounds(wsi))
485 wsi->u.hdr.ah->data[wsi->u.hdr.ah->pos++] = *s;
487 wsi->u.hdr.ah->frags[wsi->u.hdr.ah->nfrag].len++;
/*
 * Convert a single ASCII hex digit to its numeric value.
 *
 * \param c  character to convert
 * \return 0..15 for a valid hex digit ('0'-'9', 'a'-'f', 'A'-'F'),
 *         or -1 if c is not a hex digit.
 *
 * (Reconstructed: the dump of this block had lost the braces and the
 * three return statements; this is the canonical complete form.)
 */
signed char char_to_hex(const char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';

	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;

	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;

	return -1;
}
/*
 * Append one byte c to the current header fragment, enforcing both the
 * overall ah buffer bound (lws_pos_in_bounds) and the per-token length
 * limit (current_token_limit).  On *reaching* the limit a NUL is stored
 * and a warning logged; the visible code shows bytes beyond the limit are
 * not stored.  NOTE(review): return statements are among the lines missing
 * from this dump — callers treat a negative return as fatal.
 */
507 static int LWS_WARN_UNUSED_RESULT
508 issue_char(struct lws *wsi, unsigned char c)
510 unsigned short frag_len;
512 if (lws_pos_in_bounds(wsi))
515 frag_len = wsi->u.hdr.ah->frags[wsi->u.hdr.ah->nfrag].len;
517 * If we haven't hit the token limit, just copy the character into
520 if (frag_len < wsi->u.hdr.current_token_limit) {
521 wsi->u.hdr.ah->data[wsi->u.hdr.ah->pos++] = c;
523 wsi->u.hdr.ah->frags[wsi->u.hdr.ah->nfrag].len++;
527 /* Insert a null character when we *hit* the limit: */
528 if (frag_len == wsi->u.hdr.current_token_limit) {
529 if (lws_pos_in_bounds(wsi))
531 wsi->u.hdr.ah->data[wsi->u.hdr.ah->pos++] = '\0';
532 lwsl_warn("header %i exceeds limit %d\n",
533 wsi->u.hdr.parser_state,
534 wsi->u.hdr.current_token_limit);
/*
 * Byte-at-a-time HTTP header parser state machine.  Feeds each input byte
 * c through wsi->u.hdr.parser_state: collecting a header NAME via the
 * lextable state machine, then the VALUE with special handling for method
 * URIs — %xx decoding (ues sub-state), path sanitation (/./, /../, //
 * collapsing via ups sub-state), and splitting of ?query args into
 * WSI_TOKEN_HTTP_URI_ARGS fragments.  Returns negative on fatal parse
 * errors, and (visible below) 2 to request fallback to raw mode.
 * NOTE(review): this dump is missing many interior lines (case labels,
 * breaks, gotos, returns) — original numbering skips — so comments here
 * describe only the visible statements; do not infer missing control flow.
 */
540 int LWS_WARN_UNUSED_RESULT
541 lws_parse(struct lws *wsi, unsigned char c)
543 static const unsigned char methods[] = {
546 WSI_TOKEN_OPTIONS_URI,
549 WSI_TOKEN_DELETE_URI,
552 struct allocated_headers *ah = wsi->u.hdr.ah;
553 struct lws_context *context = wsi->context;
554 unsigned int n, m, enc = 0;
556 assert(wsi->u.hdr.ah);
558 switch (wsi->u.hdr.parser_state) {
561 lwsl_parser("WSI_TOK_(%d) '%c'\n", wsi->u.hdr.parser_state, c);
563 /* collect into malloc'd buffers */
564 /* optional initial space swallow */
565 if (!ah->frags[ah->frag_index[wsi->u.hdr.parser_state]].len &&
/* is the current token one of the request-method URIs? */
569 for (m = 0; m < ARRAY_SIZE(methods); m++)
570 if (wsi->u.hdr.parser_state == methods[m])
572 if (m == ARRAY_SIZE(methods))
573 /* it was not any of the methods */
576 /* special URI processing... end at space */
579 /* enforce starting with / */
580 if (!ah->frags[ah->nfrag].len)
581 if (issue_char(wsi, '/') < 0)
584 if (wsi->u.hdr.ups == URIPS_SEEN_SLASH_DOT_DOT) {
586 * back up one dir level if possible
587 * safe against header fragmentation because
588 * the method URI can only be in 1 fragment
590 if (ah->frags[ah->nfrag].len > 2) {
592 ah->frags[ah->nfrag].len--;
595 ah->frags[ah->nfrag].len--;
596 } while (ah->frags[ah->nfrag].len > 1 &&
597 ah->data[ah->pos] != '/');
601 /* begin parsing HTTP version: */
602 if (issue_char(wsi, '\0') < 0)
604 wsi->u.hdr.parser_state = WSI_TOKEN_HTTP;
610 * special URI processing... convert %xx
/* percent-escape decoder: %, then two hex digits via char_to_hex() */
613 switch (wsi->u.hdr.ues) {
616 wsi->u.hdr.ues = URIES_SEEN_PERCENT;
620 case URIES_SEEN_PERCENT:
621 if (char_to_hex(c) < 0)
622 /* illegal post-% char */
625 wsi->u.hdr.esc_stash = c;
626 wsi->u.hdr.ues = URIES_SEEN_PERCENT_H1;
629 case URIES_SEEN_PERCENT_H1:
630 if (char_to_hex(c) < 0)
631 /* illegal post-% char */
634 c = (char_to_hex(wsi->u.hdr.esc_stash) << 4) |
637 wsi->u.hdr.ues = URIES_IDLE;
643 * special URI processing...
644 * convert /.. or /... or /../ etc to /
646 * convert // or /// etc to /
647 * leave /.dir or whatever alone
650 switch (wsi->u.hdr.ups) {
654 /* genuine delimiter */
655 if ((c == '&' || c == ';') && !enc) {
656 if (issue_char(wsi, c) < 0)
658 /* swallow the terminator */
659 ah->frags[ah->nfrag].len--;
660 /* link to next fragment */
661 ah->frags[ah->nfrag].nfrag = ah->nfrag + 1;
663 if (ah->nfrag >= ARRAY_SIZE(ah->frags))
665 /* start next fragment after the & */
666 wsi->u.hdr.post_literal_equal = 0;
667 ah->frags[ah->nfrag].offset = ah->pos;
668 ah->frags[ah->nfrag].len = 0;
669 ah->frags[ah->nfrag].nfrag = 0;
672 /* uriencoded = in the name part, disallow */
673 if (c == '=' && enc &&
674 ah->frag_index[WSI_TOKEN_HTTP_URI_ARGS] &&
675 !wsi->u.hdr.post_literal_equal)
678 /* after the real =, we don't care how many = */
679 if (c == '=' && !enc)
680 wsi->u.hdr.post_literal_equal = 1;
683 if (c == '+' && !enc)
685 /* issue the first / always */
686 if (c == '/' && !ah->frag_index[WSI_TOKEN_HTTP_URI_ARGS])
687 wsi->u.hdr.ups = URIPS_SEEN_SLASH;
689 case URIPS_SEEN_SLASH:
690 /* swallow subsequent slashes */
693 /* track and swallow the first . after / */
695 wsi->u.hdr.ups = URIPS_SEEN_SLASH_DOT;
698 wsi->u.hdr.ups = URIPS_IDLE;
700 case URIPS_SEEN_SLASH_DOT:
701 /* swallow second . */
703 wsi->u.hdr.ups = URIPS_SEEN_SLASH_DOT_DOT;
706 /* change /./ to / */
708 wsi->u.hdr.ups = URIPS_SEEN_SLASH;
711 /* it was like /.dir ... regurgitate the . */
712 wsi->u.hdr.ups = URIPS_IDLE;
713 if (issue_char(wsi, '.') < 0)
717 case URIPS_SEEN_SLASH_DOT_DOT:
719 /* /../ or /..[End of URI] --> backup to last / */
720 if (c == '/' || c == '?') {
722 * back up one dir level if possible
723 * safe against header fragmentation because
724 * the method URI can only be in 1 fragment
726 if (ah->frags[ah->nfrag].len > 2) {
728 ah->frags[ah->nfrag].len--;
731 ah->frags[ah->nfrag].len--;
732 } while (ah->frags[ah->nfrag].len > 1 &&
733 ah->data[ah->pos] != '/');
735 wsi->u.hdr.ups = URIPS_SEEN_SLASH;
736 if (ah->frags[ah->nfrag].len > 1)
741 /* /..[^/] ... regurgitate and allow */
743 if (issue_char(wsi, '.') < 0)
745 if (issue_char(wsi, '.') < 0)
747 wsi->u.hdr.ups = URIPS_IDLE;
/* first literal '?' ends the URI token and opens URI_ARGS */
751 if (c == '?' && !enc &&
752 !ah->frag_index[WSI_TOKEN_HTTP_URI_ARGS]) { /* start of URI arguments */
753 if (wsi->u.hdr.ues != URIES_IDLE)
756 /* seal off uri header */
757 if (issue_char(wsi, '\0') < 0)
760 /* move to using WSI_TOKEN_HTTP_URI_ARGS */
762 if (ah->nfrag >= ARRAY_SIZE(ah->frags))
764 ah->frags[ah->nfrag].offset = ah->pos;
765 ah->frags[ah->nfrag].len = 0;
766 ah->frags[ah->nfrag].nfrag = 0;
768 wsi->u.hdr.post_literal_equal = 0;
769 ah->frag_index[WSI_TOKEN_HTTP_URI_ARGS] = ah->nfrag;
770 wsi->u.hdr.ups = URIPS_IDLE;
776 if (wsi->u.hdr.parser_state != WSI_TOKEN_CHALLENGE &&
778 if (wsi->u.hdr.ues != URIES_IDLE)
782 wsi->u.hdr.parser_state = WSI_TOKEN_SKIPPING_SAW_CR;
786 n = issue_char(wsi, c);
790 wsi->u.hdr.parser_state = WSI_TOKEN_SKIPPING;
793 /* per-protocol end of headers management */
795 if (wsi->u.hdr.parser_state == WSI_TOKEN_CHALLENGE)
796 goto set_parsing_complete;
799 /* collecting and checking a name part */
800 case WSI_TOKEN_NAME_PART:
801 lwsl_parser("WSI_TOKEN_NAME_PART '%c' (mode=%d)\n", c, wsi->mode);
803 wsi->u.hdr.lextable_pos =
804 lextable_decode(wsi->u.hdr.lextable_pos, c);
806 * Server needs to look out for unknown methods...
808 if (wsi->u.hdr.lextable_pos < 0 &&
809 wsi->mode == LWSCM_HTTP_SERVING) {
810 /* this is not a header we know about */
811 for (m = 0; m < ARRAY_SIZE(methods); m++)
812 if (ah->frag_index[methods[m]]) {
814 * already had the method, no idea what
815 * this crap from the client is, ignore
817 wsi->u.hdr.parser_state = WSI_TOKEN_SKIPPING;
821 * hm it's an unknown http method from a client in fact,
822 * it cannot be valid http
824 if (m == ARRAY_SIZE(methods)) {
826 * are we set up to accept raw in these cases?
828 if (lws_check_opt(wsi->vhost->options,
829 LWS_SERVER_OPTION_FALLBACK_TO_RAW))
830 return 2; /* transition to raw */
832 lwsl_info("Unknown method - dropping\n");
838 * ...otherwise for a client, let him ignore unknown headers
839 * coming from the server
841 if (wsi->u.hdr.lextable_pos < 0) {
842 wsi->u.hdr.parser_state = WSI_TOKEN_SKIPPING;
/* lextable reached a terminal: decode which token was matched */
846 if (lextable[wsi->u.hdr.lextable_pos] < FAIL_CHAR) {
849 n = ((unsigned int)lextable[wsi->u.hdr.lextable_pos] << 8) |
850 lextable[wsi->u.hdr.lextable_pos + 1];
852 lwsl_parser("known hdr %d\n", n);
853 for (m = 0; m < ARRAY_SIZE(methods); m++)
854 if (n == methods[m] &&
855 ah->frag_index[methods[m]]) {
856 lwsl_warn("Duplicated method\n");
861 * WSORIGIN is protocol equiv to ORIGIN,
862 * JWebSocket likes to send it, map to ORIGIN
864 if (n == WSI_TOKEN_SWORIGIN)
865 n = WSI_TOKEN_ORIGIN;
867 wsi->u.hdr.parser_state = (enum lws_token_indexes)
868 (WSI_TOKEN_GET_URI + n);
/* per-token limit from user config, else whole-buffer limit */
870 if (context->token_limits)
871 wsi->u.hdr.current_token_limit =
872 context->token_limits->token_limit[
873 wsi->u.hdr.parser_state];
875 wsi->u.hdr.current_token_limit =
876 wsi->context->max_http_header_data;
878 if (wsi->u.hdr.parser_state == WSI_TOKEN_CHALLENGE)
879 goto set_parsing_complete;
888 if (ah->nfrag == ARRAY_SIZE(ah->frags)) {
889 lwsl_warn("More hdr frags than we can deal with\n");
893 ah->frags[ah->nfrag].offset = ah->pos;
894 ah->frags[ah->nfrag].len = 0;
895 ah->frags[ah->nfrag].nfrag = 0;
897 n = ah->frag_index[wsi->u.hdr.parser_state];
898 if (!n) { /* first fragment */
899 ah->frag_index[wsi->u.hdr.parser_state] = ah->nfrag;
/* continuation: append new fragment at end of the chain */
903 while (ah->frags[n].nfrag)
904 n = ah->frags[n].nfrag;
905 ah->frags[n].nfrag = ah->nfrag;
907 if (issue_char(wsi, ' ') < 0)
911 /* skipping arg part of a name we didn't recognize */
912 case WSI_TOKEN_SKIPPING:
913 lwsl_parser("WSI_TOKEN_SKIPPING '%c'\n", c);
916 wsi->u.hdr.parser_state = WSI_TOKEN_SKIPPING_SAW_CR;
919 case WSI_TOKEN_SKIPPING_SAW_CR:
920 lwsl_parser("WSI_TOKEN_SKIPPING_SAW_CR '%c'\n", c);
921 if (wsi->u.hdr.ues != URIES_IDLE)
924 wsi->u.hdr.parser_state = WSI_TOKEN_NAME_PART;
925 wsi->u.hdr.lextable_pos = 0;
927 wsi->u.hdr.parser_state = WSI_TOKEN_SKIPPING;
929 /* we're done, ignore anything else */
931 case WSI_PARSING_COMPLETE:
932 lwsl_parser("WSI_PARSING_COMPLETE '%c'\n", c);
938 set_parsing_complete:
939 if (wsi->u.hdr.ues != URIES_IDLE)
941 if (lws_hdr_total_length(wsi, WSI_TOKEN_UPGRADE)) {
942 if (lws_hdr_total_length(wsi, WSI_TOKEN_VERSION))
943 wsi->ietf_spec_revision =
944 atoi(lws_hdr_simple_ptr(wsi, WSI_TOKEN_VERSION));
946 lwsl_parser("v%02d hdrs completed\n", wsi->ietf_spec_revision);
948 wsi->u.hdr.parser_state = WSI_PARSING_COMPLETE;
949 wsi->hdr_parsing_completed = 1;
/* sanitation failed: refuse the request outright */
954 lwsl_notice(" forbidding on uri sanitation\n");
955 lws_return_http_status(wsi, HTTP_STATUS_FORBIDDEN, NULL);
959 LWS_VISIBLE int lws_frame_is_binary(struct lws *wsi)
961 return wsi->u.ws.frame_is_binary;
/*
 * Push this wsi onto the per-thread rx_draining_ext_list (head insert) and
 * mark it as draining, so the service loop will revisit it to pull
 * remaining buffered rx out of the extension.  Idempotent: the visible
 * guard skips the insert when rx_draining_ext is already set (the early
 * return line itself is missing from this dump).
 */
965 lws_add_wsi_to_draining_ext_list(struct lws *wsi)
967 struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
969 if (wsi->u.ws.rx_draining_ext)
972 lwsl_ext("%s: RX EXT DRAINING: Adding to list\n", __func__);
974 wsi->u.ws.rx_draining_ext = 1;
/* head-insert into the singly-linked per-thread list */
975 wsi->u.ws.rx_draining_ext_list = pt->rx_draining_ext_list;
976 pt->rx_draining_ext_list = wsi;
/*
 * Clear this wsi's draining flag and unlink it from the per-thread
 * rx_draining_ext_list by walking the list with a pointer-to-pointer.
 * No-op (visible guard) when the wsi was not marked draining.
 * NOTE(review): the list-walk loop header is among the missing lines.
 */
980 lws_remove_wsi_from_draining_ext_list(struct lws *wsi)
982 struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
983 struct lws **w = &pt->rx_draining_ext_list;
985 if (!wsi->u.ws.rx_draining_ext)
988 lwsl_ext("%s: RX EXT DRAINING: Removing from list\n", __func__);
990 wsi->u.ws.rx_draining_ext = 0;
992 /* remove us from context draining ext list */
995 /* if us, point it instead to who we were pointing to */
996 *w = wsi->u.ws.rx_draining_ext_list;
999 w = &((*w)->u.ws.rx_draining_ext_list);
1001 wsi->u.ws.rx_draining_ext_list = NULL;
1005 * client-parser.c: lws_client_rx_sm() needs to be roughly kept in
1006 * sync with changes here, esp related to ext draining
/*
 * Per-byte RFC6455 websocket receive state machine: parses the frame
 * header (opcode/FIN/RSV, 7/16/64-bit lengths, masking key), unmasks and
 * accumulates payload into rx_ubuf, then "spills" — handling CLOSE/PING/
 * PONG control frames internally and passing data frames through any
 * active rx extension (with drain-list continuation) before delivering to
 * the user callback.  Must be kept in sync with lws_client_rx_sm().
 * NOTE(review): this dump is missing many interior lines (case labels,
 * breaks, gotos, returns) — original numbering skips — comments below
 * describe only the visible statements.
 */
1010 lws_rx_sm(struct lws *wsi, unsigned char c)
1012 int callback_action = LWS_CALLBACK_RECEIVE;
1013 int ret = 0, n, rx_draining_ext = 0;
1014 struct lws_tokens eff_buf;
1016 eff_buf.token = NULL;
1017 eff_buf.token_len = 0;
1018 if (wsi->socket_is_permanently_unusable)
1021 switch (wsi->lws_rx_parse_state) {
/* new frame, but an extension still holds undelivered rx: drain first */
1023 if (wsi->u.ws.rx_draining_ext) {
1024 eff_buf.token = NULL;
1025 eff_buf.token_len = 0;
1026 lws_remove_wsi_from_draining_ext_list(wsi);
1027 rx_draining_ext = 1;
1028 lwsl_debug("%s: doing draining flow\n", __func__);
1030 goto drain_extension;
1032 switch (wsi->ietf_spec_revision) {
1035 * no prepended frame key any more
1037 wsi->u.ws.all_zero_nonce = 1;
1041 lwsl_warn("lws_rx_sm: unknown spec version %d\n",
1042 wsi->ietf_spec_revision);
1046 case LWS_RXPS_04_mask_1:
1047 wsi->u.ws.mask[1] = c;
1049 wsi->u.ws.all_zero_nonce = 0;
1050 wsi->lws_rx_parse_state = LWS_RXPS_04_mask_2;
1052 case LWS_RXPS_04_mask_2:
1053 wsi->u.ws.mask[2] = c;
1055 wsi->u.ws.all_zero_nonce = 0;
1056 wsi->lws_rx_parse_state = LWS_RXPS_04_mask_3;
1058 case LWS_RXPS_04_mask_3:
1059 wsi->u.ws.mask[3] = c;
1061 wsi->u.ws.all_zero_nonce = 0;
1064 * start from the zero'th byte in the XOR key buffer since
1065 * this is the start of a frame with a new key
1068 wsi->u.ws.mask_idx = 0;
1070 wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_1;
1074 * 04 logical framing from the spec (all this is masked when incoming
1075 * and has to be unmasked)
1077 * We ignore the possibility of extension data because we don't
1078 * negotiate any extensions at the moment.
1081 *  0                   1                   2                   3
1082 * +-+-+-+-+-------+-+-------------+-------------------------------+
1083 * |F|R|R|R| opcode|R| Payload len |    Extended payload length    |
1084 * |I|S|S|S|  (4)  |S|     (7)     |             (16/63)           |
1085 * |N|V|V|V|       |V|             |   (if payload len==126/127)   |
1087 * +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
1088 * |     Extended payload length continued, if payload len == 127  |
1089 * + - - - - - - - - - - - - - - - +-------------------------------+
1090 * |                               |         Extension data        |
1091 * +-------------------------------+ - - - - - - - - - - - - - - - +
1093 * +---------------------------------------------------------------+
1094 * :                     Application data                          :
1095 * +---------------------------------------------------------------+
1097 * We pass payload through to userland as soon as we get it, ignoring
1098 * FIN.  It's up to userland to buffer it up if it wants to see a
1099 * whole unfragmented block of the original size (which may be up to
1103 case LWS_RXPS_04_FRAME_HDR_1:
/* first header byte: FIN flag, RSV bits, 4-bit opcode */
1106 wsi->u.ws.opcode = c & 0xf;
1107 wsi->u.ws.rsv = c & 0x70;
1108 wsi->u.ws.final = !!((c >> 7) & 1);
1110 switch (wsi->u.ws.opcode) {
1111 case LWSWSOPC_TEXT_FRAME:
1112 case LWSWSOPC_BINARY_FRAME:
1113 wsi->u.ws.rsv_first_msg = (c & 0x70);
1114 wsi->u.ws.frame_is_binary =
1115 wsi->u.ws.opcode == LWSWSOPC_BINARY_FRAME;
1127 lwsl_info("illegal opcode\n");
1130 wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN;
1133 case LWS_RXPS_04_FRAME_HDR_LEN:
1135 wsi->u.ws.this_frame_masked = !!(c & 0x80);
1139 /* control frames are not allowed to have big lengths */
1140 if (wsi->u.ws.opcode & 8)
1141 goto illegal_ctl_length;
1143 wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN16_2;
1146 /* control frames are not allowed to have big lengths */
1147 if (wsi->u.ws.opcode & 8)
1148 goto illegal_ctl_length;
1150 wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_8;
/* 7-bit immediate length */
1153 wsi->u.ws.rx_packet_length = c & 0x7f;
1154 if (wsi->u.ws.this_frame_masked)
1155 wsi->lws_rx_parse_state =
1156 LWS_RXPS_07_COLLECT_FRAME_KEY_1;
1158 if (wsi->u.ws.rx_packet_length)
1159 wsi->lws_rx_parse_state =
1160 LWS_RXPS_PAYLOAD_UNTIL_LENGTH_EXHAUSTED;
1162 wsi->lws_rx_parse_state = LWS_RXPS_NEW;
1169 case LWS_RXPS_04_FRAME_HDR_LEN16_2:
1170 wsi->u.ws.rx_packet_length = c << 8;
1171 wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN16_1;
1174 case LWS_RXPS_04_FRAME_HDR_LEN16_1:
1175 wsi->u.ws.rx_packet_length |= c;
1176 if (wsi->u.ws.this_frame_masked)
1177 wsi->lws_rx_parse_state =
1178 LWS_RXPS_07_COLLECT_FRAME_KEY_1;
1180 wsi->lws_rx_parse_state =
1181 LWS_RXPS_PAYLOAD_UNTIL_LENGTH_EXHAUSTED;
1184 case LWS_RXPS_04_FRAME_HDR_LEN64_8:
/* RFC6455: most significant bit of 64-bit length must be clear */
1186 lwsl_warn("b63 of length must be zero\n");
1187 /* kill the connection */
1190 #if defined __LP64__
1191 wsi->u.ws.rx_packet_length = ((size_t)c) << 56;
1193 wsi->u.ws.rx_packet_length = 0;
1195 wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_7;
1198 case LWS_RXPS_04_FRAME_HDR_LEN64_7:
1199 #if defined __LP64__
1200 wsi->u.ws.rx_packet_length |= ((size_t)c) << 48;
1202 wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_6;
1205 case LWS_RXPS_04_FRAME_HDR_LEN64_6:
1206 #if defined __LP64__
1207 wsi->u.ws.rx_packet_length |= ((size_t)c) << 40;
1209 wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_5;
1212 case LWS_RXPS_04_FRAME_HDR_LEN64_5:
1213 #if defined __LP64__
1214 wsi->u.ws.rx_packet_length |= ((size_t)c) << 32;
1216 wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_4;
1219 case LWS_RXPS_04_FRAME_HDR_LEN64_4:
1220 wsi->u.ws.rx_packet_length |= ((size_t)c) << 24;
1221 wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_3;
1224 case LWS_RXPS_04_FRAME_HDR_LEN64_3:
1225 wsi->u.ws.rx_packet_length |= ((size_t)c) << 16;
1226 wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_2;
1229 case LWS_RXPS_04_FRAME_HDR_LEN64_2:
1230 wsi->u.ws.rx_packet_length |= ((size_t)c) << 8;
1231 wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_1;
1234 case LWS_RXPS_04_FRAME_HDR_LEN64_1:
1235 wsi->u.ws.rx_packet_length |= ((size_t)c);
1236 if (wsi->u.ws.this_frame_masked)
1237 wsi->lws_rx_parse_state =
1238 LWS_RXPS_07_COLLECT_FRAME_KEY_1;
1240 wsi->lws_rx_parse_state =
1241 LWS_RXPS_PAYLOAD_UNTIL_LENGTH_EXHAUSTED;
/* collect the 4-byte client masking key */
1244 case LWS_RXPS_07_COLLECT_FRAME_KEY_1:
1245 wsi->u.ws.mask[0] = c;
1247 wsi->u.ws.all_zero_nonce = 0;
1248 wsi->lws_rx_parse_state = LWS_RXPS_07_COLLECT_FRAME_KEY_2;
1251 case LWS_RXPS_07_COLLECT_FRAME_KEY_2:
1252 wsi->u.ws.mask[1] = c;
1254 wsi->u.ws.all_zero_nonce = 0;
1255 wsi->lws_rx_parse_state = LWS_RXPS_07_COLLECT_FRAME_KEY_3;
1258 case LWS_RXPS_07_COLLECT_FRAME_KEY_3:
1259 wsi->u.ws.mask[2] = c;
1261 wsi->u.ws.all_zero_nonce = 0;
1262 wsi->lws_rx_parse_state = LWS_RXPS_07_COLLECT_FRAME_KEY_4;
1265 case LWS_RXPS_07_COLLECT_FRAME_KEY_4:
1266 wsi->u.ws.mask[3] = c;
1268 wsi->u.ws.all_zero_nonce = 0;
1269 wsi->lws_rx_parse_state =
1270 LWS_RXPS_PAYLOAD_UNTIL_LENGTH_EXHAUSTED;
1271 wsi->u.ws.mask_idx = 0;
1272 if (wsi->u.ws.rx_packet_length == 0) {
1273 wsi->lws_rx_parse_state = LWS_RXPS_NEW;
1279 case LWS_RXPS_PAYLOAD_UNTIL_LENGTH_EXHAUSTED:
1280 assert(wsi->u.ws.rx_ubuf);
1282 if (wsi->u.ws.rx_draining_ext)
1283 goto drain_extension;
1285 if (wsi->u.ws.rx_ubuf_head + LWS_PRE >=
1286 wsi->u.ws.rx_ubuf_alloc) {
1287 lwsl_err("Attempted overflow \n");
/* store the byte, unmasking with the rotating key if needed */
1290 if (wsi->u.ws.all_zero_nonce)
1291 wsi->u.ws.rx_ubuf[LWS_PRE +
1292 (wsi->u.ws.rx_ubuf_head++)] = c;
1294 wsi->u.ws.rx_ubuf[LWS_PRE +
1295 (wsi->u.ws.rx_ubuf_head++)] =
1297 (wsi->u.ws.mask_idx++) & 3];
1299 if (--wsi->u.ws.rx_packet_length == 0) {
1300 /* spill because we have the whole frame */
1301 wsi->lws_rx_parse_state = LWS_RXPS_NEW;
1306 * if there's no protocol max frame size given, we are
1307 * supposed to default to context->pt_serv_buf_size
1310 if (!wsi->protocol->rx_buffer_size &&
1311 wsi->u.ws.rx_ubuf_head != wsi->context->pt_serv_buf_size)
1314 if (wsi->protocol->rx_buffer_size &&
1315 wsi->u.ws.rx_ubuf_head !=
1316 wsi->protocol->rx_buffer_size)
1319 /* spill because we filled our rx buffer */
1322 * is this frame a control packet we should take care of at this
1323 * layer?  If so service it and hide it from the user callback
1326 lwsl_parser("spill on %s\n", wsi->protocol->name);
1328 switch (wsi->u.ws.opcode) {
1329 case LWSWSOPC_CLOSE:
1331 /* is this an acknowledgement of our close? */
1332 if (wsi->state == LWSS_AWAITING_CLOSE_ACK) {
1334 * fine he has told us he is closing too, let's
1337 lwsl_parser("seen client close ack\n");
1340 if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY)
1341 /* if he sends us 2 CLOSE, kill him */
1344 if (lws_partial_buffered(wsi)) {
1346 * if we're in the middle of something,
1347 * we can't do a normal close response and
1348 * have to just close our end.
1350 wsi->socket_is_permanently_unusable = 1;
1351 lwsl_parser("Closing on peer close due to Pending tx\n");
/* give the user code a look at the close payload first */
1355 if (user_callback_handle_rxflow(
1356 wsi->protocol->callback, wsi,
1357 LWS_CALLBACK_WS_PEER_INITIATED_CLOSE,
1359 &wsi->u.ws.rx_ubuf[LWS_PRE],
1360 wsi->u.ws.rx_ubuf_head))
1363 lwsl_parser("server sees client close packet\n");
1364 wsi->state = LWSS_RETURNED_CLOSE_ALREADY;
1365 /* deal with the close packet contents as a PONG */
1366 wsi->u.ws.payload_is_close = 1;
1367 goto process_as_ping;
1370 lwsl_info("received %d byte ping, sending pong\n",
1371 wsi->u.ws.rx_ubuf_head);
1373 if (wsi->u.ws.ping_pending_flag) {
1375 * there is already a pending ping payload
1376 * we should just log and drop
1378 lwsl_parser("DROP PING since one pending\n");
1382 /* control packets can only be < 128 bytes long */
1383 if (wsi->u.ws.rx_ubuf_head > 128 - 3) {
1384 lwsl_parser("DROP PING payload too large\n");
1388 /* stash the pong payload */
1389 memcpy(wsi->u.ws.ping_payload_buf + LWS_PRE,
1390 &wsi->u.ws.rx_ubuf[LWS_PRE],
1391 wsi->u.ws.rx_ubuf_head);
1393 wsi->u.ws.ping_payload_len = wsi->u.ws.rx_ubuf_head;
1394 wsi->u.ws.ping_pending_flag = 1;
1396 /* get it sent as soon as possible */
1397 lws_callback_on_writable(wsi);
1399 wsi->u.ws.rx_ubuf_head = 0;
1403 lwsl_info("received pong\n");
1404 lwsl_hexdump(&wsi->u.ws.rx_ubuf[LWS_PRE],
1405 wsi->u.ws.rx_ubuf_head);
1407 if (wsi->pending_timeout == PENDING_TIMEOUT_WS_PONG_CHECK_GET_PONG) {
1408 lwsl_info("received expected PONG on wsi %p\n", wsi);
1409 lws_set_timeout(wsi, NO_PENDING_TIMEOUT, 0);
1413 callback_action = LWS_CALLBACK_RECEIVE_PONG;
1416 case LWSWSOPC_TEXT_FRAME:
1417 case LWSWSOPC_BINARY_FRAME:
1418 case LWSWSOPC_CONTINUATION:
1422 lwsl_parser("passing opc %x up to exts\n",
1425 * It's something special we can't understand here.
1426 * Pass the payload up to the extension's parsing
1430 eff_buf.token = &wsi->u.ws.rx_ubuf[LWS_PRE];
1431 eff_buf.token_len = wsi->u.ws.rx_ubuf_head;
1433 if (lws_ext_cb_active(wsi, LWS_EXT_CB_EXTENDED_PAYLOAD_RX,
1435 /* not handle or fail */
1436 lwsl_ext("ext opc opcode 0x%x unknown\n",
1439 wsi->u.ws.rx_ubuf_head = 0;
1444 * No it's real payload, pass it up to the user callback.
1445 * It's nicely buffered with the pre-padding taken care of
1446 * so it can be sent straight out again using lws_write
1449 eff_buf.token = &wsi->u.ws.rx_ubuf[LWS_PRE];
1450 eff_buf.token_len = wsi->u.ws.rx_ubuf_head;
1453 lwsl_ext("%s: passing %d to ext\n", __func__, eff_buf.token_len);
1455 if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
1456 wsi->state == LWSS_AWAITING_CLOSE_ACK)
1459 n = lws_ext_cb_active(wsi, LWS_EXT_CB_PAYLOAD_RX, &eff_buf, 0);
1460 /* eff_buf may be pointing somewhere completely different now,
1465 * we may rely on this to get RX, just drop connection
1467 wsi->socket_is_permanently_unusable = 1;
1471 if (rx_draining_ext && eff_buf.token_len == 0)
1474 if (n && eff_buf.token_len) {
1475 /* extension had more... main loop will come back */
1476 lws_add_wsi_to_draining_ext_list(wsi);
1478 lws_remove_wsi_from_draining_ext_list(wsi);
1480 if (eff_buf.token_len > 0 ||
1481 callback_action == LWS_CALLBACK_RECEIVE_PONG) {
/* ext output is NUL-terminated for user convenience */
1482 eff_buf.token[eff_buf.token_len] = '\0';
1484 if (wsi->protocol->callback) {
1486 if (callback_action == LWS_CALLBACK_RECEIVE_PONG)
1487 lwsl_info("Doing pong callback\n");
1489 ret = user_callback_handle_rxflow(
1490 wsi->protocol->callback,
1492 (enum lws_callback_reasons)callback_action,
1498 lwsl_err("No callback on payload spill!\n");
1502 wsi->u.ws.rx_ubuf_head = 0;
1510 lwsl_warn("Control frame with xtended length is illegal\n");
1511 /* kill the connection */
1516 lws_remaining_packet_payload(struct lws *wsi)
1518 return wsi->u.ws.rx_packet_length;
1521 /* Once we reach LWS_RXPS_PAYLOAD_UNTIL_LENGTH_EXHAUSTED, we know how much
1522 * to expect in that state and can deal with it in bulk more efficiently.
/*
 * Bulk fast path for LWS_RXPS_PAYLOAD_UNTIL_LENGTH_EXHAUSTED: once the
 * frame header is parsed, consume as many payload bytes as fit (bounded by
 * the rx buffer space, the remaining frame length, and what the caller's
 * buffer holds), unmasking with a 4-byte unrolled XOR loop, instead of
 * going byte-at-a-time through lws_rx_sm().
 * NOTE(review): this definition continues past the end of the visible
 * chunk (advancing *buf, presumably), and interior lines are missing —
 * comments cover only the visible statements.
 */
1526 lws_payload_until_length_exhausted(struct lws *wsi, unsigned char **buf,
1529 unsigned char *buffer = *buf, mask[4];
/* per-protocol rx buffer size, else the context service buffer size */
1534 if (wsi->protocol->rx_buffer_size)
1535 buffer_size = wsi->protocol->rx_buffer_size;
1537 buffer_size = wsi->context->pt_serv_buf_size;
1538 avail = buffer_size - wsi->u.ws.rx_ubuf_head;
1540 /* do not consume more than we should */
1541 if (avail > wsi->u.ws.rx_packet_length)
1542 avail = wsi->u.ws.rx_packet_length;
1544 /* do not consume more than what is in the buffer */
1548 /* we want to leave 1 byte for the parser to handle properly */
1553 rx_ubuf = wsi->u.ws.rx_ubuf + LWS_PRE + wsi->u.ws.rx_ubuf_head;
1554 if (wsi->u.ws.all_zero_nonce)
1555 memcpy(rx_ubuf, buffer, avail);
/* pre-rotate the key so the unrolled loop can index it 0..3 */
1558 for (n = 0; n < 4; n++)
1559 mask[n] = wsi->u.ws.mask[(wsi->u.ws.mask_idx + n) & 3];
1561 /* deal with 4-byte chunks using unwrapped loop */
1564 *(rx_ubuf++) = *(buffer++) ^ mask[0];
1565 *(rx_ubuf++) = *(buffer++) ^ mask[1];
1566 *(rx_ubuf++) = *(buffer++) ^ mask[2];
1567 *(rx_ubuf++) = *(buffer++) ^ mask[3];
1569 /* and the remaining bytes bytewise */
1570 for (n = 0; n < (int)(avail & 3); n++)
1571 *(rx_ubuf++) = *(buffer++) ^ mask[n];
1573 wsi->u.ws.mask_idx = (wsi->u.ws.mask_idx + avail) & 3;
/* account for what we consumed */
1577 wsi->u.ws.rx_ubuf_head += avail;
1578 wsi->u.ws.rx_packet_length -= avail;