2 * libwebsockets - small server side websockets and web server implementation
4 * Copyright (C) 2010-2015 Andy Green <andy@warmcat.com>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation:
9 * version 2.1 of the License.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
22 #include "private-libwebsockets.h"
/*
 * lws_0405_frame_mask_generate() - create the per-frame 4-byte masking key
 * used for client -> server ws frames (RFC6455 / protocol v04+ framing).
 *
 * NOTE(review): this excerpt elides lines — the return type, braces, local
 * declaration of n, and the error-path return around the lwsl_parser() call
 * are not visible here.
 */
25 lws_0405_frame_mask_generate(struct lws *wsi)
/* clear the key first so a failed random read cannot leave stale key bytes */
28 wsi->u.ws.mask[0] = 0;
29 wsi->u.ws.mask[1] = 0;
30 wsi->u.ws.mask[2] = 0;
31 wsi->u.ws.mask[3] = 0;
34 /* fetch the per-frame nonce */
36 n = lws_get_random(lws_get_context(wsi), wsi->u.ws.mask, 4);
/* presumably only reached when n != 4, ie, RNG read failed — guard elided */
38 lwsl_parser("Unable to read from random device %s %d\n",
39 SYSTEM_RANDOM_FILEPATH, n);
43 /* start masking from first byte of masking key buffer */
44 wsi->u.ws.mask_idx = 0;
/*
 * lwsl_hexdump() - emit a classic hexdump of vbuf[0..len) to the debug log:
 * 16 bytes per row as "OFFS: XX XX ..." followed by an ASCII column where
 * only printable bytes (0x20..0x7e) appear verbatim.
 *
 * NOTE(review): the 'start = n' assignment and the 'line'/'p' buffer setup
 * for each row are elided in this excerpt.
 */
51 LWS_VISIBLE void lwsl_hexdump(void *vbuf, size_t len)
53 unsigned char *buf = (unsigned char *)vbuf;
54 unsigned int n, m, start;
/* one logged row per 16 input bytes */
60 for (n = 0; n < len;) {
/* row offset prefix, eg "0040: " */
64 p += sprintf(p, "%04X: ", start);
66 for (m = 0; m < 16 && n < len; m++)
67 p += sprintf(p, "%02X ", buf[n++]);
/* ASCII rendering column for the same 16 bytes */
73 for (m = 0; m < 16 && (start + m) < len; m++) {
74 if (buf[start + m] >= ' ' && buf[start + m] < 127)
75 *p++ = buf[start + m];
84 lwsl_debug("%s", line);
/*
 * lws_issue_raw() - try to write len bytes from buf straight to the socket
 * (via lws_ssl_capable_write()); if the kernel accepts only part of it, the
 * unsent remainder is buffered in wsi->trunc_alloc ("truncated send") and
 * a writable callback is scheduled to drain it later.
 */
92 * notice this returns number of bytes consumed, or -1
95 int lws_issue_raw(struct lws *wsi, unsigned char *buf, size_t len)
97 struct lws_context *context = lws_get_context(wsi);
98 struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
99 size_t real_len = len;
103 lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_API_WRITE, 1);
107 /* just ignore sends after we cleared the truncation buffer */
108 if (wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE &&
/*
 * A new buffer arriving while trunc_len is nonzero, and which is not a
 * pointer into the pending trunc_alloc region, means user code called
 * lws_write() outside the writable callback — log loudly.
 */
112 if (wsi->trunc_len && (buf < wsi->trunc_alloc ||
113 buf > (wsi->trunc_alloc + wsi->trunc_len + wsi->trunc_offset))) {
115 strncpy(dump, (char *)buf, sizeof(dump) - 1);
116 dump[sizeof(dump) - 1] = '\0';
117 #if defined(LWS_WITH_ESP8266)
118 lwsl_err("****** %p: Sending new %lu (%s), pending truncated ...\n",
119 wsi, (unsigned long)len, dump);
/* NOTE(review): the #else separating these two lwsl_err() calls is elided */
121 lwsl_err("****** %p: Sending new %lu (%s), pending truncated ...\n"
122 " It's illegal to do an lws_write outside of\n"
123 " the writable callback: fix your code",
124 wsi, (unsigned long)len, dump);
/* give active extensions a chance to take over the actual socket send */
131 m = lws_ext_cb_active(wsi, LWS_EXT_CB_PACKET_TX_DO_SEND, &buf, len);
134 if (m) /* handled */ {
136 goto handle_truncated_send;
139 if (!lws_socket_is_valid(wsi->desc.sockfd))
140 lwsl_warn("** error invalid sock but expected to send\n");
/* limit the write size: protocol tx hint, else rx size, else pt buffer */
143 if (wsi->protocol->tx_packet_size)
144 n = wsi->protocol->tx_packet_size;
146 n = wsi->protocol->rx_buffer_size;
148 n = context->pt_serv_buf_size;
153 #if defined(LWS_WITH_ESP8266)
/* ESP8266: a previous send is still completing asynchronously; requeue */
154 if (wsi->pending_send_completion) {
156 goto handle_truncated_send;
160 /* nope, send it on the socket directly */
161 lws_latency_pre(context, wsi);
162 n = lws_ssl_capable_write(wsi, buf, n);
163 lws_latency(context, wsi, "send lws_issue_raw", n, n == len);
165 //lwsl_notice("lws_ssl_capable_write: %d\n", n);
168 case LWS_SSL_CAPABLE_ERROR:
169 /* we're going to close, let close know sends aren't possible */
170 wsi->socket_is_permanently_unusable = 1;
172 case LWS_SSL_CAPABLE_MORE_SERVICE:
173 /* nothing got sent, not fatal, retry the whole thing later */
178 handle_truncated_send:
180 * we were already handling a truncated send?
182 if (wsi->trunc_len) {
183 lwsl_info("%p partial adv %d (vs %ld)\n", wsi, n, (long)real_len);
/* advance past what just went out; trunc_len decrement is elided here */
184 wsi->trunc_offset += n;
187 if (!wsi->trunc_len) {
188 lwsl_info("***** %p partial send completed\n", wsi);
189 /* done with it, but don't free it */
191 if (wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE) {
192 lwsl_info("***** %p signalling to close now\n", wsi);
193 return -1; /* retry closing now */
196 /* always callback on writeable */
197 lws_callback_on_writable(wsi);
202 if ((unsigned int)n == real_len)
203 /* what we just sent went out cleanly */
207 * Newly truncated send. Buffer the remainder (it will get
208 * first priority next time the socket is writable)
210 lwsl_debug("%p new partial sent %d from %lu total\n", wsi, n,
211 (unsigned long)real_len);
213 lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_WRITE_PARTIALS, 1);
214 lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_B_PARTIALS_ACCEPTED_PARTS, n);
217 * - if we still have a suitable malloc lying around, use it
218 * - or, if too small, reallocate it
219 * - or, if no buffer, create it
221 if (!wsi->trunc_alloc || real_len - n > wsi->trunc_alloc_len) {
222 lws_free(wsi->trunc_alloc);
224 wsi->trunc_alloc_len = real_len - n;
225 wsi->trunc_alloc = lws_malloc(real_len - n);
226 if (!wsi->trunc_alloc) {
227 lwsl_err("truncated send: unable to malloc %lu\n",
228 (unsigned long)(real_len - n));
/* stash the unsent tail; it will be drained before any new writes */
232 wsi->trunc_offset = 0;
233 wsi->trunc_len = real_len - n;
234 memcpy(wsi->trunc_alloc, buf + n, real_len - n);
236 /* since something buffered, force it to get another chance to send */
237 lws_callback_on_writable(wsi);
/*
 * lws_write() - public write entry point: applies ws framing (opcode, length
 * encoding, client-side masking), runs TX extensions (which may stash/drain
 * payload), and hands the framed buffer to lws_issue_raw() — or to
 * lws_http2_frame_write() on an http/2 stream.
 *
 * NOTE(review): this excerpt elides many lines (switch arms, #ifdef/#endif
 * closures, 'pre' computation, returns); comments below are hedged where
 * the surrounding control flow is not visible.
 */
242 LWS_VISIBLE int lws_write(struct lws *wsi, unsigned char *buf, size_t len,
243 enum lws_write_protocol wp)
245 struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
/* only client -> server ws frames are masked per RFC6455 */
246 int masked7 = (wsi->mode == LWSCM_WS_CLIENT);
247 unsigned char is_masked_bit = 0;
248 unsigned char *dropmask = NULL;
249 struct lws_tokens eff_buf;
251 size_t orig_len = len;
/* a child wsi whose IO is proxied through its parent connection */
253 if (wsi->parent_carries_io) {
254 struct lws_write_passthru pas;
261 if (wsi->parent->protocol->callback(wsi->parent,
262 LWS_CALLBACK_CHILD_WRITE_VIA_PARENT,
263 wsi->parent->user_space,
270 lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_API_LWS_WRITE, 1);
/* presumably guarded by a sanity check on len that is elided here */
273 lwsl_err("%s: suspicious len int %d, ulong %lu\n", __func__,
274 (int)len, (unsigned long)len);
278 lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_B_WRITE, len);
280 #ifdef LWS_WITH_ACCESS_LOG
281 wsi->access_log.sent += len;
284 wsi->vhost->conn_stats.tx += len;
/*
 * If an extension was mid-drain on this wsi, this write IS the drain:
 * unlink us from the per-thread draining list and force the write type
 * to CONTINUATION carrying the stashed frame's FIN/flags.
 */
286 if (wsi->state == LWSS_ESTABLISHED && wsi->u.ws.tx_draining_ext) {
287 /* remove us from the list */
288 struct lws **w = &pt->tx_draining_ext_list;
290 // lwsl_notice("%s: TX EXT DRAINING: Remove from list\n", __func__);
291 wsi->u.ws.tx_draining_ext = 0;
292 /* remove us from context draining ext list */
295 *w = wsi->u.ws.tx_draining_ext_list;
298 w = &((*w)->u.ws.tx_draining_ext_list);
300 wsi->u.ws.tx_draining_ext_list = NULL;
301 wp = (wsi->u.ws.tx_draining_stashed_wp & 0xc0) |
302 LWS_WRITE_CONTINUATION;
304 lwsl_ext("FORCED draining wp to 0x%02X\n", wp);
/* any outgoing traffic counts as ws activity for ping/pong keepalive */
307 lws_restart_ws_ping_pong_timer(wsi);
/* http writes bypass ws framing entirely (branch target elided) */
309 if (wp == LWS_WRITE_HTTP ||
310 wp == LWS_WRITE_HTTP_FINAL ||
311 wp == LWS_WRITE_HTTP_HEADERS)
314 /* if not in a state to send stuff, then just send nothing */
316 if (wsi->state != LWSS_ESTABLISHED &&
317 ((wsi->state != LWSS_RETURNED_CLOSE_ALREADY &&
318 wsi->state != LWSS_AWAITING_CLOSE_ACK) ||
319 wp != LWS_WRITE_CLOSE))
322 /* if we are continuing a frame that already had its header done */
324 if (wsi->u.ws.inside_frame) {
325 lwsl_debug("INSIDE FRAME\n");
326 goto do_more_inside_frame;
/* assume the user buffer can be sent as-is until an extension rewrites it */
329 wsi->u.ws.clean_buffer = 1;
332 * give a chance to the extensions to modify payload
333 * the extension may decide to produce unlimited payload erratically
334 * (eg, compression extension), so we require only that if he produces
335 * something, it will be a complete fragment of the length known at
336 * the time (just the fragment length known), and if he has
337 * more we will come back next time he is writeable and allow him to
338 * produce more fragments until he's drained.
340 * This allows what is sent each time it is writeable to be limited to
341 * a size that can be sent without partial sends or blocking, allows
342 * interleaving of control frames and other connection service.
344 eff_buf.token = (char *)buf;
345 eff_buf.token_len = len;
350 case LWS_WRITE_CLOSE:
353 lwsl_debug("LWS_EXT_CB_PAYLOAD_TX\n");
354 n = lws_ext_cb_active(wsi, LWS_EXT_CB_PAYLOAD_TX, &eff_buf, wp);
/* n > 0 with output pending: extension holds more, enter draining mode */
358 if (n && eff_buf.token_len) {
359 lwsl_debug("drain len %d\n", (int)eff_buf.token_len);
360 /* extension requires further draining */
361 wsi->u.ws.tx_draining_ext = 1;
362 wsi->u.ws.tx_draining_ext_list = pt->tx_draining_ext_list;
363 pt->tx_draining_ext_list = wsi;
364 /* we must come back to do more */
365 lws_callback_on_writable(wsi);
367 * keep a copy of the write type for the overall
368 * action that has provoked generation of these
369 * fragments, so the last guy can use its FIN state.
371 wsi->u.ws.tx_draining_stashed_wp = wp;
372 /* this is definitely not actually the last fragment
373 * because the extension asserted he has more coming
374 * So make sure this intermediate one doesn't go out
377 wp |= LWS_WRITE_NO_FIN;
/* a previously stashed write type now gets restored onto this frame */
380 if (eff_buf.token_len && wsi->u.ws.stashed_write_pending) {
381 wsi->u.ws.stashed_write_pending = 0;
382 wp = (wp &0xc0) | (int)wsi->u.ws.stashed_write_type;
387 * an extension did something we need to keep... for example, if
388 * compression extension, it has already updated its state according
389 * to this being issued
391 if ((char *)buf != eff_buf.token) {
393 * ext might eat it, but not have anything to issue yet.
394 * In that case we have to follow his lead, but stash and
395 * replace the write type that was lost here the first time.
397 if (len && !eff_buf.token_len) {
398 if (!wsi->u.ws.stashed_write_pending)
399 wsi->u.ws.stashed_write_type = (char)wp & 0x3f;
400 wsi->u.ws.stashed_write_pending = 1;
404 * extension recreated it:
405 * need to buffer this if not all sent
407 wsi->u.ws.clean_buffer = 0;
/* from here on, frame the (possibly extension-rewritten) buffer */
410 buf = (unsigned char *)eff_buf.token;
411 len = eff_buf.token_len;
413 lwsl_debug("%p / %d\n", buf, (int)len);
416 lwsl_err("null buf (%d)\n", (int)len);
420 switch (wsi->ietf_spec_revision) {
/* client frames: reserve 4 bytes before the payload for the mask nonce */
424 dropmask = &buf[0 - pre];
425 is_masked_bit = 0x80;
/* map LWS_WRITE_* to the RFC6455 opcode (several case labels elided) */
430 n = LWSWSOPC_TEXT_FRAME;
432 case LWS_WRITE_BINARY:
433 n = LWSWSOPC_BINARY_FRAME;
435 case LWS_WRITE_CONTINUATION:
436 n = LWSWSOPC_CONTINUATION;
439 case LWS_WRITE_CLOSE:
449 lwsl_warn("lws_write: unknown write opc / wp\n");
/* set the FIN bit unless caller asked for a non-final fragment */
453 if (!(wp & LWS_WRITE_NO_FIN))
/* 7-bit length form (len <= 125) */
459 buf[-pre + 1] = (unsigned char)(len | is_masked_bit);
/* 16-bit extended length form (126 marker + 2 bytes, network order) */
464 buf[-pre + 1] = 126 | is_masked_bit;
465 buf[-pre + 2] = (unsigned char)(len >> 8);
466 buf[-pre + 3] = (unsigned char)len;
/* 64-bit extended length form (127 marker + 8 bytes, network order) */
470 buf[-pre + 1] = 127 | is_masked_bit;
472 buf[-pre + 2] = (len >> 56) & 0x7f;
473 buf[-pre + 3] = len >> 48;
474 buf[-pre + 4] = len >> 40;
475 buf[-pre + 5] = len >> 32;
482 buf[-pre + 6] = (unsigned char)(len >> 24);
483 buf[-pre + 7] = (unsigned char)(len >> 16);
484 buf[-pre + 8] = (unsigned char)(len >> 8);
485 buf[-pre + 9] = (unsigned char)len;
491 do_more_inside_frame:
494 * Deal with masking if we are in client -> server direction and
/* generate a fresh nonce only at frame start, never mid-frame */
499 if (!wsi->u.ws.inside_frame)
500 if (lws_0405_frame_mask_generate(wsi)) {
501 lwsl_err("frame mask generation failed\n");
506 * in v7, just mask the payload
508 if (dropmask) { /* never set if already inside frame */
509 for (n = 4; n < (int)len + 4; n++)
510 dropmask[n] = dropmask[n] ^ wsi->u.ws.mask[
511 (wsi->u.ws.mask_idx++) & 3];
513 /* copy the frame nonce into place */
514 memcpy(dropmask, wsi->u.ws.mask, 4);
520 case LWS_WRITE_CLOSE:
521 /* lwsl_hexdump(&buf[-pre], len); */
523 case LWS_WRITE_HTTP_FINAL:
524 case LWS_WRITE_HTTP_HEADERS:
/* http/2 stream: wrap in an h2 DATA/HEADERS frame instead of ws framing */
528 if (wsi->mode == LWSCM_HTTP2_SERVING) {
529 unsigned char flags = 0;
531 n = LWS_HTTP2_FRAME_TYPE_DATA;
532 if (wp == LWS_WRITE_HTTP_HEADERS) {
533 n = LWS_HTTP2_FRAME_TYPE_HEADERS;
534 flags = LWS_HTTP2_FLAG_END_HEADERS;
535 if (wsi->u.http2.send_END_STREAM)
536 flags |= LWS_HTTP2_FLAG_END_STREAM;
/* track remaining declared content-length; final write ends the stream */
539 if ((wp == LWS_WRITE_HTTP ||
540 wp == LWS_WRITE_HTTP_FINAL) &&
541 wsi->u.http.content_length) {
542 wsi->u.http.content_remain -= len;
543 lwsl_info("%s: content_remain = %llu\n", __func__,
544 (unsigned long long)wsi->u.http.content_remain);
545 if (!wsi->u.http.content_remain) {
546 lwsl_info("%s: selecting final write mode\n", __func__);
547 wp = LWS_WRITE_HTTP_FINAL;
551 if (wp == LWS_WRITE_HTTP_FINAL && wsi->u.http2.END_STREAM) {
552 lwsl_info("%s: setting END_STREAM\n", __func__);
553 flags |= LWS_HTTP2_FLAG_END_STREAM;
556 return lws_http2_frame_write(wsi, n, flags,
557 wsi->u.http2.my_stream_id, len, buf);
/* plain http/1.x: send header + payload straight out */
560 return lws_issue_raw(wsi, (unsigned char *)buf - pre, len + pre);
566 * give any active extensions a chance to munge the buffer
567 * before send. We pass in a pointer to an lws_tokens struct
568 * prepared with the default buffer and content length that's in
569 * there. Rather than rewrite the default buffer, extensions
570 * that expect to grow the buffer can adapt .token to
571 * point to their own per-connection buffer in the extension
572 * user allocation. By default with no extensions or no
573 * extension callback handling, just the normal input buffer is
574 * used then so it is efficient.
576 * callback returns 1 in case it wants to spill more buffers
578 * This takes care of holding the buffer if send is incomplete, ie,
579 * if wsi->u.ws.clean_buffer is 0 (meaning an extension meddled with
580 * the buffer). If wsi->u.ws.clean_buffer is 1, it will instead
581 * return to the user code how much OF THE USER BUFFER was consumed.
584 n = lws_issue_raw_ext_access(wsi, buf - pre, len + pre);
585 wsi->u.ws.inside_frame = 1;
589 if (n == (int)len + pre) {
590 /* everything in the buffer was handled (or rebuffered...) */
591 wsi->u.ws.inside_frame = 0;
596 * it is how many bytes of user buffer got sent... may be < orig_len
597 * in which case callback when writable has already been arranged
598 * and user code can call lws_write() again with the rest
/*
 * lws_serve_http_file_fragment() - pump the next chunk(s) of a file being
 * served over http out of the vfs and through lws_write(), looping while the
 * pipe is not choked.  Handles pending truncated sends, byte ranges
 * (LWS_WITH_RANGES), chunked transfer-encoding rewrite hooks, and
 * end-of-file completion callbacks.
 *
 * Returns >0 when the file completed, 0 when more service is needed,
 * and (elided here) <0 on error/close.
 */
605 LWS_VISIBLE int lws_serve_http_file_fragment(struct lws *wsi)
607 struct lws_context *context = wsi->context;
608 struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
609 struct lws_process_html_args args;
610 lws_filepos_t amount, poss;
612 #if defined(LWS_WITH_RANGES)
613 unsigned char finished = 0;
617 // lwsl_notice("%s (trunc len %d)\n", __func__, wsi->trunc_len);
/* h2 substreams rely on h2 flow control, not the pipe-choked test */
619 while (wsi->http2_substream || !lws_send_pipe_choked(wsi)) {
/* first drain any previously-truncated send before reading more file */
621 if (wsi->trunc_len) {
622 if (lws_issue_raw(wsi, wsi->trunc_alloc +
624 wsi->trunc_len) < 0) {
625 lwsl_info("%s: closing\n", __func__);
631 if (wsi->u.http.filepos == wsi->u.http.filelen)
638 #if defined(LWS_WITH_RANGES)
/* entering a new range: seek to its start and emit a multipart header */
639 if (wsi->u.http.range.count_ranges && !wsi->u.http.range.inside) {
641 lwsl_notice("%s: doing range start %llu\n", __func__, wsi->u.http.range.start);
643 if ((long long)lws_vfs_file_seek_cur(wsi->u.http.fop_fd,
644 wsi->u.http.range.start -
645 wsi->u.http.filepos) < 0)
648 wsi->u.http.filepos = wsi->u.http.range.start;
650 if (wsi->u.http.range.count_ranges > 1) {
651 n = lws_snprintf((char *)p, context->pt_serv_buf_size,
653 "Content-Type: %s\x0d\x0a"
654 "Content-Range: bytes %llu-%llu/%llu\x0d\x0a"
656 wsi->u.http.multipart_content_type,
657 wsi->u.http.range.start,
658 wsi->u.http.range.end,
659 wsi->u.http.range.extent);
663 wsi->u.http.range.budget = wsi->u.http.range.end -
664 wsi->u.http.range.start + 1;
665 wsi->u.http.range.inside = 1;
/* how much buffer remains for file payload after any range header */
669 poss = context->pt_serv_buf_size - n;
672 * if there is a hint about how much we will do well to send at one time,
673 * restrict ourselves to only trying to send that.
675 if (wsi->protocol->tx_packet_size && poss > wsi->protocol->tx_packet_size)
676 poss = wsi->protocol->tx_packet_size;
678 #if defined(LWS_WITH_RANGES)
679 if (wsi->u.http.range.count_ranges) {
680 if (wsi->u.http.range.count_ranges > 1)
681 poss -= 7; /* allow for final boundary */
682 if (poss > wsi->u.http.range.budget)
683 poss = wsi->u.http.range.budget;
686 if (wsi->sending_chunked) {
687 /* we need to drop the chunk size in here */
689 /* allow for the chunk to grow by 128 in translation */
693 if (lws_vfs_file_read(wsi->u.http.fop_fd, &amount, p, poss) < 0)
694 goto file_had_it; /* caller will close */
696 //lwsl_notice("amount %ld\n", amount);
698 if (wsi->sending_chunked)
701 n = (p - pt->serv_buf) + (int)amount;
/* reading file data counts as progress; push the content timeout out */
703 lws_set_timeout(wsi, PENDING_TIMEOUT_HTTP_CONTENT,
704 context->timeout_secs);
/* chunked mode: let the interpreting protocol rewrite the html chunk */
706 if (wsi->sending_chunked) {
709 args.max_len = (unsigned int)poss + 128;
710 args.final = wsi->u.http.filepos + n ==
712 if (user_callback_handle_rxflow(
713 wsi->vhost->protocols[(int)wsi->protocol_interpret_idx].callback, wsi,
714 LWS_CALLBACK_PROCESS_HTML,
715 wsi->user_space, &args, 0) < 0)
718 p = (unsigned char *)args.p;
722 #if defined(LWS_WITH_RANGES)
723 if (wsi->u.http.range.send_ctr + 1 ==
724 wsi->u.http.range.count_ranges && // last range
725 wsi->u.http.range.count_ranges > 1 && // was 2+ ranges (ie, multipart)
726 wsi->u.http.range.budget - amount == 0) {// final part
727 n += lws_snprintf((char *)pt->serv_buf + n, 6,
728 "_lws\x0d\x0a"); // append trailing boundary
729 lwsl_debug("added trailing boundary\n");
732 m = lws_write(wsi, p, n,
733 wsi->u.http.filepos == wsi->u.http.filelen ?
734 LWS_WRITE_HTTP_FINAL :
740 wsi->u.http.filepos += amount;
742 #if defined(LWS_WITH_RANGES)
743 if (wsi->u.http.range.count_ranges >= 1) {
744 wsi->u.http.range.budget -= amount;
745 if (wsi->u.http.range.budget == 0) {
746 lwsl_notice("range budget exhausted\n");
747 wsi->u.http.range.inside = 0;
748 wsi->u.http.range.send_ctr++;
750 if (lws_ranges_next(&wsi->u.http.range) < 1) {
/* lws_write() may consume less than n (partial send) */
759 /* adjust for what was not sent */
760 if (lws_vfs_file_seek_cur(wsi->u.http.fop_fd,
/* everything flushed and at EOF (range condition elided): finish up */
767 if ((!wsi->trunc_len && wsi->u.http.filepos == wsi->u.http.filelen)
768 #if defined(LWS_WITH_RANGES)
774 wsi->state = LWSS_HTTP;
775 /* we might be in keepalive, so close it off here */
776 lws_vfs_file_close(&wsi->u.http.fop_fd);
778 lwsl_debug("file completed\n");
780 if (wsi->protocol->callback)
781 /* ignore callback returned value */
782 if (user_callback_handle_rxflow(
783 wsi->protocol->callback, wsi,
784 LWS_CALLBACK_HTTP_FILE_COMPLETION,
785 wsi->user_space, NULL, 0) < 0)
788 return 1; /* >0 indicates completed */
/* pipe choked or budget used: come back when writable again */
792 lws_callback_on_writable(wsi);
794 return 0; /* indicates further processing must be done */
/* error path: release the fop fd before reporting failure */
797 lws_vfs_file_close(&wsi->u.http.fop_fd);
/*
 * lws_ssl_capable_read_no_ssl() - plaintext recv() wrapper with the same
 * contract as the ssl variant: returns bytes read, or
 * LWS_SSL_CAPABLE_MORE_SERVICE on EAGAIN/EWOULDBLOCK/EINTR, or
 * LWS_SSL_CAPABLE_ERROR on a real socket error.
 * NOTE(review): return type line, decls and the n > 0 guard are elided here.
 */
804 lws_ssl_capable_read_no_ssl(struct lws *wsi, unsigned char *buf, int len)
806 struct lws_context *context = wsi->context;
807 struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
810 lws_stats_atomic_bump(context, pt, LWSSTATS_C_API_READ, 1);
812 n = recv(wsi->desc.sockfd, (char *)buf, len, 0);
/* presumably only on n > 0: account the rx and feed the keepalive timer */
815 wsi->vhost->conn_stats.rx += n;
816 lws_stats_atomic_bump(context, pt, LWSSTATS_B_READ, n);
817 lws_restart_ws_ping_pong_timer(wsi);
/* transient conditions: caller should simply retry on next service */
821 if (LWS_ERRNO == LWS_EAGAIN ||
822 LWS_ERRNO == LWS_EWOULDBLOCK ||
823 LWS_ERRNO == LWS_EINTR)
824 return LWS_SSL_CAPABLE_MORE_SERVICE;
826 lwsl_notice("error on reading from skt : %d\n", LWS_ERRNO);
827 return LWS_SSL_CAPABLE_ERROR;
/*
 * lws_ssl_capable_write_no_ssl() - plaintext send() wrapper: returns bytes
 * written, LWS_SSL_CAPABLE_MORE_SERVICE on EAGAIN/EWOULDBLOCK/EINTR
 * (switching the socket to blocking-send mode on EWOULDBLOCK), or
 * LWS_SSL_CAPABLE_ERROR otherwise.  MSG_NOSIGNAL suppresses SIGPIPE on a
 * peer-closed socket.
 * NOTE(review): return type, decls and the n >= 0 success return are elided.
 */
831 lws_ssl_capable_write_no_ssl(struct lws *wsi, unsigned char *buf, int len)
836 n = send(wsi->desc.sockfd, (char *)buf, len, MSG_NOSIGNAL);
837 // lwsl_info("%s: sent len %d result %d", __func__, len, n);
/* transient conditions: not fatal, ask for another writable callback */
841 if (LWS_ERRNO == LWS_EAGAIN ||
842 LWS_ERRNO == LWS_EWOULDBLOCK ||
843 LWS_ERRNO == LWS_EINTR) {
844 if (LWS_ERRNO == LWS_EWOULDBLOCK) {
845 lws_set_blocking_send(wsi);
848 return LWS_SSL_CAPABLE_MORE_SERVICE;
858 lwsl_debug("ERROR writing len %d to skt fd %d err %d / errno %d\n", len, wsi->desc.sockfd, n, LWS_ERRNO);
859 return LWS_SSL_CAPABLE_ERROR;
863 lws_ssl_pending_no_ssl(struct lws *wsi)
866 #if defined(LWS_WITH_ESP32)