2 * libwebsockets - small server side websockets and web server implementation
4 * Copyright (C) 2010-2015 Andy Green <andy@warmcat.com>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation:
9 * version 2.1 of the License.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
22 #include "private-libwebsockets.h"
/*
 * lws_0405_frame_mask_generate() - fill wsi->u.ws.mask[] with a fresh
 * 4-byte nonce used to mask an outgoing client->server ws frame.
 * NOTE(review): this chunk is an incomplete excerpt; the return type,
 * local declarations, braces and the failure-path control flow around
 * the lwsl_parser() call are not visible here.
 */
25 lws_0405_frame_mask_generate(struct lws *wsi)
/* start from a known state in case the random fetch fails or is short */
28 wsi->u.ws.mask[0] = 0;
29 wsi->u.ws.mask[1] = 0;
30 wsi->u.ws.mask[2] = 0;
31 wsi->u.ws.mask[3] = 0;
34 /* fetch the per-frame nonce */
36 n = lws_get_random(lws_get_context(wsi), wsi->u.ws.mask, 4);
/* presumably reached only when fewer than 4 random bytes were read
 * -- the guarding condition is missing from this excerpt */
38 lwsl_parser("Unable to read from random device %s %d\n",
39 SYSTEM_RANDOM_FILEPATH, n);
43 /* start masking from first byte of masking key buffer */
44 wsi->u.ws.mask_idx = 0;
/*
 * lwsl_hexdump() - debug-log a buffer as canonical hexdump lines:
 * a 4-hex-digit offset, up to 16 hex bytes, then a printable-ASCII
 * rendering of the same 16 bytes.
 * NOTE(review): incomplete excerpt -- the `line`/`p` declarations,
 * the `start` assignment, padding logic and the non-printable-byte
 * branch are missing from view.
 */
51 LWS_VISIBLE void lwsl_hexdump(void *vbuf, size_t len)
53 unsigned char *buf = (unsigned char *)vbuf;
54 unsigned int n, m, start;
/* one output line per 16 input bytes; n advances inside the hex loop */
60 for (n = 0; n < len;) {
64 p += sprintf(p, "%04X: ", start);
/* hex column: up to 16 bytes, stopping early at end of buffer */
66 for (m = 0; m < 16 && n < len; m++)
67 p += sprintf(p, "%02X ", buf[n++]);
/* ascii column: printable range ' '..'~' emitted verbatim; the
 * else-branch (presumably '.') is not visible in this excerpt */
73 for (m = 0; m < 16 && (start + m) < len; m++) {
74 if (buf[start + m] >= ' ' && buf[start + m] < 127)
75 *p++ = buf[start + m];
84 lwsl_debug("%s", line);
/*
 * lws_issue_raw() - lowest-level send path: try to write `len` bytes
 * from `buf` on the wsi's socket, buffering any unsent remainder in
 * wsi->trunc_alloc so it gets first priority at the next writable
 * callback.
 * NOTE(review): incomplete excerpt -- several conditions, `#else`
 * branches, the switch head around the LWS_SSL_CAPABLE_* cases and
 * various returns are missing from view.
 */
92 * notice this returns number of bytes consumed, or -1
95 int lws_issue_raw(struct lws *wsi, unsigned char *buf, size_t len)
97 struct lws_context *context = lws_get_context(wsi);
98 struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
/* remember the caller's full length; `len`/`buf` may be rewritten by
 * extensions below */
99 size_t real_len = len;
103 lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_API_WRITE, 1);
107 /* just ignore sends after we cleared the truncation buffer */
108 if (wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE &&
/* detect an illegal new send while a truncated send is still pending:
 * legal re-sends must come from inside the trunc_alloc region */
112 if (wsi->trunc_len && (buf < wsi->trunc_alloc ||
113 buf > (wsi->trunc_alloc + wsi->trunc_len + wsi->trunc_offset))) {
/* NOTE(review): strncpy here may leave `dump` unterminated on its own;
 * the explicit NUL on the next line covers that */
115 strncpy(dump, (char *)buf, sizeof(dump) - 1);
116 dump[sizeof(dump) - 1] = '\0';
117 #if defined(LWS_WITH_ESP8266)
118 lwsl_err("****** %p: Sending new %lu (%s), pending truncated ...\n",
119 wsi, (unsigned long)len, dump);
/* non-ESP8266 variant of the same diagnostic (the #else is not
 * visible in this excerpt) */
121 lwsl_err("****** %p: Sending new %lu (%s), pending truncated ...\n"
122 " It's illegal to do an lws_write outside of\n"
123 " the writable callback: fix your code",
124 wsi, (unsigned long)len, dump);
/* give active extensions a chance to take over the send entirely */
131 m = lws_ext_cb_active(wsi, LWS_EXT_CB_PACKET_TX_DO_SEND, &buf, len);
134 if (m) /* handled */ {
136 goto handle_truncated_send;
139 if (!lws_socket_is_valid(wsi->desc.sockfd))
140 lwsl_warn("** error invalid sock but expected to send\n");
/* choose per-write chunk limit: protocol tx hint, else rx buffer
 * size, else the per-thread service buffer size */
143 if (wsi->protocol->tx_packet_size)
144 n = wsi->protocol->tx_packet_size;
146 n = wsi->protocol->rx_buffer_size;
148 n = context->pt_serv_buf_size;
153 #if defined(LWS_WITH_ESP8266)
/* ESP8266: a send completion is still outstanding, so buffer instead */
154 if (wsi->pending_send_completion) {
156 goto handle_truncated_send;
160 /* nope, send it on the socket directly */
161 lws_latency_pre(context, wsi);
162 n = lws_ssl_capable_write(wsi, buf, n);
163 lws_latency(context, wsi, "send lws_issue_raw", n, n == len);
165 //lwsl_notice("lws_ssl_capable_write: %d\n", n);
/* these are cases of a switch on n (switch head missing from view) */
168 case LWS_SSL_CAPABLE_ERROR:
169 /* we're going to close, let close know sends aren't possible */
170 wsi->socket_is_permanently_unusable = 1;
172 case LWS_SSL_CAPABLE_MORE_SERVICE:
173 /* nothing got sent, not fatal, retry the whole thing later */
178 handle_truncated_send:
180 * we were already handling a truncated send?
182 if (wsi->trunc_len) {
183 lwsl_info("%p partial adv %d (vs %ld)\n", wsi, n, (long)real_len);
/* advance the resume point past what just went out */
184 wsi->trunc_offset += n;
187 if (!wsi->trunc_len) {
188 lwsl_info("***** %p partial send completed\n", wsi);
189 /* done with it, but don't free it */
191 if (wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE) {
192 lwsl_info("***** %p signalling to close now\n", wsi);
193 return -1; /* retry closing now */
196 /* always callback on writeable */
197 lws_callback_on_writable(wsi);
202 if ((unsigned int)n == real_len)
203 /* what we just sent went out cleanly */
207 * Newly truncated send. Buffer the remainder (it will get
208 * first priority next time the socket is writable)
210 lwsl_debug("%p new partial sent %d from %lu total\n", wsi, n,
211 (unsigned long)real_len);
213 lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_WRITE_PARTIALS, 1);
214 lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_B_PARTIALS_ACCEPTED_PARTS, n);
217 * - if we still have a suitable malloc lying around, use it
218 * - or, if too small, reallocate it
219 * - or, if no buffer, create it
221 if (!wsi->trunc_alloc || real_len - n > wsi->trunc_alloc_len) {
/* lws_free() tolerates NULL, so the no-buffer case is covered */
222 lws_free(wsi->trunc_alloc);
224 wsi->trunc_alloc_len = real_len - n;
225 wsi->trunc_alloc = lws_malloc(real_len - n);
226 if (!wsi->trunc_alloc) {
227 lwsl_err("truncated send: unable to malloc %lu\n",
228 (unsigned long)(real_len - n));
/* stash the unsent tail of the caller's buffer */
232 wsi->trunc_offset = 0;
233 wsi->trunc_len = real_len - n;
234 memcpy(wsi->trunc_alloc, buf + n, real_len - n);
236 /* since something buffered, force it to get another chance to send */
237 lws_callback_on_writable(wsi);
/*
 * lws_write() - public entry point for sending on a wsi.  For ws
 * connections it builds the RFC6455 frame header in the LWS_PRE space
 * before `buf`, runs extension TX processing (which may stash/drain
 * payload), applies client-side masking, then hands off to
 * lws_issue_raw()/lws_issue_raw_ext_access().  HTTP and HTTP/2 writes
 * branch off early.
 * NOTE(review): incomplete excerpt -- many conditions, braces, switch
 * heads, `#else`/`#endif` lines, `pre`/`n` declarations and returns
 * are missing from view.
 */
242 LWS_VISIBLE int lws_write(struct lws *wsi, unsigned char *buf, size_t len,
243 enum lws_write_protocol wp)
245 struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
/* only client->server ws frames are masked (RFC6455) */
246 int masked7 = (wsi->mode == LWSCM_WS_CLIENT);
247 unsigned char is_masked_bit = 0;
248 unsigned char *dropmask = NULL;
249 struct lws_tokens eff_buf;
251 size_t orig_len = len;
/* child wsi whose I/O is proxied by a parent: delegate via callback */
253 if (wsi->parent_carries_io) {
254 struct lws_write_passthru pas;
261 if (wsi->parent->protocol->callback(wsi->parent,
262 LWS_CALLBACK_CHILD_WRITE_VIA_PARENT,
263 wsi->parent->user_space,
270 lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_API_LWS_WRITE, 1);
/* presumably guarded by a sanity check on len (condition not visible) */
273 lwsl_err("%s: suspicious len int %d, ulong %lu\n", __func__,
274 (int)len, (unsigned long)len);
278 lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_B_WRITE, len);
280 #ifdef LWS_WITH_ACCESS_LOG
281 wsi->access_log.sent += len;
284 wsi->vhost->conn_stats.tx += len;
/* resuming an extension TX drain: unhook this wsi from the
 * per-thread drain list and force a CONTINUATION write type */
286 if (wsi->state == LWSS_ESTABLISHED && wsi->u.ws.tx_draining_ext) {
287 /* remove us from the list */
288 struct lws **w = &pt->tx_draining_ext_list;
290 // lwsl_notice("%s: TX EXT DRAINING: Remove from list\n", __func__);
291 wsi->u.ws.tx_draining_ext = 0;
292 /* remove us from context draining ext list */
295 *w = wsi->u.ws.tx_draining_ext_list;
298 w = &((*w)->u.ws.tx_draining_ext_list);
300 wsi->u.ws.tx_draining_ext_list = NULL;
/* keep only the FIN/NO_FIN flag bits (0xc0) of the stashed type */
301 wp = (wsi->u.ws.tx_draining_stashed_wp & 0xc0) |
302 LWS_WRITE_CONTINUATION;
304 lwsl_ext("FORCED draining wp to 0x%02X\n", wp);
/* any write activity restarts the ws ping-pong keepalive timer */
307 lws_restart_ws_ping_pong_timer(wsi);
/* plain HTTP writes bypass all ws framing below (branch target of
 * this condition is not visible in the excerpt) */
309 if (wp == LWS_WRITE_HTTP ||
310 wp == LWS_WRITE_HTTP_FINAL ||
311 wp == LWS_WRITE_HTTP_HEADERS)
314 /* if not in a state to send stuff, then just send nothing */
316 if (wsi->state != LWSS_ESTABLISHED &&
317 ((wsi->state != LWSS_RETURNED_CLOSE_ALREADY &&
318 wsi->state != LWSS_WAITING_TO_SEND_CLOSE_NOTIFICATION &&
319 wsi->state != LWSS_AWAITING_CLOSE_ACK) ||
320 wp != LWS_WRITE_CLOSE)) {
321 lwsl_debug("binning\n");
325 /* if we are continuing a frame that already had its header done */
327 if (wsi->u.ws.inside_frame) {
328 lwsl_debug("INSIDE FRAME\n");
329 goto do_more_inside_frame;
/* assume the user buffer is untouched until an extension says not */
332 wsi->u.ws.clean_buffer = 1;
335 * give a chance to the extensions to modify payload
336 * the extension may decide to produce unlimited payload erratically
337 * (eg, compression extension), so we require only that if he produces
338 * something, it will be a complete fragment of the length known at
339 * the time (just the fragment length known), and if he has
340 * more we will come back next time he is writeable and allow him to
341 * produce more fragments until he's drained.
343 * This allows what is sent each time it is writeable to be limited to
344 * a size that can be sent without partial sends or blocking, allows
345 * interleaving of control frames and other connection service.
347 eff_buf.token = (char *)buf;
348 eff_buf.token_len = len;
/* part of a switch on (wp & 0x1f); head not visible in this excerpt */
353 case LWS_WRITE_CLOSE:
356 lwsl_debug("LWS_EXT_CB_PAYLOAD_TX\n");
357 n = lws_ext_cb_active(wsi, LWS_EXT_CB_PAYLOAD_TX, &eff_buf, wp);
/* extension produced a fragment but has more queued */
361 if (n && eff_buf.token_len) {
362 lwsl_debug("drain len %d\n", (int)eff_buf.token_len);
363 /* extension requires further draining */
364 wsi->u.ws.tx_draining_ext = 1;
365 wsi->u.ws.tx_draining_ext_list = pt->tx_draining_ext_list;
366 pt->tx_draining_ext_list = wsi;
367 /* we must come back to do more */
368 lws_callback_on_writable(wsi);
370 * keep a copy of the write type for the overall
371 * action that has provoked generation of these
372 * fragments, so the last guy can use its FIN state.
374 wsi->u.ws.tx_draining_stashed_wp = wp;
375 /* this is definitely not actually the last fragment
376 * because the extension asserted he has more coming
377 * So make sure this intermediate one doesn't go out
380 wp |= LWS_WRITE_NO_FIN;
/* a stashed write type from an earlier swallowed fragment takes over */
383 if (eff_buf.token_len && wsi->u.ws.stashed_write_pending) {
384 wsi->u.ws.stashed_write_pending = 0;
385 wp = (wp &0xc0) | (int)wsi->u.ws.stashed_write_type;
390 * an extension did something we need to keep... for example, if
391 * compression extension, it has already updated its state according
392 * to this being issued
394 if ((char *)buf != eff_buf.token) {
396 * ext might eat it, but not have anything to issue yet.
397 * In that case we have to follow his lead, but stash and
398 * replace the write type that was lost here the first time.
400 if (len && !eff_buf.token_len) {
401 if (!wsi->u.ws.stashed_write_pending)
402 wsi->u.ws.stashed_write_type = (char)wp & 0x3f;
403 wsi->u.ws.stashed_write_pending = 1;
407 * extension recreated it:
408 * need to buffer this if not all sent
410 wsi->u.ws.clean_buffer = 0;
/* from here on, operate on the (possibly extension-owned) buffer */
413 buf = (unsigned char *)eff_buf.token;
414 len = eff_buf.token_len;
416 lwsl_debug("%p / %d\n", buf, (int)len);
419 lwsl_err("null buf (%d)\n", (int)len);
423 switch (wsi->ietf_spec_revision) {
/* client side: mask goes in the 4 bytes before the payload; `pre`
 * (header length) is computed in lines not visible here */
427 dropmask = &buf[0 - pre];
428 is_masked_bit = 0x80;
/* map LWS_WRITE_* type to the ws opcode (switch head not visible) */
433 n = LWSWSOPC_TEXT_FRAME;
435 case LWS_WRITE_BINARY:
436 n = LWSWSOPC_BINARY_FRAME;
438 case LWS_WRITE_CONTINUATION:
439 n = LWSWSOPC_CONTINUATION;
442 case LWS_WRITE_CLOSE:
452 lwsl_warn("lws_write: unknown write opc / wp\n");
/* set the FIN bit unless the caller (or drain logic) said NO_FIN */
456 if (!(wp & LWS_WRITE_NO_FIN))
/* RFC6455 length encoding: <=125 in one byte ... */
462 buf[-pre + 1] = (unsigned char)(len | is_masked_bit);
/* ... 126 + 16-bit big-endian length ... */
467 buf[-pre + 1] = 126 | is_masked_bit;
468 buf[-pre + 2] = (unsigned char)(len >> 8);
469 buf[-pre + 3] = (unsigned char)len;
/* ... or 127 + 64-bit big-endian length (top bit must be 0) */
473 buf[-pre + 1] = 127 | is_masked_bit;
475 buf[-pre + 2] = (len >> 56) & 0x7f;
476 buf[-pre + 3] = len >> 48;
477 buf[-pre + 4] = len >> 40;
478 buf[-pre + 5] = len >> 32;
485 buf[-pre + 6] = (unsigned char)(len >> 24);
486 buf[-pre + 7] = (unsigned char)(len >> 16);
487 buf[-pre + 8] = (unsigned char)(len >> 8);
488 buf[-pre + 9] = (unsigned char)len;
494 do_more_inside_frame:
497 * Deal with masking if we are in client -> server direction and
/* generate a fresh nonce only at frame start, never mid-frame */
502 if (!wsi->u.ws.inside_frame)
503 if (lws_0405_frame_mask_generate(wsi)) {
504 lwsl_err("frame mask generation failed\n");
509 * in v7, just mask the payload
511 if (dropmask) { /* never set if already inside frame */
/* payload starts 4 bytes after dropmask (after the mask key) */
512 for (n = 4; n < (int)len + 4; n++)
513 dropmask[n] = dropmask[n] ^ wsi->u.ws.mask[
514 (wsi->u.ws.mask_idx++) & 3];
516 /* copy the frame nonce into place */
517 memcpy(dropmask, wsi->u.ws.mask, 4);
/* HTTP send path (cases of a switch on wp; head not visible) */
523 case LWS_WRITE_CLOSE:
524 /* lwsl_hexdump(&buf[-pre], len); */
526 case LWS_WRITE_HTTP_FINAL:
527 case LWS_WRITE_HTTP_HEADERS:
/* HTTP/2: wrap the payload in a DATA or HEADERS frame instead */
531 if (wsi->mode == LWSCM_HTTP2_SERVING) {
532 unsigned char flags = 0;
534 n = LWS_HTTP2_FRAME_TYPE_DATA;
535 if (wp == LWS_WRITE_HTTP_HEADERS) {
536 n = LWS_HTTP2_FRAME_TYPE_HEADERS;
537 flags = LWS_HTTP2_FLAG_END_HEADERS;
538 if (wsi->u.http2.send_END_STREAM)
539 flags |= LWS_HTTP2_FLAG_END_STREAM;
/* track remaining content-length; promote to FINAL on exhaustion */
542 if ((wp == LWS_WRITE_HTTP ||
543 wp == LWS_WRITE_HTTP_FINAL) &&
544 wsi->u.http.content_length) {
545 wsi->u.http.content_remain -= len;
546 lwsl_info("%s: content_remain = %llu\n", __func__,
547 (unsigned long long)wsi->u.http.content_remain);
548 if (!wsi->u.http.content_remain) {
549 lwsl_info("%s: selecting final write mode\n", __func__);
550 wp = LWS_WRITE_HTTP_FINAL;
554 if (wp == LWS_WRITE_HTTP_FINAL && wsi->u.http2.END_STREAM) {
555 lwsl_info("%s: setting END_STREAM\n", __func__);
556 flags |= LWS_HTTP2_FLAG_END_STREAM;
559 return lws_http2_frame_write(wsi, n, flags,
560 wsi->u.http2.my_stream_id, len, buf);
/* plain HTTP/1.x: header (if any) lives in the pre space */
563 return lws_issue_raw(wsi, (unsigned char *)buf - pre, len + pre);
569 * give any active extensions a chance to munge the buffer
570 * before send. We pass in a pointer to an lws_tokens struct
571 * prepared with the default buffer and content length that's in
572 * there. Rather than rewrite the default buffer, extensions
573 * that expect to grow the buffer can adapt .token to
574 * point to their own per-connection buffer in the extension
575 * user allocation. By default with no extensions or no
576 * extension callback handling, just the normal input buffer is
577 * used then so it is efficient.
579 * callback returns 1 in case it wants to spill more buffers
581 * This takes care of holding the buffer if send is incomplete, ie,
582 * if wsi->u.ws.clean_buffer is 0 (meaning an extension meddled with
583 * the buffer). If wsi->u.ws.clean_buffer is 1, it will instead
584 * return to the user code how much OF THE USER BUFFER was consumed.
587 n = lws_issue_raw_ext_access(wsi, buf - pre, len + pre);
588 wsi->u.ws.inside_frame = 1;
592 if (n == (int)len + pre) {
593 /* everything in the buffer was handled (or rebuffered...) */
594 wsi->u.ws.inside_frame = 0;
599 * it is how many bytes of user buffer got sent... may be < orig_len
600 * in which case callback when writable has already been arranged
601 * and user code can call lws_write() again with the rest
/*
 * lws_serve_http_file_fragment() - push the next chunk(s) of a file
 * being served over HTTP: flush any pending truncated send first, then
 * read from the VFS file handle into the per-thread service buffer
 * (handling byte-ranges, chunked encoding and HTML rewriting) and
 * lws_write() it out, until the pipe chokes or the file completes.
 * Returns >0 when the file completed, 0 when more work remains.
 * NOTE(review): incomplete excerpt -- `p`/`n`/`m` declarations, several
 * closing braces, `#endif` lines and error-path returns are missing.
 */
608 LWS_VISIBLE int lws_serve_http_file_fragment(struct lws *wsi)
610 struct lws_context *context = wsi->context;
611 struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
612 struct lws_process_html_args args;
613 lws_filepos_t amount, poss;
615 #if defined(LWS_WITH_RANGES)
616 unsigned char finished = 0;
620 // lwsl_notice("%s (trunc len %d)\n", __func__, wsi->trunc_len);
/* h2 substreams have their own flow control; otherwise stop when the
 * socket send pipe is choked */
622 while (wsi->http2_substream || !lws_send_pipe_choked(wsi)) {
/* a previous partial send must drain before any new file data */
624 if (wsi->trunc_len) {
625 if (lws_issue_raw(wsi, wsi->trunc_alloc +
627 wsi->trunc_len) < 0) {
628 lwsl_info("%s: closing\n", __func__);
634 if (wsi->u.http.filepos == wsi->u.http.filelen)
641 #if defined(LWS_WITH_RANGES)
/* entering a new byte-range: seek to its start and, for multipart
 * responses, emit the part header */
642 if (wsi->u.http.range.count_ranges && !wsi->u.http.range.inside) {
644 lwsl_notice("%s: doing range start %llu\n", __func__, wsi->u.http.range.start);
646 if ((long long)lws_vfs_file_seek_cur(wsi->u.http.fop_fd,
647 wsi->u.http.range.start -
648 wsi->u.http.filepos) < 0)
651 wsi->u.http.filepos = wsi->u.http.range.start;
653 if (wsi->u.http.range.count_ranges > 1) {
654 n = lws_snprintf((char *)p, context->pt_serv_buf_size,
656 "Content-Type: %s\x0d\x0a"
657 "Content-Range: bytes %llu-%llu/%llu\x0d\x0a"
659 wsi->u.http.multipart_content_type,
660 wsi->u.http.range.start,
661 wsi->u.http.range.end,
662 wsi->u.http.range.extent);
/* budget = bytes remaining in the current range (inclusive ends) */
666 wsi->u.http.range.budget = wsi->u.http.range.end -
667 wsi->u.http.range.start + 1;
668 wsi->u.http.range.inside = 1;
/* how much buffer space is left for file data this pass */
672 poss = context->pt_serv_buf_size - n;
675 * if there is a hint about how much we will do well to send at one time,
676 * restrict ourselves to only trying to send that.
678 if (wsi->protocol->tx_packet_size && poss > wsi->protocol->tx_packet_size)
679 poss = wsi->protocol->tx_packet_size;
681 #if defined(LWS_WITH_RANGES)
682 if (wsi->u.http.range.count_ranges) {
683 if (wsi->u.http.range.count_ranges > 1)
684 poss -= 7; /* allow for final boundary */
685 if (poss > wsi->u.http.range.budget)
686 poss = wsi->u.http.range.budget;
689 if (wsi->sending_chunked) {
690 /* we need to drop the chunk size in here */
692 /* allow for the chunk to grow by 128 in translation */
/* read up to `poss` bytes of file data; `amount` is actual count */
696 if (lws_vfs_file_read(wsi->u.http.fop_fd, &amount, p, poss) < 0)
697 goto file_had_it; /* caller will close */
699 //lwsl_notice("amount %ld\n", amount);
701 if (wsi->sending_chunked)
/* total bytes staged in serv_buf (headers + file data) */
704 n = (p - pt->serv_buf) + (int)amount;
/* progress made: extend the content timeout */
706 lws_set_timeout(wsi, PENDING_TIMEOUT_HTTP_CONTENT,
707 context->timeout_secs);
/* chunked mode: let the protocol's PROCESS_HTML callback rewrite the
 * data in place (args struct partially initialized in missing lines) */
709 if (wsi->sending_chunked) {
712 args.max_len = (unsigned int)poss + 128;
713 args.final = wsi->u.http.filepos + n ==
715 if (user_callback_handle_rxflow(
716 wsi->vhost->protocols[(int)wsi->protocol_interpret_idx].callback, wsi,
717 LWS_CALLBACK_PROCESS_HTML,
718 wsi->user_space, &args, 0) < 0)
721 p = (unsigned char *)args.p;
725 #if defined(LWS_WITH_RANGES)
726 if (wsi->u.http.range.send_ctr + 1 ==
727 wsi->u.http.range.count_ranges && // last range
728 wsi->u.http.range.count_ranges > 1 && // was 2+ ranges (ie, multipart)
729 wsi->u.http.range.budget - amount == 0) {// final part
730 n += lws_snprintf((char *)pt->serv_buf + n, 6,
731 "_lws\x0d\x0a"); // append trailing boundary
732 lwsl_debug("added trailing boundary\n");
/* send FINAL when this write reaches end of file */
735 m = lws_write(wsi, p, n,
736 wsi->u.http.filepos == wsi->u.http.filelen ?
737 LWS_WRITE_HTTP_FINAL :
743 wsi->u.http.filepos += amount;
745 #if defined(LWS_WITH_RANGES)
746 if (wsi->u.http.range.count_ranges >= 1) {
747 wsi->u.http.range.budget -= amount;
748 if (wsi->u.http.range.budget == 0) {
749 lwsl_notice("range budget exhausted\n");
750 wsi->u.http.range.inside = 0;
751 wsi->u.http.range.send_ctr++;
753 if (lws_ranges_next(&wsi->u.http.range) < 1) {
762 /* adjust for what was not sent */
763 if (lws_vfs_file_seek_cur(wsi->u.http.fop_fd,
/* whole file (and any trailing truncated send) fully delivered */
770 if ((!wsi->trunc_len && wsi->u.http.filepos == wsi->u.http.filelen)
771 #if defined(LWS_WITH_RANGES)
777 wsi->state = LWSS_HTTP;
778 /* we might be in keepalive, so close it off here */
779 lws_vfs_file_close(&wsi->u.http.fop_fd);
781 lwsl_debug("file completed\n");
783 if (wsi->protocol->callback)
784 /* ignore callback returned value */
785 if (user_callback_handle_rxflow(
786 wsi->protocol->callback, wsi,
787 LWS_CALLBACK_HTTP_FILE_COMPLETION,
788 wsi->user_space, NULL, 0) < 0)
791 return 1; /* >0 indicates completed */
/* pipe choked with data still pending: resume when writable again */
795 lws_callback_on_writable(wsi);
797 return 0; /* indicates further processing must be done */
/* error path label (`file_had_it:` presumably precedes this close) */
800 lws_vfs_file_close(&wsi->u.http.fop_fd);
/*
 * lws_ssl_capable_read_no_ssl() - plain (non-TLS) socket read wrapper.
 * Returns the byte count on success, LWS_SSL_CAPABLE_MORE_SERVICE on a
 * benign would-block/interrupt, or LWS_SSL_CAPABLE_ERROR otherwise.
 * NOTE(review): incomplete excerpt -- the declaration of `n`, the
 * n >= 0 success condition/return and the n == 0 (peer hangup)
 * handling are not visible here.
 */
807 lws_ssl_capable_read_no_ssl(struct lws *wsi, unsigned char *buf, int len)
809 struct lws_context *context = wsi->context;
810 struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
813 lws_stats_atomic_bump(context, pt, LWSSTATS_C_API_READ, 1);
815 n = recv(wsi->desc.sockfd, (char *)buf, len, 0);
/* success path: account rx stats and kick the ws keepalive timer */
818 wsi->vhost->conn_stats.rx += n;
819 lws_stats_atomic_bump(context, pt, LWSSTATS_B_READ, n);
820 lws_restart_ws_ping_pong_timer(wsi);
/* transient conditions are not errors -- caller should retry later */
824 if (LWS_ERRNO == LWS_EAGAIN ||
825 LWS_ERRNO == LWS_EWOULDBLOCK ||
826 LWS_ERRNO == LWS_EINTR)
827 return LWS_SSL_CAPABLE_MORE_SERVICE;
829 lwsl_notice("error on reading from skt : %d\n", LWS_ERRNO);
830 return LWS_SSL_CAPABLE_ERROR;
/*
 * lws_ssl_capable_write_no_ssl() - plain (non-TLS) socket write
 * wrapper.  Returns bytes sent on success, LWS_SSL_CAPABLE_MORE_SERVICE
 * for benign would-block/interrupt, or LWS_SSL_CAPABLE_ERROR.
 * MSG_NOSIGNAL suppresses SIGPIPE on a closed peer (where supported).
 * NOTE(review): incomplete excerpt -- the declaration of `n`, the
 * n >= 0 success return and surrounding braces are not visible here.
 */
834 lws_ssl_capable_write_no_ssl(struct lws *wsi, unsigned char *buf, int len)
839 n = send(wsi->desc.sockfd, (char *)buf, len, MSG_NOSIGNAL);
840 // lwsl_info("%s: sent len %d result %d", __func__, len, n);
/* transient conditions: arrange to be told when writable again */
844 if (LWS_ERRNO == LWS_EAGAIN ||
845 LWS_ERRNO == LWS_EWOULDBLOCK ||
846 LWS_ERRNO == LWS_EINTR) {
847 if (LWS_ERRNO == LWS_EWOULDBLOCK) {
848 lws_set_blocking_send(wsi);
851 return LWS_SSL_CAPABLE_MORE_SERVICE;
861 lwsl_debug("ERROR writing len %d to skt fd %d err %d / errno %d\n", len, wsi->desc.sockfd, n, LWS_ERRNO);
862 return LWS_SSL_CAPABLE_ERROR;
866 lws_ssl_pending_no_ssl(struct lws *wsi)
869 #if defined(LWS_WITH_ESP32)