2 * libwebsockets - small server side websockets and web server implementation
4 * Copyright (C) 2010-2019 Andy Green <andy@warmcat.com>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation:
9 * version 2.1 of the License.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
22 #include "core/private.h"
24 #ifndef LWS_BUILD_HASH
25 #define LWS_BUILD_HASH "unknown-build-hash"
29 static const char *library_version = LWS_LIBRARY_VERSION " " LWS_BUILD_HASH;
32 * lws_get_library_version: get version and git hash library built from
34 * returns a const char * to a string like "1.1 178d78c"
35 * representing the library version followed by the git head hash it
38 LWS_VISIBLE const char *
39 lws_get_library_version(void)
41 return library_version;
44 #if defined(LWS_WITH_STATS)
46 lws_sul_stats_cb(lws_sorted_usec_list_t *sul)
48 struct lws_context_per_thread *pt = lws_container_of(sul,
49 struct lws_context_per_thread, sul_stats);
51 lws_stats_log_dump(pt->context);
53 __lws_sul_insert(&pt->pt_sul_owner, &pt->sul_stats, 10 * LWS_US_PER_SEC);
56 #if defined(LWS_WITH_PEER_LIMITS)
58 lws_sul_peer_limits_cb(lws_sorted_usec_list_t *sul)
60 struct lws_context_per_thread *pt = lws_container_of(sul,
61 struct lws_context_per_thread, sul_peer_limits);
63 lws_peer_cull_peer_wait_list(pt->context);
65 __lws_sul_insert(&pt->pt_sul_owner, &pt->sul_peer_limits, 10 * LWS_US_PER_SEC);
/*
 * lws_create_context() - allocate and initialize a struct lws_context
 *                        from the user-filled creation info.
 *
 * Returns the new context, or NULL on failure.  On failure all partial
 * allocations are unwound via the fail paths at the bottom.
 *
 * NOTE(review): this view of the file is sampled — many interior lines
 * (else branches, closing braces) are elided; comments below describe
 * only what the visible lines establish.
 */
70 LWS_VISIBLE struct lws_context *
71 lws_create_context(const struct lws_context_creation_info *info)
73 struct lws_context *context = NULL;
74 struct lws_plat_file_ops *prev;
75 #ifndef LWS_NO_DAEMONIZE
/* if we daemonized earlier, recover the parent pid so we can log it below */
76 pid_t pid_daemon = get_daemonize_pid();
78 #if defined(LWS_WITH_NETWORK)
81 #if defined(__ANDROID__)
/* report version and compile-time configuration at info log level */
85 lwsl_info("Initial logging level %d\n", log_level);
86 lwsl_info("Libwebsockets version: %s\n", library_version);
89 if (!lws_check_opt(info->options, LWS_SERVER_OPTION_DISABLE_IPV6))
90 lwsl_info("IPV6 compiled in and enabled\n");
92 lwsl_info("IPV6 compiled in but disabled\n");
94 lwsl_info("IPV6 not compiled in\n");
97 lwsl_info(" LWS_DEF_HEADER_LEN : %u\n", LWS_DEF_HEADER_LEN);
98 lwsl_info(" LWS_MAX_PROTOCOLS : %u\n", LWS_MAX_PROTOCOLS);
99 lwsl_info(" LWS_MAX_SMP : %u\n", LWS_MAX_SMP);
100 lwsl_info(" sizeof (*info) : %ld\n", (long)sizeof(*info));
101 #if defined(LWS_WITH_STATS)
102 lwsl_info(" LWS_WITH_STATS : on\n");
104 lwsl_info(" SYSTEM_RANDOM_FILEPATH: '%s'\n", SYSTEM_RANDOM_FILEPATH);
105 #if defined(LWS_WITH_HTTP2)
106 lwsl_info(" HTTP2 support : available\n");
108 lwsl_info(" HTTP2 support : not configured\n");
/* give the platform layer first chance to object / set itself up */
110 if (lws_plat_context_early_init())
/* the context itself: zeroed so every unset member starts NULL/0 */
113 context = lws_zalloc(sizeof(struct lws_context), "context");
115 lwsl_err("No memory for websocket context\n");
/* record the credentials we may drop to later */
119 context->uid = info->uid;
120 context->gid = info->gid;
121 context->username = info->username;
122 context->groupname = info->groupname;
123 context->system_ops = info->system_ops;
125 /* if he gave us names, set the uid / gid */
/* pass 0: resolve names to uid/gid only; actual drop happens later */
126 if (lws_plat_drop_app_privileges(context, 0))
129 lwsl_info("context created\n");
/* pick the tls backend ops compiled in */
130 #if defined(LWS_WITH_TLS) && defined(LWS_WITH_NETWORK)
131 #if defined(LWS_WITH_MBEDTLS)
132 context->tls_ops = &tls_ops_mbedtls;
134 context->tls_ops = &tls_ops_openssl;
/* per-thread service scratchpad size; 4096 if the user didn't say */
138 if (info->pt_serv_buf_size)
139 context->pt_serv_buf_size = info->pt_serv_buf_size;
141 context->pt_serv_buf_size = 4096;
143 #if defined(LWS_ROLE_H2)
144 role_ops_h2.init_context(context, info);
148 lws_mutex_refcount_init(&context->mr);
151 #if defined(LWS_WITH_ESP32)
152 context->last_free_heap = esp_get_free_heap_size();
155 /* default to just the platform fops implementation */
157 context->fops_platform.LWS_FOP_OPEN = _lws_plat_file_open;
158 context->fops_platform.LWS_FOP_CLOSE = _lws_plat_file_close;
159 context->fops_platform.LWS_FOP_SEEK_CUR = _lws_plat_file_seek_cur;
160 context->fops_platform.LWS_FOP_READ = _lws_plat_file_read;
161 context->fops_platform.LWS_FOP_WRITE = _lws_plat_file_write;
162 context->fops_platform.fi[0].sig = NULL;
165 * arrange a linear linked-list of fops starting from context->fops
168 * [ -> fops_zip (copied into context so .next settable) ]
172 context->fops = &context->fops_platform;
173 prev = (struct lws_plat_file_ops *)context->fops;
175 #if defined(LWS_WITH_ZIP_FOPS)
176 /* make a soft copy so we can set .next */
177 context->fops_zip = fops_zip;
178 prev->next = &context->fops_zip;
179 prev = (struct lws_plat_file_ops *)prev->next;
182 /* if user provided fops, tack them on the end of the list */
184 prev->next = info->fops;
186 context->reject_service_keywords = info->reject_service_keywords;
187 if (info->external_baggage_free_on_destroy)
188 context->external_baggage_free_on_destroy =
189 info->external_baggage_free_on_destroy;
190 #if defined(LWS_WITH_NETWORK)
/* timestamp context start so uptime can be computed */
191 context->time_up = lws_now_usecs();
/* where to NULL the user's context pointer at final destroy */
193 context->pcontext_finalize = info->pcontext;
195 context->simultaneous_ssl_restriction =
196 info->simultaneous_ssl_restriction;
198 context->options = info->options;
200 #ifndef LWS_NO_DAEMONIZE
202 context->started_with_parent = pid_daemon;
203 lwsl_info(" Started with daemon pid %u\n", (unsigned int)pid_daemon);
/*
 * discover the process fd limit, per-platform:
 * Android: getrlimit; Windows/Amazon RTOS: getdtablesize;
 * otherwise sysconf(_SC_OPEN_MAX)
 */
206 #if defined(__ANDROID__)
207 n = getrlimit(RLIMIT_NOFILE, &rt);
209 lwsl_err("Get RLIMIT_NOFILE failed!\n");
213 context->max_fds = rt.rlim_cur;
215 #if defined(WIN32) || defined(_WIN32) || defined(LWS_AMAZON_RTOS)
216 context->max_fds = getdtablesize();
218 context->max_fds = sysconf(_SC_OPEN_MAX);
222 if (context->max_fds < 0) {
223 lwsl_err("%s: problem getting process max files\n",
/* service thread count: user's choice, default 1, clamped to LWS_MAX_SMP */
229 if (info->count_threads)
230 context->count_threads = info->count_threads;
232 context->count_threads = 1;
234 if (context->count_threads > LWS_MAX_SMP)
235 context->count_threads = LWS_MAX_SMP;
238 * deal with any max_fds override, if it's reducing (setting it to
239 * more than ulimit -n is meaningless). The platform init will
240 * figure out what if this is something it can deal with.
242 if (info->fd_limit_per_thread) {
243 int mf = info->fd_limit_per_thread * context->count_threads;
245 if (mf < context->max_fds) {
246 context->max_fds_unrelated_to_ulimit = 1;
247 context->max_fds = mf;
251 context->token_limits = info->token_limits;
253 #if defined(LWS_WITH_NETWORK)
256 * set the context event loops ops struct
258 * after this, all event_loop actions use the generic ops
/*
 * event backend selection: poll is the default if compiled in;
 * each LWS_SERVER_OPTION_LIB* option swaps in that backend, or
 * fails hard if the support was not built
 */
261 #if defined(LWS_WITH_POLL)
262 context->event_loop_ops = &event_loop_ops_poll;
265 if (lws_check_opt(context->options, LWS_SERVER_OPTION_LIBUV))
266 #if defined(LWS_WITH_LIBUV)
267 context->event_loop_ops = &event_loop_ops_uv;
269 goto fail_event_libs;
272 if (lws_check_opt(context->options, LWS_SERVER_OPTION_LIBEV))
273 #if defined(LWS_WITH_LIBEV)
274 context->event_loop_ops = &event_loop_ops_ev;
276 goto fail_event_libs;
279 if (lws_check_opt(context->options, LWS_SERVER_OPTION_LIBEVENT))
280 #if defined(LWS_WITH_LIBEVENT)
281 context->event_loop_ops = &event_loop_ops_event;
283 goto fail_event_libs;
286 if (!context->event_loop_ops)
287 goto fail_event_libs;
289 lwsl_info("Using event loop: %s\n", context->event_loop_ops->name);
292 #if defined(LWS_WITH_TLS) && defined(LWS_WITH_NETWORK)
293 time(&context->tls.last_cert_check_s);
/* default ALPN: user-provided, else composed from the built roles below */
295 context->tls.alpn_default = info->alpn;
297 char *p = context->tls.alpn_discovered, first = 1;
299 LWS_FOR_EVERY_AVAILABLE_ROLE_START(ar) {
304 context->tls.alpn_discovered +
305 sizeof(context->tls.alpn_discovered) -
306 2 - p, "%s", ar->alpn);
309 } LWS_FOR_EVERY_AVAILABLE_ROLE_END;
311 context->tls.alpn_default = context->tls.alpn_discovered;
314 lwsl_info("Default ALPN advertisment: %s\n", context->tls.alpn_default);
/* connection timeout, default AWAITING_TIMEOUT if the user didn't say */
317 if (info->timeout_secs)
318 context->timeout_secs = info->timeout_secs;
320 context->timeout_secs = AWAITING_TIMEOUT;
322 context->ws_ping_pong_interval = info->ws_ping_pong_interval;
324 lwsl_info(" default timeout (secs): %u\n", context->timeout_secs);
/* http header data / pool sizing: *2 variants take precedence over v1 */
326 if (info->max_http_header_data)
327 context->max_http_header_data = info->max_http_header_data;
329 if (info->max_http_header_data2)
330 context->max_http_header_data =
331 info->max_http_header_data2;
333 context->max_http_header_data = LWS_DEF_HEADER_LEN;
335 if (info->max_http_header_pool)
336 context->max_http_header_pool = info->max_http_header_pool;
338 if (info->max_http_header_pool2)
339 context->max_http_header_pool =
340 info->max_http_header_pool2;
342 context->max_http_header_pool = context->max_fds;
/* per-thread fd budget: explicit override, or share of max_fds */
344 if (info->fd_limit_per_thread)
345 context->fd_limit_per_thread = info->fd_limit_per_thread;
347 context->fd_limit_per_thread = context->max_fds /
348 context->count_threads;
350 #if defined(LWS_WITH_NETWORK)
352 * Allocate the per-thread storage for scratchpad buffers,
353 * and header data pool
355 for (n = 0; n < context->count_threads; n++) {
/* overallocate by sizeof(struct lws) to carve out the fakewsi below */
356 context->pt[n].serv_buf = lws_malloc(
357 context->pt_serv_buf_size + sizeof(struct lws),
359 if (!context->pt[n].serv_buf) {
364 context->pt[n].context = context;
365 context->pt[n].tid = n;
368 * We overallocated for a fakewsi (can't compose it in the
369 * pt because size isn't known at that time). point to it
370 * and zero it down. Fakewsis are needed to make callbacks work
371 * when the source of the callback is not actually from a wsi
374 context->pt[n].fake_wsi = (struct lws *)(context->pt[n].serv_buf +
375 context->pt_serv_buf_size);
377 memset(context->pt[n].fake_wsi, 0, sizeof(struct lws));
379 #if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
380 context->pt[n].http.ah_list = NULL;
381 context->pt[n].http.ah_pool_length = 0;
383 lws_pt_mutex_init(&context->pt[n]);
384 #if defined(LWS_WITH_SEQUENCER)
385 lws_seq_pt_init(&context->pt[n]);
389 lwsl_info(" Threads: %d each %d fds\n", context->count_threads,
390 context->fd_limit_per_thread);
/* ka_time without ka_interval would mean keepalive probes never repeat */
392 if (!info->ka_interval && info->ka_time > 0) {
393 lwsl_err("info->ka_interval can't be 0 if ka_time used\n");
397 #if defined(LWS_WITH_PEER_LIMITS)
398 /* scale the peer hash table according to the max fds for the process,
399 * so that the max list depth averages 16. Eg, 1024 fd -> 64,
403 context->pl_hash_elements =
404 (context->count_threads * context->fd_limit_per_thread) / 16;
405 context->pl_hash_table = lws_zalloc(sizeof(struct lws_peer *) *
406 context->pl_hash_elements, "peer limits hash table");
408 context->ip_limit_ah = info->ip_limit_ah;
409 context->ip_limit_wsi = info->ip_limit_wsi;
412 lwsl_info(" mem: context: %5lu B (%ld ctx + (%ld thr x %d))\n",
413 (long)sizeof(struct lws_context) +
414 (context->count_threads * context->pt_serv_buf_size),
415 (long)sizeof(struct lws_context),
416 (long)context->count_threads,
417 context->pt_serv_buf_size);
418 #if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
419 lwsl_info(" mem: http hdr size: (%u + %lu), max count %u\n",
420 context->max_http_header_data,
421 (long)sizeof(struct allocated_headers),
422 context->max_http_header_pool);
426 * fds table contains pollfd structs for as many pollfds as we can
427 * handle... spread across as many service threads as we have going
429 n = sizeof(struct lws_pollfd) * context->count_threads *
430 context->fd_limit_per_thread;
431 context->pt[0].fds = lws_zalloc(n, "fds table");
432 if (context->pt[0].fds == NULL) {
433 lwsl_err("OOM allocating %d fds\n", context->max_fds);
436 lwsl_info(" mem: pollfd map: %5u B\n", n);
/* server string kept by reference: info->server_string must outlive us */
438 if (info->server_string) {
439 context->server_string = info->server_string;
440 context->server_string_len = (short)
441 strlen(context->server_string);
445 /* each thread serves his own chunk of fds */
446 for (n = 1; n < (int)context->count_threads; n++)
447 context->pt[n].fds = context->pt[n - 1].fds +
448 context->fd_limit_per_thread;
/* platform-level init (sockets, random, etc) */
451 if (lws_plat_init(context, info))
454 #if defined(LWS_WITH_NETWORK)
455 if (context->event_loop_ops->init_context)
456 if (context->event_loop_ops->init_context(context, info))
/* per-pt event loop init, wiring in any user-supplied foreign loops */
460 if (context->event_loop_ops->init_pt)
461 for (n = 0; n < context->count_threads; n++) {
464 if (info->foreign_loops)
465 lp = info->foreign_loops[n];
467 if (context->event_loop_ops->init_pt(context, lp, n))
471 #if !defined(LWS_AMAZON_RTOS)
/* event pipes let other threads wake a pt's event loop */
472 if (lws_create_event_pipes(context))
477 lws_context_init_ssl_library(info);
479 context->user_space = info->user;
480 #if defined(LWS_WITH_NETWORK)
482 * if he's not saying he'll make his own vhosts later then act
483 * compatibly and make a default vhost using the data in the info
485 if (!lws_check_opt(info->options, LWS_SERVER_OPTION_EXPLICIT_VHOSTS))
486 if (!lws_create_vhost(context, info)) {
487 lwsl_err("Failed to create default vhost\n");
/* unwind per-thread buffers + peer table before the pipes cleanup path */
488 for (n = 0; n < context->count_threads; n++)
489 lws_free_set_NULL(context->pt[n].serv_buf);
490 #if defined(LWS_WITH_PEER_LIMITS)
491 lws_free_set_NULL(context->pl_hash_table);
493 goto fail_clean_pipes;
496 lws_context_init_extensions(info, context);
498 lwsl_info(" mem: per-conn: %5lu bytes + protocol rx buf\n",
499 (unsigned long)sizeof(struct lws));
501 strcpy(context->canonical_hostname, "unknown");
502 #if defined(LWS_WITH_NETWORK)
503 lws_server_get_canonical_hostname(context, info);
/* arm the periodic stats / peer-limit culling callbacks on pt[0] */
506 #if defined(LWS_WITH_STATS)
507 context->pt[0].sul_stats.cb = lws_sul_stats_cb;
508 __lws_sul_insert(&context->pt[0].pt_sul_owner, &context->pt[0].sul_stats,
509 10 * LWS_US_PER_SEC);
511 #if defined(LWS_WITH_PEER_LIMITS)
512 context->pt[0].sul_peer_limits.cb = lws_sul_peer_limits_cb;
513 __lws_sul_insert(&context->pt[0].pt_sul_owner,
514 &context->pt[0].sul_peer_limits, 10 * LWS_US_PER_SEC);
517 #if defined(LWS_HAVE_SYS_CAPABILITY_H) && defined(LWS_HAVE_LIBCAP)
518 memcpy(context->caps, info->caps, sizeof(context->caps));
519 context->count_caps = info->count_caps;
523 * drop any root privs for this process
524 * to listen on port < 1023 we would have needed root, but now we are
525 * listening, we don't want the power for anything else
527 if (!lws_check_opt(info->options, LWS_SERVER_OPTION_EXPLICIT_VHOSTS))
/* pass 1: actually drop privileges resolved during pass 0 above */
528 if (lws_plat_drop_app_privileges(context, 1))
531 #if defined(LWS_WITH_NETWORK)
532 /* expedite post-context init (eg, protocols) */
533 lws_cancel_service(context);
/* ---- failure unwind paths below ---- */
538 #if defined(LWS_WITH_NETWORK)
540 for (n = 0; n < context->count_threads; n++)
541 lws_destroy_event_pipe(context->pt[n].pipe_wsi);
543 lws_free_set_NULL(context->pt[0].fds);
544 lws_plat_context_late_destroy(context);
545 lws_free_set_NULL(context);
551 lws_context_destroy(context);
555 #if defined(LWS_WITH_NETWORK)
/* requested event backend was not compiled in: list what is available */
557 lwsl_err("Requested event library support not configured, available:\n");
559 extern const struct lws_event_loop_ops *available_event_libs[];
560 const struct lws_event_loop_ops **elops = available_event_libs;
563 lwsl_err(" - %s\n", (*elops)->name);
573 LWS_VISIBLE LWS_EXTERN int
574 lws_context_is_deprecated(struct lws_context *context)
576 return context->deprecated;
580 * When using an event loop, the context destruction is in three separate
581 * parts. This is to cover both internal and foreign event loops cleanly.
583 * - lws_context_destroy() simply starts a soft close of all wsi and
584 * related allocations. The event loop continues.
586 * As the closes complete in the event loop, reference counting is used
587 * to determine when everything is closed. It then calls
588 * lws_context_destroy2().
590 * - lws_context_destroy2() cleans up the rest of the higher-level logical
591 * lws pieces like vhosts. If the loop was foreign, it then proceeds to
592 lws_context_destroy3(). If the loop is internal, it stops the
593 * internal loops and waits for lws_context_destroy() to be called again
594 * outside the event loop (since we cannot destroy the loop from
595 * within the loop). That will cause lws_context_destroy3() to run
598 * - lws_context_destroy3() destroys any internal event loops and then
599 * destroys the context itself, setting what was info.pcontext to NULL.
603 * destroy the actual context itself
/*
 * lws_context_destroy3() - final stage of context teardown: free the
 * remaining per-thread resources and the context allocations, then
 * NULL out the user's saved context pointer (info.pcontext).
 */
607 lws_context_destroy3(struct lws_context *context)
/* capture before we free the context the pointer lives in */
609 struct lws_context **pcontext_finalize = context->pcontext_finalize;
610 #if defined(LWS_WITH_NETWORK)
613 lwsl_debug("%s\n", __func__);
615 for (n = 0; n < context->count_threads; n++) {
616 struct lws_context_per_thread *pt = &context->pt[n];
618 #if defined(LWS_WITH_SEQUENCER)
619 lws_seq_destroy_all_on_pt(pt);
/* let the event backend release its per-thread state */
622 if (context->event_loop_ops->destroy_pt)
623 context->event_loop_ops->destroy_pt(context, n);
625 lws_free_set_NULL(context->pt[n].serv_buf);
/* drain any remaining allocated_headers on this pt */
627 #if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
628 while (pt->http.ah_list)
629 _lws_destroy_ah(pt, pt->http.ah_list);
/* NOTE(review): guard is redundant — lws_free_set_NULL(NULL) is a no-op */
633 if (context->pt[0].fds)
634 lws_free_set_NULL(context->pt[0].fds);
636 lws_context_deinit_ssl_library(context);
638 lwsl_info("%s: ctx %p freed\n", __func__, context);
/* tell the creator's pointer the context is gone */
641 if (pcontext_finalize)
642 *pcontext_finalize = NULL;
646 * really start destroying things
/*
 * lws_context_destroy2() - second stage of context teardown: destroy
 * the higher-level logical pieces (vhosts, peer table, ssl contexts).
 * If the internal event loop(s) must still stop, it defers the final
 * lws_context_destroy3() to a later lws_context_destroy() call made
 * outside the event loop; otherwise it proceeds to destroy3 directly.
 */
650 lws_context_destroy2(struct lws_context *context)
652 #if defined(LWS_WITH_NETWORK)
653 struct lws_vhost *vh = NULL, *vh1;
655 #if defined(LWS_WITH_PEER_LIMITS)
659 lwsl_info("%s: ctx %p\n", __func__, context);
661 lws_context_lock(context, "context destroy 2"); /* ------ context { */
663 context->being_destroyed2 = 1;
664 #if defined(LWS_WITH_NETWORK)
666 * free all the per-vhost allocations
/* walk the vhost list, saving ->vhost_next before each node dies */
669 vh = context->vhost_list;
671 vh1 = vh->vhost_next;
672 __lws_vhost_destroy2(vh);
/* NOTE(review): "%p" with __func__ should be "%s"; "listl" looks like
 * a typo for "list" — runtime strings left untouched in this pass */
676 lwsl_debug("%p: post vh listl\n", __func__);
678 /* remove ourselves from the pending destruction list */
680 while (context->vhost_pending_destruction_list)
681 /* removes itself from list */
682 __lws_vhost_destroy2(context->vhost_pending_destruction_list);
/* NOTE(review): same %p / __func__ mismatch as above */
685 lwsl_debug("%p: post pdl\n", __func__);
687 lws_stats_log_dump(context);
688 #if defined(LWS_WITH_NETWORK)
689 lws_ssl_context_destroy(context);
691 lws_plat_context_late_destroy(context);
/* free every peer object chained in the peer-limits hash table */
693 #if defined(LWS_WITH_PEER_LIMITS)
694 for (nu = 0; nu < context->pl_hash_elements; nu++) {
695 lws_start_foreach_llp(struct lws_peer **, peer,
696 context->pl_hash_table[nu]) {
697 struct lws_peer *df = *peer;
701 } lws_end_foreach_llp(peer, next);
703 lws_free(context->pl_hash_table);
706 lwsl_debug("%p: baggage\n", __func__);
/* user asked us to free this allocation at destroy time */
708 if (context->external_baggage_free_on_destroy)
709 free(context->external_baggage_free_on_destroy);
711 #if defined(LWS_WITH_NETWORK)
712 lws_check_deferred_free(context, 0, 1);
716 lws_mutex_refcount_destroy(&context->mr);
/*
 * if the event backend needs its loops stopped asynchronously, flag
 * that the final destroy must happen on a later destroy() call made
 * from outside the loop, and bail here
 */
718 #if defined(LWS_WITH_NETWORK)
719 if (context->event_loop_ops->destroy_context2)
720 if (context->event_loop_ops->destroy_context2(context)) {
721 lws_context_unlock(context); /* } context ----------- */
722 context->finalize_destroy_after_internal_loops_stopped = 1;
726 lwsl_debug("%p: post dc2\n", __func__);
/* with an internal loop, don't free under a pt still inside service */
728 if (!context->pt[0].event_loop_foreign) {
730 for (n = 0; n < context->count_threads; n++)
731 if (context->pt[n].inside_service) {
732 lwsl_debug("%p: bailing as inside service\n", __func__);
733 lws_context_unlock(context); /* } context --- */
738 lws_context_unlock(context); /* } context ------------------- */
/* safe to finish: free the remaining allocations */
740 lws_context_destroy3(context);
744 * Begin the context takedown
/*
 * lws_context_destroy() - begin (or resume) context takedown.
 *
 * May be called more than once: reentrant calls route to destroy2 /
 * destroy3 according to the being_destroyed* flags, so foreign and
 * internal event loops are both handled (see the big comment above).
 *
 * NOTE(review): the tail of this function is past the end of this
 * sampled view; comments cover only the visible lines.
 */
748 lws_context_destroy(struct lws_context *context)
750 #if defined(LWS_WITH_NETWORK)
751 volatile struct lws_foreign_thread_pollfd *ftp, *next;
752 volatile struct lws_context_per_thread *vpt;
753 struct lws_vhost *vh = NULL;
/* re-entry: internal loops have now stopped, finish stages 2+3 */
760 #if defined(LWS_WITH_NETWORK)
761 if (context->finalize_destroy_after_internal_loops_stopped) {
762 if (context->event_loop_ops->destroy_context2)
763 context->event_loop_ops->destroy_context2(context);
764 lws_context_destroy3(context);
/* re-entry while stage 1 already ran: advance to the next stage */
769 if (context->being_destroyed1) {
770 if (!context->being_destroyed2) {
771 lws_context_destroy2(context);
775 lwsl_info("%s: ctx %p: already being destroyed\n",
778 lws_context_destroy3(context);
/* first call: mark the context dying so services wind down */
782 lwsl_info("%s: ctx %p\n", __func__, context);
784 context->being_destroyed = 1;
785 context->being_destroyed1 = 1;
786 context->requested_kill = 1;
788 #if defined(LWS_WITH_NETWORK)
789 m = context->count_threads;
790 memset(&wsi, 0, sizeof(wsi));
791 wsi.context = context;
794 if (context->worst_latency_info[0])
795 lwsl_notice("Worst latency: %s\n", context->worst_latency_info);
/* per-thread: flush foreign-thread pollfd changes, close every wsi */
799 struct lws_context_per_thread *pt = &context->pt[m];
800 vpt = (volatile struct lws_context_per_thread *)pt;
802 ftp = vpt->foreign_pfd_list;
805 lws_free((void *)ftp);
808 vpt->foreign_pfd_list = NULL;
810 for (n = 0; (unsigned int)n < context->pt[m].fds_count; n++) {
811 struct lws *wsi = wsi_from_fd(context, pt->fds[n].fd);
/* event pipes are destroyed directly; real wsi get a full close */
816 lws_destroy_event_pipe(wsi);
818 lws_close_free_wsi(wsi,
819 LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY,
821 /* no protocol close */);
824 lws_pt_mutex_destroy(pt);
828 * inform all the protocols that they are done and will have no more
831 * We can't free things until after the event loop shuts down.
833 if (context->protocol_init_done)
834 vh = context->vhost_list;
/* stage-1 destroy each vhost, saving the next pointer first */
836 struct lws_vhost *vhn = vh->vhost_next;
837 lws_vhost_destroy1(vh);
842 lws_plat_context_early_destroy(context);
844 #if defined(LWS_WITH_NETWORK)
847 * We face two different needs depending if foreign loop or not.
849 * 1) If foreign loop, we really want to advance the destroy_context()
850 * past here, and block only for libuv-style async close completion.
852 * 2a) If poll, and we exited by ourselves and are calling a final
853 * destroy_context() outside of any service already, we want to
854 * advance all the way in one step.
856 * 2b) If poll, and we are reacting to a SIGINT, service thread(s) may
857 * be in poll wait or servicing. We can't advance the
858 * destroy_context() to the point it's freeing things; we have to
859 * leave that for the final destroy_context() after the service
860 * thread(s) are finished calling for service.
/* async backends begin their close sequence here and we return early */
863 if (context->event_loop_ops->destroy_context1) {
864 context->event_loop_ops->destroy_context1(context);
/* record free heap for diagnostics on the embedded targets */
870 #if defined(LWS_WITH_ESP32)
871 #if defined(LWS_AMAZON_RTOS)
872 context->last_free_heap = xPortGetFreeHeapSize();
874 context->last_free_heap = esp_get_free_heap_size();
/* synchronous backends can advance straight to stage 2 */
878 lws_context_destroy2(context);