}
#if LWS_POSIX
+ (void)n;
#if defined(__linux__)
limit = vhost->context->count_threads;
#endif
return 1;
}
#if LWS_POSIX && !defined(LWS_WITH_ESP32)
+
+#if (defined(WIN32) || defined(_WIN32)) && defined(SO_EXCLUSIVEADDRUSE)
+ /*
+ * only accept that we are the only listener on the port
+ * https://msdn.microsoft.com/zh-tw/library/windows/desktop/ms740621(v=vs.85).aspx
+ *
+ * for lws, to match Linux, we default to exclusive listen
+ */
+ if (!lws_check_opt(vhost->options, LWS_SERVER_OPTION_ALLOW_LISTEN_SHARE)) {
+ if (setsockopt(sockfd, SOL_SOCKET, SO_EXCLUSIVEADDRUSE,
+ (const void *)&opt, sizeof(opt)) < 0) {
+ lwsl_err("reuseaddr failed\n");
+ compatible_close(sockfd);
+ return 1;
+ }
+ } else
+#endif
+
/*
* allow us to restart even if old sockets in TIME_WAIT
*/
}
#endif
-#if defined(__linux__) && defined(SO_REUSEPORT) && LWS_MAX_SMP > 1
- if (vhost->context->count_threads > 1)
- if (setsockopt(sockfd, SOL_SOCKET, SO_REUSEPORT,
- (const void *)&opt, sizeof(opt)) < 0) {
- compatible_close(sockfd);
- return 1;
- }
+#if defined(__linux__) && defined(SO_REUSEPORT)
+ n = lws_check_opt(vhost->options, LWS_SERVER_OPTION_ALLOW_LISTEN_SHARE);
+#if LWS_MAX_SMP > 1
+ n = 1;
+#endif
+
+ if (n)
+ if (vhost->context->count_threads > 1)
+ if (setsockopt(sockfd, SOL_SOCKET, SO_REUSEPORT,
+ (const void *)&opt, sizeof(opt)) < 0) {
+ compatible_close(sockfd);
+ return 1;
+ }
#endif
#endif
lws_plat_set_socket_options(vhost, sockfd);
if (!strcmp(&file[n - 4], ".ttf"))
return "application/x-font-ttf";
+ if (!strcmp(&file[n - 4], ".otf"))
+ return "application/font-woff";
+
if (!strcmp(&file[n - 5], ".woff"))
return "application/font-woff";
const struct lws_protocol_vhost_options *pvo = m->interpret;
struct lws_process_html_args args;
const char *mimetype;
-#if !defined(_WIN32_WCE) && !defined(LWS_WITH_ESP8266) && \
- !defined(LWS_WITH_ESP32)
+#if !defined(_WIN32_WCE) && !defined(LWS_WITH_ESP8266)
const struct lws_plat_file_ops *fops;
const char *vpath;
lws_fop_flags_t fflags = LWS_O_RDONLY;
+#if defined(WIN32) && defined(LWS_HAVE__STAT32I64)
+ struct _stat32i64 st;
+#else
struct stat st;
+#endif
int spin = 0;
#endif
char path[256], sym[512];
lws_snprintf(path, sizeof(path) - 1, "%s/%s", origin, uri);
-#if !defined(_WIN32_WCE) && !defined(LWS_WITH_ESP8266) && \
- !defined(LWS_WITH_ESP32)
+#if !defined(_WIN32_WCE) && !defined(LWS_WITH_ESP8266)
fflags |= lws_vfs_prepare_flags(wsi);
/* if it can't be statted, don't try */
if (fflags & LWS_FOP_FLAG_VIRTUAL)
break;
-
+#if defined(LWS_WITH_ESP32)
+ break;
+#endif
+#if !defined(WIN32)
if (fstat(wsi->u.http.fop_fd->fd, &st)) {
lwsl_info("unable to stat %s\n", path);
goto bail;
}
+#else
+#if defined(LWS_HAVE__STAT32I64)
+ if (_stat32i64(path, &st)) {
+ lwsl_info("unable to stat %s\n", path);
+ goto bail;
+ }
+#else
+ if (stat(path, &st)) {
+ lwsl_info("unable to stat %s\n", path);
+ goto bail;
+ }
+#endif
+#endif
wsi->u.http.fop_fd->mod_time = (uint32_t)st.st_mtime;
fflags |= LWS_FOP_FLAG_MOD_TIME_VALID;
lwsl_debug(" %s mode %d\n", path, S_IFMT & st.st_mode);
-#if !defined(WIN32) && LWS_POSIX
+#if !defined(WIN32) && LWS_POSIX && !defined(LWS_WITH_ESP32)
if ((S_IFMT & st.st_mode) == S_IFLNK) {
len = readlink(path, sym, sizeof(sym) - 1);
if (len) {
if (spin == 5)
lwsl_err("symlink loop %s \n", path);
- n = sprintf(sym, "%08lX%08lX",
- (unsigned long)lws_vfs_get_length(wsi->u.http.fop_fd),
+ n = sprintf(sym, "%08llX%08lX",
+ (unsigned long long)lws_vfs_get_length(wsi->u.http.fop_fd),
(unsigned long)lws_vfs_get_mod_time(wsi->u.http.fop_fd));
/* disable ranges if IF_RANGE token invalid */
if (n > (int)strlen(pvo->name) &&
!strcmp(&path[n - strlen(pvo->name)], pvo->name)) {
wsi->sending_chunked = 1;
- wsi->protocol_interpret_idx = (char)(long)pvo->value;
+ wsi->protocol_interpret_idx = (char)(lws_intptr_t)pvo->value;
lwsl_info("want %s interpreted by %s\n", path,
- wsi->vhost->protocols[(int)(long)(pvo->value)].name);
- wsi->protocol = &wsi->vhost->protocols[(int)(long)(pvo->value)];
+ wsi->vhost->protocols[(int)(lws_intptr_t)(pvo->value)].name);
+ wsi->protocol = &wsi->vhost->protocols[(int)(lws_intptr_t)(pvo->value)];
if (lws_ensure_user_space(wsi))
return -1;
break;
#endif
+/*
+ * lws_clean_url() - collapse any "//" runs in the url path to single "/",
+ * in place.
+ *
+ * Used by the patch below when synthesizing redirect and reverse-proxy
+ * urls from concatenated fragments that may each carry their own '/'.
+ *
+ * NOTE(review): O(n^2) worst case, since the whole tail is shifted left
+ * one byte per duplicate '/'; acceptable for short url buffers.
+ *
+ * Always returns 0.
+ */
+int lws_clean_url(char *p)
+{
+	while (*p) {
+		if (p[0] == '/' && p[1] == '/') {
+			/* shift the tail (including the NUL) left one byte,
+			 * then re-test the same position in case of "///" */
+			char *p1 = p;
+			while (*p1) {
+				*p1 = p1[1];
+				p1++;
+			}
+			continue;
+		}
+		p++;
+	}
+
+	return 0;
+}
+
int
lws_http_action(struct lws *wsi)
{
/* we insist on absolute paths */
- if (uri_ptr[0] != '/') {
+ if (!uri_ptr || uri_ptr[0] != '/') {
lws_return_http_status(wsi, HTTP_STATUS_FORBIDDEN, NULL);
goto bail_nuke_ah;
lws_hdr_copy(wsi, content_length_str,
sizeof(content_length_str) - 1,
WSI_TOKEN_HTTP_CONTENT_LENGTH);
- wsi->u.http.content_length = atoi(content_length_str);
+ wsi->u.http.content_length = atoll(content_length_str);
}
if (wsi->http2_substream) {
"%s%s%s/", oprot[lws_is_ssl(wsi)],
lws_hdr_simple_ptr(wsi, WSI_TOKEN_HOST),
uri_ptr);
+ lws_clean_url((char *)end);
n = lws_http_redirect(wsi, HTTP_STATUS_MOVED_PERMANENTLY,
end, n, &p, end);
}
#endif
+#if defined(LWS_WITH_HTTP_PROXY)
+ /*
+ * The mount is a reverse proxy?
+ */
+
+ if (hit->origin_protocol == LWSMPRO_HTTPS ||
+ hit->origin_protocol == LWSMPRO_HTTP) {
+ struct lws_client_connect_info i;
+ char ads[96], rpath[256], *pcolon, *pslash, *p;
+ int n, na;
+
+ memset(&i, 0, sizeof(i));
+ i.context = lws_get_context(wsi);
+
+ pcolon = strchr(hit->origin, ':');
+ pslash = strchr(hit->origin, '/');
+ if (!pslash) {
+ lwsl_err("Proxy mount origin '%s' must have /\n", hit->origin);
+ return -1;
+ }
+ if (pcolon > pslash)
+ pcolon = NULL;
+
+ if (pcolon)
+ n = pcolon - hit->origin;
+ else
+ n = pslash - hit->origin;
+
+ if (n >= sizeof(ads) - 2)
+ n = sizeof(ads) - 2;
+
+ memcpy(ads, hit->origin, n);
+ ads[n] = '\0';
+
+ i.address = ads;
+ i.port = 80;
+ if (hit->origin_protocol == LWSMPRO_HTTPS) {
+ i.port = 443;
+ i.ssl_connection = 1;
+ }
+ if (pcolon)
+ i.port = atoi(pcolon + 1);
+
+ lws_snprintf(rpath, sizeof(rpath) - 1, "/%s/%s", pslash + 1, uri_ptr + hit->mountpoint_len);
+ lws_clean_url(rpath);
+ na = lws_hdr_total_length(wsi, WSI_TOKEN_HTTP_URI_ARGS);
+ if (na) {
+ p = rpath + strlen(rpath);
+ *p++ = '?';
+ lws_hdr_copy(wsi, p, &rpath[sizeof(rpath) - 1] - p, WSI_TOKEN_HTTP_URI_ARGS);
+ while (--na) {
+ if (*p == '\0')
+ *p = '&';
+ p++;
+ }
+ }
+
+
+ i.path = rpath;
+ i.host = i.address;
+ i.origin = NULL;
+ i.method = "GET";
+ i.parent_wsi = wsi;
+ i.uri_replace_from = hit->origin;
+ i.uri_replace_to = hit->mountpoint;
+
+ lwsl_notice("proxying to %s port %d url %s, ssl %d, from %s, to %s\n",
+ i.address, i.port, i.path, i.ssl_connection, i.uri_replace_from, i.uri_replace_to);
+
+ if (!lws_client_connect_via_info(&i)) {
+ lwsl_err("proxy connect fail\n");
+ return 1;
+ }
+
+ return 0;
+ }
+#endif
+
/*
* A particular protocol callback is mounted here?
*
NULL, /* replace with cgi path */
NULL
};
- unsigned char *p, *end, buffer[1024];
lwsl_debug("%s: cgi\n", __func__);
cmd[0] = hit->origin;
lwsl_err("%s: cgi failed\n", __func__);
return -1;
}
- p = buffer + LWS_PRE;
- end = p + sizeof(buffer) - LWS_PRE;
-
- if (lws_add_http_header_status(wsi, HTTP_STATUS_OK, &p, end))
- return 1;
- if (lws_add_http_header_by_token(wsi, WSI_TOKEN_CONNECTION,
- (unsigned char *)"close", 5, &p, end))
- return 1;
- n = lws_write(wsi, buffer + LWS_PRE,
- p - (buffer + LWS_PRE),
- LWS_WRITE_HTTP_HEADERS);
goto deal_body;
}
bail_nuke_ah:
/* we're closing, losing some rx is OK */
- wsi->u.hdr.ah->rxpos = wsi->u.hdr.ah->rxlen;
+ lws_header_table_force_to_detachable_state(wsi);
// lwsl_notice("%s: drop1\n", __func__);
lws_header_table_detach(wsi, 1);
#endif
}
+/*
+ * lws_server_init_wsi_for_ws() - common post-upgrade setup for a wsi
+ * entering established websocket serving mode.
+ *
+ * Factored out by this patch from lws_handshake_server() so the same
+ * path can also serve LWS_ADOPT_WS_PARENTIO adoptions (see the adoption
+ * hunk later in this patch, where it is called directly).
+ *
+ * Does:
+ *  - marks the wsi LWSS_ESTABLISHED and restarts the ws ping/pong timer
+ *  - allocates the per-connection rx frame buffer: the protocol's
+ *    rx_buffer_size, or the context pt_serv_buf_size default when 0,
+ *    plus LWS_PRE, plus 4 bytes for the zlib 0x0000ffff trailer
+ *  - sets SO_SNDBUF to the frame size, but only when this wsi owns its
+ *    own socket (skipped when the parent carries the io)
+ *  - notifies user code with LWS_CALLBACK_ESTABLISHED
+ *
+ * Returns 0 on success, 1 on failure (OOM, setsockopt failure, or the
+ * user callback vetoing the connection).
+ */
+static int
+lws_server_init_wsi_for_ws(struct lws *wsi)
+{
+	int n;
+
+	wsi->state = LWSS_ESTABLISHED;
+	lws_restart_ws_ping_pong_timer(wsi);
+
+	/*
+	 * create the frame buffer for this connection according to the
+	 * size mentioned in the protocol definition. If 0 there, use
+	 * a big default for compatibility
+	 */
+
+	n = wsi->protocol->rx_buffer_size;
+	if (!n)
+		n = wsi->context->pt_serv_buf_size;
+	n += LWS_PRE;
+	wsi->u.ws.rx_ubuf = lws_malloc(n + 4 /* 0x0000ffff zlib */);
+	if (!wsi->u.ws.rx_ubuf) {
+		lwsl_err("Out of Mem allocating rx buffer %d\n", n);
+		return 1;
+	}
+	wsi->u.ws.rx_ubuf_alloc = n;
+	lwsl_debug("Allocating RX buffer %d\n", n);
+
+#if LWS_POSIX && !defined(LWS_WITH_ESP32)
+	/* a parent-carries-io child has no socket of its own to tune */
+	if (!wsi->parent_carries_io)
+		if (setsockopt(wsi->desc.sockfd, SOL_SOCKET, SO_SNDBUF,
+			       (const char *)&n, sizeof n)) {
+			lwsl_warn("Failed to set SNDBUF to %d", n);
+			return 1;
+		}
+#endif
+
+	/* notify user code that we're ready to roll */
+
+	if (wsi->protocol->callback)
+		if (wsi->protocol->callback(wsi, LWS_CALLBACK_ESTABLISHED,
+					    wsi->user_space,
+#ifdef LWS_OPENSSL_SUPPORT
+					    wsi->ssl,
+#else
+					    NULL,
+#endif
+					    0))
+			return 1;
+
+	return 0;
+}
int
lws_handshake_server(struct lws *wsi, unsigned char **buf, size_t len)
wsi->user_space, NULL, 0))
goto bail_nuke_ah;
- wsi->u.hdr.ah->rxpos = wsi->u.hdr.ah->rxlen;
+ lws_header_table_force_to_detachable_state(wsi);
lws_union_transition(wsi, LWSCM_RAW);
lws_header_table_detach(wsi, 1);
*/
lwsl_info("defaulting to prot handler %d\n",
wsi->vhost->default_protocol_index);
- n = 0;
+ n = wsi->vhost->default_protocol_index;
wsi->protocol = &wsi->vhost->protocols[
(int)wsi->vhost->default_protocol_index];
}
goto bail_nuke_ah;
}
- /*
- * stitch protocol choice into the vh protocol linked list
- * We always insert ourselves at the start of the list
- *
- * X <-> B
- * X <-> pAn <-> pB
- */
- //lwsl_err("%s: pre insert vhost start wsi %p, that wsi prev == %p\n",
- // __func__,
- // wsi->vhost->same_vh_protocol_list[n],
- // wsi->same_vh_protocol_prev);
- wsi->same_vh_protocol_prev = /* guy who points to us */
- &wsi->vhost->same_vh_protocol_list[n];
- wsi->same_vh_protocol_next = /* old first guy is our next */
- wsi->vhost->same_vh_protocol_list[n];
- /* we become the new first guy */
- wsi->vhost->same_vh_protocol_list[n] = wsi;
-
- if (wsi->same_vh_protocol_next)
- /* old first guy points back to us now */
- wsi->same_vh_protocol_next->same_vh_protocol_prev =
- &wsi->same_vh_protocol_next;
-
-
+ lws_same_vh_protocol_insert(wsi, n);
/* we are upgrading to ws, so http/1.1 and keepalive +
* pipelined header considerations about keeping the ah around
wsi->u.hdr = hdr;
lws_pt_unlock(pt);
- lws_restart_ws_ping_pong_timer(wsi);
-
- /*
- * create the frame buffer for this connection according to the
- * size mentioned in the protocol definition. If 0 there, use
- * a big default for compatibility
- */
-
- n = wsi->protocol->rx_buffer_size;
- if (!n)
- n = context->pt_serv_buf_size;
- n += LWS_PRE;
- wsi->u.ws.rx_ubuf = lws_malloc(n + 4 /* 0x0000ffff zlib */);
- if (!wsi->u.ws.rx_ubuf) {
- lwsl_err("Out of Mem allocating rx buffer %d\n", n);
- return 1;
- }
- wsi->u.ws.rx_ubuf_alloc = n;
- lwsl_debug("Allocating RX buffer %d\n", n);
-#if LWS_POSIX && !defined(LWS_WITH_ESP32)
- if (setsockopt(wsi->desc.sockfd, SOL_SOCKET, SO_SNDBUF,
- (const char *)&n, sizeof n)) {
- lwsl_warn("Failed to set SNDBUF to %d", n);
- return 1;
- }
-#endif
-
+ lws_server_init_wsi_for_ws(wsi);
lwsl_parser("accepted v%02d connection\n",
wsi->ietf_spec_revision);
- /* notify user code that we're ready to roll */
-
- if (wsi->protocol->callback)
- if (wsi->protocol->callback(wsi, LWS_CALLBACK_ESTABLISHED,
- wsi->user_space,
-#ifdef LWS_OPENSSL_SUPPORT
- wsi->ssl,
-#else
- NULL,
-#endif
- 0))
- return 1;
-
/* !!! drop ah unreservedly after ESTABLISHED */
if (!wsi->more_rx_waiting) {
- wsi->u.hdr.ah->rxpos = wsi->u.hdr.ah->rxlen;
+ lws_header_table_force_to_detachable_state(wsi);
//lwsl_notice("%p: dropping ah EST\n", wsi);
lws_header_table_detach(wsi, 1);
bail_nuke_ah:
/* drop the header info */
/* we're closing, losing some rx is OK */
- wsi->u.hdr.ah->rxpos = wsi->u.hdr.ah->rxlen;
+ lws_header_table_force_to_detachable_state(wsi);
//lwsl_notice("%s: drop2\n", __func__);
lws_header_table_detach(wsi, 1);
new_wsi->user_space = NULL;
new_wsi->ietf_spec_revision = 0;
new_wsi->desc.sockfd = LWS_SOCK_INVALID;
+ new_wsi->position_in_fds_table = -1;
+
vhost->context->count_wsi_allocated++;
/*
lws_access_log(wsi);
- lwsl_info("%s: wsi %p\n", __func__, wsi);
+ if (!wsi->hdr_parsing_completed) {
+ lwsl_notice("%s: ignoring, ah parsing incomplete\n", __func__);
+ return 0;
+ }
+
+ lwsl_debug("%s: wsi %p\n", __func__, wsi);
/* if we can't go back to accept new headers, drop the connection */
if (wsi->u.http.connection_type != HTTP_CONNECTION_KEEP_ALIVE) {
lwsl_info("%s: %p: close connection\n", __func__, wsi);
wsi->more_rx_waiting);
if (!wsi->more_rx_waiting) {
- wsi->u.hdr.ah->rxpos = wsi->u.hdr.ah->rxlen;
+ lws_header_table_force_to_detachable_state(wsi);
lws_header_table_detach(wsi, 1);
+#ifdef LWS_OPENSSL_SUPPORT
+ /*
+ * additionally... if we are hogging an SSL instance
+ * with no pending pipelined headers (or ah now), and
+ * SSL is scarce, drop this connection without waiting
+ */
+
+ if (wsi->vhost->use_ssl &&
+ wsi->context->simultaneous_ssl_restriction &&
+ wsi->context->simultaneous_ssl ==
+ wsi->context->simultaneous_ssl_restriction) {
+ lwsl_info("%s: simultaneous_ssl_restriction and nothing pipelined\n", __func__);
+ return 1;
+ }
+#endif
} else
lws_header_table_reset(wsi, 1);
}
{
struct lws_context *context = vh->context;
struct lws *new_wsi = lws_create_new_server_wsi(vh);
+ struct lws_context_per_thread *pt;
int n, ssl = 0;
if (!new_wsi) {
- if (type & LWS_ADOPT_SOCKET)
+ if (type & LWS_ADOPT_SOCKET && !(type & LWS_ADOPT_WS_PARENTIO))
compatible_close(fd.sockfd);
return NULL;
}
+ pt = &context->pt[(int)new_wsi->tsi];
+ lws_stats_atomic_bump(context, pt, LWSSTATS_C_CONNECTIONS, 1);
if (parent) {
new_wsi->parent = parent;
new_wsi->sibling_list = parent->child_list;
parent->child_list = new_wsi;
+
+ if (type & LWS_ADOPT_WS_PARENTIO)
+ new_wsi->parent_carries_io = 1;
}
new_wsi->desc = fd;
vh_prot_name, new_wsi->vhost->name);
goto bail;
}
- if (lws_ensure_user_space(new_wsi))
+ if (lws_ensure_user_space(new_wsi)) {
+ lwsl_notice("OOM trying to get user_space\n");
goto bail;
+ }
+ if (type & LWS_ADOPT_WS_PARENTIO) {
+ new_wsi->desc.sockfd = LWS_SOCK_INVALID;
+ lwsl_debug("binding to %s\n", new_wsi->protocol->name);
+ lws_bind_protocol(new_wsi, new_wsi->protocol);
+ lws_union_transition(new_wsi, LWSCM_WS_SERVING);
+ lws_server_init_wsi_for_ws(new_wsi);
+
+ return new_wsi;
+ }
} else
- new_wsi->protocol = &context->vhost_list->
- protocols[vh->default_protocol_index];
+ if (type & LWS_ADOPT_HTTP) /* he will transition later */
+ new_wsi->protocol =
+ &vh->protocols[vh->default_protocol_index];
+ else { /* this is the only time he will transition */
+ lws_bind_protocol(new_wsi,
+ &vh->protocols[vh->raw_protocol_index]);
+ lws_union_transition(new_wsi, LWSCM_RAW);
+ }
if (type & LWS_ADOPT_SOCKET) { /* socket desc */
lwsl_debug("%s: new wsi %p, sockfd %d\n", __func__, new_wsi,
- (int)(size_t)fd.sockfd);
+ (int)(lws_intptr_t)fd.sockfd);
- /* the transport is accepted... give him time to negotiate */
- lws_set_timeout(new_wsi, PENDING_TIMEOUT_ESTABLISH_WITH_SERVER,
- context->timeout_secs);
+ if (type & LWS_ADOPT_HTTP)
+ /* the transport is accepted...
+ * give him time to negotiate */
+ lws_set_timeout(new_wsi,
+ PENDING_TIMEOUT_ESTABLISH_WITH_SERVER,
+ context->timeout_secs);
#if LWS_POSIX == 0
#if defined(LWS_WITH_ESP8266)
#endif
} else /* file desc */
lwsl_debug("%s: new wsi %p, filefd %d\n", __func__, new_wsi,
- (int)(size_t)fd.filefd);
+ (int)(lws_intptr_t)fd.filefd);
/*
* A new connection was accepted. Give the user a chance to
else
n = LWS_CALLBACK_RAW_ADOPT;
}
- if ((new_wsi->protocol->callback)(
- new_wsi, n, NULL, NULL, 0)) {
- if (type & LWS_ADOPT_SOCKET) {
- /* force us off the timeout list by hand */
- lws_set_timeout(new_wsi, NO_PENDING_TIMEOUT, 0);
- compatible_close(new_wsi->desc.sockfd);
- }
- goto bail;
- }
if (!LWS_SSL_ENABLED(new_wsi->vhost) || !(type & LWS_ADOPT_ALLOW_SSL) ||
!(type & LWS_ADOPT_SOCKET)) {
lws_libev_accept(new_wsi, new_wsi->desc);
lws_libuv_accept(new_wsi, new_wsi->desc);
+ lws_libevent_accept(new_wsi, new_wsi->desc);
if (!ssl) {
if (insert_wsi_socket_into_fds(context, new_wsi)) {
goto fail;
}
- if (type & LWS_ADOPT_HTTP)
- if (!lws_header_table_attach(new_wsi, 0))
+ /*
+ * by deferring callback to this point, after insertion to fds,
+ * lws_callback_on_writable() can work from the callback
+ */
+ if ((new_wsi->protocol->callback)(
+ new_wsi, n, new_wsi->user_space, NULL, 0))
+ goto fail;
+
+ if (type & LWS_ADOPT_HTTP) {
+ if (!lws_header_table_attach(new_wsi, 0)) {
lwsl_debug("Attached ah immediately\n");
+ } else {
+ lwsl_notice("%s: waiting for ah\n", __func__);
+ }
+ }
return new_wsi;
return NULL;
bail:
+ lwsl_notice("%s: exiting on bail\n", __func__);
if (parent)
parent->child_list = new_wsi->sibling_list;
if (new_wsi->user_space)
lws_free(new_wsi->user_space);
lws_free(new_wsi);
+ compatible_close(fd.sockfd);
return NULL;
}
struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
lws_sockfd_type accept_fd = LWS_SOCK_INVALID;
struct allocated_headers *ah;
+ lws_sock_file_fd_type fd;
+ int opts = LWS_ADOPT_SOCKET | LWS_ADOPT_ALLOW_SSL;
#if LWS_POSIX
- struct sockaddr_in cli_addr;
+ struct sockaddr_storage cli_addr;
socklen_t clilen;
#endif
int n, len;
//lwsl_err("wsi %p: missing ah\n", wsi);
/* no autoservice beacuse we will do it next */
if (lws_header_table_attach(wsi, 0)) {
- lwsl_err("wsi %p: failed to acquire ah\n", wsi);
+ lwsl_info("wsi %p: failed to acquire ah\n", wsi);
goto try_pollout;
}
}
ah->rxlen = ah->rxpos = 0;
goto try_pollout;
}
+
+ /*
+ * make sure ah does not get detached if we
+ * have live data in the rx
+ */
+ if (ah->rxlen)
+ wsi->more_rx_waiting = 1;
}
if (!(ah->rxpos != ah->rxlen && ah->rxlen)) {
}
/* just ignore incoming if waiting for close */
- if (wsi->state != LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE) {
+ if (wsi->state != LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE &&
+ wsi->state != LWSS_HTTP_ISSUING_FILE) {
n = lws_read(wsi, ah->rx + ah->rxpos,
ah->rxlen - ah->rxpos);
if (n < 0) /* we closed wsi */
lwsl_debug("%s: wsi %p: ah read rxpos %d, rxlen %d\n", __func__, wsi, wsi->u.hdr.ah->rxpos, wsi->u.hdr.ah->rxlen);
- if (wsi->u.hdr.ah->rxpos == wsi->u.hdr.ah->rxlen &&
+ if (lws_header_table_is_in_detachable_state(wsi) &&
(wsi->mode != LWSCM_HTTP_SERVING &&
wsi->mode != LWSCM_HTTP_SERVING_ACCEPTED &&
wsi->mode != LWSCM_HTTP2_SERVING))
}
/* just ignore incoming if waiting for close */
- if (wsi->state != LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE) {
+ if (wsi->state != LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE &&
+ wsi->state != LWSS_HTTP_ISSUING_FILE) {
/*
* this may want to send
* (via HTTP callback for example)
}
if (wsi->mode == LWSCM_RAW) {
+ lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_WRITEABLE_CB, 1);
+#if defined(LWS_WITH_STATS)
+ {
+ uint64_t ul = time_in_microseconds() - wsi->active_writable_req_us;
+
+ lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_MS_WRITABLE_DELAY, ul);
+ lws_stats_atomic_max(wsi->context, pt, LWSSTATS_MS_WORST_WRITABLE_DELAY, ul);
+ wsi->active_writable_req_us = 0;
+ }
+#endif
n = user_callback_handle_rxflow(wsi->protocol->callback,
wsi, LWS_CALLBACK_RAW_WRITEABLE,
wsi->user_space, NULL, 0);
break;
if (wsi->state != LWSS_HTTP_ISSUING_FILE) {
+
+ lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_WRITEABLE_CB, 1);
+#if defined(LWS_WITH_STATS)
+ {
+ uint64_t ul = time_in_microseconds() - wsi->active_writable_req_us;
+
+ lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_MS_WRITABLE_DELAY, ul);
+ lws_stats_atomic_max(wsi->context, pt, LWSSTATS_MS_WORST_WRITABLE_DELAY, ul);
+ wsi->active_writable_req_us = 0;
+ }
+#endif
+
n = user_callback_handle_rxflow(wsi->protocol->callback,
wsi, LWS_CALLBACK_HTTP_WRITEABLE,
wsi->user_space, NULL, 0);
break;
}
- /* >0 == completion, <0 == error */
+ /* >0 == completion, <0 == error
+ *
+ * We'll get a LWS_CALLBACK_HTTP_FILE_COMPLETION callback when
+ * it's done. That's the case even if we just completed the
+ * send, so wait for that.
+ */
n = lws_serve_http_file_fragment(wsi);
- if (n < 0 || (n > 0 && lws_http_transaction_completed(wsi))) {
- lwsl_info("completed\n");
+ if (n < 0)
goto fail;
- }
break;
if (!(pollfd->revents & LWS_POLLIN) || !(pollfd->events & LWS_POLLIN))
break;
+#ifdef LWS_OPENSSL_SUPPORT
+ /*
+ * can we really accept it, with regards to SSL limit?
+ * another vhost may also have had POLLIN on his listener this
+ * round and used it up already
+ */
+
+ if (wsi->vhost->use_ssl &&
+ context->simultaneous_ssl_restriction &&
+ context->simultaneous_ssl ==
+ context->simultaneous_ssl_restriction)
+ /* no... ignore it, he won't come again until we are
+ * below the simultaneous_ssl_restriction limit and
+ * POLLIN is enabled on him again
+ */
+ break;
+#endif
/* listen socket got an unencrypted connection... */
clilen = sizeof(cli_addr);
if (accept_fd < 0) {
if (LWS_ERRNO == LWS_EAGAIN ||
LWS_ERRNO == LWS_EWOULDBLOCK) {
- lwsl_err("accept asks to try again\n");
+// lwsl_err("accept asks to try again\n");
break;
}
lwsl_err("ERROR on accept: %s\n", strerror(LWS_ERRNO));
lws_plat_set_socket_options(wsi->vhost, accept_fd);
- lwsl_debug("accepted new conn port %u on fd=%d\n",
- ntohs(cli_addr.sin_port), accept_fd);
+#if defined(LWS_USE_IPV6)
+ lwsl_debug("accepted new conn port %u on fd=%d\n",
+ ((cli_addr.ss_family == AF_INET6) ?
+ ntohs(((struct sockaddr_in6 *) &cli_addr)->sin6_port) :
+ ntohs(((struct sockaddr_in *) &cli_addr)->sin_port)),
+ accept_fd);
+#else
+ lwsl_debug("accepted new conn port %u on fd=%d\n",
+ ntohs(((struct sockaddr_in *) &cli_addr)->sin_port),
+ accept_fd);
+#endif
#else
/* not very beautiful... */
*/
if ((wsi->vhost->protocols[0].callback)(wsi,
LWS_CALLBACK_FILTER_NETWORK_CONNECTION,
- NULL, (void *)(long)accept_fd, 0)) {
+ NULL, (void *)(lws_intptr_t)accept_fd, 0)) {
lwsl_debug("Callback denied network connection\n");
compatible_close(accept_fd);
break;
}
- if (!lws_adopt_socket_vhost(wsi->vhost, accept_fd))
+ if (!(wsi->vhost->options & LWS_SERVER_OPTION_ONLY_RAW))
+ opts |= LWS_ADOPT_HTTP;
+
+ fd.sockfd = accept_fd;
+ if (!lws_adopt_descriptor_vhost(wsi->vhost, opts, fd,
+ NULL, NULL))
/* already closed cleanly as necessary */
return 1;
unsigned char *response = pt->serv_buf + LWS_PRE;
unsigned char *p = response;
unsigned char *end = p + context->pt_serv_buf_size - LWS_PRE;
- unsigned long computed_total_content_length;
+ lws_filepos_t computed_total_content_length;
int ret = 0, cclen = 8, n = HTTP_STATUS_OK;
lws_fop_flags_t fflags = LWS_O_RDONLY;
#if defined(LWS_WITH_RANGES)
* Precompute it for the main response header
*/
- computed_total_content_length = (unsigned long)rp->agg +
+ computed_total_content_length = (lws_filepos_t)rp->agg +
6 /* final _lws\r\n */;
lws_ranges_reset(rp);
}
if (ranges == 1) {
- computed_total_content_length = (unsigned long)rp->agg;
+ computed_total_content_length = (lws_filepos_t)rp->agg;
n = lws_snprintf(cache_control, sizeof(cache_control), "bytes %llu-%llu/%llu",
rp->start, rp->end, rp->extent);
}
if (wsi->u.ws.rx_draining_ext) {
+ // lwsl_notice("draining with 0\n");
m = lws_rx_sm(wsi, 0);
if (m < 0)
return -1;
wsi->rxflow_pos++;
/* consume payload bytes efficiently */
- if (wsi->lws_rx_parse_state ==
+ if (
+ wsi->lws_rx_parse_state ==
LWS_RXPS_PAYLOAD_UNTIL_LENGTH_EXHAUSTED) {
m = lws_payload_until_length_exhausted(wsi, buf, &len);
if (wsi->rxflow_buffer)
char *storage;
char *end;
int max_storage;
+
+ char finalized;
};
static int
lwsl_err("%s: NULL spa\n", __func__);
return -1;
}
+ /* we reject any junk after the last part arrived and we finalized */
+ if (ludspa->finalized)
+ return 0;
+
return lws_urldecode_s_process(ludspa->s, in, len);
}
spa->s = NULL;
}
+ spa->finalized = 1;
+
return 0;
}
{
int n = 0;
+ lwsl_notice("%s: destroy spa %p\n", __func__, spa);
+
+ if (spa->s)
+ lws_urldecode_s_destroy(spa->s);
+
+ lwsl_debug("%s %p %p %p %p\n", __func__,
+ spa->param_length,
+ spa->params,
+ spa->storage,
+ spa
+ );
+
+ lws_free(spa->param_length);
+ lws_free(spa->params);
+ lws_free(spa->storage);
+ lws_free(spa);
+
+ return n;
+}
+
+#if 0
+LWS_VISIBLE LWS_EXTERN int
+lws_spa_destroy(struct lws_spa *spa)
+{
+ int n = 0;
+
lwsl_info("%s: destroy spa %p\n", __func__, spa);
if (spa->s)
return n;
}
-
+#endif
LWS_VISIBLE LWS_EXTERN int
lws_chunked_html_process(struct lws_process_html_args *args,
struct lws_process_html_state *s)