static int
lws_calllback_as_writeable(struct lws *wsi)
{
+ struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
int n;
+ lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_WRITEABLE_CB, 1);
+#if defined(LWS_WITH_STATS)
+ {
+ uint64_t ul = time_in_microseconds() - wsi->active_writable_req_us;
+
+ lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_MS_WRITABLE_DELAY, ul);
+ lws_stats_atomic_max(wsi->context, pt, LWSSTATS_MS_WORST_WRITABLE_DELAY, ul);
+ wsi->active_writable_req_us = 0;
+ }
+#endif
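+
+ /*
+ * Sketch of the requesting side (an assumption, it is not in this
+ * hunk): active_writable_req_us is stamped when the POLLOUT request
+ * is made, roughly
+ *
+ *	wsi->active_writable_req_us = time_in_microseconds();
+ *	lws_change_pollfd(wsi, 0, LWS_POLLOUT);
+ *
+ * so ul above measures request-to-service latency in us.
+ */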
+
switch (wsi->mode) {
+ case LWSCM_RAW:
+ n = LWS_CALLBACK_RAW_WRITEABLE;
+ break;
+ case LWSCM_RAW_FILEDESC:
+ n = LWS_CALLBACK_RAW_WRITEABLE_FILE;
+ break;
case LWSCM_WS_CLIENT:
n = LWS_CALLBACK_CLIENT_WRITEABLE;
break;
+ case LWSCM_WSCL_ISSUE_HTTP_BODY:
+ n = LWS_CALLBACK_CLIENT_HTTP_WRITEABLE;
+ break;
case LWSCM_WS_SERVING:
n = LWS_CALLBACK_SERVER_WRITEABLE;
break;
default:
n = LWS_CALLBACK_HTTP_WRITEABLE;
break;
}
- lwsl_info("%s: %p (user=%p)\n", __func__, wsi, wsi->user_space);
+ lwsl_debug("%s: %p (user=%p)\n", __func__, wsi, wsi->user_space);
return user_callback_handle_rxflow(wsi->protocol->callback,
wsi, (enum lws_callback_reasons) n,
wsi->user_space, NULL, 0);
#endif
int ret, m, n;
- /* pending truncated sends have uber priority */
+ //lwsl_err("%s: %p\n", __func__, wsi);
+
+ wsi->leave_pollout_active = 0;
+ wsi->handling_pollout = 1;
+ /*
+ * If another thread wants POLLOUT on us, then from here on, while
+ * handling_pollout is set, he will only set leave_pollout_active.
+ * If we are going to disable POLLOUT, we will check that flag first.
+ */
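+
+ /*
+ * Sketch of the other side of this handshake (assumed, not shown in
+ * this patch): a cross-thread lws_callback_on_writable() would do
+ * something like
+ *
+ *	if (wsi->handling_pollout) {
+ *		wsi->leave_pollout_active = 1;
+ *		return 0;
+ *	}
+ *
+ * ie, leave POLLOUT armed and let us notice the flag below instead
+ * of touching the pollfd itself.
+ */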
+
+ /*
+ * The user callback is actually the lowest priority to get these
+ * notifications, since the other pending things cannot be reordered
+ */
+ /* Priority 1: pending truncated sends are incomplete ws fragments.
+ * If anything else were sent first, the protocol would be
+ * corrupted.
+ */
if (wsi->trunc_len) {
- if (lws_issue_raw(wsi, wsi->trunc_alloc +
- wsi->trunc_offset,
- wsi->trunc_len) < 0) {
+ if (lws_issue_raw(wsi, wsi->trunc_alloc + wsi->trunc_offset,
+ wsi->trunc_len) < 0) {
lwsl_info("%s signalling to close\n", __func__);
- return -1;
+ goto bail_die;
}
/* leave POLLOUT active either way */
- return 0;
+ goto bail_ok;
} else
- if (wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE)
- return -1; /* retry closing now */
+ if (wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE) {
+ wsi->socket_is_permanently_unusable = 1;
+ goto bail_die; /* retry closing now */
+ }
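+
+ /*
+ * Background sketch, as understood: wsi->trunc_alloc is stashed by
+ * lws_issue_raw() itself when the platform send comes up short,
+ * along the lines of
+ *
+ *	if (n < len)
+ *		stash buf + n .. buf + len - 1 in wsi->trunc_alloc,
+ *		set trunc_offset / trunc_len and force POLLOUT on,
+ *
+ * which is why draining it must outrank everything else here.
+ */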
+
+ if (wsi->mode == LWSCM_WSCL_ISSUE_HTTP_BODY)
+ goto user_service;
+
#ifdef LWS_USE_HTTP2
- /* protocol packets are next */
+ /* Priority 2: protocol packets
+ */
if (wsi->pps) {
lwsl_info("servicing pps %d\n", wsi->pps);
switch (wsi->pps) {
case LWS_PPS_HTTP2_MY_SETTINGS:
case LWS_PPS_HTTP2_ACK_SETTINGS:
- lws_http2_do_pps_send(lws_get_ctx(wsi), wsi);
+ lws_http2_do_pps_send(lws_get_context(wsi), wsi);
break;
default:
break;
}

wsi->pps = LWS_PPS_NONE;
lws_rx_flow_control(wsi, 1);
- return 0; /* leave POLLOUT active */
+ goto bail_ok; /* leave POLLOUT active */
+ }
+#endif
+
+#ifdef LWS_WITH_CGI
+ if (wsi->cgi) {
+ /* also one shot */
+ if (pollfd)
+ if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
+ lwsl_info("failed at set pollfd\n");
+ return 1;
+ }
+ goto user_service_go_again;
}
#endif
- /* pending control packets have next priority */
+ /* Priority 3: pending control packets (pong or close)
+ */
if ((wsi->state == LWSS_ESTABLISHED &&
wsi->u.ws.ping_pending_flag) ||
(wsi->state == LWSS_RETURNED_CLOSE_ALREADY &&
wsi->u.ws.payload_is_close)) {

if (wsi->u.ws.payload_is_close)
write_type = LWS_WRITE_CLOSE;
- n = lws_write(wsi, &wsi->u.ws.ping_payload_buf[
- LWS_SEND_BUFFER_PRE_PADDING],
+ n = lws_write(wsi, &wsi->u.ws.ping_payload_buf[LWS_PRE],
wsi->u.ws.ping_payload_len, write_type);
if (n < 0)
- return -1;
+ goto bail_die;
/* well he is sent, mark him done */
wsi->u.ws.ping_pending_flag = 0;
if (wsi->u.ws.payload_is_close)
/* oh... a close frame was it... then we are done */
- return -1;
+ goto bail_die;
/* otherwise for PING, leave POLLOUT active either way */
- return 0;
+ goto bail_ok;
}
- /* if we are closing, don't confuse the user with writeable cb */
+ if (wsi->state == LWSS_ESTABLISHED &&
+ !wsi->socket_is_permanently_unusable &&
+ wsi->u.ws.send_check_ping) {
+
+ lwsl_info("issuing ping on wsi %p\n", wsi);
+ wsi->u.ws.send_check_ping = 0;
+ n = lws_write(wsi, &wsi->u.ws.ping_payload_buf[LWS_PRE],
+ 0, LWS_WRITE_PING);
+ if (n < 0)
+ goto bail_die;
+
+ /*
+ * We were apparently able to send the PING in a reasonable time;
+ * now reset the clock so our peer gets a reasonable time to send
+ * the PONG back.
+ */
+
+ lws_set_timeout(wsi, PENDING_TIMEOUT_WS_PONG_CHECK_GET_PONG,
+ wsi->context->timeout_secs);
+ goto bail_ok;
+ }
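+
+ /*
+ * Lifecycle sketch: send_check_ping is armed once per
+ * ws_ping_pong_interval by the periodic check in lws_service_fd_tsi()
+ * below, together with PENDING_TIMEOUT_WS_PONG_CHECK_SEND_PING; the
+ * arriving PONG is expected to clear the GET_PONG timeout set above.
+ */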
+
+ /* Priority 4: if we are closing, we are not allowed to send more data
+ * frags, which means the user callback and tx ext flush are banned now
+ */
if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY)
goto user_service;
- /* if nothing critical, user can get the callback */
+ /* Priority 5: Tx path extension with more to send
+ *
+ * These are handled as new fragments each time around.
+ * So we must block the new writeable callback to enforce
+ * payload ordering; but since they are always complete
+ * fragments, control packets can interleave OK.
+ */
+ if (wsi->state == LWSS_ESTABLISHED && wsi->u.ws.tx_draining_ext) {
+ lwsl_ext("SERVICING TX EXT DRAINING\n");
+ if (lws_write(wsi, NULL, 0, LWS_WRITE_CONTINUATION) < 0)
+ goto bail_die;
+ /* leave POLLOUT active */
+ goto bail_ok;
+ }
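+
+ /*
+ * Drain mechanics, as understood: lws_write() with a NULL buffer and
+ * LWS_WRITE_CONTINUATION sends no new user payload; it re-enters the
+ * tx extension path so eg permessage-deflate can emit its next
+ * buffered fragment, one fragment per POLLOUT, until tx_draining_ext
+ * clears.
+ */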
- m = lws_ext_cb_wsi_active_exts(wsi, LWS_EXT_CALLBACK_IS_WRITEABLE,
- NULL, 0);
+ /* Priority 6: user can get the callback
+ */
+ m = lws_ext_cb_active(wsi, LWS_EXT_CB_IS_WRITEABLE, NULL, 0);
+ if (m)
+ goto bail_die;
#ifndef LWS_NO_EXTENSIONS
if (!wsi->extension_data_pending)
goto user_service;
*/
ret = 1;
+ if (wsi->mode == LWSCM_RAW || wsi->mode == LWSCM_RAW_FILEDESC)
+ ret = 0;
while (ret == 1) {
/* default to nobody has more to spill */
/* give every extension a chance to spill */
- m = lws_ext_cb_wsi_active_exts(wsi,
- LWS_EXT_CALLBACK_PACKET_TX_PRESEND,
- &eff_buf, 0);
+ m = lws_ext_cb_active(wsi,
+ LWS_EXT_CB_PACKET_TX_PRESEND,
+ &eff_buf, 0);
if (m < 0) {
lwsl_err("ext reports fatal error\n");
- return -1;
+ goto bail_die;
}
if (m)
/*
if (eff_buf.token_len) {
n = lws_issue_raw(wsi, (unsigned char *)eff_buf.token,
- eff_buf.token_len);
+ eff_buf.token_len);
if (n < 0) {
lwsl_info("closing from POLLOUT spill\n");
- return -1;
+ goto bail_die;
}
/*
* Keep amount spilled small to minimize chance of this
*/
if (n != eff_buf.token_len) {
- lwsl_err("Unable to spill ext %d vs %s\n",
+ lwsl_err("Unable to spill ext %d vs %d\n",
eff_buf.token_len, n);
- return -1;
+ goto bail_die;
}
} else
continue;
* when we come back here and there's nothing more to spill.
*/
- return 0;
+ goto bail_ok;
}
#ifndef LWS_NO_EXTENSIONS
wsi->extension_data_pending = 0;
/* one shot */
if (pollfd) {
- if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
- lwsl_info("failed at set pollfd\n");
- return 1;
- }
+ int eff = wsi->leave_pollout_active;
+
+ if (!eff)
+ if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
+ lwsl_info("failed at set pollfd\n");
+ goto bail_die;
+ }
+
+ wsi->handling_pollout = 0;
+
+ /* cannot get leave_pollout_active set after the above */
+
+ if (!eff && wsi->leave_pollout_active)
+ /* got set in between sampling eff and clearing
+ * handling_pollout; force POLLOUT on */
+ lws_calllback_as_writeable(wsi);
- lws_libev_io(wsi, LWS_EV_STOP | LWS_EV_WRITE);
+ wsi->leave_pollout_active = 0;
}
+ if (wsi->mode != LWSCM_WSCL_ISSUE_HTTP_BODY &&
+ !wsi->hdr_parsing_completed)
+ goto bail_ok;
+
+
+#ifdef LWS_WITH_CGI
+user_service_go_again:
+#endif
+
#ifdef LWS_USE_HTTP2
/*
* we are the 'network wsi' for potentially many muxed child wsi with
wsi->u.http2.requested_POLLOUT = 0;
if (!wsi->u.http2.initialized) {
lwsl_info("pollout on uninitialized http2 conn\n");
- return 0;
+ goto bail_ok;
}
lwsl_info("%s: doing children\n", __func__);
lwsl_info("%s: completed\n", __func__);
- return 0;
+ goto bail_ok;
notify:
#endif
+ wsi->handling_pollout = 0;
+ wsi->leave_pollout_active = 0;
+
return lws_calllback_as_writeable(wsi);
+
+ /*
+ * since these don't disable the POLLOUT, they are always doing the
+ * right thing for leave_pollout_active whether it was set or not.
+ */
+
+bail_ok:
+ wsi->handling_pollout = 0;
+ wsi->leave_pollout_active = 0;
+
+ return 0;
+
+bail_die:
+ wsi->handling_pollout = 0;
+ wsi->leave_pollout_active = 0;
+
+ return -1;
}
int
lws_service_timeout_check(struct lws *wsi, unsigned int sec)
{
+//#if LWS_POSIX
+ struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
+ int n = 0;
+//#endif
+
+ (void)n;
+
/*
* if extensions want in on it (eg, we are a mux parent)
* give them a chance to service child timeouts
*/
- if (lws_ext_cb_wsi_active_exts(wsi, LWS_EXT_CALLBACK_1HZ,
- NULL, sec) < 0)
+ if (lws_ext_cb_active(wsi, LWS_EXT_CB_1HZ, NULL, sec) < 0)
return 0;
if (!wsi->pending_timeout)
* connection
*/
if ((time_t)sec > wsi->pending_timeout_limit) {
- lwsl_info("wsi %p: TIMEDOUT WAITING on %d\n",
- (void *)wsi, wsi->pending_timeout);
+//#if LWS_POSIX
+ if (wsi->desc.sockfd != LWS_SOCK_INVALID && wsi->position_in_fds_table >= 0)
+ n = pt->fds[wsi->position_in_fds_table].events;
+
+ lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_TIMEOUTS, 1);
+
+ /* no need to log normal idle keepalive timeout */
+ if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
+ lwsl_notice("wsi %p: TIMEDOUT WAITING on %d (did hdr %d, ah %p, wl %d, pfd events %d) %llu vs %llu\n",
+ (void *)wsi, wsi->pending_timeout,
+ wsi->hdr_parsing_completed, wsi->u.hdr.ah,
+ pt->ah_wait_list_length, n, (unsigned long long)sec, (unsigned long long)wsi->pending_timeout_limit);
+//#endif
/*
* Since he failed a timeout, he already had a chance to do
* something and was unable to... so don't bother with graceful
* cleanup like flushing partials.
*/
wsi->socket_is_permanently_unusable = 1;
+ if (wsi->mode == LWSCM_WSCL_WAITING_SSL)
+ wsi->vhost->protocols[0].callback(wsi,
+ LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
+ wsi->user_space, (void *)"Timed out waiting SSL", 21);
+
lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS);
return 1;
/* a new rxflow, buffer it and warn caller */
lwsl_info("new rxflow input buffer len %d\n", len - n);
wsi->rxflow_buffer = lws_malloc(len - n);
+ if (!wsi->rxflow_buffer)
+ return -1;
wsi->rxflow_len = len - n;
wsi->rxflow_pos = 0;
memcpy(wsi->rxflow_buffer, buf + n, len - n);
return 0;
}
-/**
- * lws_service_fd() - Service polled socket with something waiting
- * @context: Websocket context
- * @pollfd: The pollfd entry describing the socket fd and which events
- * happened.
- *
- * This function takes a pollfd that has POLLIN or POLLOUT activity and
- * services it according to the state of the associated
- * struct lws.
- *
- * The one call deals with all "service" that might happen on a socket
- * including listen accepts, http files as well as websocket protocol.
- *
- * If a pollfd says it has something, you can just pass it to
- * lws_service_fd() whether it is a socket handled by lws or not.
- * If it sees it is a lws socket, the traffic will be handled and
- * pollfd->revents will be zeroed now.
+/* this is used by the platform service code to stop us waiting for network
+ * activity in poll() when we have something that already needs service
+ */
+
+LWS_VISIBLE LWS_EXTERN int
+lws_service_adjust_timeout(struct lws_context *context, int timeout_ms, int tsi)
+{
+ struct lws_context_per_thread *pt = &context->pt[tsi];
+ int n;
+
+ /* Figure out if we really want to wait in poll():
+ * we only need to wait if there is really nothing to do already
+ * and we have to wait for something from the network
+ */
+
+ /* 1) if we know we are draining rx ext, do not wait in poll */
+ if (pt->rx_draining_ext_list)
+ return 0;
+
+#ifdef LWS_OPENSSL_SUPPORT
+ /* 2) if we know we have non-network pending data, do not wait in poll */
+ if (lws_ssl_anybody_has_buffered_read_tsi(context, tsi)) {
+ lwsl_info("ssl buffered read\n");
+ return 0;
+ }
+#endif
+
+ /* 3) if any ah has pending rx, do not wait in poll */
+ for (n = 0; n < context->max_http_header_pool; n++)
+ if (pt->ah_pool[n].rxpos != pt->ah_pool[n].rxlen) {
+ /* any ah with pending rx must be attached to someone */
+ if (!pt->ah_pool[n].wsi) {
+ lwsl_err("%s: assert: no wsi attached to ah\n", __func__);
+ assert(0);
+ }
+ return 0;
+ }
+
+ return timeout_ms;
+}
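+
+/*
+ * Usage sketch (an assumption about the platform loop, which is not shown
+ * here): the per-thread service collapses its poll() wait to zero when
+ * work is already buffered, roughly
+ *
+ *	timeout_ms = lws_service_adjust_timeout(context, timeout_ms, tsi);
+ *	n = poll(pt->fds, pt->fds_count, timeout_ms);
+ */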
+
+/*
+ * guys that need POLLIN service again without waiting for network action
+ * can force POLLIN here if not flowcontrolled, so they will get service.
*
- * If the socket is foreign to lws, it leaves revents alone. So you can
- * see if you should service yourself by checking the pollfd revents
- * after letting lws try to service it.
+ * Return nonzero if anybody got their POLLIN faked
*/
+int
+lws_service_flag_pending(struct lws_context *context, int tsi)
+{
+ struct lws_context_per_thread *pt = &context->pt[tsi];
+#ifdef LWS_OPENSSL_SUPPORT
+ struct lws *wsi_next;
+#endif
+ struct lws *wsi;
+ int forced = 0;
+ int n;
+
+ /* POLLIN faking */
+
+ /*
+ * 1) For all guys with already-available ext data to drain, if they are
+ * not flowcontrolled, fake their POLLIN status
+ */
+ wsi = pt->rx_draining_ext_list;
+ while (wsi) {
+ pt->fds[wsi->position_in_fds_table].revents |=
+ pt->fds[wsi->position_in_fds_table].events & LWS_POLLIN;
+ if (pt->fds[wsi->position_in_fds_table].revents & LWS_POLLIN) {
+ forced = 1;
+ break;
+ }
+ wsi = wsi->u.ws.rx_draining_ext_list;
+ }
+
+#ifdef LWS_OPENSSL_SUPPORT
+ /*
+ * 2) For all guys with buffered SSL read data already saved up, if they
+ * are not flowcontrolled, fake their POLLIN status so they'll get
+ * service to use up the buffered incoming data, even though their
+ * network socket may have nothing
+ */
+ wsi = pt->pending_read_list;
+ while (wsi) {
+ wsi_next = wsi->pending_read_list_next;
+ pt->fds[wsi->position_in_fds_table].revents |=
+ pt->fds[wsi->position_in_fds_table].events & LWS_POLLIN;
+ if (pt->fds[wsi->position_in_fds_table].revents & LWS_POLLIN) {
+ forced = 1;
+ /*
+ * he's going to get serviced now, take him off the
+ * list of guys with buffered SSL. If he still has some
+ * at the end of the service, he'll get put back on the
+ * list then.
+ */
+ lws_ssl_remove_wsi_from_buffered_list(wsi);
+ }
+
+ wsi = wsi_next;
+ }
+#endif
+ /*
+ * 3) For any wsi that has an ah with pending RX and has not
+ * completed its current headers, and is not flowcontrolled,
+ * fake its POLLIN status so it will be able to drain the
+ * rx buffered in the ah
+ */
+ for (n = 0; n < context->max_http_header_pool; n++)
+ if (pt->ah_pool[n].rxpos != pt->ah_pool[n].rxlen &&
+ !pt->ah_pool[n].wsi->hdr_parsing_completed) {
+ pt->fds[pt->ah_pool[n].wsi->position_in_fds_table].revents |=
+ pt->fds[pt->ah_pool[n].wsi->position_in_fds_table].events &
+ LWS_POLLIN;
+ if (pt->fds[pt->ah_pool[n].wsi->position_in_fds_table].revents &
+ LWS_POLLIN)
+ forced = 1;
+ }
+
+ return forced;
+}
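+
+/*
+ * Usage sketch (assumed caller): after poll() returns, the platform
+ * service picks the faked revents up like real ones, roughly
+ *
+ *	if (lws_service_flag_pending(context, tsi))
+ *		service pt->fds entries even though poll() saw nothing
+ */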
+
+#ifndef LWS_NO_CLIENT
LWS_VISIBLE int
-lws_service_fd(struct lws_context *context, struct lws_pollfd *pollfd)
+lws_http_client_read(struct lws *wsi, char **buf, int *len)
{
-#if LWS_POSIX
- int idx = 0;
+ int rlen, n;
+
+ rlen = lws_ssl_capable_read(wsi, (unsigned char *)*buf, *len);
+ *len = 0;
+
+ /* allow the source to signal he has data again next time */
+ lws_change_pollfd(wsi, 0, LWS_POLLIN);
+
+ if (rlen == LWS_SSL_CAPABLE_ERROR) {
+ lwsl_notice("%s: SSL capable error\n", __func__);
+ return -1;
+ }
+
+ if (rlen == 0)
+ return -1;
+
+ if (rlen < 0)
+ return 0;
+
+ *len = rlen;
+ wsi->client_rx_avail = 0;
+
+ /*
+ * server may insist on transfer-encoding: chunked,
+ * so http client must deal with it
+ */
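+ /*
+ * For reference, the wire format the parser below walks looks like
+ *
+ *	"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n"
+ *
+ * ELCP_HEX accumulates the hex length up to the CR, ELCP_CR eats the
+ * LF after it, ELCP_CONTENT passes payload through, then ELCP_POST_CR
+ * and ELCP_POST_LF consume the trailing CRLF before the next length.
+ */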
+spin_chunks:
+ while (wsi->chunked && (wsi->chunk_parser != ELCP_CONTENT) && *len) {
+ switch (wsi->chunk_parser) {
+ case ELCP_HEX:
+ if ((*buf)[0] == '\x0d') {
+ wsi->chunk_parser = ELCP_CR;
+ break;
+ }
+ n = char_to_hex((*buf)[0]);
+ if (n < 0) {
+ lwsl_debug("chunking failure\n");
+ return -1;
+ }
+ wsi->chunk_remaining <<= 4;
+ wsi->chunk_remaining |= n;
+ break;
+ case ELCP_CR:
+ if ((*buf)[0] != '\x0a') {
+ lwsl_debug("chunking failure\n");
+ return -1;
+ }
+ wsi->chunk_parser = ELCP_CONTENT;
+ lwsl_info("chunk %d\n", wsi->chunk_remaining);
+ if (wsi->chunk_remaining)
+ break;
+ lwsl_info("final chunk\n");
+ goto completed;
+
+ case ELCP_CONTENT:
+ break;
+
+ case ELCP_POST_CR:
+ if ((*buf)[0] != '\x0d') {
+ lwsl_debug("chunking failure\n");
+
+ return -1;
+ }
+
+ wsi->chunk_parser = ELCP_POST_LF;
+ break;
+
+ case ELCP_POST_LF:
+ if ((*buf)[0] != '\x0a')
+ return -1;
+
+ wsi->chunk_parser = ELCP_HEX;
+ wsi->chunk_remaining = 0;
+ break;
+ }
+ (*buf)++;
+ (*len)--;
+ }
+
+ if (wsi->chunked && !wsi->chunk_remaining)
+ return 0;
+
+ if (wsi->u.http.content_remain &&
+ wsi->u.http.content_remain < *len)
+ n = (int)wsi->u.http.content_remain;
+ else
+ n = *len;
+
+ if (wsi->chunked && wsi->chunk_remaining &&
+ wsi->chunk_remaining < n)
+ n = wsi->chunk_remaining;
+
+#ifdef LWS_WITH_HTTP_PROXY
+ /* hubbub */
+ if (wsi->perform_rewrite)
+ lws_rewrite_parse(wsi->rw, (unsigned char *)*buf, n);
+ else
+#endif
+ if (user_callback_handle_rxflow(wsi->protocol->callback,
+ wsi, LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ,
+ wsi->user_space, *buf, n)) {
+ lwsl_debug("%s: LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ returned -1\n", __func__);
+
+ return -1;
+ }
+
+ if (wsi->chunked && wsi->chunk_remaining) {
+ (*buf) += n;
+ wsi->chunk_remaining -= n;
+ *len -= n;
+ }
+
+ if (wsi->chunked && !wsi->chunk_remaining)
+ wsi->chunk_parser = ELCP_POST_CR;
+
+ if (wsi->chunked && *len)
+ goto spin_chunks;
+
+ if (wsi->chunked)
+ return 0;
+
+ /* if we know the content length, decrement the content remaining */
+ if (wsi->u.http.content_length > 0)
+ wsi->u.http.content_remain -= n;
+
+ if (wsi->u.http.content_remain || !wsi->u.http.content_length)
+ return 0;
+
+completed:
+ if (user_callback_handle_rxflow(wsi->protocol->callback,
+ wsi, LWS_CALLBACK_COMPLETED_CLIENT_HTTP,
+ wsi->user_space, NULL, 0)) {
+ lwsl_debug("Completed call returned -1\n");
+ return -1;
+ }
+
+ if (lws_http_transaction_completed_client(wsi)) {
+ lwsl_notice("%s: transaction completed says -1\n", __func__);
+ return -1;
+ }
+
+ return 0;
+}
#endif
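+
+/*
+ * Usage sketch following the test-client pattern (buffer names here are
+ * illustrative only): user code drives this from its
+ * LWS_CALLBACK_RECEIVE_CLIENT_HTTP handler, something like
+ *
+ *	char buf[1024 + LWS_PRE], *px = buf + LWS_PRE;
+ *	int lenx = sizeof(buf) - LWS_PRE;
+ *
+ *	if (lws_http_client_read(wsi, &px, &lenx) < 0)
+ *		return -1;
+ */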
- lws_sockfd_type our_fd = 0;
+
+static int
+lws_is_ws_with_ext(struct lws *wsi)
+{
+#if defined(LWS_NO_EXTENSIONS)
+ return 0;
+#else
+ return wsi->state == LWSS_ESTABLISHED &&
+ !!wsi->count_act_ext;
+#endif
+}
+
+LWS_VISIBLE int
+lws_service_fd_tsi(struct lws_context *context, struct lws_pollfd *pollfd, int tsi)
+{
+ struct lws_context_per_thread *pt = &context->pt[tsi];
+ lws_sockfd_type our_fd = 0, tmp_fd;
struct lws_tokens eff_buf;
unsigned int pending = 0;
+ struct lws *wsi, *wsi1;
char draining_flow = 0;
- lws_sockfd_type mfd;
int timed_out = 0;
- struct lws *wsi;
time_t now;
- int n, m;
+ int n = 0, m;
int more;
-#if LWS_POSIX
- if (context->lserv_fd)
- idx = wsi_from_fd(context, context->lserv_fd)->position_in_fds_table;
-#endif
- /*
- * you can call us with pollfd = NULL to just allow the once-per-second
- * global timeout checks; if less than a second since the last check
- * it returns immediately then.
- */
+ if (!context->protocol_init_done)
+ lws_protocol_init(context);
time(&now);
+ /*
+ * handle case that system time was uninitialized when lws started
+ * at boot, and got initialized a little later
+ */
+ if (context->time_up < 1464083026 && now > 1464083026)
+ context->time_up = now;
+
/* TODO: if using libev, we should probably use timeout watchers... */
if (context->last_timeout_check_s != now) {
context->last_timeout_check_s = now;
+#if defined(LWS_WITH_STATS)
+ if (!tsi && now - context->last_dump > 10) {
+ lws_stats_log_dump(context);
+ context->last_dump = now;
+ }
+#endif
+
lws_plat_service_periodic(context);
+ /* retire unused deprecated context */
+#if !defined(LWS_PLAT_OPTEE) && !defined(LWS_WITH_ESP32)
+#if LWS_POSIX && !defined(_WIN32)
+ if (context->deprecated && !context->count_wsi_allocated) {
+ lwsl_notice("%s: ending deprecated context\n", __func__);
+ kill(getpid(), SIGINT);
+ return 0;
+ }
+#endif
+#endif
/* global timeout check once per second */
if (pollfd)
our_fd = pollfd->fd;
- for (n = 0; n < context->fds_count; n++) {
- mfd = context->fds[n].fd;
- wsi = wsi_from_fd(context, mfd);
- if (!wsi)
- continue;
-
- if (lws_service_timeout_check(wsi, (unsigned int)now))
+ wsi = context->pt[tsi].timeout_list;
+ while (wsi) {
+ /* we have to take copies, because he may be deleted */
+ wsi1 = wsi->timeout_list;
+ tmp_fd = wsi->desc.sockfd;
+ if (lws_service_timeout_check(wsi, (unsigned int)now)) {
/* he did time out... */
- if (mfd == our_fd)
+ if (tmp_fd == our_fd)
/* it was the guy we came to service! */
timed_out = 1;
/* he's gone, no need to mark as handled */
+ }
+ wsi = wsi1;
+ }
+#ifdef LWS_WITH_CGI
+ lws_cgi_kill_terminated(pt);
+#endif
+#if 0
+ {
+ char s[300], *p = s;
+
+ for (n = 0; n < context->count_threads; n++)
+ p += sprintf(p, " %7lu (%5d), ",
+ context->pt[n].count_conns,
+ context->pt[n].fds_count);
+
+ lwsl_notice("load: %s\n", s);
+ }
+#endif
+ }
+
+ /*
+ * at intervals, check for ws connections needing ping-pong checks
+ */
+
+ if (context->ws_ping_pong_interval &&
+ context->last_ws_ping_pong_check_s < now + 10) {
+ struct lws_vhost *vh = context->vhost_list;
+ context->last_ws_ping_pong_check_s = now;
+
+ while (vh) {
+ for (n = 0; n < vh->count_protocols; n++) {
+ wsi = vh->same_vh_protocol_list[n];
+
+ while (wsi) {
+ if (wsi->state == LWSS_ESTABLISHED &&
+ !wsi->socket_is_permanently_unusable &&
+ !wsi->u.ws.send_check_ping &&
+ wsi->u.ws.time_next_ping_check &&
+ wsi->u.ws.time_next_ping_check < now) {
+
+ lwsl_info("requesting ping-pong on wsi %p\n", wsi);
+ wsi->u.ws.send_check_ping = 1;
+ lws_set_timeout(wsi, PENDING_TIMEOUT_WS_PONG_CHECK_SEND_PING,
+ context->timeout_secs);
+ lws_callback_on_writable(wsi);
+ wsi->u.ws.time_next_ping_check = now +
+ wsi->context->ws_ping_pong_interval;
+ }
+ wsi = wsi->same_vh_protocol_next;
+ }
+ }
+ vh = vh->vhost_next;
}
}
*/
#if LWS_POSIX
- /*
- * deal with listen service piggybacking
- * every lserv_mod services of other fds, we
- * sneak one in to service the listen socket if there's anything waiting
- *
- * To handle connection storms, as found in ab, if we previously saw a
- * pending connection here, it causes us to check again next time.
- */
-
- if (context->lserv_fd && pollfd != &context->fds[idx]) {
- context->lserv_count++;
- if (context->lserv_seen ||
- context->lserv_count == context->lserv_mod) {
- context->lserv_count = 0;
- m = 1;
- if (context->lserv_seen > 5)
- m = 2;
- while (m--) {
- /*
- * even with extpoll, we prepared this
- * internal fds for listen
- */
- n = lws_poll_listen_fd(&context->fds[idx]);
- if (n <= 0) {
- if (context->lserv_seen)
- context->lserv_seen--;
- break;
- }
- /* there's a conn waiting for us */
- lws_service_fd(context, &context->fds[idx]);
- context->lserv_seen++;
- }
- }
- }
-
/* handle session socket closed */
- if ((!(pollfd->revents & LWS_POLLIN)) &&
- (pollfd->revents & LWS_POLLHUP)) {
-
+ if ((!(pollfd->revents & pollfd->events & LWS_POLLIN)) &&
+ (pollfd->revents & LWS_POLLHUP)) {
+ wsi->socket_is_permanently_unusable = 1;
lwsl_debug("Session Socket %p (fd=%d) dead\n",
(void *)wsi, pollfd->fd);
#endif
+// lwsl_debug("fd=%d, revents=%d, mode=%d, state=%d\n", pollfd->fd, pollfd->revents, (int)wsi->mode, (int)wsi->state);
+ if (pollfd->revents & LWS_POLLHUP) {
+ lwsl_debug("pollhup\n");
+ wsi->socket_is_permanently_unusable = 1;
+ goto close_and_handled;
+ }
+
+
+#ifdef LWS_OPENSSL_SUPPORT
+ if (wsi->state == LWSS_SHUTDOWN && lws_is_ssl(wsi) && wsi->ssl) {
+ n = SSL_shutdown(wsi->ssl);
+ lwsl_debug("SSL_shutdown=%d for fd %d\n", n, wsi->desc.sockfd);
+ if (n == 1) {
+ n = shutdown(wsi->desc.sockfd, SHUT_WR);
+ goto close_and_handled;
+ } else if (n == 0) {
+ lws_change_pollfd(wsi, LWS_POLLOUT, LWS_POLLIN);
+ n = 0;
+ goto handled;
+ } else { /* n < 0 */
+ int shutdown_error = SSL_get_error(wsi->ssl, n);
+
+ lwsl_debug("SSL_shutdown returned %d, SSL_get_error: %d\n",
+ n, shutdown_error);
+ if (shutdown_error == SSL_ERROR_WANT_READ) {
+ lws_change_pollfd(wsi, LWS_POLLOUT, LWS_POLLIN);
+ n = 0;
+ goto handled;
+ } else if (shutdown_error == SSL_ERROR_WANT_WRITE) {
+ lws_change_pollfd(wsi, LWS_POLLOUT, LWS_POLLOUT);
+ n = 0;
+ goto handled;
+ }
+
+ /* an actual error occurred, just close the connection */
+ n = shutdown(wsi->desc.sockfd, SHUT_WR);
+ goto close_and_handled;
+ }
+ }
+#endif
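+
+ /*
+ * For reference, the SSL_shutdown() returns relied on above (OpenSSL
+ * semantics): 1 means the bidirectional shutdown completed; 0 means
+ * our close_notify went out but the peer's has not been seen yet, so
+ * call again after more network IO; <0 means consult SSL_get_error(),
+ * where WANT_READ / WANT_WRITE ask us to repoll.
+ */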
+
/* okay, what we came here to do... */
switch (wsi->mode) {
case LWSCM_HTTP_SERVING:
+ case LWSCM_HTTP_CLIENT:
case LWSCM_HTTP_SERVING_ACCEPTED:
case LWSCM_SERVER_LISTENER:
case LWSCM_SSL_ACK_PENDING:
+ case LWSCM_SSL_ACK_PENDING_RAW:
+ if (wsi->state == LWSS_CLIENT_HTTP_ESTABLISHED)
+ goto handled;
+
+#ifdef LWS_WITH_CGI
+ if (wsi->cgi && (pollfd->revents & LWS_POLLOUT)) {
+ n = lws_handle_POLLOUT_event(wsi, pollfd);
+ if (n)
+ goto close_and_handled;
+ goto handled;
+ }
+#endif
+ /* fallthru */
+ case LWSCM_RAW:
n = lws_server_socket_service(context, wsi, pollfd);
if (n) /* closed by above */
return 1;
- pending = lws_ssl_pending(wsi);
- if (pending)
- goto handle_pending;
+ goto handled;
+
+ case LWSCM_RAW_FILEDESC:
+
+ if (pollfd->revents & LWS_POLLOUT) {
+ n = lws_calllback_as_writeable(wsi);
+ if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
+ lwsl_info("failed at set pollfd\n");
+ return 1;
+ }
+ if (n)
+ goto close_and_handled;
+ }
+ n = LWS_CALLBACK_RAW_RX;
+ if (wsi->mode == LWSCM_RAW_FILEDESC)
+ n = LWS_CALLBACK_RAW_RX_FILE;
+
+ if (pollfd->revents & LWS_POLLIN) {
+ if (user_callback_handle_rxflow(
+ wsi->protocol->callback,
+ wsi, n,
+ wsi->user_space, NULL, 0)) {
+ lwsl_debug("raw rx callback closed it\n");
+ goto close_and_handled;
+ }
+ }
+
+ if (pollfd->revents & LWS_POLLHUP)
+ goto close_and_handled;
+ n = 0;
goto handled;
case LWSCM_WS_SERVING:
case LWSCM_WS_CLIENT:
case LWSCM_HTTP2_SERVING:
+ case LWSCM_HTTP_CLIENT_ACCEPTED:
- /* the guy requested a callback when it was OK to write */
+ /* 1: something requested a callback when it was OK to write */
if ((pollfd->revents & LWS_POLLOUT) &&
- (wsi->state == LWSS_ESTABLISHED ||
+ ((wsi->state == LWSS_ESTABLISHED ||
wsi->state == LWSS_HTTP2_ESTABLISHED ||
wsi->state == LWSS_HTTP2_ESTABLISHED_PRE_SETTINGS ||
wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
- wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE) &&
- lws_handle_POLLOUT_event(wsi, pollfd)) {
+ wsi->state == LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE)) &&
+ lws_handle_POLLOUT_event(wsi, pollfd)) {
if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY)
wsi->state = LWSS_FLUSHING_STORED_SEND_BEFORE_CLOSE;
lwsl_info("lws_service_fd: closing\n");
goto close_and_handled;
}
- if (wsi->rxflow_buffer &&
- (wsi->rxflow_change_to & LWS_RXFLOW_ALLOW)) {
- lwsl_info("draining rxflow\n");
+ if (wsi->state == LWSS_RETURNED_CLOSE_ALREADY ||
+ wsi->state == LWSS_AWAITING_CLOSE_ACK) {
+ /*
+ * we stopped caring about anything except control
+ * packets. Force flow control off, defeat tx
+ * draining.
+ */
+ lws_rx_flow_control(wsi, 1);
+ wsi->u.ws.tx_draining_ext = 0;
+ }
+
+ if (wsi->u.ws.tx_draining_ext)
+ /* we cannot deal with new RX until the TX ext
+ * path has been drained. It's because new
+ * rx will, eg, crap on the wsi rx buf that
+ * may be needed to retain state.
+ *
+ * TX ext drain path MUST go through event loop
+ * to avoid blocking.
+ */
+ break;
+
+ if (!(wsi->rxflow_change_to & LWS_RXFLOW_ALLOW))
+ /* We cannot deal with any kind of new RX
+ * because we are RX-flowcontrolled.
+ */
+ break;
+
+ /* 2: RX Extension needs to be drained
+ */
+
+ if (wsi->state == LWSS_ESTABLISHED &&
+ wsi->u.ws.rx_draining_ext) {
+
+ lwsl_ext("%s: RX EXT DRAINING: Service\n", __func__);
+#ifndef LWS_NO_CLIENT
+ if (wsi->mode == LWSCM_WS_CLIENT) {
+ n = lws_client_rx_sm(wsi, 0);
+ if (n < 0)
+ /* we closed wsi */
+ n = 0;
+ } else
+#endif
+ n = lws_rx_sm(wsi, 0);
+
+ goto handled;
+ }
+
+ if (wsi->u.ws.rx_draining_ext)
+ /*
+ * We have RX EXT content to drain, but can't do it
+ * right now. That means we cannot do anything lower
+ * priority either.
+ */
+ break;
+
+ /* 3: RX Flowcontrol buffer needs to be drained
+ */
+
+ if (wsi->rxflow_buffer) {
+ lwsl_info("draining rxflow (len %d)\n",
+ wsi->rxflow_len - wsi->rxflow_pos
+ );
/* well, drain it */
eff_buf.token = (char *)wsi->rxflow_buffer +
wsi->rxflow_pos;
goto drain;
}
- /* any incoming data ready? */
+ /* 4: any incoming (or ah-stashed incoming rx) data ready?
+ * notice if rx flow going off raced poll(), rx flow wins
+ */
- if (!(pollfd->revents & LWS_POLLIN))
+ if (!(pollfd->revents & pollfd->events & LWS_POLLIN))
break;
+
read:
+ /* all the union members start with hdr, so even in ws mode
+ * we can deal with the ah via u.hdr
+ */
+ if (wsi->u.hdr.ah) {
+ lwsl_info("%s: %p: inherited ah rx\n", __func__, wsi);
+ eff_buf.token_len = wsi->u.hdr.ah->rxlen -
+ wsi->u.hdr.ah->rxpos;
+ eff_buf.token = (char *)wsi->u.hdr.ah->rx +
+ wsi->u.hdr.ah->rxpos;
+ } else {
+ if (wsi->mode != LWSCM_HTTP_CLIENT_ACCEPTED) {
+ /*
+ * an extension may not consume everything (eg, pmd may be constrained
+ * as to what it can output...), so its output has to go in the per-wsi
+ * rx buf area; otherwise the large temp serv_buf area is used.
+ */
+ eff_buf.token = (char *)pt->serv_buf;
+ if (lws_is_ws_with_ext(wsi)) {
+ eff_buf.token_len = wsi->u.ws.rx_ubuf_alloc;
+ } else {
+ eff_buf.token_len = context->pt_serv_buf_size;
+ }
- eff_buf.token_len = lws_ssl_capable_read(wsi,
- context->serv_buf,
- pending ? pending :
- sizeof(context->serv_buf));
- switch (eff_buf.token_len) {
- case 0:
- lwsl_info("service_fd: closing due to 0 length read\n");
- goto close_and_handled;
- case LWS_SSL_CAPABLE_MORE_SERVICE:
- lwsl_info("SSL Capable more service\n");
- n = 0;
- goto handled;
- case LWS_SSL_CAPABLE_ERROR:
- lwsl_info("Closing when error\n");
- goto close_and_handled;
+ if ((unsigned int)eff_buf.token_len > context->pt_serv_buf_size)
+ eff_buf.token_len = context->pt_serv_buf_size;
+
+ eff_buf.token_len = lws_ssl_capable_read(wsi,
+ (unsigned char *)eff_buf.token, pending ? pending :
+ eff_buf.token_len);
+ switch (eff_buf.token_len) {
+ case 0:
+ lwsl_info("%s: zero length read\n", __func__);
+ goto close_and_handled;
+ case LWS_SSL_CAPABLE_MORE_SERVICE:
+ lwsl_info("SSL Capable more service\n");
+ n = 0;
+ goto handled;
+ case LWS_SSL_CAPABLE_ERROR:
+ lwsl_info("Closing when error\n");
+ goto close_and_handled;
+ }
+ // lwsl_notice("Actual RX %d\n", eff_buf.token_len);
+ }
}
+drain:
+#ifndef LWS_NO_CLIENT
+ if (wsi->mode == LWSCM_HTTP_CLIENT_ACCEPTED &&
+ !wsi->told_user_closed) {
+
+ /*
+ * In SSL mode we get POLLIN notification about
+ * encrypted data in.
+ *
+ * But that is not necessarily related to decrypted
+ * data out becoming available; it may need to perform
+ * other in or out before that happens.
+ *
+ * simply mark ourselves as having readable data
+ * and turn off our POLLIN
+ */
+ wsi->client_rx_avail = 1;
+ lws_change_pollfd(wsi, LWS_POLLIN, 0);
+
+ /* let user code know, he'll usually ask for writeable
+ * callback and drain / re-enable it there
+ */
+ if (user_callback_handle_rxflow(
+ wsi->protocol->callback,
+ wsi, LWS_CALLBACK_RECEIVE_CLIENT_HTTP,
+ wsi->user_space, NULL, 0)) {
+ lwsl_notice("LWS_CALLBACK_RECEIVE_CLIENT_HTTP closed it\n");
+ goto close_and_handled;
+ }
+ }
+#endif
/*
* give any active extensions a chance to munge the buffer
* before parse. We pass in a pointer to an lws_tokens struct
* extension callback handling, just the normal input buffer is
* used then so it is efficient.
*/
-
- eff_buf.token = (char *)context->serv_buf;
-drain:
-
do {
more = 0;
- m = lws_ext_cb_wsi_active_exts(wsi,
- LWS_EXT_CALLBACK_PACKET_RX_PREPARSE, &eff_buf, 0);
+ m = lws_ext_cb_active(wsi, LWS_EXT_CB_PACKET_RX_PREPARSE,
+ &eff_buf, 0);
if (m < 0)
goto close_and_handled;
if (m)
/* service incoming data */
if (eff_buf.token_len) {
+ /*
+ * if draining from rxflow buffer, not
+ * critical to track what was used, since the use
+ * itself bumps wsi->rxflow_pos. If we come
+ * around again it will pick up from where it
+ * left off.
+ */
+ // lwsl_notice("doing lws_read from pt->serv_buf %p %p for len %d\n", pt->serv_buf, eff_buf.token, (int)eff_buf.token_len);
+
n = lws_read(wsi, (unsigned char *)eff_buf.token,
eff_buf.token_len);
if (n < 0) {
eff_buf.token_len = 0;
} while (more);
+ if (wsi->u.hdr.ah) {
+ lwsl_notice("%s: %p: detaching\n",
+ __func__, wsi);
+ /* show we used all the pending rx up */
+ wsi->u.hdr.ah->rxpos = wsi->u.hdr.ah->rxlen;
+ /* we can run the normal ah detach flow despite
+ * being in ws union mode, since all union members
+ * start with hdr */
+ lws_header_table_detach(wsi, 0);
+ }
+
pending = lws_ssl_pending(wsi);
if (pending) {
-handle_pending:
- pending = pending > sizeof(context->serv_buf) ?
- sizeof(context->serv_buf) : pending;
+ if (lws_is_ws_with_ext(wsi))
+ pending = pending > wsi->u.ws.rx_ubuf_alloc ?
+ wsi->u.ws.rx_ubuf_alloc : pending;
+ else
+ pending = pending > context->pt_serv_buf_size ?
+ context->pt_serv_buf_size : pending;
goto read;
}
if (draining_flow && wsi->rxflow_buffer &&
- wsi->rxflow_pos == wsi->rxflow_len) {
+ wsi->rxflow_pos == wsi->rxflow_len) {
lwsl_info("flow buffer: drained\n");
lws_free_set_NULL(wsi->rxflow_buffer);
/* having drained the rxflow buffer, can rearm POLLIN */
}
break;
+#ifdef LWS_WITH_CGI
+ case LWSCM_CGI: /* we exist to handle a cgi's stdin/out/err data...
+ * do the callback on our master wsi
+ */
+ {
+ struct lws_cgi_args args;
+
+ if (wsi->cgi_channel >= LWS_STDOUT &&
+ !(pollfd->revents & pollfd->events & LWS_POLLIN))
+ break;
+ if (wsi->cgi_channel == LWS_STDIN &&
+ !(pollfd->revents & pollfd->events & LWS_POLLOUT))
+ break;
+
+ if (wsi->cgi_channel == LWS_STDIN)
+ if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
+ lwsl_info("failed at set pollfd\n");
+ return 1;
+ }
+ args.ch = wsi->cgi_channel;
+ args.stdwsi = &wsi->parent->cgi->stdwsi[0];
+ args.hdr_state = wsi->hdr_state;
+
+ //lwsl_err("CGI LWS_STDOUT waiting wsi %p mode %d state %d\n",
+ // wsi->parent, wsi->parent->mode, wsi->parent->state);
+
+ if (user_callback_handle_rxflow(
+ wsi->parent->protocol->callback,
+ wsi->parent, LWS_CALLBACK_CGI,
+ wsi->parent->user_space,
+ (void *)&args, 0))
+ return 1;
+
+ break;
+ }
+#endif
default:
#ifdef LWS_NO_CLIENT
break;
#else
+ if ((pollfd->revents & LWS_POLLOUT) &&
+ lws_handle_POLLOUT_event(wsi, pollfd)) {
+ lwsl_debug("POLLOUT event closed it\n");
+ goto close_and_handled;
+ }
+
n = lws_client_socket_service(context, wsi, pollfd);
+ if (n)
+ return 1;
goto handled;
#endif
}
goto handled;
close_and_handled:
- lwsl_debug("Close and handled\n");
+ lwsl_debug("%p: Close and handled\n", wsi);
lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS);
/*
* pollfd may point to something else after the close
return n;
}
-/**
- * lws_service() - Service any pending websocket activity
- * @context: Websocket context
- * @timeout_ms: Timeout for poll; 0 means return immediately if nothing needed
- * service otherwise block and service immediately, returning
- * after the timeout if nothing needed service.
- *
- * This function deals with any pending websocket traffic, for three
- * kinds of event. It handles these events on both server and client
- * types of connection the same.
- *
- * 1) Accept new connections to our context's server
- *
- * 2) Call the receive callback for incoming frame data received by
- * server or client connections.
- *
- * You need to call this service function periodically to all the above
- * functions to happen; if your application is single-threaded you can
- * just call it in your main event loop.
- *
- * Alternatively you can fork a new process that asynchronously handles
- * calling this service in a loop. In that case you are happy if this
- * call blocks your thread until it needs to take care of something and
- * would call it with a large nonzero timeout. Your loop then takes no
- * CPU while there is nothing happening.
- *
- * If you are calling it in a single-threaded app, you don't want it to
- * wait around blocking other things in your loop from happening, so you
- * would call it with a timeout_ms of 0, so it returns immediately if
- * nothing is pending, or as soon as it services whatever was pending.
- */
+LWS_VISIBLE int
+lws_service_fd(struct lws_context *context, struct lws_pollfd *pollfd)
+{
+ return lws_service_fd_tsi(context, pollfd, 0);
+}
LWS_VISIBLE int
lws_service(struct lws_context *context, int timeout_ms)
{
return lws_plat_service(context, timeout_ms);
}
+LWS_VISIBLE int
+lws_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
+{
+ return _lws_plat_service_tsi(context, timeout_ms, tsi);
+}
+