1 /***************************************************************************
3 * Project ___| | | | _ \| |
5 * | (__| |_| | _ <| |___
6 * \___|\___/|_| \_\_____|
8 * Copyright (C) 1998 - 2016, Daniel Stenberg, <daniel@haxx.se>, et al.
10 * This software is licensed as described in the file COPYING, which
11 * you should have received as part of this distribution. The terms
12 * are also available at https://curl.haxx.se/docs/copyright.html.
14 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15 * copies of the Software, and permit persons to whom the Software is
16 * furnished to do so, under the terms of the COPYING file.
18 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19 * KIND, either express or implied.
21 ***************************************************************************/
23 #include "curl_setup.h"
25 #include "strtoofft.h"
29 #ifdef HAVE_NETINET_IN_H
30 #include <netinet/in.h>
35 #ifdef HAVE_ARPA_INET_H
36 #include <arpa/inet.h>
41 #ifdef HAVE_SYS_IOCTL_H
42 #include <sys/ioctl.h>
48 #ifdef HAVE_SYS_PARAM_H
49 #include <sys/param.h>
52 #ifdef HAVE_SYS_SELECT_H
53 #include <sys/select.h>
57 #error "We can't compile without socket() support!"
61 #include <curl/curl.h>
64 #include "content_encoding.h"
68 #include "speedcheck.h"
73 #include "vtls/vtls.h"
77 #include "non-ascii.h"
78 #include "curl_printf.h"
80 /* The last #include files should be: */
81 #include "curl_memory.h"
85 * This function will call the read callback to fill our buffer with data
/*
 * Curl_fillreadbuffer: invoke the application's read callback
 * (data->state.fread_func) to fill the upload buffer with at most 'bytes'
 * bytes, and report the amount produced via *nreadp.  Handles the special
 * CURL_READFUNC_ABORT / CURL_READFUNC_PAUSE callback return codes, reserves
 * room for the chunked Transfer-Encoding framing (hex length + CRLFs) when
 * chunked upload is in effect, and on CURL_DOES_CONVERSIONS builds converts
 * the data/protocol bytes to the network encoding.
 *
 * NOTE(review): this extract has source lines elided (embedded line numbers
 * are non-contiguous), so some statements and closing braces are missing
 * from this view — verify against the full file before editing logic.
 */
88 CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
90 struct SessionHandle *data = conn->data;
91 size_t buffersize = (size_t)bytes;
93 #ifdef CURL_DOES_CONVERSIONS
94 bool sending_http_headers = FALSE;
96 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
97 const struct HTTP *http = data->req.protop;
99 if(http->sending == HTTPSEND_REQUEST)
100 /* We're sending the HTTP request headers, not the data.
101 Remember that so we don't re-translate them into garbage. */
102 sending_http_headers = TRUE;
106 if(data->req.upload_chunky) {
107 /* if chunked Transfer-Encoding, shrink the usable buffer and advance the
   write pointer so the chunk header can be prepended afterwards */
108 buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
109 data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
112 /* this function returns a size_t, so we typecast to int to prevent warnings
113 with picky compilers */
114 nread = (int)data->state.fread_func(data->req.upload_fromhere, 1,
115 buffersize, data->state.in);
117 if(nread == CURL_READFUNC_ABORT) {
118 failf(data, "operation aborted by callback");
120 return CURLE_ABORTED_BY_CALLBACK;
122 else if(nread == CURL_READFUNC_PAUSE) {
124 if(conn->handler->flags & PROTOPT_NONETWORK) {
125 /* protocols that work without network cannot be paused. This is
126 actually only FILE:// just now, and it can't pause since the transfer
127 isn't done using the "normal" procedure. */
128 failf(data, "Read callback asked for PAUSE when not supported!");
129 return CURLE_READ_ERROR;
132 struct SingleRequest *k = &data->req;
133 /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
134 k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
135 if(data->req.upload_chunky) {
136 /* Back out the preallocation done above */
137 data->req.upload_fromhere -= (8 + 2);
141 return CURLE_OK; /* nothing was read */
143 else if((size_t)nread > buffersize) {
144 /* the read function returned a too large value */
146 failf(data, "read function returned funny value");
147 return CURLE_READ_ERROR;
150 if(!data->req.forbidchunk && data->req.upload_chunky) {
151 /* if chunked Transfer-Encoding
157 /* On non-ASCII platforms the <DATA> may or may not be
158 translated based on set.prefer_ascii while the protocol
159 portion must always be translated to the network encoding.
160 To further complicate matters, line end conversion might be
161 done later on, so we need to prevent CRLFs from becoming
162 CRCRLFs if that's the case. To do this we use bare LFs
163 here, knowing they'll become CRLFs later on.
167 const char *endofline_native;
168 const char *endofline_network;
172 #ifdef CURL_DO_LINEEND_CONV
173 (data->set.prefer_ascii) ||
176 /* \n will become \r\n later on */
177 endofline_native = "\n";
178 endofline_network = "\x0a";
181 endofline_native = "\r\n";
182 endofline_network = "\x0d\x0a";
184 hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
185 "%x%s", nread, endofline_native);
187 /* move buffer pointer back so the chunk-size prefix lands in the space
   reserved before the payload */
188 data->req.upload_fromhere -= hexlen;
191 /* copy the prefix to the buffer, leaving out the NUL */
192 memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
194 /* always append ASCII CRLF to the data */
195 memcpy(data->req.upload_fromhere + nread,
197 strlen(endofline_network));
199 #ifdef CURL_DOES_CONVERSIONS
202 if(data->set.prefer_ascii) {
203 /* translate the protocol and data */
207 /* just translate the protocol portion */
208 length = strlen(hexbuffer);
210 result = Curl_convert_to_network(data, data->req.upload_fromhere, length);
211 /* Curl_convert_to_network calls failf if unsuccessful */
214 #endif /* CURL_DOES_CONVERSIONS */
216 if((nread - hexlen) == 0)
217 /* a zero-length chunk is the terminating chunk: mark this as done once
   this chunk is transferred */
218 data->req.upload_done = TRUE;
220 nread+=(int)strlen(endofline_native); /* for the added end of line */
222 #ifdef CURL_DOES_CONVERSIONS
223 else if((data->set.prefer_ascii) && (!sending_http_headers) ) {
225 result = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
226 /* Curl_convert_to_network calls failf if unsuccessful */
230 #endif /* CURL_DOES_CONVERSIONS */
239 * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
240 * POST/PUT with multi-pass authentication when a sending was denied and a
241 * resend is necessary.
/*
 * Curl_readrewind: rewind the upload data source so a request body can be
 * resent (multi-pass authentication, redirects with bodies).  Tries, in
 * order: nothing needed for CURLOPT_POSTFIELDS/HTTPPOST (libcurl owns that
 * data), the app's CURLOPT_SEEKFUNCTION, the app's CURLOPT_IOCTLFUNCTION
 * with CURLIOCMD_RESTARTREAD, and finally a plain fseek() when the default
 * fread callback is in use.  Fails with CURLE_SEND_FAIL_REWIND otherwise.
 *
 * NOTE(review): lines elided in this extract (non-contiguous embedded
 * numbering); some statements/braces are missing from this view.
 */
243 CURLcode Curl_readrewind(struct connectdata *conn)
245 struct SessionHandle *data = conn->data;
247 conn->bits.rewindaftersend = FALSE; /* we rewind now */
249 /* explicitly switch off sending data on this connection now since we are
250 about to restart a new transfer and thus we want to avoid inadvertently
251 sending more data on the existing connection until the next transfer
253 data->req.keepon &= ~KEEP_SEND;
255 /* We have sent away data. If not using CURLOPT_POSTFIELDS or
256 CURLOPT_HTTPPOST, call app to rewind
258 if(data->set.postfields ||
259 (data->set.httpreq == HTTPREQ_POST_FORM))
262 if(data->set.seek_func) {
265 err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
267 failf(data, "seek callback returned error %d", (int)err);
268 return CURLE_SEND_FAIL_REWIND;
271 else if(data->set.ioctl_func) {
274 err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
275 data->set.ioctl_client);
276 infof(data, "the ioctl callback returned %d\n", (int)err);
279 /* FIXME: convert to a human readable error message */
280 failf(data, "ioctl callback returned error %d", (int)err);
281 return CURLE_SEND_FAIL_REWIND;
285 /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
286 given FILE * stream and we can actually attempt to rewind that
287 ourselves with fseek() */
288 if(data->state.fread_func == (curl_read_callback)fread) {
289 if(-1 != fseek(data->state.in, 0, SEEK_SET))
290 /* successful rewind */
294 /* no callback set or failure above, makes us fail at once */
295 failf(data, "necessary data rewind wasn't possible");
296 return CURLE_SEND_FAIL_REWIND;
/*
 * data_pending: return non-zero when more received data may already be
 * buffered (SSH layer, TLS layer, or an HTTP/2 stream needing a recv call),
 * meaning the read loop must iterate again instead of waiting on the socket.
 */
302 static int data_pending(const struct connectdata *conn)
304 /* in the case of libssh2, we can never be really sure that we have emptied
305 its internal buffers so we MUST always try until we get EAGAIN back */
306 return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
307 #if defined(USE_NGHTTP2)
308 Curl_ssl_data_pending(conn, FIRSTSOCKET) ||
309 /* For HTTP/2, we may read up everything including the response body
310 with header fields in Curl_http_readwrite_headers. If no
311 content-length is provided, curl waits for the connection
312 close, which we emulate using conn->proto.httpc.closed =
313 TRUE. The thing is if we read everything, then http2_recv won't
314 be called and we cannot signal the HTTP/2 stream has closed. As
315 a workaround, we return nonzero here to call http2_recv. */
316 ((conn->handler->protocol&PROTO_FAMILY_HTTP) && conn->httpversion == 20);
318 Curl_ssl_data_pending(conn, FIRSTSOCKET);
/*
 * read_rewind: push 'thismuch' already-consumed bytes back into the
 * connection's master buffer by moving read_pos backwards, so the next read
 * pass sees them again (used when pipelining).  Sets stream_was_rewound so
 * callers know buffered data is waiting.  Debug builds log the buffer tail.
 */
322 static void read_rewind(struct connectdata *conn,
325 DEBUGASSERT(conn->read_pos >= thismuch);
327 conn->read_pos -= thismuch;
328 conn->bits.stream_was_rewound = TRUE;
335 show = CURLMIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
336 if(conn->master_buffer) {
337 memcpy(buf, conn->master_buffer + conn->read_pos, show);
344 DEBUGF(infof(conn->data,
345 "Buffer after stream rewind (read_pos = %zu): [%s]\n",
346 conn->read_pos, buf));
352 * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
353 * remote document with the time provided by CURLOPT_TIMEVAL
/*
 * Curl_meets_timecondition: compare the remote document's timestamp against
 * the CURLOPT_TIMEVALUE setting per the CURLOPT_TIMECONDITION mode.  A zero
 * timeofdoc or timevalue means no condition to test.  When the condition
 * fails, data->info.timecond is set TRUE and the failure is logged.
 *
 * NOTE(review): the return statements are among the lines elided from this
 * extract; check the full file for the exact return values.
 */
355 bool Curl_meets_timecondition(struct SessionHandle *data, time_t timeofdoc)
357 if((timeofdoc == 0) || (data->set.timevalue == 0))
360 switch(data->set.timecondition) {
361 case CURL_TIMECOND_IFMODSINCE:
363 if(timeofdoc <= data->set.timevalue) {
365 "The requested document is not new enough\n");
366 data->info.timecond = TRUE;
370 case CURL_TIMECOND_IFUNMODSINCE:
371 if(timeofdoc >= data->set.timevalue) {
373 "The requested document is not old enough\n");
374 data->info.timecond = TRUE;
384 * Go ahead and do a read if we have a readable socket or if
385 * the stream was rewound (in which case we have data in a
/*
 * readwrite_data: the receive half of a transfer.  Loops reading from the
 * socket (bounded by maxloops and data_pending()), parses HTTP headers,
 * performs first-body-write HTTP checks (redirect-with-close, resume
 * validation, time condition / simulated 304), de-chunks chunked bodies,
 * clamps reads to maxdownload and rewinds pipelined excess, updates the
 * download progress counter, and hands body bytes to the client write
 * callback with content-decoding (identity/deflate/gzip) applied.
 * *didwhat gets KEEP_RECV OR'ed in when a read happened; *done is set when
 * the transfer is complete.
 *
 * NOTE(review): this extract has many source lines elided (non-contiguous
 * embedded numbering) — conditions, else branches and closing braces are
 * missing from this view; do not modify logic from this view alone.
 */
388 static CURLcode readwrite_data(struct SessionHandle *data,
389 struct connectdata *conn,
390 struct SingleRequest *k,
391 int *didwhat, bool *done)
393 CURLcode result = CURLE_OK;
394 ssize_t nread; /* number of bytes read */
395 size_t excess = 0; /* excess bytes read */
396 bool is_empty_data = FALSE;
397 bool readmore = FALSE; /* used by RTP to signal for more data */
402 /* This is where we loop until we have read everything there is to
403 read or we get a CURLE_AGAIN */
405 size_t buffersize = data->set.buffer_size?
406 data->set.buffer_size : BUFSIZE;
407 size_t bytestoread = buffersize;
410 #if defined(USE_NGHTTP2)
411 /* For HTTP/2, read data without caring about the content
412 length. This is safe because body in HTTP/2 is always
413 segmented thanks to its framing layer. Meanwhile, we have to
414 call Curl_read to ensure that http2_handle_stream_close is
415 called when we read all incoming bytes for a particular
417 !((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
418 conn->httpversion == 20) &&
420 k->size != -1 && !k->header) {
421 /* make sure we don't read "too much" if we can help it since we
422 might be pipelining and then someone else might want to read what
424 curl_off_t totalleft = k->size - k->bytecount;
425 if(totalleft < (curl_off_t)bytestoread)
426 bytestoread = (size_t)totalleft;
430 /* receive data from the network! */
431 result = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);
433 /* read would've blocked */
434 if(CURLE_AGAIN == result)
435 break; /* get out of loop */
441 /* read nothing but since we wanted nothing we consider this an OK
442 situation to proceed from */
443 DEBUGF(infof(data, "readwrite_data: we're done!\n"));
447 if((k->bytecount == 0) && (k->writebytecount == 0)) {
448 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
449 if(k->exp100 > EXP100_SEND_DATA)
450 /* set time stamp to compare with when waiting for the 100 */
451 k->start100 = Curl_tvnow();
454 *didwhat |= KEEP_RECV;
455 /* indicates data of zero size, i.e. empty file */
456 is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;
458 /* NUL terminate, allowing string ops to be used */
459 if(0 < nread || is_empty_data) {
462 else if(0 >= nread) {
463 /* if we receive 0 or less here, the server closed the connection
464 and we bail out from this! */
465 DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
466 k->keepon &= ~KEEP_RECV;
470 /* Default buffer to use when we write the buffer, it may be changed
471 in the flow below before the actual storing is done. */
474 if(conn->handler->readwrite) {
475 result = conn->handler->readwrite(data, conn, &nread, &readmore);
482 #ifndef CURL_DISABLE_HTTP
483 /* Since this is a two-state thing, we check if we are parsing
484 headers at the moment or not. */
486 /* we are in parse-the-header-mode */
487 bool stop_reading = FALSE;
488 result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
492 if(conn->handler->readwrite &&
493 (k->maxdownload <= 0 && nread > 0)) {
494 result = conn->handler->readwrite(data, conn, &nread, &readmore);
502 /* We've stopped dealing with input, get out of the do-while loop */
505 if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
507 "Rewinding stream by : %zd"
508 " bytes on url %s (zero-length body)\n",
509 nread, data->state.path);
510 read_rewind(conn, (size_t)nread);
514 "Excess found in a non pipelined read:"
516 " url = %s (zero-length body)\n",
517 nread, data->state.path);
524 #endif /* CURL_DISABLE_HTTP */
527 /* This is not an 'else if' since it may be a rest from the header
528 parsing, where the beginning of the buffer is headers and the end
530 if(k->str && !k->header && (nread > 0 || is_empty_data)) {
532 #ifndef CURL_DISABLE_HTTP
533 if(0 == k->bodywrites && !is_empty_data) {
534 /* These checks are only made the first time we are about to
535 write a piece of the body */
536 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
537 /* HTTP-only checks */
539 if(data->req.newurl) {
540 if(conn->bits.close) {
541 /* Abort after the headers if "follow Location" is set
542 and we're set to close anyway. */
543 k->keepon &= ~KEEP_RECV;
547 /* We have a new url to load, but since we want to be able
548 to re-use this connection properly, we read the full
549 response in "ignore more" */
550 k->ignorebody = TRUE;
551 infof(data, "Ignoring the response-body\n");
553 if(data->state.resume_from && !k->content_range &&
554 (data->set.httpreq==HTTPREQ_GET) &&
557 if(k->size == data->state.resume_from) {
558 /* The resume point is at the end of file, consider this fine
559 even if it doesn't allow resume from here. */
560 infof(data, "The entire document is already downloaded");
561 connclose(conn, "already downloaded");
563 k->keepon &= ~KEEP_RECV;
568 /* we wanted to resume a download, although the server doesn't
569 * seem to support this and we did this with a GET (if it
570 * wasn't a GET we did a POST or PUT resume) */
571 failf(data, "HTTP server doesn't seem to support "
572 "byte ranges. Cannot resume.");
573 return CURLE_RANGE_ERROR;
576 if(data->set.timecondition && !data->state.range) {
577 /* A time condition has been set AND no ranges have been
578 requested. This seems to be what chapter 13.3.4 of
579 RFC 2616 defines to be the correct action for a
582 if(!Curl_meets_timecondition(data, k->timeofdoc)) {
584 /* We're simulating a http 304 from server so we return
585 what should have been returned from the server */
586 data->info.httpcode = 304;
587 infof(data, "Simulate a HTTP 304 response!\n");
588 /* we abort the transfer before it is completed == we ruin the
589 re-use ability. Close the connection */
590 connclose(conn, "Simulated 304 handling");
593 } /* we have a time condition */
595 } /* this is HTTP or RTSP */
596 } /* this is the first time we write a body part */
597 #endif /* CURL_DISABLE_HTTP */
601 /* pass data to the debug function before it gets "dechunked" */
602 if(data->set.verbose) {
604 Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
605 (size_t)k->hbuflen, conn);
606 if(k->badheader == HEADER_PARTHEADER)
607 Curl_debug(data, CURLINFO_DATA_IN,
608 k->str, (size_t)nread, conn);
611 Curl_debug(data, CURLINFO_DATA_IN,
612 k->str, (size_t)nread, conn);
615 #ifndef CURL_DISABLE_HTTP
618 * Here comes a chunked transfer flying and we need to decode this
619 * properly. While the name says read, this function both reads
620 * and writes away the data. The returned 'nread' holds the number
621 * of actual data it wrote to the client.
625 Curl_httpchunk_read(conn, k->str, nread, &nread);
627 if(CHUNKE_OK < res) {
628 if(CHUNKE_WRITE_ERROR == res) {
629 failf(data, "Failed writing data");
630 return CURLE_WRITE_ERROR;
632 failf(data, "%s in chunked-encoding", Curl_chunked_strerror(res));
633 return CURLE_RECV_ERROR;
635 else if(CHUNKE_STOP == res) {
637 /* we're done reading chunks! */
638 k->keepon &= ~KEEP_RECV; /* read no more */
640 /* There are now possibly N number of bytes at the end of the
641 str buffer that weren't written to the client.
643 We DO care about this data if we are pipelining.
644 Push it back to be read on the next pass. */
646 dataleft = conn->chunk.dataleft;
648 infof(conn->data, "Leftovers after chunking: %zu bytes\n",
650 if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
651 /* only attempt the rewind if we truly are pipelining */
652 infof(conn->data, "Rewinding %zu bytes\n",dataleft);
653 read_rewind(conn, dataleft);
657 /* If it returned OK, we just keep going */
659 #endif /* CURL_DISABLE_HTTP */
661 /* Account for body content stored in the header buffer */
662 if(k->badheader && !k->ignorebody) {
663 DEBUGF(infof(data, "Increasing bytecount by %zu from hbuflen\n",
665 k->bytecount += k->hbuflen;
668 if((-1 != k->maxdownload) &&
669 (k->bytecount + nread >= k->maxdownload)) {
671 excess = (size_t)(k->bytecount + nread - k->maxdownload);
672 if(excess > 0 && !k->ignorebody) {
673 if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
674 /* The 'excess' amount below can't be more than BUFSIZE which
675 always will fit in a size_t */
677 "Rewinding stream by : %zu"
678 " bytes on url %s (size = %" CURL_FORMAT_CURL_OFF_T
679 ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
680 ", bytecount = %" CURL_FORMAT_CURL_OFF_T ", nread = %zd)\n",
681 excess, data->state.path,
682 k->size, k->maxdownload, k->bytecount, nread);
683 read_rewind(conn, excess);
687 "Excess found in a non pipelined read:"
689 ", size = %" CURL_FORMAT_CURL_OFF_T
690 ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
691 ", bytecount = %" CURL_FORMAT_CURL_OFF_T "\n",
692 excess, k->size, k->maxdownload, k->bytecount);
696 nread = (ssize_t) (k->maxdownload - k->bytecount);
697 if(nread < 0 ) /* this should be unusual */
700 k->keepon &= ~KEEP_RECV; /* we're done reading */
703 k->bytecount += nread;
705 Curl_pgrsSetDownloadCounter(data, k->bytecount);
707 if(!k->chunk && (nread || k->badheader || is_empty_data)) {
708 /* If this is chunky transfer, it was already written */
710 if(k->badheader && !k->ignorebody) {
711 /* we parsed a piece of data wrongly assuming it was a header
712 and now we output it as body instead */
714 /* Don't let excess data pollute body writes */
715 if(k->maxdownload == -1 || (curl_off_t)k->hbuflen <= k->maxdownload)
716 result = Curl_client_write(conn, CLIENTWRITE_BODY,
717 data->state.headerbuff,
720 result = Curl_client_write(conn, CLIENTWRITE_BODY,
721 data->state.headerbuff,
722 (size_t)k->maxdownload);
727 if(k->badheader < HEADER_ALLBAD) {
728 /* This switch handles various content encodings. If there's an
729 error here, be sure to check over the almost identical code
731 Make sure that ALL_CONTENT_ENCODINGS contains all the
732 encodings handled here. */
734 switch (conn->data->set.http_ce_skip ?
735 IDENTITY : k->auto_decoding) {
738 /* This is the default when the server sends no
739 Content-Encoding header. See Curl_readwrite_init; the
740 memset() call initializes k->auto_decoding to zero. */
743 #ifndef CURL_DISABLE_POP3
744 if(conn->handler->protocol&PROTO_FAMILY_POP3)
745 result = Curl_pop3_write(conn, k->str, nread);
747 #endif /* CURL_DISABLE_POP3 */
749 result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
756 /* Assume CLIENTWRITE_BODY; headers are not encoded. */
758 result = Curl_unencode_deflate_write(conn, k, nread);
762 /* Assume CLIENTWRITE_BODY; headers are not encoded. */
764 result = Curl_unencode_gzip_write(conn, k, nread);
768 failf (data, "Unrecognized content encoding type. "
769 "libcurl understands `identity', `deflate' and `gzip' "
770 "content encodings.");
771 result = CURLE_BAD_CONTENT_ENCODING;
776 k->badheader = HEADER_NORMAL; /* taken care of now */
782 } /* if(! header and data to read ) */
784 if(conn->handler->readwrite &&
785 (excess > 0 && !conn->bits.stream_was_rewound)) {
786 /* Parse the excess data */
788 nread = (ssize_t)excess;
790 result = conn->handler->readwrite(data, conn, &nread, &readmore);
795 k->keepon |= KEEP_RECV; /* we're not done reading */
800 /* if we received nothing, the server closed the connection and we
802 k->keepon &= ~KEEP_RECV;
805 } while(data_pending(conn) && maxloops--);
807 if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
809 /* When we've read the entire thing and the close bit is set, the server
810 may now close the connection. If there's now any kind of sending going
811 on from our side, we need to stop that immediately. */
812 infof(data, "we are done reading and this is set to close, stop send\n");
813 k->keepon &= ~KEEP_SEND; /* no writing anymore either */
820 * Send data to upload to the server, when the socket is writable.
/*
 * readwrite_upload: the send half of a transfer, called when the socket is
 * writable.  Refills the upload buffer via Curl_fillreadbuffer when empty,
 * drives the Expect: 100-continue state machine, optionally converts LF to
 * CRLF into a scratch buffer (FTP ASCII mode / CURLOPT_CRLF), escapes SMTP
 * end-of-body sequences, writes with Curl_write, and tracks partial writes
 * by advancing upload_fromhere/upload_present.  Updates the upload progress
 * counter and clears KEEP_SEND when the upload completes.
 *
 * NOTE(review): this extract has source lines elided (non-contiguous
 * embedded numbering); conditions and closing braces are missing from this
 * view — verify against the full file before editing logic.
 */
822 static CURLcode readwrite_upload(struct SessionHandle *data,
823 struct connectdata *conn,
824 struct SingleRequest *k,
828 ssize_t bytes_written;
830 ssize_t nread; /* number of bytes read */
831 bool sending_http_headers = FALSE;
833 if((k->bytecount == 0) && (k->writebytecount == 0))
834 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
836 *didwhat |= KEEP_SEND;
840 /* only read more data if there's no upload data already
841 present in the upload buffer */
842 if(0 == data->req.upload_present) {
843 /* init the "upload from here" pointer */
844 data->req.upload_fromhere = k->uploadbuf;
846 if(!k->upload_done) {
847 /* HTTP pollution, this should be written nicer to become more
848 protocol agnostic. */
850 struct HTTP *http = data->req.protop;
852 if((k->exp100 == EXP100_SENDING_REQUEST) &&
853 (http->sending == HTTPSEND_BODY)) {
854 /* If this call is to send body data, we must take some action:
855 We have sent off the full HTTP 1.1 request, and we shall now
856 go into the Expect: 100 state and await such a header */
857 k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
858 k->keepon &= ~KEEP_SEND; /* disable writing */
859 k->start100 = Curl_tvnow(); /* timeout count starts now */
860 *didwhat &= ~KEEP_SEND; /* we didn't write anything actually */
862 /* set a timeout for the multi interface */
863 Curl_expire(data, data->set.expect_100_timeout);
867 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
868 if(http->sending == HTTPSEND_REQUEST)
869 /* We're sending the HTTP request headers, not the data.
870 Remember that so we don't change the line endings. */
871 sending_http_headers = TRUE;
873 sending_http_headers = FALSE;
876 result = Curl_fillreadbuffer(conn, BUFSIZE, &fillcount);
880 nread = (ssize_t)fillcount;
883 nread = 0; /* we're done uploading/reading */
885 if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
886 /* this is a paused transfer */
891 k->keepon &= ~KEEP_SEND; /* we're done writing */
893 if(conn->bits.rewindaftersend) {
894 result = Curl_readrewind(conn);
901 /* store number of bytes available for upload */
902 data->req.upload_present = nread;
904 /* convert LF to CRLF if so asked */
905 if((!sending_http_headers) && (
906 #ifdef CURL_DO_LINEEND_CONV
907 /* always convert if we're FTPing in ASCII mode */
908 (data->set.prefer_ascii) ||
911 /* Do we need to allocate a scratch buffer? */
912 if(!data->state.scratch) {
913 data->state.scratch = malloc(2 * BUFSIZE);
914 if(!data->state.scratch) {
915 failf(data, "Failed to alloc scratch buffer!");
917 return CURLE_OUT_OF_MEMORY;
922 * ASCII/EBCDIC Note: This is presumably a text (not binary)
923 * transfer so the data should already be in ASCII.
924 * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
925 * must be used instead of the escape sequences \r & \n.
927 for(i = 0, si = 0; i < nread; i++, si++) {
928 if(data->req.upload_fromhere[i] == 0x0a) {
929 data->state.scratch[si++] = 0x0d;
930 data->state.scratch[si] = 0x0a;
931 if(!data->set.crlf) {
932 /* we're here only because FTP is in ASCII mode...
933 bump infilesize for the LF we just added */
934 if(data->state.infilesize != -1)
935 data->state.infilesize++;
939 data->state.scratch[si] = data->req.upload_fromhere[i];
943 /* only perform the special operation if we really did replace
947 /* upload from the new (replaced) buffer instead */
948 data->req.upload_fromhere = data->state.scratch;
950 /* set the new amount too */
951 data->req.upload_present = nread;
955 #ifndef CURL_DISABLE_SMTP
956 if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
957 result = Curl_smtp_escape_eob(conn, nread);
961 #endif /* CURL_DISABLE_SMTP */
962 } /* if 0 == data->req.upload_present */
964 /* We have a partial buffer left from a previous "round". Use
965 that instead of reading more data */
968 /* write to socket (send away data) */
969 result = Curl_write(conn,
970 conn->writesockfd, /* socket to send to */
971 data->req.upload_fromhere, /* buffer pointer */
972 data->req.upload_present, /* buffer size */
973 &bytes_written); /* actually sent */
978 if(data->set.verbose)
979 /* show the data before we change the pointer upload_fromhere */
980 Curl_debug(data, CURLINFO_DATA_OUT, data->req.upload_fromhere,
981 (size_t)bytes_written, conn);
983 k->writebytecount += bytes_written;
985 if(k->writebytecount == data->state.infilesize) {
986 /* we have sent all data we were supposed to */
987 k->upload_done = TRUE;
988 infof(data, "We are completely uploaded and fine\n");
991 if(data->req.upload_present != bytes_written) {
992 /* we only wrote a part of the buffer (if anything), deal with it! */
994 /* store the amount of bytes left in the buffer to write */
995 data->req.upload_present -= bytes_written;
997 /* advance the pointer where to find the buffer when the next send
999 data->req.upload_fromhere += bytes_written;
1002 /* we've uploaded that buffer now */
1003 data->req.upload_fromhere = k->uploadbuf;
1004 data->req.upload_present = 0; /* no more bytes left */
1006 if(k->upload_done) {
1007 /* switch off writing, we're done! */
1008 k->keepon &= ~KEEP_SEND; /* we're done writing */
1012 Curl_pgrsSetUploadCounter(data, k->writebytecount);
1014 } WHILE_FALSE; /* just to break out from! */
1020 * Curl_readwrite() is the low-level function to be called when data is to
1021 * be read and written to/from the connection.
/*
 * Curl_readwrite: top-level per-connection transfer step.  Determines which
 * socket directions are ready (honoring *_HOLD rate-limit bits and cached
 * cselect_bits, polling via Curl_socket_ready only when the status is
 * unknown), dispatches to readwrite_data()/readwrite_upload(), mirrors the
 * byte counters out to the caller, times out an unanswered Expect:
 * 100-continue, updates progress/speed checks and the overall timeout, and
 * finally validates completion (premature close → CURLE_PARTIAL_FILE, also
 * for an unterminated chunked body) before computing *done.
 *
 * NOTE(review): lines elided in this extract (non-contiguous embedded
 * numbering); several conditions and else branches are not visible here.
 */
1023 CURLcode Curl_readwrite(struct connectdata *conn,
1024 struct SessionHandle *data,
1027 struct SingleRequest *k = &data->req;
1031 curl_socket_t fd_read;
1032 curl_socket_t fd_write;
1033 int select_res = conn->cselect_bits;
1035 conn->cselect_bits = 0;
1037 /* only use the proper socket if the *_HOLD bit is not set simultaneously as
1038 then we are in rate limiting state in that transfer direction */
1040 if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
1041 fd_read = conn->sockfd;
1043 fd_read = CURL_SOCKET_BAD;
1045 if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
1046 fd_write = conn->writesockfd;
1048 fd_write = CURL_SOCKET_BAD;
1050 if(conn->data->state.drain) {
1051 select_res |= CURL_CSELECT_IN;
1052 DEBUGF(infof(data, "Curl_readwrite: forcibly told to drain data\n"));
1055 if(!select_res) /* Call for select()/poll() only, if read/write/error
1056 status is not known. */
1057 select_res = Curl_socket_ready(fd_read, fd_write, 0);
1059 if(select_res == CURL_CSELECT_ERR) {
1060 failf(data, "select/poll returned error");
1061 return CURLE_SEND_ERROR;
1064 /* We go ahead and do a read if we have a readable socket or if
1065 the stream was rewound (in which case we have data in a
1067 if((k->keepon & KEEP_RECV) &&
1068 ((select_res & CURL_CSELECT_IN) || conn->bits.stream_was_rewound)) {
1070 result = readwrite_data(data, conn, k, &didwhat, done);
1075 /* If we still have writing to do, we check if we have a writable socket. */
1076 if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
1079 result = readwrite_upload(data, conn, k, &didwhat);
1084 k->now = Curl_tvnow();
1086 /* Update read/write counters */
1088 *k->bytecountp = k->bytecount; /* read count */
1089 if(k->writebytecountp)
1090 *k->writebytecountp = k->writebytecount; /* write count */
1093 /* no read no write, this is a timeout? */
1094 if(k->exp100 == EXP100_AWAITING_CONTINUE) {
1095 /* This should allow some time for the header to arrive, but only a
1096 very short time as otherwise it'll be too much wasted time too
1099 /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1101 Therefore, when a client sends this header field to an origin server
1102 (possibly via a proxy) from which it has never seen a 100 (Continue)
1103 status, the client SHOULD NOT wait for an indefinite period before
1104 sending the request body.
1108 long ms = Curl_tvdiff(k->now, k->start100);
1109 if(ms >= data->set.expect_100_timeout) {
1110 /* we've waited long enough, continue anyway */
1111 k->exp100 = EXP100_SEND_DATA;
1112 k->keepon |= KEEP_SEND;
1113 infof(data, "Done waiting for 100-continue\n");
1118 if(Curl_pgrsUpdate(conn))
1119 result = CURLE_ABORTED_BY_CALLBACK;
1121 result = Curl_speedcheck(data, k->now);
1126 if(0 > Curl_timeleft(data, &k->now, FALSE)) {
1128 failf(data, "Operation timed out after %ld milliseconds with %"
1129 CURL_FORMAT_CURL_OFF_T " out of %"
1130 CURL_FORMAT_CURL_OFF_T " bytes received",
1131 Curl_tvdiff(k->now, data->progress.t_startsingle), k->bytecount,
1135 failf(data, "Operation timed out after %ld milliseconds with %"
1136 CURL_FORMAT_CURL_OFF_T " bytes received",
1137 Curl_tvdiff(k->now, data->progress.t_startsingle), k->bytecount);
1139 return CURLE_OPERATION_TIMEDOUT;
1144 * The transfer has been performed. Just make some general checks before
1148 if(!(data->set.opt_no_body) && (k->size != -1) &&
1149 (k->bytecount != k->size) &&
1150 #ifdef CURL_DO_LINEEND_CONV
1151 /* Most FTP servers don't adjust their file SIZE response for CRLFs,
1152 so we'll check to see if the discrepancy can be explained
1153 by the number of CRLFs we've changed to LFs.
1155 (k->bytecount != (k->size + data->state.crlf_conversions)) &&
1156 #endif /* CURL_DO_LINEEND_CONV */
1157 !data->req.newurl) {
1158 failf(data, "transfer closed with %" CURL_FORMAT_CURL_OFF_T
1159 " bytes remaining to read",
1160 k->size - k->bytecount);
1161 return CURLE_PARTIAL_FILE;
1163 else if(!(data->set.opt_no_body) &&
1165 (conn->chunk.state != CHUNK_STOP)) {
1167 * In chunked mode, return an error if the connection is closed prior to
1168 * the empty (terminating) chunk is read.
1170 * The condition above used to check for
1171 * conn->proto.http->chunk.datasize != 0 which is true after reading
1172 * *any* chunk, not just the empty chunk.
1175 failf(data, "transfer closed with outstanding read data remaining");
1176 return CURLE_PARTIAL_FILE;
1178 if(Curl_pgrsUpdate(conn))
1179 return CURLE_ABORTED_BY_CALLBACK;
1182 /* Now update the "done" boolean we return */
1183 *done = (0 == (k->keepon&(KEEP_RECV|KEEP_SEND|
1184 KEEP_RECV_PAUSE|KEEP_SEND_PAUSE))) ? TRUE : FALSE;
1190 * Curl_single_getsock() gets called by the multi interface code when the app
1191 * has requested to get the sockets for the current connection. This function
1192 * will then be called once for every connection that the multi interface
1193 * keeps track of. This function will only be called for connections that are
1194 * in the proper state to have this information available.
/*
 * Curl_single_getsock: fill 'sock' with the socket(s) the multi interface
 * should monitor for this connection and return a GETSOCK_* bitmap saying
 * which entries are read and/or write sockets.  Protocol handlers may
 * override via perform_getsock.  HOLD/PAUSE directions are excluded, and
 * the read and write sockets share one slot when they are the same fd.
 */
1196 int Curl_single_getsock(const struct connectdata *conn,
1197 curl_socket_t *sock, /* points to numsocks number
1201 const struct SessionHandle *data = conn->data;
1202 int bitmap = GETSOCK_BLANK;
1203 unsigned sockindex = 0;
1205 if(conn->handler->perform_getsock)
1206 return conn->handler->perform_getsock(conn, sock, numsocks);
1209 /* simple check but we might need two slots */
1210 return GETSOCK_BLANK;
1212 /* don't include HOLD and PAUSE connections */
1213 if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {
1215 DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);
1217 bitmap |= GETSOCK_READSOCK(sockindex);
1218 sock[sockindex] = conn->sockfd;
1221 /* don't include HOLD and PAUSE connections */
1222 if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {
1224 if((conn->sockfd != conn->writesockfd) ||
1225 bitmap == GETSOCK_BLANK) {
1226 /* only if they are not the same socket and we have a readable
1227 one, we increase index */
1228 if(bitmap != GETSOCK_BLANK)
1229 sockindex++; /* increase index if we need two entries */
1231 DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);
1233 sock[sockindex] = conn->writesockfd;
1236 bitmap |= GETSOCK_WRITESOCK(sockindex);
1243 * Determine optimum sleep time based on configured rate, current rate,
1245 * Returns value in milliseconds.
1247 * The basic idea is to adjust the desired rate up/down in this method
1248 * based on whether we are running too slow or too fast. Then, calculate
1249 * how many milliseconds to wait for the next packet to achieve this new
1252 long Curl_sleep_time(curl_off_t rate_bps, curl_off_t cur_rate_bps,
1255 curl_off_t min_sleep = 0;
1261 /* If running faster than about .1% of the desired speed, slow
1262 * us down a bit. Use shift instead of division as the 0.1%
1263 * cutoff is arbitrary anyway.
1265 if(cur_rate_bps > (rate_bps + (rate_bps >> 10))) {
1266 /* running too fast, decrease target rate by 1/64th of rate */
1267 rate_bps -= rate_bps >> 6;
1270 else if(cur_rate_bps < (rate_bps - (rate_bps >> 10))) {
1271 /* running too slow, increase target rate by 1/64th of rate */
1272 rate_bps += rate_bps >> 6;
1275 /* Determine number of milliseconds to wait until we do
1276 * the next packet at the adjusted rate. We should wait
1277 * longer when using larger packets, for instance.
1279 rv = ((curl_off_t)(pkt_size * 1000) / rate_bps);
1281 /* Catch rounding errors and always slow down at least 1ms if
1282 * we are running too fast.
1287 /* Bound value to fit in 'long' on 32-bit platform. That's
1288 * plenty long enough anyway!
1296 /* Curl_init_CONNECT() gets called each time the handle switches to CONNECT
1297 which means this gets called once for each subsequent redirect etc */
1298 void Curl_init_CONNECT(struct SessionHandle *data)
1300 data->state.fread_func = data->set.fread_func_set;
1301 data->state.in = data->set.in_set;
1305 * Curl_pretransfer() is called immediately before a transfer starts, and only
1306 * once for one transfer no matter if it has redirects or do multi-pass
1307 * authentication etc.
1309 CURLcode Curl_pretransfer(struct SessionHandle *data)
1312 if(!data->change.url) {
1313 /* we can't do anything without URL */
1314 failf(data, "No URL set!");
1315 return CURLE_URL_MALFORMAT;
1318 /* Init the SSL session ID cache here. We do it here since we want to do it
1319 after the *_setopt() calls (that could specify the size of the cache) but
1320 before any transfer takes place. */
1321 result = Curl_ssl_initsessions(data, data->set.ssl.max_ssl_sessions);
1325 data->set.followlocation=0; /* reset the location-follow counter */
1326 data->state.this_is_a_follow = FALSE; /* reset this */
1327 data->state.errorbuf = FALSE; /* no error has occurred */
1328 data->state.httpversion = 0; /* don't assume any particular server version */
1330 data->state.authproblem = FALSE;
1331 data->state.authhost.want = data->set.httpauth;
1332 data->state.authproxy.want = data->set.proxyauth;
1333 Curl_safefree(data->info.wouldredirect);
1334 data->info.wouldredirect = NULL;
1336 if(data->set.httpreq == HTTPREQ_PUT)
1337 data->state.infilesize = data->set.filesize;
1339 data->state.infilesize = data->set.postfieldsize;
1341 /* If there is a list of cookie files to read, do it now! */
1342 if(data->change.cookielist)
1343 Curl_cookie_loadfiles(data);
1345 /* If there is a list of host pairs to deal with */
1346 if(data->change.resolve)
1347 result = Curl_loadhostpairs(data);
1350 /* Allow data->set.use_port to set which port to use. This needs to be
1351 * disabled for example when we follow Location: headers to URLs using
1352 * different ports! */
1353 data->state.allow_port = TRUE;
1355 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1356 /*************************************************************
1357 * Tell signal handler to ignore SIGPIPE
1358 *************************************************************/
1359 if(!data->set.no_signal)
1360 data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1363 Curl_initinfo(data); /* reset session-specific information "variables" */
1364 Curl_pgrsResetTimesSizes(data);
1365 Curl_pgrsStartNow(data);
1367 if(data->set.timeout)
1368 Curl_expire(data, data->set.timeout);
1370 if(data->set.connecttimeout)
1371 Curl_expire(data, data->set.connecttimeout);
1373 /* In case the handle is re-used and an authentication method was picked
1374 in the session we need to make sure we only use the one(s) we now
1375 consider to be fine */
1376 data->state.authhost.picked &= data->state.authhost.want;
1377 data->state.authproxy.picked &= data->state.authproxy.want;
1384 * Curl_posttransfer() is called immediately after a transfer ends
1386 CURLcode Curl_posttransfer(struct SessionHandle *data)
1388 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1389 /* restore the signal handler for SIGPIPE before we get back */
1390 if(!data->set.no_signal)
1391 signal(SIGPIPE, data->state.prev_signal);
1393 (void)data; /* unused parameter */
1399 #ifndef CURL_DISABLE_HTTP
1401 * strlen_url() returns the length of the given URL if the spaces within the
1402 * URL were properly URL encoded.
1404 static size_t strlen_url(const char *url)
1406 const unsigned char *ptr;
1408 bool left=TRUE; /* left side of the ? */
1410 for(ptr=(unsigned char *)url; *ptr; ptr++) {
1431 /* strcpy_url() copies a url to a output buffer and URL-encodes the spaces in
1432 * the source URL accordingly.
1434 static void strcpy_url(char *output, const char *url)
1436 /* we must add this with whitespace-replacing */
1438 const unsigned char *iptr;
1439 char *optr = output;
1440 for(iptr = (unsigned char *)url; /* read from here */
1441 *iptr; /* until zero byte */
1449 snprintf(optr, 4, "%%%02x", *iptr);
1457 *optr++='%'; /* add a '%' */
1458 *optr++='2'; /* add a '2' */
1459 *optr++='0'; /* add a '0' */
1462 *optr++='+'; /* add a '+' here */
1466 *optr=0; /* zero terminate output buffer */
1471 * Returns true if the given URL is absolute (as opposed to relative)
1473 static bool is_absolute_url(const char *url)
1475 char prot[16]; /* URL protocol string storage */
1476 char letter; /* used for a silly sscanf */
1478 return (2 == sscanf(url, "%15[^?&/:]://%c", prot, &letter)) ? TRUE : FALSE;
1482 * Concatenate a relative URL to a base URL making it absolute.
1483 * URL-encodes any spaces.
1484 * The returned pointer must be freed by the caller unless NULL
1485 * (returns NULL on out of memory).
1487 static char *concat_url(const char *base, const char *relurl)
1490 TRY to append this new path to the old URL
1491 to the right of the host part. Oh crap, this is doomed to cause
1492 problems in the future...
1499 const char *useurl = relurl;
1502 /* we must make our own copy of the URL to play with, as it may
1503 point to read-only data */
1504 char *url_clone=strdup(base);
1507 return NULL; /* skip out of this NOW */
1509 /* protsep points to the start of the host name */
1510 protsep=strstr(url_clone, "//");
1514 protsep+=2; /* pass the slashes */
1516 if('/' != relurl[0]) {
1519 /* First we need to find out if there's a ?-letter in the URL,
1520 and cut it and the right-side of that off */
1521 pathsep = strchr(protsep, '?');
1525 /* we have a relative path to append to the last slash if there's one
1526 available, or if the new URL is just a query string (starts with a
1527 '?') we append the new one at the end of the entire currently worked
1529 if(useurl[0] != '?') {
1530 pathsep = strrchr(protsep, '/');
1535 /* Check if there's any slash after the host name, and if so, remember
1536 that position instead */
1537 pathsep = strchr(protsep, '/');
1539 protsep = pathsep+1;
1543 /* now deal with one "./" or any amount of "../" in the newurl
1544 and act accordingly */
1546 if((useurl[0] == '.') && (useurl[1] == '/'))
1547 useurl+=2; /* just skip the "./" */
1549 while((useurl[0] == '.') &&
1550 (useurl[1] == '.') &&
1551 (useurl[2] == '/')) {
1553 useurl+=3; /* pass the "../" */
1558 /* cut off one more level from the right of the original URL */
1559 pathsep = strrchr(protsep, '/');
1570 /* We got a new absolute path for this server */
1572 if((relurl[0] == '/') && (relurl[1] == '/')) {
1573 /* the new URL starts with //, just keep the protocol part from the
1576 useurl = &relurl[2]; /* we keep the slashes from the original, so we
1577 skip the new ones */
1580 /* cut off the original URL from the first slash, or deal with URLs
1582 pathsep = strchr(protsep, '/');
1584 /* When people use badly formatted URLs, such as
1585 "http://www.url.com?dir=/home/daniel" we must not use the first
1586 slash, if there's a ?-letter before it! */
1587 char *sep = strchr(protsep, '?');
1588 if(sep && (sep < pathsep))
1593 /* There was no slash. Now, since we might be operating on a badly
1594 formatted URL, such as "http://www.url.com?id=2380" which doesn't
1595 use a slash separator as it is supposed to, we need to check for a
1596 ?-letter as well! */
1597 pathsep = strchr(protsep, '?');
1604 /* If the new part contains a space, this is a mighty stupid redirect
1605 but we still make an effort to do "right". To the left of a '?'
1606 letter we replace each space with %20 while it is replaced with '+'
1607 on the right side of the '?' letter.
1609 newlen = strlen_url(useurl);
1611 urllen = strlen(url_clone);
1613 newest = malloc(urllen + 1 + /* possible slash */
1614 newlen + 1 /* zero byte */);
1617 free(url_clone); /* don't leak this */
1621 /* copy over the root url part */
1622 memcpy(newest, url_clone, urllen);
1624 /* check if we need to append a slash */
1625 if(('/' == useurl[0]) || (protsep && !*protsep) || ('?' == useurl[0]))
1628 newest[urllen++]='/';
1630 /* then append the new piece on the right side */
1631 strcpy_url(&newest[urllen], useurl);
1637 #endif /* CURL_DISABLE_HTTP */
1640 * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
1641 * as given by the remote server and set up the new URL to request.
1643 CURLcode Curl_follow(struct SessionHandle *data,
1644 char *newurl, /* this 'newurl' is the Location: string,
1645 and it must be malloc()ed before passed
1647 followtype type) /* see transfer.h */
1649 #ifdef CURL_DISABLE_HTTP
1653 /* Location: following will not happen when HTTP is disabled */
1654 return CURLE_TOO_MANY_REDIRECTS;
1657 /* Location: redirect */
1658 bool disallowport = FALSE;
1660 if(type == FOLLOW_REDIR) {
1661 if((data->set.maxredirs != -1) &&
1662 (data->set.followlocation >= data->set.maxredirs)) {
1663 failf(data, "Maximum (%ld) redirects followed", data->set.maxredirs);
1664 return CURLE_TOO_MANY_REDIRECTS;
1667 /* mark the next request as a followed location: */
1668 data->state.this_is_a_follow = TRUE;
1670 data->set.followlocation++; /* count location-followers */
1672 if(data->set.http_auto_referer) {
1673 /* We are asked to automatically set the previous URL as the referer
1674 when we get the next URL. We pick the ->url field, which may or may
1675 not be 100% correct */
1677 if(data->change.referer_alloc) {
1678 Curl_safefree(data->change.referer);
1679 data->change.referer_alloc = FALSE;
1682 data->change.referer = strdup(data->change.url);
1683 if(!data->change.referer)
1684 return CURLE_OUT_OF_MEMORY;
1685 data->change.referer_alloc = TRUE; /* yes, free this later */
1689 if(!is_absolute_url(newurl)) {
1691 *DANG* this is an RFC 2068 violation. The URL is supposed
1692 to be absolute and this doesn't seem to be that!
1694 char *absolute = concat_url(data->change.url, newurl);
1696 return CURLE_OUT_OF_MEMORY;
1701 /* The new URL MAY contain space or high byte values, that means a mighty
1702 stupid redirect URL but we still make an effort to do "right". */
1704 size_t newlen = strlen_url(newurl);
1706 /* This is an absolute URL, don't allow the custom port number */
1707 disallowport = TRUE;
1709 newest = malloc(newlen+1); /* get memory for this */
1711 return CURLE_OUT_OF_MEMORY;
1712 strcpy_url(newest, newurl); /* create a space-free URL */
1714 free(newurl); /* that was no good */
1715 newurl = newest; /* use this instead now */
1719 if(type == FOLLOW_FAKE) {
1720 /* we're only figuring out the new url if we would've followed locations
1721 but now we're done so we can get out! */
1722 data->info.wouldredirect = newurl;
1727 data->state.allow_port = FALSE;
1729 if(data->change.url_alloc) {
1730 Curl_safefree(data->change.url);
1731 data->change.url_alloc = FALSE;
1734 data->change.url = newurl;
1735 data->change.url_alloc = TRUE;
1736 newurl = NULL; /* don't free! */
1738 infof(data, "Issue another request to this URL: '%s'\n", data->change.url);
1741 * We get here when the HTTP code is 300-399 (and 401). We need to perform
1742 * differently based on exactly what return code there was.
1744 * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
1745 * a HTTP (proxy-) authentication scheme other than Basic.
1747 switch(data->info.httpcode) {
1748 /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
1749 Authorization: XXXX header in the HTTP request code snippet */
1750 /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
1751 Proxy-Authorization: XXXX header in the HTTP request code snippet */
1752 /* 300 - Multiple Choices */
1753 /* 306 - Not used */
1754 /* 307 - Temporary Redirect */
1755 default: /* for all above (and the unknown ones) */
1756 /* Some codes are explicitly mentioned since I've checked RFC2616 and they
1757 * seem to be OK to POST to.
1760 case 301: /* Moved Permanently */
1761 /* (quote from RFC7231, section 6.4.2)
1763 * Note: For historical reasons, a user agent MAY change the request
1764 * method from POST to GET for the subsequent request. If this
1765 * behavior is undesired, the 307 (Temporary Redirect) status code
1766 * can be used instead.
1770 * Many webservers expect this, so these servers often answers to a POST
1771 * request with an error page. To be sure that libcurl gets the page that
1772 * most user agents would get, libcurl has to force GET.
1774 * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
1775 * can be overridden with CURLOPT_POSTREDIR.
1777 if((data->set.httpreq == HTTPREQ_POST
1778 || data->set.httpreq == HTTPREQ_POST_FORM)
1779 && !(data->set.keep_post & CURL_REDIR_POST_301)) {
1780 infof(data, "Switch from POST to GET\n");
1781 data->set.httpreq = HTTPREQ_GET;
1784 case 302: /* Found */
1785 /* (quote from RFC7231, section 6.4.3)
1787 * Note: For historical reasons, a user agent MAY change the request
1788 * method from POST to GET for the subsequent request. If this
1789 * behavior is undesired, the 307 (Temporary Redirect) status code
1790 * can be used instead.
1794 * Many webservers expect this, so these servers often answers to a POST
1795 * request with an error page. To be sure that libcurl gets the page that
1796 * most user agents would get, libcurl has to force GET.
1798 * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
1799 * can be overridden with CURLOPT_POSTREDIR.
1801 if((data->set.httpreq == HTTPREQ_POST
1802 || data->set.httpreq == HTTPREQ_POST_FORM)
1803 && !(data->set.keep_post & CURL_REDIR_POST_302)) {
1804 infof(data, "Switch from POST to GET\n");
1805 data->set.httpreq = HTTPREQ_GET;
1809 case 303: /* See Other */
1810 /* Disable both types of POSTs, unless the user explicitely
1811 asks for POST after POST */
1812 if(data->set.httpreq != HTTPREQ_GET
1813 && !(data->set.keep_post & CURL_REDIR_POST_303)) {
1814 data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
1815 infof(data, "Disables POST, goes with %s\n",
1816 data->set.opt_no_body?"HEAD":"GET");
1819 case 304: /* Not Modified */
1820 /* 304 means we did a conditional request and it was "Not modified".
1821 * We shouldn't get any Location: header in this response!
1824 case 305: /* Use Proxy */
1825 /* (quote from RFC2616, section 10.3.6):
1826 * "The requested resource MUST be accessed through the proxy given
1827 * by the Location field. The Location field gives the URI of the
1828 * proxy. The recipient is expected to repeat this single request
1829 * via the proxy. 305 responses MUST only be generated by origin
1834 Curl_pgrsTime(data, TIMER_REDIRECT);
1835 Curl_pgrsResetTimesSizes(data);
1838 #endif /* CURL_DISABLE_HTTP */
1842 Curl_reconnect_request(struct connectdata **connp)
1844 CURLcode result = CURLE_OK;
1845 struct connectdata *conn = *connp;
1846 struct SessionHandle *data = conn->data;
1848 /* This was a re-use of a connection and we got a write error in the
1849 * DO-phase. Then we DISCONNECT this connection and have another attempt to
1850 * CONNECT and then DO again! The retry cannot possibly find another
1851 * connection to re-use, since we only keep one possible connection for
1854 infof(data, "Re-used connection seems dead, get a new one\n");
1856 connclose(conn, "Reconnect dead connection"); /* enforce close */
1857 result = Curl_done(&conn, result, FALSE); /* we are so done with this */
1859 /* conn may no longer be a good pointer, clear it to avoid mistakes by
1864 * According to bug report #1330310. We need to check for CURLE_SEND_ERROR
1865 * here as well. I figure this could happen when the request failed on a FTP
1866 * connection and thus Curl_done() itself tried to use the connection
1867 * (again). Slight Lack of feedback in the report, but I don't think this
1868 * extra check can do much harm.
1870 if(!result || (CURLE_SEND_ERROR == result)) {
1872 bool protocol_done = TRUE;
1874 /* Now, redo the connect and get a new connection */
1875 result = Curl_connect(data, connp, &async, &protocol_done);
1877 /* We have connected or sent away a name resolve query fine */
1879 conn = *connp; /* setup conn to again point to something nice */
1881 /* Now, if async is TRUE here, we need to wait for the name
1883 result = Curl_resolver_wait_resolv(conn, NULL);
1887 /* Resolved, continue with the connection */
1888 result = Curl_async_resolved(conn, &protocol_done);
1898 /* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.
1900 NOTE: that the *url is malloc()ed. */
1901 CURLcode Curl_retry_request(struct connectdata *conn,
1904 struct SessionHandle *data = conn->data;
1908 /* if we're talking upload, we can't do the checks below, unless the protocol
1909 is HTTP as when uploading over HTTP we will still get a response */
1910 if(data->set.upload &&
1911 !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
1914 if((data->req.bytecount + data->req.headerbytecount == 0) &&
1916 !data->set.opt_no_body &&
1917 (data->set.rtspreq != RTSPREQ_RECEIVE)) {
1918 /* We got no data, we attempted to re-use a connection and yet we want a
1919 "body". This might happen if the connection was left alive when we were
1920 done using it before, but that was closed when we wanted to read from
1921 it again. Bad luck. Retry the same request on a fresh connect! */
1922 infof(conn->data, "Connection died, retrying a fresh connect\n");
1923 *url = strdup(conn->data->change.url);
1925 return CURLE_OUT_OF_MEMORY;
1927 connclose(conn, "retry"); /* close this connection */
1928 conn->bits.retry = TRUE; /* mark this as a connection we're about
1929 to retry. Marking it this way should
1930 prevent i.e HTTP transfers to return
1931 error just because nothing has been
1935 if(conn->handler->protocol&PROTO_FAMILY_HTTP) {
1936 struct HTTP *http = data->req.protop;
1937 if(http->writebytecount)
1938 return Curl_readrewind(conn);
1945 * Curl_setup_transfer() is called to setup some basic properties for the
1946 * upcoming transfer.
1949 Curl_setup_transfer(
1950 struct connectdata *conn, /* connection data */
1951 int sockindex, /* socket index to read from or -1 */
1952 curl_off_t size, /* -1 if unknown at this point */
1953 bool getheader, /* TRUE if header parsing is wanted */
1954 curl_off_t *bytecountp, /* return number of bytes read or NULL */
1955 int writesockindex, /* socket index to write to, it may very well be
1956 the same we read from. -1 disables */
1957 curl_off_t *writecountp /* return number of bytes written or NULL */
1960 struct SessionHandle *data;
1961 struct SingleRequest *k;
1963 DEBUGASSERT(conn != NULL);
1968 DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));
1970 /* now copy all input parameters */
1971 conn->sockfd = sockindex == -1 ?
1972 CURL_SOCKET_BAD : conn->sock[sockindex];
1973 conn->writesockfd = writesockindex == -1 ?
1974 CURL_SOCKET_BAD:conn->sock[writesockindex];
1975 k->getheader = getheader;
1978 k->bytecountp = bytecountp;
1979 k->writebytecountp = writecountp;
1981 /* The code sequence below is placed in this function just because all
1982 necessary input is not always known in do_complete() as this function may
1983 be called after that */
1988 Curl_pgrsSetDownloadSize(data, size);
1990 /* we want header and/or body, if neither then don't do this! */
1991 if(k->getheader || !data->set.opt_no_body) {
1993 if(conn->sockfd != CURL_SOCKET_BAD)
1994 k->keepon |= KEEP_RECV;
1996 if(conn->writesockfd != CURL_SOCKET_BAD) {
1997 struct HTTP *http = data->req.protop;
2000 Even if we require a 100-return code before uploading data, we might
2001 need to write data before that since the REQUEST may not have been
2002 finished sent off just yet.
2004 Thus, we must check if the request has been sent before we set the
2005 state info where we wait for the 100-return code
2007 if((data->state.expect100header) &&
2008 (conn->handler->protocol&PROTO_FAMILY_HTTP) &&
2009 (http->sending == HTTPSEND_BODY)) {
2010 /* wait with write until we either got 100-continue or a timeout */
2011 k->exp100 = EXP100_AWAITING_CONTINUE;
2012 k->start100 = Curl_tvnow();
2014 /* Set a timeout for the multi interface. Add the inaccuracy margin so
2015 that we don't fire slightly too early and get denied to run. */
2016 Curl_expire(data, data->set.expect_100_timeout);
2019 if(data->state.expect100header)
2020 /* when we've sent off the rest of the headers, we must await a
2021 100-continue but first finish sending the request */
2022 k->exp100 = EXP100_SENDING_REQUEST;
2024 /* enable the write bit when we're not waiting for continue */
2025 k->keepon |= KEEP_SEND;
2027 } /* if(conn->writesockfd != CURL_SOCKET_BAD) */
2028 } /* if(k->getheader || !data->set.opt_no_body) */