1 /***************************************************************************
3 * Project ___| | | | _ \| |
5 * | (__| |_| | _ <| |___
6 * \___|\___/|_| \_\_____|
8 * Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
10 * This software is licensed as described in the file COPYING, which
11 * you should have received as part of this distribution. The terms
12 * are also available at http://curl.haxx.se/docs/copyright.html.
14 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15 * copies of the Software, and permit persons to whom the Software is
16 * furnished to do so, under the terms of the COPYING file.
18 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19 * KIND, either express or implied.
21 ***************************************************************************/
23 #include "curl_setup.h"
25 #include "strtoofft.h"
29 #ifdef HAVE_NETINET_IN_H
30 #include <netinet/in.h>
35 #ifdef HAVE_ARPA_INET_H
36 #include <arpa/inet.h>
41 #ifdef HAVE_SYS_IOCTL_H
42 #include <sys/ioctl.h>
48 #ifdef HAVE_SYS_PARAM_H
49 #include <sys/param.h>
52 #ifdef HAVE_SYS_SELECT_H
53 #include <sys/select.h>
57 #error "We can't compile without socket() support!"
61 #include <curl/curl.h>
64 #include "content_encoding.h"
68 #include "speedcheck.h"
73 #include "vtls/vtls.h"
74 #include "http_digest.h"
75 #include "curl_ntlm.h"
76 #include "http_negotiate.h"
81 #include "non-ascii.h"
82 #include "curl_printf.h"
84 /* The last #include files should be: */
85 #include "curl_memory.h"
/* NOTE(review): this listing is line-numbered and sampled; several original
   source lines (e.g. local declarations, closing braces, #else branches)
   are not visible. Comments below describe only what the visible code
   shows; hedged notes mark assumptions to confirm against full source. */
89 * This function will call the read callback to fill our buffer with data
/* Fills data->req.upload_fromhere via the app's CURLOPT_READFUNCTION.
   Visible responsibilities: reserve/prepend chunked-TE framing, handle
   CURL_READFUNC_ABORT and CURL_READFUNC_PAUSE sentinels, reject oversized
   reads, and (on CURL_DOES_CONVERSIONS builds) translate to the network
   encoding. 'nreadp' presumably receives the byte count — not visible
   here; confirm against full source. */
92 CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
94 struct SessionHandle *data = conn->data;
95 size_t buffersize = (size_t)bytes;
97 #ifdef CURL_DOES_CONVERSIONS
98 bool sending_http_headers = FALSE;
100 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
101 const struct HTTP *http = data->req.protop;
103 if(http->sending == HTTPSEND_REQUEST)
104 /* We're sending the HTTP request headers, not the data.
105 Remember that so we don't re-translate them into garbage. */
106 sending_http_headers = TRUE;
/* Chunked uploads: shrink the callback's buffer and advance the write
   pointer so the hex chunk-size prefix fits in front afterwards. */
110 if(data->req.upload_chunky) {
111 /* if chunked Transfer-Encoding */
112 buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
113 data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
116 /* this function returns a size_t, so we typecast to int to prevent warnings
117 with picky compilers */
118 nread = (int)data->set.fread_func(data->req.upload_fromhere, 1,
119 buffersize, data->set.in);
/* The read callback may return special sentinel values instead of a
   byte count; handle abort first. */
121 if(nread == CURL_READFUNC_ABORT) {
122 failf(data, "operation aborted by callback");
124 return CURLE_ABORTED_BY_CALLBACK;
126 else if(nread == CURL_READFUNC_PAUSE) {
128 if(conn->handler->flags & PROTOPT_NONETWORK) {
129 /* protocols that work without network cannot be paused. This is
130 actually only FILE:// just now, and it can't pause since the transfer
131 isn't done using the "normal" procedure. */
132 failf(data, "Read callback asked for PAUSE when not supported!");
133 return CURLE_READ_ERROR;
136 struct SingleRequest *k = &data->req;
137 /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
138 k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
139 if(data->req.upload_chunky) {
140 /* Back out the preallocation done above */
141 data->req.upload_fromhere -= (8 + 2);
145 return CURLE_OK; /* nothing was read */
147 else if((size_t)nread > buffersize) {
148 /* the read function returned a too large value */
150 failf(data, "read function returned funny value");
151 return CURLE_READ_ERROR;
/* Chunked uploads: build and prepend the "<hex-size><EOL>" prefix and
   append the trailing CRLF around the data just read. */
154 if(!data->req.forbidchunk && data->req.upload_chunky) {
155 /* if chunked Transfer-Encoding
161 /* On non-ASCII platforms the <DATA> may or may not be
162 translated based on set.prefer_ascii while the protocol
163 portion must always be translated to the network encoding.
164 To further complicate matters, line end conversion might be
165 done later on, so we need to prevent CRLFs from becoming
166 CRCRLFs if that's the case. To do this we use bare LFs
167 here, knowing they'll become CRLFs later on.
171 const char *endofline_native;
172 const char *endofline_network;
176 #ifdef CURL_DO_LINEEND_CONV
177 (data->set.prefer_ascii) ||
180 /* \n will become \r\n later on */
181 endofline_native = "\n";
182 endofline_network = "\x0a";
185 endofline_native = "\r\n";
186 endofline_network = "\x0d\x0a";
/* Format the chunk-size line into hexbuffer (declared in a line not
   visible in this listing) */
188 hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
189 "%x%s", nread, endofline_native);
191 /* move buffer pointer */
192 data->req.upload_fromhere -= hexlen;
195 /* copy the prefix to the buffer, leaving out the NUL */
196 memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
198 /* always append ASCII CRLF to the data */
199 memcpy(data->req.upload_fromhere + nread,
201 strlen(endofline_network));
203 #ifdef CURL_DOES_CONVERSIONS
206 if(data->set.prefer_ascii) {
207 /* translate the protocol and data */
211 /* just translate the protocol portion */
212 length = strlen(hexbuffer);
214 result = Curl_convert_to_network(data, data->req.upload_fromhere, length);
215 /* Curl_convert_to_network calls failf if unsuccessful */
218 #endif /* CURL_DOES_CONVERSIONS */
/* A zero-byte read means this is the terminating chunk; flag the upload
   as finished once it has been transmitted. */
220 if((nread - hexlen) == 0)
221 /* mark this as done once this chunk is transferred */
222 data->req.upload_done = TRUE;
224 nread+=(int)strlen(endofline_native); /* for the added end of line */
226 #ifdef CURL_DOES_CONVERSIONS
227 else if((data->set.prefer_ascii) && (!sending_http_headers)) {
229 result = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
230 /* Curl_convert_to_network calls failf if unsuccessful */
234 #endif /* CURL_DOES_CONVERSIONS */
243 * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
244 * POST/PUT with multi-pass authentication when a sending was denied and a
245 * resend is necessary.
/* Rewind strategy visible below, in order of preference:
   1) POSTFIELDS/HTTPPOST bodies (library-managed data),
   2) app seek callback (CURLOPT_SEEKFUNCTION),
   3) app ioctl callback with CURLIOCMD_RESTARTREAD,
   4) plain fseek() when the default fread callback is in use.
   On any failure: CURLE_SEND_FAIL_REWIND.
   NOTE(review): listing is sampled; some lines (success returns,
   closing braces) are not visible here. */
247 CURLcode Curl_readrewind(struct connectdata *conn)
249 struct SessionHandle *data = conn->data;
251 conn->bits.rewindaftersend = FALSE; /* we rewind now */
253 /* explicitly switch off sending data on this connection now since we are
254 about to restart a new transfer and thus we want to avoid inadvertently
255 sending more data on the existing connection until the next transfer
257 data->req.keepon &= ~KEEP_SEND;
259 /* We have sent away data. If not using CURLOPT_POSTFIELDS or
260 CURLOPT_HTTPPOST, call app to rewind
262 if(data->set.postfields ||
263 (data->set.httpreq == HTTPREQ_POST_FORM))
/* Application-provided seek callback takes precedence over ioctl */
266 if(data->set.seek_func) {
269 err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
271 failf(data, "seek callback returned error %d", (int)err);
272 return CURLE_SEND_FAIL_REWIND;
275 else if(data->set.ioctl_func) {
278 err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
279 data->set.ioctl_client);
280 infof(data, "the ioctl callback returned %d\n", (int)err);
283 /* FIXME: convert to a human readable error message */
284 failf(data, "ioctl callback returned error %d", (int)err);
285 return CURLE_SEND_FAIL_REWIND;
289 /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
290 given FILE * stream and we can actually attempt to rewind that
291 ourselves with fseek() */
292 if(data->set.fread_func == (curl_read_callback)fread) {
293 if(-1 != fseek(data->set.in, 0, SEEK_SET))
294 /* successful rewind */
298 /* no callback set or failure above, makes us fail at once */
299 failf(data, "necessary data rewind wasn't possible");
300 return CURLE_SEND_FAIL_REWIND;
/* Returns nonzero when the connection may still hold buffered readable
   data that a plain socket poll would miss: SSH-protocol transfers
   (libssh2 internal buffers), TLS-layer pending data, or an HTTP/2
   stream that still needs an http2_recv call. */
306 static int data_pending(const struct connectdata *conn)
308 /* in the case of libssh2, we can never be really sure that we have emptied
309 its internal buffers so we MUST always try until we get EAGAIN back */
310 return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
311 #if defined(USE_NGHTTP2)
312 Curl_ssl_data_pending(conn, FIRSTSOCKET) ||
313 /* For HTTP/2, we may read up everything including responde body
314 with header fields in Curl_http_readwrite_headers. If no
315 content-length is provided, curl waits for the connection
316 close, which we emulate it using conn->proto.httpc.closed =
317 TRUE. The thing is if we read everything, then http2_recv won't
318 be called and we cannot signal the HTTP/2 stream has closed. As
319 a workaround, we return nonzero here to call http2_recv. */
320 ((conn->handler->protocol&PROTO_FAMILY_HTTP) && conn->httpversion == 20);
/* NOTE(review): the original #else separating the non-nghttp2 variant of
   this expression is not visible in this sampled listing */
322 Curl_ssl_data_pending(conn, FIRSTSOCKET);
/* Rewinds the connection's master-buffer read position by 'thismuch'
   bytes so already-received (pipelined) data can be re-read on the next
   pass, and flags the rewind via conn->bits.stream_was_rewound. */
326 static void read_rewind(struct connectdata *conn,
329 DEBUGASSERT(conn->read_pos >= thismuch);
331 conn->read_pos -= thismuch;
332 conn->bits.stream_was_rewound = TRUE;
/* Debug builds only: log a bounded snippet of the buffer content from
   the new read position (the surrounding #ifdef lines are not visible
   in this sampled listing). */
339 show = CURLMIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
340 if(conn->master_buffer) {
341 memcpy(buf, conn->master_buffer + conn->read_pos, show);
348 DEBUGF(infof(conn->data,
349 "Buffer after stream rewind (read_pos = %zu): [%s]\n",
350 conn->read_pos, buf));
356 * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
357 * remote document with the time provided by CURLOPT_TIMEVAL
/* Evaluates the configured time condition against 'timeofdoc'. A zero
   document time or zero configured timevalue short-circuits the check
   (guard below). Both branches log and set data->info.timecond when the
   condition is NOT met. NOTE(review): the actual return statements are
   missing from this sampled listing — presumably TRUE when met, FALSE
   otherwise; confirm against full source. */
359 bool Curl_meets_timecondition(struct SessionHandle *data, time_t timeofdoc)
361 if((timeofdoc == 0) || (data->set.timevalue == 0))
364 switch(data->set.timecondition) {
365 case CURL_TIMECOND_IFMODSINCE:
367 if(timeofdoc <= data->set.timevalue) {
369 "The requested document is not new enough\n");
370 data->info.timecond = TRUE;
374 case CURL_TIMECOND_IFUNMODSINCE:
375 if(timeofdoc >= data->set.timevalue) {
377 "The requested document is not old enough\n");
378 data->info.timecond = TRUE;
388 * Go ahead and do a read if we have a readable socket or if
389 * the stream was rewound (in which case we have data in a
/* Receive-side workhorse: loops reading from the socket while
   data_pending() says more may be buffered, parses headers, decodes
   chunked TE and content encodings, enforces maxdownload, and delivers
   body data via Curl_client_write. NOTE(review): this listing is
   sampled; many lines (loop head, else branches, closing braces) are
   not visible, so comments below annotate only the visible flow. */
392 static CURLcode readwrite_data(struct SessionHandle *data,
393 struct connectdata *conn,
394 struct SingleRequest *k,
395 int *didwhat, bool *done)
397 CURLcode result = CURLE_OK;
398 ssize_t nread; /* number of bytes read */
399 size_t excess = 0; /* excess bytes read */
400 bool is_empty_data = FALSE;
401 bool readmore = FALSE; /* used by RTP to signal for more data */
405 /* This is where we loop until we have read everything there is to
406 read or we get a CURLE_AGAIN */
408 size_t buffersize = data->set.buffer_size?
409 data->set.buffer_size : BUFSIZE;
410 size_t bytestoread = buffersize;
/* Cap the read at the remaining body size so a pipelined follower's
   response is not consumed by this transfer. */
412 if(k->size != -1 && !k->header) {
413 /* make sure we don't read "too much" if we can help it since we
414 might be pipelining and then someone else might want to read what
416 curl_off_t totalleft = k->size - k->bytecount;
417 if(totalleft < (curl_off_t)bytestoread)
418 bytestoread = (size_t)totalleft;
422 /* receive data from the network! */
423 result = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);
425 /* read would've blocked */
426 if(CURLE_AGAIN == result)
427 break; /* get out of loop */
433 /* read nothing but since we wanted nothing we consider this an OK
434 situation to proceed from */
435 DEBUGF(infof(data, "readwrite_data: we're done!\n"));
/* First bytes of the transfer: start the transfer timer and, when
   waiting on a 100-continue, the comparison timestamp. */
439 if((k->bytecount == 0) && (k->writebytecount == 0)) {
440 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
441 if(k->exp100 > EXP100_SEND_DATA)
442 /* set time stamp to compare with when waiting for the 100 */
443 k->start100 = Curl_tvnow();
446 *didwhat |= KEEP_RECV;
447 /* indicates data of zero size, i.e. empty file */
448 is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;
450 /* NUL terminate, allowing string ops to be used */
451 if(0 < nread || is_empty_data) {
454 else if(0 >= nread) {
455 /* if we receive 0 or less here, the server closed the connection
456 and we bail out from this! */
457 DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
458 k->keepon &= ~KEEP_RECV;
462 /* Default buffer to use when we write the buffer, it may be changed
463 in the flow below before the actual storing is done. */
/* Protocol handlers (e.g. RTSP) get first look at the raw data */
466 if(conn->handler->readwrite) {
467 result = conn->handler->readwrite(data, conn, &nread, &readmore);
474 #ifndef CURL_DISABLE_HTTP
475 /* Since this is a two-state thing, we check if we are parsing
476 headers at the moment or not. */
478 /* we are in parse-the-header-mode */
479 bool stop_reading = FALSE;
480 result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
484 if(conn->handler->readwrite &&
485 (k->maxdownload <= 0 && nread > 0)) {
486 result = conn->handler->readwrite(data, conn, &nread, &readmore);
494 /* We've stopped dealing with input, get out of the do-while loop */
/* Zero-length body with leftover bytes: rewind only when pipelining,
   otherwise just log the excess. */
497 if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
499 "Rewinding stream by : %zd"
500 " bytes on url %s (zero-length body)\n",
501 nread, data->state.path);
502 read_rewind(conn, (size_t)nread);
506 "Excess found in a non pipelined read:"
508 " url = %s (zero-length body)\n",
509 nread, data->state.path);
516 #endif /* CURL_DISABLE_HTTP */
519 /* This is not an 'else if' since it may be a rest from the header
520 parsing, where the beginning of the buffer is headers and the end
522 if(k->str && !k->header && (nread > 0 || is_empty_data)) {
524 #ifndef CURL_DISABLE_HTTP
525 if(0 == k->bodywrites && !is_empty_data) {
526 /* These checks are only made the first time we are about to
527 write a piece of the body */
528 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
529 /* HTTP-only checks */
531 if(data->req.newurl) {
532 if(conn->bits.close) {
533 /* Abort after the headers if "follow Location" is set
534 and we're set to close anyway. */
535 k->keepon &= ~KEEP_RECV;
539 /* We have a new url to load, but since we want to be able
540 to re-use this connection properly, we read the full
541 response in "ignore more" */
542 k->ignorebody = TRUE;
543 infof(data, "Ignoring the response-body\n");
/* Resume requested but the server sent no Content-Range header */
545 if(data->state.resume_from && !k->content_range &&
546 (data->set.httpreq==HTTPREQ_GET) &&
549 if(k->size == data->state.resume_from) {
550 /* The resume point is at the end of file, consider this fine
551 even if it doesn't allow resume from here. */
552 infof(data, "The entire document is already downloaded");
553 connclose(conn, "already downloaded");
555 k->keepon &= ~KEEP_RECV;
560 /* we wanted to resume a download, although the server doesn't
561 * seem to support this and we did this with a GET (if it
562 * wasn't a GET we did a POST or PUT resume) */
563 failf(data, "HTTP server doesn't seem to support "
564 "byte ranges. Cannot resume.");
565 return CURLE_RANGE_ERROR;
568 if(data->set.timecondition && !data->state.range) {
569 /* A time condition has been set AND no ranges have been
570 requested. This seems to be what chapter 13.3.4 of
571 RFC 2616 defines to be the correct action for a
574 if(!Curl_meets_timecondition(data, k->timeofdoc)) {
576 /* We're simulating a http 304 from server so we return
577 what should have been returned from the server */
578 data->info.httpcode = 304;
579 infof(data, "Simulate a HTTP 304 response!\n");
580 /* we abort the transfer before it is completed == we ruin the
581 re-use ability. Close the connection */
582 connclose(conn, "Simulated 304 handling");
585 } /* we have a time condition */
587 } /* this is HTTP or RTSP */
588 } /* this is the first time we write a body part */
589 #endif /* CURL_DISABLE_HTTP */
593 /* pass data to the debug function before it gets "dechunked" */
594 if(data->set.verbose) {
596 Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
597 (size_t)k->hbuflen, conn);
598 if(k->badheader == HEADER_PARTHEADER)
599 Curl_debug(data, CURLINFO_DATA_IN,
600 k->str, (size_t)nread, conn);
603 Curl_debug(data, CURLINFO_DATA_IN,
604 k->str, (size_t)nread, conn);
607 #ifndef CURL_DISABLE_HTTP
610 * Here comes a chunked transfer flying and we need to decode this
611 * properly. While the name says read, this function both reads
612 * and writes away the data. The returned 'nread' holds the number
613 * of actual data it wrote to the client.
617 Curl_httpchunk_read(conn, k->str, nread, &nread);
619 if(CHUNKE_OK < res) {
620 if(CHUNKE_WRITE_ERROR == res) {
621 failf(data, "Failed writing data");
622 return CURLE_WRITE_ERROR;
624 failf(data, "%s in chunked-encoding", Curl_chunked_strerror(res));
625 return CURLE_RECV_ERROR;
627 else if(CHUNKE_STOP == res) {
629 /* we're done reading chunks! */
630 k->keepon &= ~KEEP_RECV; /* read no more */
632 /* There are now possibly N number of bytes at the end of the
633 str buffer that weren't written to the client.
635 We DO care about this data if we are pipelining.
636 Push it back to be read on the next pass. */
638 dataleft = conn->chunk.dataleft;
640 infof(conn->data, "Leftovers after chunking: %zu bytes\n",
642 if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
643 /* only attempt the rewind if we truly are pipelining */
644 infof(conn->data, "Rewinding %zu bytes\n",dataleft);
645 read_rewind(conn, dataleft);
649 /* If it returned OK, we just keep going */
651 #endif /* CURL_DISABLE_HTTP */
653 /* Account for body content stored in the header buffer */
654 if(k->badheader && !k->ignorebody) {
655 DEBUGF(infof(data, "Increasing bytecount by %zu from hbuflen\n",
657 k->bytecount += k->hbuflen;
/* Clamp at maxdownload; 'excess' past the limit is rewound (pipelining)
   or logged, and reading stops. */
660 if((-1 != k->maxdownload) &&
661 (k->bytecount + nread >= k->maxdownload)) {
663 excess = (size_t)(k->bytecount + nread - k->maxdownload);
664 if(excess > 0 && !k->ignorebody) {
665 if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
666 /* The 'excess' amount below can't be more than BUFSIZE which
667 always will fit in a size_t */
669 "Rewinding stream by : %zu"
670 " bytes on url %s (size = %" CURL_FORMAT_CURL_OFF_T
671 ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
672 ", bytecount = %" CURL_FORMAT_CURL_OFF_T ", nread = %zd)\n",
673 excess, data->state.path,
674 k->size, k->maxdownload, k->bytecount, nread);
675 read_rewind(conn, excess);
679 "Excess found in a non pipelined read:"
681 ", size = %" CURL_FORMAT_CURL_OFF_T
682 ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
683 ", bytecount = %" CURL_FORMAT_CURL_OFF_T "\n",
684 excess, k->size, k->maxdownload, k->bytecount);
688 nread = (ssize_t) (k->maxdownload - k->bytecount);
689 if(nread < 0 ) /* this should be unusual */
692 k->keepon &= ~KEEP_RECV; /* we're done reading */
695 k->bytecount += nread;
697 Curl_pgrsSetDownloadCounter(data, k->bytecount);
699 if(!k->chunk && (nread || k->badheader || is_empty_data)) {
700 /* If this is chunky transfer, it was already written */
702 if(k->badheader && !k->ignorebody) {
703 /* we parsed a piece of data wrongly assuming it was a header
704 and now we output it as body instead */
706 /* Don't let excess data pollute body writes */
707 if(k->maxdownload == -1 || (curl_off_t)k->hbuflen <= k->maxdownload)
708 result = Curl_client_write(conn, CLIENTWRITE_BODY,
709 data->state.headerbuff,
712 result = Curl_client_write(conn, CLIENTWRITE_BODY,
713 data->state.headerbuff,
714 (size_t)k->maxdownload);
719 if(k->badheader < HEADER_ALLBAD) {
720 /* This switch handles various content encodings. If there's an
721 error here, be sure to check over the almost identical code
723 Make sure that ALL_CONTENT_ENCODINGS contains all the
724 encodings handled here. */
726 switch (conn->data->set.http_ce_skip ?
727 IDENTITY : k->auto_decoding) {
730 /* This is the default when the server sends no
731 Content-Encoding header. See Curl_readwrite_init; the
732 memset() call initializes k->auto_decoding to zero. */
735 #ifndef CURL_DISABLE_POP3
736 if(conn->handler->protocol&PROTO_FAMILY_POP3)
737 result = Curl_pop3_write(conn, k->str, nread);
739 #endif /* CURL_DISABLE_POP3 */
741 result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
748 /* Assume CLIENTWRITE_BODY; headers are not encoded. */
750 result = Curl_unencode_deflate_write(conn, k, nread);
754 /* Assume CLIENTWRITE_BODY; headers are not encoded. */
756 result = Curl_unencode_gzip_write(conn, k, nread);
760 failf (data, "Unrecognized content encoding type. "
761 "libcurl understands `identity', `deflate' and `gzip' "
762 "content encodings.");
763 result = CURLE_BAD_CONTENT_ENCODING;
768 k->badheader = HEADER_NORMAL; /* taken care of now */
774 } /* if(! header and data to read ) */
/* Let the handler parse excess data that was not rewound */
776 if(conn->handler->readwrite &&
777 (excess > 0 && !conn->bits.stream_was_rewound)) {
778 /* Parse the excess data */
780 nread = (ssize_t)excess;
782 result = conn->handler->readwrite(data, conn, &nread, &readmore);
787 k->keepon |= KEEP_RECV; /* we're not done reading */
792 /* if we received nothing, the server closed the connection and we
794 k->keepon &= ~KEEP_RECV;
797 } while(data_pending(conn));
799 if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
801 /* When we've read the entire thing and the close bit is set, the server
802 may now close the connection. If there's now any kind of sending going
803 on from our side, we need to stop that immediately. */
804 infof(data, "we are done reading and this is set to close, stop send\n")
805 k->keepon &= ~KEEP_SEND; /* no writing anymore either */
812 * Send data to upload to the server, when the socket is writable.
/* Send-side workhorse: refills the upload buffer via
   Curl_fillreadbuffer when empty, handles the Expect: 100-continue
   state machine, performs optional LF->CRLF conversion into a scratch
   buffer, SMTP dot-stuffing, then writes to the socket and accounts for
   partial writes. NOTE(review): this listing is sampled; loop head,
   else branches and several declarations are not visible. */
814 static CURLcode readwrite_upload(struct SessionHandle *data,
815 struct connectdata *conn,
816 struct SingleRequest *k,
820 ssize_t bytes_written;
822 ssize_t nread; /* number of bytes read */
823 bool sending_http_headers = FALSE;
825 if((k->bytecount == 0) && (k->writebytecount == 0))
826 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
828 *didwhat |= KEEP_SEND;
832 /* only read more data if there's no upload data already
833 present in the upload buffer */
834 if(0 == data->req.upload_present) {
835 /* init the "upload from here" pointer */
836 data->req.upload_fromhere = k->uploadbuf;
838 if(!k->upload_done) {
839 /* HTTP pollution, this should be written nicer to become more
840 protocol agnostic. */
842 struct HTTP *http = data->req.protop;
844 if((k->exp100 == EXP100_SENDING_REQUEST) &&
845 (http->sending == HTTPSEND_BODY)) {
846 /* If this call is to send body data, we must take some action:
847 We have sent off the full HTTP 1.1 request, and we shall now
848 go into the Expect: 100 state and await such a header */
849 k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
850 k->keepon &= ~KEEP_SEND; /* disable writing */
851 k->start100 = Curl_tvnow(); /* timeout count starts now */
852 *didwhat &= ~KEEP_SEND; /* we didn't write anything actually */
854 /* set a timeout for the multi interface */
855 Curl_expire(data, data->set.expect_100_timeout);
859 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
860 if(http->sending == HTTPSEND_REQUEST)
861 /* We're sending the HTTP request headers, not the data.
862 Remember that so we don't change the line endings. */
863 sending_http_headers = TRUE;
865 sending_http_headers = FALSE;
/* Ask the read callback for up to BUFSIZE bytes */
868 result = Curl_fillreadbuffer(conn, BUFSIZE, &fillcount);
872 nread = (ssize_t)fillcount;
875 nread = 0; /* we're done uploading/reading */
877 if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
878 /* this is a paused transfer */
883 k->keepon &= ~KEEP_SEND; /* we're done writing */
885 if(conn->bits.rewindaftersend) {
886 result = Curl_readrewind(conn);
893 /* store number of bytes available for upload */
894 data->req.upload_present = nread;
896 /* convert LF to CRLF if so asked */
897 if((!sending_http_headers) && (
898 #ifdef CURL_DO_LINEEND_CONV
899 /* always convert if we're FTPing in ASCII mode */
900 (data->set.prefer_ascii) ||
903 /* Do we need to allocate a scratch buffer? */
904 if(!data->state.scratch) {
905 data->state.scratch = malloc(2 * BUFSIZE);
906 if(!data->state.scratch) {
907 failf(data, "Failed to alloc scratch buffer!");
909 return CURLE_OUT_OF_MEMORY;
914 * ASCII/EBCDIC Note: This is presumably a text (not binary)
915 * transfer so the data should already be in ASCII.
916 * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
917 * must be used instead of the escape sequences \r & \n.
919 for(i = 0, si = 0; i < nread; i++, si++) {
920 if(data->req.upload_fromhere[i] == 0x0a) {
921 data->state.scratch[si++] = 0x0d;
922 data->state.scratch[si] = 0x0a;
923 if(!data->set.crlf) {
924 /* we're here only because FTP is in ASCII mode...
925 bump infilesize for the LF we just added */
926 data->state.infilesize++;
930 data->state.scratch[si] = data->req.upload_fromhere[i];
934 /* only perform the special operation if we really did replace
938 /* upload from the new (replaced) buffer instead */
939 data->req.upload_fromhere = data->state.scratch;
941 /* set the new amount too */
942 data->req.upload_present = nread;
946 #ifndef CURL_DISABLE_SMTP
947 if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
948 result = Curl_smtp_escape_eob(conn, nread);
952 #endif /* CURL_DISABLE_SMTP */
953 } /* if 0 == data->req.upload_present */
955 /* We have a partial buffer left from a previous "round". Use
956 that instead of reading more data */
959 /* write to socket (send away data) */
960 result = Curl_write(conn,
961 conn->writesockfd, /* socket to send to */
962 data->req.upload_fromhere, /* buffer pointer */
963 data->req.upload_present, /* buffer size */
964 &bytes_written); /* actually sent */
969 if(data->set.verbose)
970 /* show the data before we change the pointer upload_fromhere */
971 Curl_debug(data, CURLINFO_DATA_OUT, data->req.upload_fromhere,
972 (size_t)bytes_written, conn);
974 k->writebytecount += bytes_written;
976 if(k->writebytecount == data->state.infilesize) {
977 /* we have sent all data we were supposed to */
978 k->upload_done = TRUE;
979 infof(data, "We are completely uploaded and fine\n");
/* Partial write: remember what is left and where it starts so the next
   round resumes without calling the read callback again. */
982 if(data->req.upload_present != bytes_written) {
983 /* we only wrote a part of the buffer (if anything), deal with it! */
985 /* store the amount of bytes left in the buffer to write */
986 data->req.upload_present -= bytes_written;
988 /* advance the pointer where to find the buffer when the next send
990 data->req.upload_fromhere += bytes_written;
993 /* we've uploaded that buffer now */
994 data->req.upload_fromhere = k->uploadbuf;
995 data->req.upload_present = 0; /* no more bytes left */
998 /* switch off writing, we're done! */
999 k->keepon &= ~KEEP_SEND; /* we're done writing */
1003 Curl_pgrsSetUploadCounter(data, k->writebytecount);
1005 } WHILE_FALSE; /* just to break out from! */
1011 * Curl_readwrite() is the low-level function to be called when data is to
1012 * be read and written to/from the connection.
/* Top-level per-socket-event driver: determines socket readiness,
   dispatches to readwrite_data()/readwrite_upload(), updates counters
   and progress, handles the 100-continue timeout and overall transfer
   timeout, then performs end-of-transfer sanity checks and sets *done.
   NOTE(review): this listing is sampled; several declarations and
   branches are not visible here. */
1014 CURLcode Curl_readwrite(struct connectdata *conn,
1015 struct SessionHandle *data,
1018 struct SingleRequest *k = &data->req;
1022 curl_socket_t fd_read;
1023 curl_socket_t fd_write;
1024 int select_res = conn->cselect_bits;
1026 conn->cselect_bits = 0;
1028 /* only use the proper socket if the *_HOLD bit is not set simultaneously as
1029 then we are in rate limiting state in that transfer direction */
1031 if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
1032 fd_read = conn->sockfd;
1034 fd_read = CURL_SOCKET_BAD;
1036 if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
1037 fd_write = conn->writesockfd;
1039 fd_write = CURL_SOCKET_BAD;
1041 if(conn->data->state.drain) {
1042 select_res |= CURL_CSELECT_IN;
1043 DEBUGF(infof(data, "Curl_readwrite: forcibly told to drain data\n"));
1046 if(!select_res) /* Call for select()/poll() only, if read/write/error
1047 status is not known. */
1048 select_res = Curl_socket_ready(fd_read, fd_write, 0);
1050 if(select_res == CURL_CSELECT_ERR) {
1051 failf(data, "select/poll returned error");
1052 return CURLE_SEND_ERROR;
1055 /* We go ahead and do a read if we have a readable socket or if
1056 the stream was rewound (in which case we have data in a
1058 if((k->keepon & KEEP_RECV) &&
1059 ((select_res & CURL_CSELECT_IN) || conn->bits.stream_was_rewound)) {
1061 result = readwrite_data(data, conn, k, &didwhat, done);
1066 /* If we still have writing to do, we check if we have a writable socket. */
1067 if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
1070 result = readwrite_upload(data, conn, k, &didwhat)
1075 k->now = Curl_tvnow();
1077 /* Update read/write counters */
1079 *k->bytecountp = k->bytecount; /* read count */
1080 if(k->writebytecountp)
1081 *k->writebytecountp = k->writebytecount; /* write count */
1084 /* no read no write, this is a timeout? */
1085 if(k->exp100 == EXP100_AWAITING_CONTINUE) {
1086 /* This should allow some time for the header to arrive, but only a
1087 very short time as otherwise it'll be too much wasted time too
1090 /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1092 Therefore, when a client sends this header field to an origin server
1093 (possibly via a proxy) from which it has never seen a 100 (Continue)
1094 status, the client SHOULD NOT wait for an indefinite period before
1095 sending the request body.
1099 long ms = Curl_tvdiff(k->now, k->start100);
1100 if(ms >= data->set.expect_100_timeout) {
1101 /* we've waited long enough, continue anyway */
1102 k->exp100 = EXP100_SEND_DATA;
1103 k->keepon |= KEEP_SEND;
1104 infof(data, "Done waiting for 100-continue\n");
1109 if(Curl_pgrsUpdate(conn))
1110 result = CURLE_ABORTED_BY_CALLBACK;
1112 result = Curl_speedcheck(data, k->now);
/* Overall operation timeout — message varies with whether the expected
   total size is known (the branch separator is not visible here). */
1117 if(0 > Curl_timeleft(data, &k->now, FALSE)) {
1119 failf(data, "Operation timed out after %ld milliseconds with %"
1120 CURL_FORMAT_CURL_OFF_T " out of %"
1121 CURL_FORMAT_CURL_OFF_T " bytes received",
1122 Curl_tvdiff(k->now, data->progress.t_startsingle), k->bytecount,
1126 failf(data, "Operation timed out after %ld milliseconds with %"
1127 CURL_FORMAT_CURL_OFF_T " bytes received",
1128 Curl_tvdiff(k->now, data->progress.t_startsingle), k->bytecount);
1130 return CURLE_OPERATION_TIMEDOUT;
1135 * The transfer has been performed. Just make some general checks before
/* Short-body check: the byte count must match the announced size unless
   line-end conversion or a pending redirect explains the difference. */
1139 if(!(data->set.opt_no_body) && (k->size != -1) &&
1140 (k->bytecount != k->size) &&
1141 #ifdef CURL_DO_LINEEND_CONV
1142 /* Most FTP servers don't adjust their file SIZE response for CRLFs,
1143 so we'll check to see if the discrepancy can be explained
1144 by the number of CRLFs we've changed to LFs.
1146 (k->bytecount != (k->size + data->state.crlf_conversions)) &&
1147 #endif /* CURL_DO_LINEEND_CONV */
1148 !data->req.newurl) {
1149 failf(data, "transfer closed with %" CURL_FORMAT_CURL_OFF_T
1150 " bytes remaining to read",
1151 k->size - k->bytecount);
1152 return CURLE_PARTIAL_FILE;
1154 else if(!(data->set.opt_no_body) &&
1156 (conn->chunk.state != CHUNK_STOP)) {
1158 * In chunked mode, return an error if the connection is closed prior to
1159 * the empty (terminating) chunk is read.
1161 * The condition above used to check for
1162 * conn->proto.http->chunk.datasize != 0 which is true after reading
1163 * *any* chunk, not just the empty chunk.
1166 failf(data, "transfer closed with outstanding read data remaining");
1167 return CURLE_PARTIAL_FILE;
1169 if(Curl_pgrsUpdate(conn))
1170 return CURLE_ABORTED_BY_CALLBACK;
1173 /* Now update the "done" boolean we return */
1174 *done = (0 == (k->keepon&(KEEP_RECV|KEEP_SEND|
1175 KEEP_RECV_PAUSE|KEEP_SEND_PAUSE))) ? TRUE : FALSE;
1181 * Curl_single_getsock() gets called by the multi interface code when the app
1182 * has requested to get the sockets for the current connection. This function
1183 * will then be called once for every connection that the multi interface
1184 * keeps track of. This function will only be called for connections that are
1185 * in the proper state to have this information available.
/* Fills 'sock' with up to two sockets (read and/or write) and returns a
   GETSOCK_* bitmap describing them. Protocol handlers with their own
   perform_getsock override this entirely. HOLD/PAUSE'd directions are
   excluded via the KEEP_*BITS masks. NOTE(review): the numsocks capacity
   check's condition line is not visible in this sampled listing. */
1187 int Curl_single_getsock(const struct connectdata *conn,
1188 curl_socket_t *sock, /* points to numsocks number
1192 const struct SessionHandle *data = conn->data;
1193 int bitmap = GETSOCK_BLANK;
1194 unsigned sockindex = 0;
1196 if(conn->handler->perform_getsock)
1197 return conn->handler->perform_getsock(conn, sock, numsocks);
1200 /* simple check but we might need two slots */
1201 return GETSOCK_BLANK;
1203 /* don't include HOLD and PAUSE connections */
1204 if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {
1206 DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);
1208 bitmap |= GETSOCK_READSOCK(sockindex);
1209 sock[sockindex] = conn->sockfd;
1212 /* don't include HOLD and PAUSE connections */
1213 if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {
1215 if((conn->sockfd != conn->writesockfd) ||
1216 bitmap == GETSOCK_BLANK) {
1217 /* only if they are not the same socket and we have a readable
1218 one, we increase index */
1219 if(bitmap != GETSOCK_BLANK)
1220 sockindex++; /* increase index if we need two entries */
1222 DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);
1224 sock[sockindex] = conn->writesockfd;
1227 bitmap |= GETSOCK_WRITESOCK(sockindex);
1234 * Determine optimum sleep time based on configured rate, current rate,
1236 * Returns value in milliseconds.
1238 * The basic idea is to adjust the desired rate up/down in this method
1239 * based on whether we are running too slow or too fast. Then, calculate
1240 * how many milliseconds to wait for the next packet to achieve this new
/* Curl_sleep_time(): compute a sleep interval (milliseconds) that steers the
   transfer toward the configured rate limit.  The target rate is nudged up or
   down by 1/64th whenever the current rate is more than ~0.1% off, then the
   wait for the next packet is derived from the (adjusted) rate and packet
   size.  Shifts are used instead of division because the cutoffs are
   arbitrary anyway.
   NOTE(review): several original lines are missing from this excerpt
   (remaining parameters, the 'rv' declaration, the minimum-1ms and
   32-bit-long clamping code, and the return statement). */
1243 long Curl_sleep_time(curl_off_t rate_bps, curl_off_t cur_rate_bps,
1246 curl_off_t min_sleep = 0;
1252 /* If running faster than about .1% of the desired speed, slow
1253 * us down a bit. Use shift instead of division as the 0.1%
1254 * cutoff is arbitrary anyway.
1256 if(cur_rate_bps > (rate_bps + (rate_bps >> 10))) {
1257 /* running too fast, decrease target rate by 1/64th of rate */
1258 rate_bps -= rate_bps >> 6;
1261 else if(cur_rate_bps < (rate_bps - (rate_bps >> 10))) {
1262 /* running too slow, increase target rate by 1/64th of rate */
1263 rate_bps += rate_bps >> 6;
1266 /* Determine number of milliseconds to wait until we do
1267 * the next packet at the adjusted rate. We should wait
1268 * longer when using larger packets, for instance.
1270 rv = ((curl_off_t)(pkt_size * 1000) / rate_bps);
1272 /* Catch rounding errors and always slow down at least 1ms if
1273 * we are running too fast.
1278 /* Bound value to fit in 'long' on 32-bit platform. That's
1279 * plenty long enough anyway!
1288 * Curl_pretransfer() is called immediately before a transfer starts.
/* Curl_pretransfer(): reset per-transfer state on the handle immediately
   before a transfer starts — URL sanity check, SSL session cache init,
   follow/auth/progress state resets, cookie and resolve-list loading, and
   timeout arming.
   NOTE(review): the embedded numbering has gaps — the 'result' declaration,
   the error checks after Curl_ssl_initsessions()/Curl_loadhostpairs(), the
   POST branch of the infilesize assignment and the closing 'return result;'
   are missing from this excerpt. */
1290 CURLcode Curl_pretransfer(struct SessionHandle *data)
1293 if(!data->change.url) {
1294 /* we can't do anything without URL */
1295 failf(data, "No URL set!");
1296 return CURLE_URL_MALFORMAT;
1299 /* Init the SSL session ID cache here. We do it here since we want to do it
1300 after the *_setopt() calls (that could specify the size of the cache) but
1301 before any transfer takes place. */
1302 result = Curl_ssl_initsessions(data, data->set.ssl.max_ssl_sessions);
1306 data->set.followlocation=0; /* reset the location-follow counter */
1307 data->state.this_is_a_follow = FALSE; /* reset this */
1308 data->state.errorbuf = FALSE; /* no error has occurred */
1309 data->state.httpversion = 0; /* don't assume any particular server version */
1311 data->state.authproblem = FALSE;
1312 data->state.authhost.want = data->set.httpauth;
1313 data->state.authproxy.want = data->set.proxyauth;
1314 Curl_safefree(data->info.wouldredirect);
1315 data->info.wouldredirect = NULL;
/* pick the upload size from the matching source: PUT uses the file size,
   otherwise (presumably the POST branch — the 'else' line is missing here)
   the postfields size is used */
1317 if(data->set.httpreq == HTTPREQ_PUT)
1318 data->state.infilesize = data->set.filesize;
1320 data->state.infilesize = data->set.postfieldsize;
1322 /* If there is a list of cookie files to read, do it now! */
1323 if(data->change.cookielist)
1324 Curl_cookie_loadfiles(data);
1326 /* If there is a list of host pairs to deal with */
1327 if(data->change.resolve)
1328 result = Curl_loadhostpairs(data);
1331 /* Allow data->set.use_port to set which port to use. This needs to be
1332 * disabled for example when we follow Location: headers to URLs using
1333 * different ports! */
1334 data->state.allow_port = TRUE;
1336 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1337 /*************************************************************
1338 * Tell signal handler to ignore SIGPIPE
1339 *************************************************************/
1340 if(!data->set.no_signal)
1341 data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1344 Curl_initinfo(data); /* reset session-specific information "variables" */
1345 Curl_pgrsResetTimesSizes(data);
1346 Curl_pgrsStartNow(data);
1348 if(data->set.timeout)
1349 Curl_expire(data, data->set.timeout);
1351 if(data->set.connecttimeout)
1352 Curl_expire(data, data->set.connecttimeout);
1354 /* In case the handle is re-used and an authentication method was picked
1355 in the session we need to make sure we only use the one(s) we now
1356 consider to be fine */
1357 data->state.authhost.picked &= data->state.authhost.want;
1358 data->state.authproxy.picked &= data->state.authproxy.want;
1365 * Curl_posttransfer() is called immediately after a transfer ends
/* Curl_posttransfer(): undo what Curl_pretransfer() set up for signals —
   restore the SIGPIPE handler saved in data->state.prev_signal, unless
   signals were disabled.  On platforms without the signal dance the
   parameter is explicitly voided.
   NOTE(review): the '#else', '#endif' and 'return CURLE_OK;' lines are
   missing from this excerpt. */
1367 CURLcode Curl_posttransfer(struct SessionHandle *data)
1369 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1370 /* restore the signal handler for SIGPIPE before we get back */
1371 if(!data->set.no_signal)
1372 signal(SIGPIPE, data->state.prev_signal);
1374 (void)data; /* unused parameter */
1380 #ifndef CURL_DISABLE_HTTP
1382 * strlen_url() returns the length of the given URL if the spaces within the
1383 * URL were properly URL encoded.
/* strlen_url(): compute the length the URL would have once its spaces are
   URL-encoded.  'left' tracks whether we are still left of the '?' — spaces
   there become "%20" (3 chars) while spaces right of it become "+" (1 char);
   see the companion strcpy_url().
   NOTE(review): nearly all of the loop body and the return statement are
   missing from this excerpt. */
1385 static size_t strlen_url(const char *url)
1389 bool left=TRUE; /* left side of the ? */
1391 for(ptr=url; *ptr; ptr++) {
1410 /* strcpy_url() copies a url to a output buffer and URL-encodes the spaces in
1411 * the source URL accordingly.
/* strcpy_url(): copy 'url' into 'output' while URL-encoding spaces — "%20"
   left of the '?', '+' right of it — mirroring the length accounting done by
   strlen_url(), so the caller can size 'output' with strlen_url() first.
   NOTE(review): the loop header's iteration clause, the character-dispatch
   switch and several braces are missing from this excerpt. */
1413 static void strcpy_url(char *output, const char *url)
1415 /* we must add this with whitespace-replacing */
1418 char *optr = output;
1419 for(iptr = url; /* read from here */
1420 *iptr; /* until zero byte */
1431 *optr++='%'; /* add a '%' */
1432 *optr++='2'; /* add a '2' */
1433 *optr++='0'; /* add a '0' */
1436 *optr++='+'; /* add a '+' here */
1440 *optr=0; /* zero terminate output buffer */
1445 * Returns true if the given URL is absolute (as opposed to relative)
static bool is_absolute_url(const char *url)
{
  char scheme[16]; /* storage for the URL scheme name */
  char follower;   /* receives one character after "://" */

  /* absolute means: a scheme of up to 15 characters containing none of
     "?&/:", followed by "://" and at least one more character */
  return (bool)(2 == sscanf(url, "%15[^?&/:]://%c", scheme, &follower));
}
1456 * Concatenate a relative URL to a base URL making it absolute.
1457 * URL-encodes any spaces.
1458 * The returned pointer must be freed by the caller unless NULL
1459 * (returns NULL on out of memory).
/* concat_url(): resolve the relative URL 'relurl' against 'base' and return
   a newly malloc()ed absolute URL (NULL on out of memory); spaces in the new
   part are URL-encoded via strlen_url()/strcpy_url().  The caller frees the
   result.  Handles "./" and "../" stripping, query-only relative URLs, "//"
   protocol-relative URLs and badly formed bases lacking a path slash.
   NOTE(review): the embedded numbering has large gaps — variable
   declarations, many closing braces, the out-of-memory return after the
   malloc, the free(url_clone) on the success path and the final return are
   missing from this excerpt. */
1461 static char *concat_url(const char *base, const char *relurl)
1464 TRY to append this new path to the old URL
1465 to the right of the host part. Oh crap, this is doomed to cause
1466 problems in the future...
1473 const char *useurl = relurl;
1476 /* we must make our own copy of the URL to play with, as it may
1477 point to read-only data */
1478 char *url_clone=strdup(base);
1481 return NULL; /* skip out of this NOW */
1483 /* protsep points to the start of the host name */
1484 protsep=strstr(url_clone, "//");
1488 protsep+=2; /* pass the slashes */
/* relative-path case: relurl does not start with '/' */
1490 if('/' != relurl[0]) {
1493 /* First we need to find out if there's a ?-letter in the URL,
1494 and cut it and the right-side of that off */
1495 pathsep = strchr(protsep, '?');
1499 /* we have a relative path to append to the last slash if there's one
1500 available, or if the new URL is just a query string (starts with a
1501 '?') we append the new one at the end of the entire currently worked
1503 if(useurl[0] != '?') {
1504 pathsep = strrchr(protsep, '/');
1509 /* Check if there's any slash after the host name, and if so, remember
1510 that position instead */
1511 pathsep = strchr(protsep, '/');
1513 protsep = pathsep+1;
1517 /* now deal with one "./" or any amount of "../" in the newurl
1518 and act accordingly */
1520 if((useurl[0] == '.') && (useurl[1] == '/'))
1521 useurl+=2; /* just skip the "./" */
1523 while((useurl[0] == '.') &&
1524 (useurl[1] == '.') &&
1525 (useurl[2] == '/')) {
1527 useurl+=3; /* pass the "../" */
1532 /* cut off one more level from the right of the original URL */
1533 pathsep = strrchr(protsep, '/');
/* absolute-path case: relurl starts with '/' (or "//") */
1544 /* We got a new absolute path for this server */
1546 if((relurl[0] == '/') && (relurl[1] == '/')) {
1547 /* the new URL starts with //, just keep the protocol part from the
1550 useurl = &relurl[2]; /* we keep the slashes from the original, so we
1551 skip the new ones */
1554 /* cut off the original URL from the first slash, or deal with URLs
1556 pathsep = strchr(protsep, '/');
1558 /* When people use badly formatted URLs, such as
1559 "http://www.url.com?dir=/home/daniel" we must not use the first
1560 slash, if there's a ?-letter before it! */
1561 char *sep = strchr(protsep, '?');
1562 if(sep && (sep < pathsep))
1567 /* There was no slash. Now, since we might be operating on a badly
1568 formatted URL, such as "http://www.url.com?id=2380" which doesn't
1569 use a slash separator as it is supposed to, we need to check for a
1570 ?-letter as well! */
1571 pathsep = strchr(protsep, '?');
1578 /* If the new part contains a space, this is a mighty stupid redirect
1579 but we still make an effort to do "right". To the left of a '?'
1580 letter we replace each space with %20 while it is replaced with '+'
1581 on the right side of the '?' letter.
1583 newlen = strlen_url(useurl);
1585 urllen = strlen(url_clone);
1587 newest = malloc(urllen + 1 + /* possible slash */
1588 newlen + 1 /* zero byte */);
1591 free(url_clone); /* don't leak this */
1595 /* copy over the root url part */
1596 memcpy(newest, url_clone, urllen);
1598 /* check if we need to append a slash */
1599 if(('/' == useurl[0]) || (protsep && !*protsep) || ('?' == useurl[0]))
1602 newest[urllen++]='/';
1604 /* then append the new piece on the right side */
1605 strcpy_url(&newest[urllen], useurl);
1611 #endif /* CURL_DISABLE_HTTP */
1614 * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
1615 * as given by the remote server and set up the new URL to request.
/* Curl_follow(): act on a Location: redirect (or, for FOLLOW_FAKE, just
   record where we would have gone).  Enforces maxredirs, optionally sets the
   Referer:, resolves relative redirect URLs via concat_url(), space-encodes
   sloppy absolute URLs, installs the new URL on the handle, and finally
   adjusts the request method per the HTTP response code (301/302/303 may
   demote POST to GET unless CURLOPT_POSTREDIR keeps it).
   Ownership: 'newurl' must be malloc()ed by the caller; on success this
   function takes it over (it is stored in data->change.url or
   data->info.wouldredirect).
   NOTE(review): the embedded numbering has gaps — with HTTP disabled the
   '(void)' parameter lines, several closing braces, the free(newurl) on the
   absolute-URL path, 'break' statements between switch cases and the final
   'return CURLE_OK;' are missing from this excerpt. */
1617 CURLcode Curl_follow(struct SessionHandle *data,
1618 char *newurl, /* this 'newurl' is the Location: string,
1619 and it must be malloc()ed before passed
1621 followtype type) /* see transfer.h */
1623 #ifdef CURL_DISABLE_HTTP
1627 /* Location: following will not happen when HTTP is disabled */
1628 return CURLE_TOO_MANY_REDIRECTS;
1631 /* Location: redirect */
1632 bool disallowport = FALSE;
1634 if(type == FOLLOW_REDIR) {
1635 if((data->set.maxredirs != -1) &&
1636 (data->set.followlocation >= data->set.maxredirs)) {
1637 failf(data, "Maximum (%ld) redirects followed", data->set.maxredirs);
1638 return CURLE_TOO_MANY_REDIRECTS;
1641 /* mark the next request as a followed location: */
1642 data->state.this_is_a_follow = TRUE;
1644 data->set.followlocation++; /* count location-followers */
1646 if(data->set.http_auto_referer) {
1647 /* We are asked to automatically set the previous URL as the referer
1648 when we get the next URL. We pick the ->url field, which may or may
1649 not be 100% correct */
1651 if(data->change.referer_alloc) {
1652 Curl_safefree(data->change.referer);
1653 data->change.referer_alloc = FALSE;
1656 data->change.referer = strdup(data->change.url);
1657 if(!data->change.referer)
1658 return CURLE_OUT_OF_MEMORY;
1659 data->change.referer_alloc = TRUE; /* yes, free this later */
/* a relative Location: must be resolved against the current URL */
1663 if(!is_absolute_url(newurl)) {
1665 *DANG* this is an RFC 2068 violation. The URL is supposed
1666 to be absolute and this doesn't seem to be that!
1668 char *absolute = concat_url(data->change.url, newurl);
1670 return CURLE_OUT_OF_MEMORY;
1675 /* This is an absolute URL, don't allow the custom port number */
1676 disallowport = TRUE;
1678 if(strchr(newurl, ' ')) {
1679 /* This new URL contains at least one space, this is a mighty stupid
1680 redirect but we still make an effort to do "right". */
1682 size_t newlen = strlen_url(newurl);
1684 newest = malloc(newlen+1); /* get memory for this */
1686 return CURLE_OUT_OF_MEMORY;
1687 strcpy_url(newest, newurl); /* create a space-free URL */
1689 free(newurl); /* that was no good */
1690 newurl = newest; /* use this instead now */
1695 if(type == FOLLOW_FAKE) {
1696 /* we're only figuring out the new url if we would've followed locations
1697 but now we're done so we can get out! */
1698 data->info.wouldredirect = newurl;
1703 data->state.allow_port = FALSE;
1705 if(data->change.url_alloc) {
1706 Curl_safefree(data->change.url);
1707 data->change.url_alloc = FALSE;
1710 data->change.url = newurl;
1711 data->change.url_alloc = TRUE;
1712 newurl = NULL; /* don't free! */
1714 infof(data, "Issue another request to this URL: '%s'\n", data->change.url);
1717 * We get here when the HTTP code is 300-399 (and 401). We need to perform
1718 * differently based on exactly what return code there was.
1720 * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
1721 * a HTTP (proxy-) authentication scheme other than Basic.
1723 switch(data->info.httpcode) {
1724 /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
1725 Authorization: XXXX header in the HTTP request code snippet */
1726 /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
1727 Proxy-Authorization: XXXX header in the HTTP request code snippet */
1728 /* 300 - Multiple Choices */
1729 /* 306 - Not used */
1730 /* 307 - Temporary Redirect */
1731 default: /* for all above (and the unknown ones) */
1732 /* Some codes are explicitly mentioned since I've checked RFC2616 and they
1733 * seem to be OK to POST to.
1736 case 301: /* Moved Permanently */
1737 /* (quote from RFC7231, section 6.4.2)
1739 * Note: For historical reasons, a user agent MAY change the request
1740 * method from POST to GET for the subsequent request. If this
1741 * behavior is undesired, the 307 (Temporary Redirect) status code
1742 * can be used instead.
1746 * Many webservers expect this, so these servers often answers to a POST
1747 * request with an error page. To be sure that libcurl gets the page that
1748 * most user agents would get, libcurl has to force GET.
1750 * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
1751 * can be overridden with CURLOPT_POSTREDIR.
1753 if((data->set.httpreq == HTTPREQ_POST
1754 || data->set.httpreq == HTTPREQ_POST_FORM)
1755 && !(data->set.keep_post & CURL_REDIR_POST_301)) {
1756 infof(data, "Switch from POST to GET\n");
1757 data->set.httpreq = HTTPREQ_GET;
1760 case 302: /* Found */
1761 /* (quote from RFC7231, section 6.4.3)
1763 * Note: For historical reasons, a user agent MAY change the request
1764 * method from POST to GET for the subsequent request. If this
1765 * behavior is undesired, the 307 (Temporary Redirect) status code
1766 * can be used instead.
1770 * Many webservers expect this, so these servers often answers to a POST
1771 * request with an error page. To be sure that libcurl gets the page that
1772 * most user agents would get, libcurl has to force GET.
1774 * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
1775 * can be overridden with CURLOPT_POSTREDIR.
1777 if((data->set.httpreq == HTTPREQ_POST
1778 || data->set.httpreq == HTTPREQ_POST_FORM)
1779 && !(data->set.keep_post & CURL_REDIR_POST_302)) {
1780 infof(data, "Switch from POST to GET\n");
1781 data->set.httpreq = HTTPREQ_GET;
1785 case 303: /* See Other */
1786 /* Disable both types of POSTs, unless the user explicitely
1787 asks for POST after POST */
1788 if(data->set.httpreq != HTTPREQ_GET
1789 && !(data->set.keep_post & CURL_REDIR_POST_303)) {
1790 data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
1791 infof(data, "Disables POST, goes with %s\n",
1792 data->set.opt_no_body?"HEAD":"GET");
1795 case 304: /* Not Modified */
1796 /* 304 means we did a conditional request and it was "Not modified".
1797 * We shouldn't get any Location: header in this response!
1800 case 305: /* Use Proxy */
1801 /* (quote from RFC2616, section 10.3.6):
1802 * "The requested resource MUST be accessed through the proxy given
1803 * by the Location field. The Location field gives the URI of the
1804 * proxy. The recipient is expected to repeat this single request
1805 * via the proxy. 305 responses MUST only be generated by origin
1810 Curl_pgrsTime(data, TIMER_REDIRECT);
1811 Curl_pgrsResetTimesSizes(data);
1814 #endif /* CURL_DISABLE_HTTP */
/* Curl_reconnect_request(): a re-used connection died during the DO phase —
   close and finish it with Curl_done(), then attempt a fresh
   Curl_connect()/name-resolve cycle so the request can be retried.  '*connp'
   is updated to the new connection on success.
   NOTE(review): gaps in the embedded numbering — the 'CURLcode' return-type
   line, the '*connp = NULL;' clearing, error-path braces and the final
   'return result;' are missing from this excerpt. */
1818 Curl_reconnect_request(struct connectdata **connp)
1820 CURLcode result = CURLE_OK;
1821 struct connectdata *conn = *connp;
1822 struct SessionHandle *data = conn->data;
1824 /* This was a re-use of a connection and we got a write error in the
1825 * DO-phase. Then we DISCONNECT this connection and have another attempt to
1826 * CONNECT and then DO again! The retry cannot possibly find another
1827 * connection to re-use, since we only keep one possible connection for
1830 infof(data, "Re-used connection seems dead, get a new one\n");
1832 connclose(conn, "Reconnect dead connection"); /* enforce close */
1833 result = Curl_done(&conn, result, FALSE); /* we are so done with this */
1835 /* conn may no longer be a good pointer, clear it to avoid mistakes by
1840 * According to bug report #1330310. We need to check for CURLE_SEND_ERROR
1841 * here as well. I figure this could happen when the request failed on a FTP
1842 * connection and thus Curl_done() itself tried to use the connection
1843 * (again). Slight Lack of feedback in the report, but I don't think this
1844 * extra check can do much harm.
1846 if(!result || (CURLE_SEND_ERROR == result)) {
1848 bool protocol_done = TRUE;
1850 /* Now, redo the connect and get a new connection */
1851 result = Curl_connect(data, connp, &async, &protocol_done);
1853 /* We have connected or sent away a name resolve query fine */
1855 conn = *connp; /* setup conn to again point to something nice */
1857 /* Now, if async is TRUE here, we need to wait for the name
1859 result = Curl_resolver_wait_resolv(conn, NULL);
1863 /* Resolved, continue with the connection */
1864 result = Curl_async_resolved(conn, &protocol_done);
1874 /* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.
1876 NOTE: that the *url is malloc()ed. */
/* Curl_retry_request(): decide whether the just-failed transfer should be
   retried on a fresh connection.  Returns CURLE_OK *and* sets '*url' (a
   malloc()ed copy of the current URL that the caller must free) when a retry
   is wanted — the classic case being a re-used connection that the server
   closed before we received a single byte.  For HTTP uploads with data
   already written, rewinds the read stream so the body can be re-sent.
   NOTE(review): the embedded numbering has gaps — the remaining parameters
   ('char **url' at least), the '*url = NULL' initialization, 'retry'
   condition lines, several braces and the final return are missing from this
   excerpt. */
1877 CURLcode Curl_retry_request(struct connectdata *conn,
1880 struct SessionHandle *data = conn->data;
1884 /* if we're talking upload, we can't do the checks below, unless the protocol
1885 is HTTP as when uploading over HTTP we will still get a response */
1886 if(data->set.upload &&
1887 !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
1890 if((data->req.bytecount + data->req.headerbytecount == 0) &&
1892 !data->set.opt_no_body &&
1893 (data->set.rtspreq != RTSPREQ_RECEIVE)) {
1894 /* We got no data, we attempted to re-use a connection and yet we want a
1895 "body". This might happen if the connection was left alive when we were
1896 done using it before, but that was closed when we wanted to read from
1897 it again. Bad luck. Retry the same request on a fresh connect! */
1898 infof(conn->data, "Connection died, retrying a fresh connect\n");
1899 *url = strdup(conn->data->change.url);
1901 return CURLE_OUT_OF_MEMORY;
1903 connclose(conn, "retry"); /* close this connection */
1904 conn->bits.retry = TRUE; /* mark this as a connection we're about
1905 to retry. Marking it this way should
1906 prevent i.e HTTP transfers to return
1907 error just because nothing has been
/* if request data was already sent on an HTTP(-family) connection, rewind
   the read stream so a retried request can re-send the body */
1911 if(conn->handler->protocol&PROTO_FAMILY_HTTP) {
1912 struct HTTP *http = data->req.protop;
1913 if(http->writebytecount)
1914 return Curl_readrewind(conn);
1921 * Curl_setup_transfer() is called to setup some basic properties for the
1922 * upcoming transfer.
1925 Curl_setup_transfer(
1926 struct connectdata *conn, /* connection data */
1927 int sockindex, /* socket index to read from or -1 */
1928 curl_off_t size, /* -1 if unknown at this point */
1929 bool getheader, /* TRUE if header parsing is wanted */
1930 curl_off_t *bytecountp, /* return number of bytes read or NULL */
1931 int writesockindex, /* socket index to write to, it may very well be
1932 the same we read from. -1 disables */
1933 curl_off_t *writecountp /* return number of bytes written or NULL */
1936 struct SessionHandle *data;
1937 struct SingleRequest *k;
1939 DEBUGASSERT(conn != NULL);
1944 DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));
1946 /* now copy all input parameters */
1947 conn->sockfd = sockindex == -1 ?
1948 CURL_SOCKET_BAD : conn->sock[sockindex];
1949 conn->writesockfd = writesockindex == -1 ?
1950 CURL_SOCKET_BAD:conn->sock[writesockindex];
1951 k->getheader = getheader;
1954 k->bytecountp = bytecountp;
1955 k->writebytecountp = writecountp;
1957 /* The code sequence below is placed in this function just because all
1958 necessary input is not always known in do_complete() as this function may
1959 be called after that */
1964 Curl_pgrsSetDownloadSize(data, size);
1966 /* we want header and/or body, if neither then don't do this! */
1967 if(k->getheader || !data->set.opt_no_body) {
1969 if(conn->sockfd != CURL_SOCKET_BAD)
1970 k->keepon |= KEEP_RECV;
1972 if(conn->writesockfd != CURL_SOCKET_BAD) {
1973 struct HTTP *http = data->req.protop;
1976 Even if we require a 100-return code before uploading data, we might
1977 need to write data before that since the REQUEST may not have been
1978 finished sent off just yet.
1980 Thus, we must check if the request has been sent before we set the
1981 state info where we wait for the 100-return code
1983 if((data->state.expect100header) &&
1984 (conn->handler->protocol&PROTO_FAMILY_HTTP) &&
1985 (http->sending == HTTPSEND_BODY)) {
1986 /* wait with write until we either got 100-continue or a timeout */
1987 k->exp100 = EXP100_AWAITING_CONTINUE;
1988 k->start100 = Curl_tvnow();
1990 /* Set a timeout for the multi interface. Add the inaccuracy margin so
1991 that we don't fire slightly too early and get denied to run. */
1992 Curl_expire(data, data->set.expect_100_timeout);
1995 if(data->state.expect100header)
1996 /* when we've sent off the rest of the headers, we must await a
1997 100-continue but first finish sending the request */
1998 k->exp100 = EXP100_SENDING_REQUEST;
2000 /* enable the write bit when we're not waiting for continue */
2001 k->keepon |= KEEP_SEND;
2003 } /* if(conn->writesockfd != CURL_SOCKET_BAD) */
2004 } /* if(k->getheader || !data->set.opt_no_body) */