1 /***************************************************************************
3 * Project ___| | | | _ \| |
5 * | (__| |_| | _ <| |___
6 * \___|\___/|_| \_\_____|
8 * Copyright (C) 1998 - 2013, Daniel Stenberg, <daniel@haxx.se>, et al.
10 * This software is licensed as described in the file COPYING, which
11 * you should have received as part of this distribution. The terms
12 * are also available at http://curl.haxx.se/docs/copyright.html.
14 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15 * copies of the Software, and permit persons to whom the Software is
16 * furnished to do so, under the terms of the COPYING file.
18 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19 * KIND, either express or implied.
21 ***************************************************************************/
23 #include "curl_setup.h"
25 #include "strtoofft.h"
29 #ifdef HAVE_NETINET_IN_H
30 #include <netinet/in.h>
35 #ifdef HAVE_ARPA_INET_H
36 #include <arpa/inet.h>
41 #ifdef HAVE_SYS_IOCTL_H
42 #include <sys/ioctl.h>
48 #ifdef HAVE_SYS_PARAM_H
49 #include <sys/param.h>
52 #ifdef HAVE_SYS_SELECT_H
53 #include <sys/select.h>
57 #error "We can't compile without socket() support!"
61 #include <curl/curl.h>
64 #include "content_encoding.h"
68 #include "speedcheck.h"
73 #include "vtls/vtls.h"
74 #include "http_digest.h"
75 #include "curl_ntlm.h"
76 #include "http_negotiate.h"
78 #include "curl_memory.h"
82 #include "non-ascii.h"
84 #define _MPRINTF_REPLACE /* use our functions only */
85 #include <curl/mprintf.h>
87 /* The last #include file should be: */
90 #define CURL_TIMEOUT_EXPECT_100 1000 /* counting ms here */
/* NOTE(review): this region has had many physical lines elided by whatever
   extraction produced this file (the leading integers on each line look like
   fused original line numbers, and several braces/#else/#endif lines are
   missing). Only comments are added here; verify the code itself against
   upstream lib/transfer.c before building. */
93 * This function will call the read callback to fill our buffer with data
96 CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
98 struct SessionHandle *data = conn->data;
99 size_t buffersize = (size_t)bytes;
101 #ifdef CURL_DOES_CONVERSIONS
102 bool sending_http_headers = FALSE;
104 if(conn->handler->protocol&(CURLPROTO_HTTP|CURLPROTO_RTSP)) {
105 const struct HTTP *http = data->req.protop;
107 if(http->sending == HTTPSEND_REQUEST)
108 /* We're sending the HTTP request headers, not the data.
109 Remember that so we don't re-translate them into garbage. */
110 sending_http_headers = TRUE;
/* Chunked uploads: reserve room up front for the hex chunk-size prefix and
   the trailing CRLFs so the callback's data can be framed in place later. */
114 if(data->req.upload_chunky) {
115 /* if chunked Transfer-Encoding */
116 buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
117 data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
120 /* this function returns a size_t, so we typecast to int to prevent warnings
121 with picky compilers */
/* Invoke the application's CURLOPT_READFUNCTION to produce upload data. */
122 nread = (int)conn->fread_func(data->req.upload_fromhere, 1,
123 buffersize, conn->fread_in);
125 if(nread == CURL_READFUNC_ABORT) {
126 failf(data, "operation aborted by callback");
128 return CURLE_ABORTED_BY_CALLBACK;
/* CURL_READFUNC_PAUSE: mark the send direction paused instead of reading */
130 else if(nread == CURL_READFUNC_PAUSE) {
132 if(conn->handler->flags & PROTOPT_NONETWORK) {
133 /* protocols that work without network cannot be paused. This is
134 actually only FILE:// just now, and it can't pause since the transfer
135 isn't done using the "normal" procedure. */
136 failf(data, "Read callback asked for PAUSE when not supported!");
137 return CURLE_READ_ERROR;
140 struct SingleRequest *k = &data->req;
141 /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
142 k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
143 if(data->req.upload_chunky) {
144 /* Back out the preallocation done above */
145 data->req.upload_fromhere -= (8 + 2);
149 return CURLE_OK; /* nothing was read */
151 else if((size_t)nread > buffersize) {
152 /* the read function returned a too large value */
154 failf(data, "read function returned funny value");
155 return CURLE_READ_ERROR;
/* Chunked framing: prepend "<hex-size><EOL>" and append the end-of-line
   sequence after the data read above. */
158 if(!data->req.forbidchunk && data->req.upload_chunky) {
159 /* if chunked Transfer-Encoding
165 /* On non-ASCII platforms the <DATA> may or may not be
166 translated based on set.prefer_ascii while the protocol
167 portion must always be translated to the network encoding.
168 To further complicate matters, line end conversion might be
169 done later on, so we need to prevent CRLFs from becoming
170 CRCRLFs if that's the case. To do this we use bare LFs
171 here, knowing they'll become CRLFs later on.
175 const char *endofline_native;
176 const char *endofline_network;
180 #ifdef CURL_DO_LINEEND_CONV
181 (data->set.prefer_ascii) ||
184 /* \n will become \r\n later on */
185 endofline_native = "\n";
186 endofline_network = "\x0a";
189 endofline_native = "\r\n";
190 endofline_network = "\x0d\x0a";
192 hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
193 "%x%s", nread, endofline_native);
195 /* move buffer pointer */
196 data->req.upload_fromhere -= hexlen;
199 /* copy the prefix to the buffer, leaving out the NUL */
200 memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
202 /* always append ASCII CRLF to the data */
203 memcpy(data->req.upload_fromhere + nread,
205 strlen(endofline_network));
207 #ifdef CURL_DOES_CONVERSIONS
210 if(data->set.prefer_ascii) {
211 /* translate the protocol and data */
215 /* just translate the protocol portion */
216 length = strlen(hexbuffer);
218 res = Curl_convert_to_network(data, data->req.upload_fromhere, length);
219 /* Curl_convert_to_network calls failf if unsuccessful */
222 #endif /* CURL_DOES_CONVERSIONS */
/* A zero-length read from the callback means end of upload; the zero-size
   chunk just framed is the chunked-encoding terminator. */
224 if((nread - hexlen) == 0)
225 /* mark this as done once this chunk is transferred */
226 data->req.upload_done = TRUE;
228 nread+=(int)strlen(endofline_native); /* for the added end of line */
230 #ifdef CURL_DOES_CONVERSIONS
231 else if((data->set.prefer_ascii) && (!sending_http_headers)) {
233 res = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
234 /* Curl_convert_to_network calls failf if unsuccessful */
238 #endif /* CURL_DOES_CONVERSIONS */
/* NOTE(review): the function tail (presumably storing nread into *nreadp and
   the return statement) is not visible here — confirm against upstream. */
247 * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
248 * POST/PUT with multi-pass authentication when a sending was denied and a
249 * resend is necessary.
251 CURLcode Curl_readrewind(struct connectdata *conn)
253 struct SessionHandle *data = conn->data;
255 conn->bits.rewindaftersend = FALSE; /* we rewind now */
257 /* explicitly switch off sending data on this connection now since we are
258 about to restart a new transfer and thus we want to avoid inadvertently
259 sending more data on the existing connection until the next transfer
261 data->req.keepon &= ~KEEP_SEND;
263 /* We have sent away data. If not using CURLOPT_POSTFIELDS or
264 CURLOPT_HTTPPOST, call app to rewind
266 if(data->set.postfields ||
267 (data->set.httpreq == HTTPREQ_POST_FORM))
270 if(data->set.seek_func) {
273 err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
275 failf(data, "seek callback returned error %d", (int)err);
276 return CURLE_SEND_FAIL_REWIND;
279 else if(data->set.ioctl_func) {
282 err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
283 data->set.ioctl_client);
284 infof(data, "the ioctl callback returned %d\n", (int)err);
287 /* FIXME: convert to a human readable error message */
288 failf(data, "ioctl callback returned error %d", (int)err);
289 return CURLE_SEND_FAIL_REWIND;
293 /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
294 given FILE * stream and we can actually attempt to rewind that
295 ourselves with fseek() */
296 if(data->set.fread_func == (curl_read_callback)fread) {
297 if(-1 != fseek(data->set.in, 0, SEEK_SET))
298 /* successful rewind */
302 /* no callback set or failure above, makes us fail at once */
303 failf(data, "necessary data rewind wasn't possible");
304 return CURLE_SEND_FAIL_REWIND;
310 static int data_pending(const struct connectdata *conn)
312 /* in the case of libssh2, we can never be really sure that we have emptied
313 its internal buffers so we MUST always try until we get EAGAIN back */
314 return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
315 Curl_ssl_data_pending(conn, FIRSTSOCKET);
318 static void read_rewind(struct connectdata *conn,
321 DEBUGASSERT(conn->read_pos >= thismuch);
323 conn->read_pos -= thismuch;
324 conn->bits.stream_was_rewound = TRUE;
331 show = CURLMIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
332 if(conn->master_buffer) {
333 memcpy(buf, conn->master_buffer + conn->read_pos, show);
340 DEBUGF(infof(conn->data,
341 "Buffer after stream rewind (read_pos = %zu): [%s]\n",
342 conn->read_pos, buf));
348 * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
349 * remote document with the time provided by CURLOPT_TIMEVAL
351 bool Curl_meets_timecondition(struct SessionHandle *data, time_t timeofdoc)
353 if((timeofdoc == 0) || (data->set.timevalue == 0))
356 switch(data->set.timecondition) {
357 case CURL_TIMECOND_IFMODSINCE:
359 if(timeofdoc <= data->set.timevalue) {
361 "The requested document is not new enough\n");
362 data->info.timecond = TRUE;
366 case CURL_TIMECOND_IFUNMODSINCE:
367 if(timeofdoc >= data->set.timevalue) {
369 "The requested document is not old enough\n");
370 data->info.timecond = TRUE;
/* NOTE(review): this region has had many physical lines elided by extraction
   (leading integers are fused original line numbers; braces, #else branches
   and whole statements are missing). Only comments are added here; verify
   the code against upstream lib/transfer.c before building. */
380 * Go ahead and do a read if we have a readable socket or if
381 * the stream was rewound (in which case we have data in a
384 static CURLcode readwrite_data(struct SessionHandle *data,
385 struct connectdata *conn,
386 struct SingleRequest *k,
387 int *didwhat, bool *done)
389 CURLcode result = CURLE_OK;
390 ssize_t nread; /* number of bytes read */
391 size_t excess = 0; /* excess bytes read */
392 bool is_empty_data = FALSE;
393 bool readmore = FALSE; /* used by RTP to signal for more data */
397 /* This is where we loop until we have read everything there is to
398 read or we get a CURLE_AGAIN */
/* Size the read: honor CURLOPT_BUFFERSIZE, else the default BUFSIZE. */
400 size_t buffersize = data->set.buffer_size?
401 data->set.buffer_size : BUFSIZE;
402 size_t bytestoread = buffersize;
404 if(k->size != -1 && !k->header) {
405 /* make sure we don't read "too much" if we can help it since we
406 might be pipelining and then someone else might want to read what
408 curl_off_t totalleft = k->size - k->bytecount;
409 if(totalleft < (curl_off_t)bytestoread)
410 bytestoread = (size_t)totalleft;
414 /* receive data from the network! */
415 result = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);
417 /* read would've blocked */
418 if(CURLE_AGAIN == result)
419 break; /* get out of loop */
425 /* read nothing but since we wanted nothing we consider this an OK
426 situation to proceed from */
/* First byte in either direction: start the transfer timer, and note the
   time to compare against while waiting for a 100-continue. */
430 if((k->bytecount == 0) && (k->writebytecount == 0)) {
431 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
432 if(k->exp100 > EXP100_SEND_DATA)
433 /* set time stamp to compare with when waiting for the 100 */
434 k->start100 = Curl_tvnow();
437 *didwhat |= KEEP_RECV;
438 /* indicates data of zero size, i.e. empty file */
439 is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;
441 /* NUL terminate, allowing string ops to be used */
442 if(0 < nread || is_empty_data) {
445 else if(0 >= nread) {
446 /* if we receive 0 or less here, the server closed the connection
447 and we bail out from this! */
448 DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
449 k->keepon &= ~KEEP_RECV;
453 /* Default buffer to use when we write the buffer, it may be changed
454 in the flow below before the actual storing is done. */
/* Give protocol handlers (e.g. RTSP) first crack at the raw data. */
457 if(conn->handler->readwrite) {
458 result = conn->handler->readwrite(data, conn, &nread, &readmore);
465 #ifndef CURL_DISABLE_HTTP
466 /* Since this is a two-state thing, we check if we are parsing
467 headers at the moment or not. */
469 /* we are in parse-the-header-mode */
470 bool stop_reading = FALSE;
471 result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
475 if(conn->handler->readwrite &&
476 (k->maxdownload <= 0 && nread > 0)) {
477 result = conn->handler->readwrite(data, conn, &nread, &readmore);
485 /* We've stopped dealing with input, get out of the do-while loop */
/* Leftover bytes after a zero-length body: rewind when pipelining so the
   next request on this connection can consume them. */
488 if(Curl_multi_pipeline_enabled(conn->data->multi)) {
490 "Rewinding stream by : %zd"
491 " bytes on url %s (zero-length body)\n",
492 nread, data->state.path);
493 read_rewind(conn, (size_t)nread);
497 "Excess found in a non pipelined read:"
499 " url = %s (zero-length body)\n",
500 nread, data->state.path);
507 #endif /* CURL_DISABLE_HTTP */
510 /* This is not an 'else if' since it may be a rest from the header
511 parsing, where the beginning of the buffer is headers and the end
513 if(k->str && !k->header && (nread > 0 || is_empty_data)) {
515 #ifndef CURL_DISABLE_HTTP
516 if(0 == k->bodywrites && !is_empty_data) {
517 /* These checks are only made the first time we are about to
518 write a piece of the body */
519 if(conn->handler->protocol&(CURLPROTO_HTTP|CURLPROTO_RTSP)) {
520 /* HTTP-only checks */
522 if(data->req.newurl) {
523 if(conn->bits.close) {
524 /* Abort after the headers if "follow Location" is set
525 and we're set to close anyway. */
526 k->keepon &= ~KEEP_RECV;
530 /* We have a new url to load, but since we want to be able
531 to re-use this connection properly, we read the full
532 response in "ignore more" */
533 k->ignorebody = TRUE;
534 infof(data, "Ignoring the response-body\n");
536 if(data->state.resume_from && !k->content_range &&
537 (data->set.httpreq==HTTPREQ_GET) &&
539 /* we wanted to resume a download, although the server doesn't
540 * seem to support this and we did this with a GET (if it
541 * wasn't a GET we did a POST or PUT resume) */
542 failf(data, "HTTP server doesn't seem to support "
543 "byte ranges. Cannot resume.");
544 return CURLE_RANGE_ERROR;
547 if(data->set.timecondition && !data->state.range) {
548 /* A time condition has been set AND no ranges have been
549 requested. This seems to be what chapter 13.3.4 of
550 RFC 2616 defines to be the correct action for a
553 if(!Curl_meets_timecondition(data, k->timeofdoc)) {
555 /* We're simulating a http 304 from server so we return
556 what should have been returned from the server */
557 data->info.httpcode = 304;
558 infof(data, "Simulate a HTTP 304 response!\n");
559 /* we abort the transfer before it is completed == we ruin the
560 re-use ability. Close the connection */
561 conn->bits.close = TRUE;
564 } /* we have a time condition */
566 } /* this is HTTP or RTSP */
567 } /* this is the first time we write a body part */
568 #endif /* CURL_DISABLE_HTTP */
572 /* pass data to the debug function before it gets "dechunked" */
573 if(data->set.verbose) {
575 Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
576 (size_t)k->hbuflen, conn);
577 if(k->badheader == HEADER_PARTHEADER)
578 Curl_debug(data, CURLINFO_DATA_IN,
579 k->str, (size_t)nread, conn);
582 Curl_debug(data, CURLINFO_DATA_IN,
583 k->str, (size_t)nread, conn);
586 #ifndef CURL_DISABLE_HTTP
589 * Here comes a chunked transfer flying and we need to decode this
590 * properly. While the name says read, this function both reads
591 * and writes away the data. The returned 'nread' holds the number
592 * of actual data it wrote to the client.
596 Curl_httpchunk_read(conn, k->str, nread, &nread);
598 if(CHUNKE_OK < res) {
599 if(CHUNKE_WRITE_ERROR == res) {
600 failf(data, "Failed writing data");
601 return CURLE_WRITE_ERROR;
603 failf(data, "Problem (%d) in the Chunked-Encoded data", (int)res);
604 return CURLE_RECV_ERROR;
606 else if(CHUNKE_STOP == res) {
608 /* we're done reading chunks! */
609 k->keepon &= ~KEEP_RECV; /* read no more */
611 /* There are now possibly N number of bytes at the end of the
612 str buffer that weren't written to the client.
614 We DO care about this data if we are pipelining.
615 Push it back to be read on the next pass. */
617 dataleft = conn->chunk.dataleft;
619 infof(conn->data, "Leftovers after chunking: %zu bytes\n",
621 if(Curl_multi_pipeline_enabled(conn->data->multi)) {
622 /* only attempt the rewind if we truly are pipelining */
623 infof(conn->data, "Rewinding %zu bytes\n",dataleft);
624 read_rewind(conn, dataleft);
628 /* If it returned OK, we just keep going */
630 #endif /* CURL_DISABLE_HTTP */
632 /* Account for body content stored in the header buffer */
633 if(k->badheader && !k->ignorebody) {
634 DEBUGF(infof(data, "Increasing bytecount by %zu from hbuflen\n",
636 k->bytecount += k->hbuflen;
/* Clamp to maxdownload: any bytes beyond it are 'excess' and either
   rewound (pipelining) or merely reported. */
639 if((-1 != k->maxdownload) &&
640 (k->bytecount + nread >= k->maxdownload)) {
642 excess = (size_t)(k->bytecount + nread - k->maxdownload);
643 if(excess > 0 && !k->ignorebody) {
644 if(Curl_multi_pipeline_enabled(conn->data->multi)) {
645 /* The 'excess' amount below can't be more than BUFSIZE which
646 always will fit in a size_t */
648 "Rewinding stream by : %zu"
649 " bytes on url %s (size = %" CURL_FORMAT_CURL_OFF_T
650 ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
651 ", bytecount = %" CURL_FORMAT_CURL_OFF_T ", nread = %zd)\n",
652 excess, data->state.path,
653 k->size, k->maxdownload, k->bytecount, nread);
654 read_rewind(conn, excess);
658 "Excess found in a non pipelined read:"
660 ", size = %" CURL_FORMAT_CURL_OFF_T
661 ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
662 ", bytecount = %" CURL_FORMAT_CURL_OFF_T "\n",
663 excess, k->size, k->maxdownload, k->bytecount);
667 nread = (ssize_t) (k->maxdownload - k->bytecount);
668 if(nread < 0 ) /* this should be unusual */
671 k->keepon &= ~KEEP_RECV; /* we're done reading */
674 k->bytecount += nread;
676 Curl_pgrsSetDownloadCounter(data, k->bytecount);
678 if(!k->chunk && (nread || k->badheader || is_empty_data)) {
679 /* If this is chunky transfer, it was already written */
681 if(k->badheader && !k->ignorebody) {
682 /* we parsed a piece of data wrongly assuming it was a header
683 and now we output it as body instead */
685 /* Don't let excess data pollute body writes */
686 if(k->maxdownload == -1 || (curl_off_t)k->hbuflen <= k->maxdownload)
687 result = Curl_client_write(conn, CLIENTWRITE_BODY,
688 data->state.headerbuff,
691 result = Curl_client_write(conn, CLIENTWRITE_BODY,
692 data->state.headerbuff,
693 (size_t)k->maxdownload);
698 if(k->badheader < HEADER_ALLBAD) {
699 /* This switch handles various content encodings. If there's an
700 error here, be sure to check over the almost identical code
702 Make sure that ALL_CONTENT_ENCODINGS contains all the
703 encodings handled here. */
705 switch (conn->data->set.http_ce_skip ?
706 IDENTITY : k->auto_decoding) {
709 /* This is the default when the server sends no
710 Content-Encoding header. See Curl_readwrite_init; the
711 memset() call initializes k->auto_decoding to zero. */
714 #ifndef CURL_DISABLE_POP3
715 if(conn->handler->protocol&CURLPROTO_POP3)
716 result = Curl_pop3_write(conn, k->str, nread);
718 #endif /* CURL_DISABLE_POP3 */
720 result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
727 /* Assume CLIENTWRITE_BODY; headers are not encoded. */
729 result = Curl_unencode_deflate_write(conn, k, nread);
733 /* Assume CLIENTWRITE_BODY; headers are not encoded. */
735 result = Curl_unencode_gzip_write(conn, k, nread);
740 failf (data, "Unrecognized content encoding type. "
741 "libcurl understands `identity', `deflate' and `gzip' "
742 "content encodings.");
743 result = CURLE_BAD_CONTENT_ENCODING;
748 k->badheader = HEADER_NORMAL; /* taken care of now */
754 } /* if(! header and data to read ) */
756 if(conn->handler->readwrite &&
757 (excess > 0 && !conn->bits.stream_was_rewound)) {
758 /* Parse the excess data */
760 nread = (ssize_t)excess;
762 result = conn->handler->readwrite(data, conn, &nread, &readmore);
767 k->keepon |= KEEP_RECV; /* we're not done reading */
772 /* if we received nothing, the server closed the connection and we
774 k->keepon &= ~KEEP_RECV;
/* Keep looping while the lower layers still hold buffered data. */
777 } while(data_pending(conn));
779 if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
781 /* When we've read the entire thing and the close bit is set, the server
782 may now close the connection. If there's now any kind of sending going
783 on from our side, we need to stop that immediately. */
784 infof(data, "we are done reading and this is set to close, stop send\n");
785 k->keepon &= ~KEEP_SEND; /* no writing anymore either */
/* NOTE(review): this region has had many physical lines elided by extraction
   (leading integers are fused original line numbers; braces, #else branches
   and whole statements are missing). Only comments are added here; verify
   the code against upstream lib/transfer.c before building. */
792 * Send data to upload to the server, when the socket is writable.
794 static CURLcode readwrite_upload(struct SessionHandle *data,
795 struct connectdata *conn,
796 struct SingleRequest *k,
800 ssize_t bytes_written;
802 ssize_t nread; /* number of bytes read */
803 bool sending_http_headers = FALSE;
805 if((k->bytecount == 0) && (k->writebytecount == 0))
806 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
808 *didwhat |= KEEP_SEND;
811 * We loop here to do the READ and SEND loop until we run out of
812 * data to send or until we get EWOULDBLOCK back
814 * FIXME: above comment is misleading. Currently no looping is
815 * actually done in do-while loop below.
819 /* only read more data if there's no upload data already
820 present in the upload buffer */
821 if(0 == data->req.upload_present) {
822 /* init the "upload from here" pointer */
823 data->req.upload_fromhere = k->uploadbuf;
825 if(!k->upload_done) {
826 /* HTTP pollution, this should be written nicer to become more
827 protocol agnostic. */
829 struct HTTP *http = data->req.protop;
/* Expect: 100-continue handling — hold off sending the body until the
   server answers (or the CURL_TIMEOUT_EXPECT_100 timeout fires). */
831 if((k->exp100 == EXP100_SENDING_REQUEST) &&
832 (http->sending == HTTPSEND_BODY)) {
833 /* If this call is to send body data, we must take some action:
834 We have sent off the full HTTP 1.1 request, and we shall now
835 go into the Expect: 100 state and await such a header */
836 k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
837 k->keepon &= ~KEEP_SEND; /* disable writing */
838 k->start100 = Curl_tvnow(); /* timeout count starts now */
839 *didwhat &= ~KEEP_SEND; /* we didn't write anything actually */
841 /* set a timeout for the multi interface */
842 Curl_expire(data, CURL_TIMEOUT_EXPECT_100);
846 if(conn->handler->protocol&(CURLPROTO_HTTP|CURLPROTO_RTSP)) {
847 if(http->sending == HTTPSEND_REQUEST)
848 /* We're sending the HTTP request headers, not the data.
849 Remember that so we don't change the line endings. */
850 sending_http_headers = TRUE;
852 sending_http_headers = FALSE;
/* Pull the next buffer-full from the read callback. */
855 result = Curl_fillreadbuffer(conn, BUFSIZE, &fillcount);
859 nread = (ssize_t)fillcount;
862 nread = 0; /* we're done uploading/reading */
864 if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
865 /* this is a paused transfer */
870 k->keepon &= ~KEEP_SEND; /* we're done writing */
872 if(conn->bits.rewindaftersend) {
873 result = Curl_readrewind(conn);
880 /* store number of bytes available for upload */
881 data->req.upload_present = nread;
883 #ifndef CURL_DISABLE_SMTP
884 if(conn->handler->protocol & CURLPROTO_SMTP) {
885 result = Curl_smtp_escape_eob(conn, nread);
890 #endif /* CURL_DISABLE_SMTP */
892 /* convert LF to CRLF if so asked */
893 if((!sending_http_headers) && (
894 #ifdef CURL_DO_LINEEND_CONV
895 /* always convert if we're FTPing in ASCII mode */
896 (data->set.prefer_ascii) ||
/* Lazily allocate the 2*BUFSIZE scratch buffer used for in-flight
   LF -> CRLF expansion (worst case doubles the data). */
899 if(data->state.scratch == NULL)
900 data->state.scratch = malloc(2*BUFSIZE);
901 if(data->state.scratch == NULL) {
902 failf (data, "Failed to alloc scratch buffer!");
903 return CURLE_OUT_OF_MEMORY;
906 * ASCII/EBCDIC Note: This is presumably a text (not binary)
907 * transfer so the data should already be in ASCII.
908 * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
909 * must be used instead of the escape sequences \r & \n.
911 for(i = 0, si = 0; i < nread; i++, si++) {
912 if(data->req.upload_fromhere[i] == 0x0a) {
913 data->state.scratch[si++] = 0x0d;
914 data->state.scratch[si] = 0x0a;
915 if(!data->set.crlf) {
916 /* we're here only because FTP is in ASCII mode...
917 bump infilesize for the LF we just added */
918 data->set.infilesize++;
922 data->state.scratch[si] = data->req.upload_fromhere[i];
925 /* only perform the special operation if we really did replace
929 /* upload from the new (replaced) buffer instead */
930 data->req.upload_fromhere = data->state.scratch;
932 /* set the new amount too */
933 data->req.upload_present = nread;
936 } /* if 0 == data->req.upload_present */
938 /* We have a partial buffer left from a previous "round". Use
939 that instead of reading more data */
942 /* write to socket (send away data) */
943 result = Curl_write(conn,
944 conn->writesockfd, /* socket to send to */
945 data->req.upload_fromhere, /* buffer pointer */
946 data->req.upload_present, /* buffer size */
947 &bytes_written); /* actually sent */
952 if(data->set.verbose)
953 /* show the data before we change the pointer upload_fromhere */
954 Curl_debug(data, CURLINFO_DATA_OUT, data->req.upload_fromhere,
955 (size_t)bytes_written, conn);
957 k->writebytecount += bytes_written;
959 if(k->writebytecount == data->set.infilesize) {
960 /* we have sent all data we were supposed to */
961 k->upload_done = TRUE;
962 infof(data, "We are completely uploaded and fine\n");
/* Short write: keep the unsent remainder for the next writable event. */
965 if(data->req.upload_present != bytes_written) {
966 /* we only wrote a part of the buffer (if anything), deal with it! */
968 /* store the amount of bytes left in the buffer to write */
969 data->req.upload_present -= bytes_written;
971 /* advance the pointer where to find the buffer when the next send
973 data->req.upload_fromhere += bytes_written;
976 /* we've uploaded that buffer now */
977 data->req.upload_fromhere = k->uploadbuf;
978 data->req.upload_present = 0; /* no more bytes left */
981 /* switch off writing, we're done! */
982 k->keepon &= ~KEEP_SEND; /* we're done writing */
986 Curl_pgrsSetUploadCounter(data, k->writebytecount);
988 } WHILE_FALSE; /* just to break out from! */
/* NOTE(review): this region has had many physical lines elided by extraction
   (leading integers are fused original line numbers). Only comments are
   added here; verify against upstream lib/transfer.c before building. */
994 * Curl_readwrite() is the low-level function to be called when data is to
995 * be read and written to/from the connection.
997 CURLcode Curl_readwrite(struct connectdata *conn,
1000 struct SessionHandle *data = conn->data;
1001 struct SingleRequest *k = &data->req;
1005 curl_socket_t fd_read;
1006 curl_socket_t fd_write;
/* cselect_bits may carry readiness hints handed in by the multi interface;
   consume (and clear) them so we can skip the poll below. */
1007 int select_res = conn->cselect_bits;
1009 conn->cselect_bits = 0;
1011 /* only use the proper socket if the *_HOLD bit is not set simultaneously as
1012 then we are in rate limiting state in that transfer direction */
1014 if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
1015 fd_read = conn->sockfd;
1017 fd_read = CURL_SOCKET_BAD;
1019 if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
1020 fd_write = conn->writesockfd;
1022 fd_write = CURL_SOCKET_BAD;
1024 if(!select_res) /* Call for select()/poll() only, if read/write/error
1025 status is not known. */
1026 select_res = Curl_socket_ready(fd_read, fd_write, 0);
1028 if(select_res == CURL_CSELECT_ERR) {
1029 failf(data, "select/poll returned error");
1030 return CURLE_SEND_ERROR;
1033 /* We go ahead and do a read if we have a readable socket or if
1034 the stream was rewound (in which case we have data in a
1036 if((k->keepon & KEEP_RECV) &&
1037 ((select_res & CURL_CSELECT_IN) || conn->bits.stream_was_rewound)) {
1039 result = readwrite_data(data, conn, k, &didwhat, done);
1044 /* If we still have writing to do, we check if we have a writable socket. */
1045 if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
1048 result = readwrite_upload(data, conn, k, &didwhat);
1053 k->now = Curl_tvnow();
1055 /* Update read/write counters */
1057 *k->bytecountp = k->bytecount; /* read count */
1058 if(k->writebytecountp)
1059 *k->writebytecountp = k->writebytecount; /* write count */
1062 /* no read no write, this is a timeout? */
1063 if(k->exp100 == EXP100_AWAITING_CONTINUE) {
1064 /* This should allow some time for the header to arrive, but only a
1065 very short time as otherwise it'll be too much wasted time too
1068 /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1070 Therefore, when a client sends this header field to an origin server
1071 (possibly via a proxy) from which it has never seen a 100 (Continue)
1072 status, the client SHOULD NOT wait for an indefinite period before
1073 sending the request body.
1077 long ms = Curl_tvdiff(k->now, k->start100);
1078 if(ms > CURL_TIMEOUT_EXPECT_100) {
1079 /* we've waited long enough, continue anyway */
1080 k->exp100 = EXP100_SEND_DATA;
1081 k->keepon |= KEEP_SEND;
1082 infof(data, "Done waiting for 100-continue\n");
1087 if(Curl_pgrsUpdate(conn))
1088 result = CURLE_ABORTED_BY_CALLBACK;
1090 result = Curl_speedcheck(data, k->now);
/* Overall operation timeout check. */
1095 if(0 > Curl_timeleft(data, &k->now, FALSE)) {
1097 failf(data, "Operation timed out after %ld milliseconds with %"
1098 CURL_FORMAT_CURL_OFF_T " out of %"
1099 CURL_FORMAT_CURL_OFF_T " bytes received",
1100 Curl_tvdiff(k->now, data->progress.t_startsingle), k->bytecount,
1104 failf(data, "Operation timed out after %ld milliseconds with %"
1105 CURL_FORMAT_CURL_OFF_T " bytes received",
1106 Curl_tvdiff(k->now, data->progress.t_startsingle), k->bytecount);
1108 return CURLE_OPERATION_TIMEDOUT;
1113 * The transfer has been performed. Just make some general checks before
1117 if(!(data->set.opt_no_body) && (k->size != -1) &&
1118 (k->bytecount != k->size) &&
1119 #ifdef CURL_DO_LINEEND_CONV
1120 /* Most FTP servers don't adjust their file SIZE response for CRLFs,
1121 so we'll check to see if the discrepancy can be explained
1122 by the number of CRLFs we've changed to LFs.
1124 (k->bytecount != (k->size + data->state.crlf_conversions)) &&
1125 #endif /* CURL_DO_LINEEND_CONV */
1126 !data->req.newurl) {
1127 failf(data, "transfer closed with %" CURL_FORMAT_CURL_OFF_T
1128 " bytes remaining to read",
1129 k->size - k->bytecount);
1130 return CURLE_PARTIAL_FILE;
1132 else if(!(data->set.opt_no_body) &&
1134 (conn->chunk.state != CHUNK_STOP)) {
1136 * In chunked mode, return an error if the connection is closed prior to
1137 * the empty (terminating) chunk is read.
1139 * The condition above used to check for
1140 * conn->proto.http->chunk.datasize != 0 which is true after reading
1141 * *any* chunk, not just the empty chunk.
1144 failf(data, "transfer closed with outstanding read data remaining");
1145 return CURLE_PARTIAL_FILE;
1147 if(Curl_pgrsUpdate(conn))
1148 return CURLE_ABORTED_BY_CALLBACK;
1151 /* Now update the "done" boolean we return */
1152 *done = (0 == (k->keepon&(KEEP_RECV|KEEP_SEND|
1153 KEEP_RECV_PAUSE|KEEP_SEND_PAUSE))) ? TRUE : FALSE;
1159 * Curl_single_getsock() gets called by the multi interface code when the app
1160 * has requested to get the sockets for the current connection. This function
1161 * will then be called once for every connection that the multi interface
1162 * keeps track of. This function will only be called for connections that are
1163 * in the proper state to have this information available.
1165 int Curl_single_getsock(const struct connectdata *conn,
1166 curl_socket_t *sock, /* points to numsocks number
1170 const struct SessionHandle *data = conn->data;
1171 int bitmap = GETSOCK_BLANK;
1172 unsigned sockindex = 0;
1174 if(conn->handler->perform_getsock)
1175 return conn->handler->perform_getsock(conn, sock, numsocks);
1178 /* simple check but we might need two slots */
1179 return GETSOCK_BLANK;
1181 /* don't include HOLD and PAUSE connections */
1182 if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {
1184 DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);
1186 bitmap |= GETSOCK_READSOCK(sockindex);
1187 sock[sockindex] = conn->sockfd;
1190 /* don't include HOLD and PAUSE connections */
1191 if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {
1193 if((conn->sockfd != conn->writesockfd) ||
1194 !(data->req.keepon & KEEP_RECV)) {
1195 /* only if they are not the same socket or we didn't have a readable
1196 one, we increase index */
1197 if(data->req.keepon & KEEP_RECV)
1198 sockindex++; /* increase index if we need two entries */
1200 DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);
1202 sock[sockindex] = conn->writesockfd;
1205 bitmap |= GETSOCK_WRITESOCK(sockindex);
1212 * Determine optimum sleep time based on configured rate, current rate,
1214 * Returns value in milliseconds.
1216 * The basic idea is to adjust the desired rate up/down in this method
1217 * based on whether we are running too slow or too fast. Then, calculate
1218 * how many milliseconds to wait for the next packet to achieve this new
/* Rate-limiting helper used by speed-capped transfers.
 * NOTE(review): this extract is missing source lines (embedded original
 * line numbers skip); the declarations of 'rv' and the 'pkt_size'
 * parameter, plus the clamping/return code, are not visible here. */
1221 long Curl_sleep_time(curl_off_t rate_bps, curl_off_t cur_rate_bps,
1224 curl_off_t min_sleep = 0;
1230 /* If running faster than about .1% of the desired speed, slow
1231 * us down a bit. Use shift instead of division as the 0.1%
1232 * cutoff is arbitrary anyway.
1234 if(cur_rate_bps > (rate_bps + (rate_bps >> 10))) {
1235 /* running too fast, decrease target rate by 1/64th of rate */
1236 rate_bps -= rate_bps >> 6;
1239 else if(cur_rate_bps < (rate_bps - (rate_bps >> 10))) {
1240 /* running too slow, increase target rate by 1/64th of rate */
1241 rate_bps += rate_bps >> 6;
1244 /* Determine number of milliseconds to wait until we do
1245 * the next packet at the adjusted rate. We should wait
1246 * longer when using larger packets, for instance.
/* packet-time in ms = bits-per-packet * 1000 / adjusted rate */
1248 rv = ((curl_off_t)((pkt_size * 8) * 1000) / rate_bps);
1250 /* Catch rounding errors and always slow down at least 1ms if
1251 * we are running too fast.
1256 /* Bound value to fit in 'long' on 32-bit platform. That's
1257 * plenty long enough anyway!
1266 * Curl_pretransfer() is called immediately before a transfer starts.
/* Resets per-transfer state on the handle, loads cookies/resolve lists,
 * arms timeouts and (optionally) ignores SIGPIPE.
 * NOTE(review): this extract is missing source lines (embedded original
 * line numbers skip); the 'res' declaration, several closing braces and
 * the final return are not visible here. */
1268 CURLcode Curl_pretransfer(struct SessionHandle *data)
1271 if(!data->change.url) {
1272 /* we can't do anything without URL */
1273 failf(data, "No URL set!");
1274 return CURLE_URL_MALFORMAT;
1277 /* Init the SSL session ID cache here. We do it here since we want to do it
1278 after the *_setopt() calls (that could specify the size of the cache) but
1279 before any transfer takes place. */
1280 res = Curl_ssl_initsessions(data, data->set.ssl.max_ssl_sessions);
1284 data->set.followlocation=0; /* reset the location-follow counter */
1285 data->state.this_is_a_follow = FALSE; /* reset this */
1286 data->state.errorbuf = FALSE; /* no error has occurred */
1287 data->state.httpversion = 0; /* don't assume any particular server version */
1289 data->state.ssl_connect_retry = FALSE;
1291 data->state.authproblem = FALSE;
1292 data->state.authhost.want = data->set.httpauth;
1293 data->state.authproxy.want = data->set.proxyauth;
1294 Curl_safefree(data->info.wouldredirect);
1295 data->info.wouldredirect = NULL;
1297 /* If there is a list of cookie files to read, do it now! */
1298 if(data->change.cookielist)
1299 Curl_cookie_loadfiles(data);
1301 /* If there is a list of host pairs to deal with */
1302 if(data->change.resolve)
1303 res = Curl_loadhostpairs(data);
1306 /* Allow data->set.use_port to set which port to use. This needs to be
1307 * disabled for example when we follow Location: headers to URLs using
1308 * different ports! */
1309 data->state.allow_port = TRUE;
1311 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1312 /*************************************************************
1313 * Tell signal handler to ignore SIGPIPE
1314 *************************************************************/
1315 if(!data->set.no_signal)
1316 data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1319 Curl_initinfo(data); /* reset session-specific information "variables" */
1320 Curl_pgrsStartNow(data);
/* add the multi-interface inaccuracy margin so the timeout never fires
 * marginally too early */
1322 if(data->set.timeout)
1323 Curl_expire(data, data->set.timeout + MULTI_TIMEOUT_INACCURACY_MS);
1325 if(data->set.connecttimeout)
1326 Curl_expire(data, data->set.connecttimeout +
1327 MULTI_TIMEOUT_INACCURACY_MS);
1329 /* In case the handle is re-used and an authentication method was picked
1330 in the session we need to make sure we only use the one(s) we now
1331 consider to be fine */
1332 data->state.authhost.picked &= data->state.authhost.want;
1333 data->state.authproxy.picked &= data->state.authproxy.want;
1340 * Curl_posttransfer() is called immediately after a transfer ends
/* Undoes the SIGPIPE handler change made in Curl_pretransfer().
 * NOTE(review): the closing brace / return of this function is not visible
 * in this extract (embedded original line numbers skip). */
1342 CURLcode Curl_posttransfer(struct SessionHandle *data)
1344 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1345 /* restore the signal handler for SIGPIPE before we get back */
1346 if(!data->set.no_signal)
1347 signal(SIGPIPE, data->state.prev_signal)
1349 (void)data; /* unused parameter */
1355 #ifndef CURL_DISABLE_HTTP
1357 * strlen_url() returns the length of the given URL if the spaces within the
1358 * URL were properly URL encoded.
/* NOTE(review): the loop body of this function (counting of per-character
 * lengths, the '?' detection that flips 'left', and the return) is missing
 * from this extract — only the declarations and loop header are visible. */
1360 static size_t strlen_url(const char *url)
1364 bool left=TRUE; /* left side of the ? */
1366 for(ptr=url; *ptr; ptr++) {
1385 /* strcpy_url() copies a url to a output buffer and URL-encodes the spaces in
1386 * the source URL accordingly.
/* Spaces left of the '?' become "%20"; spaces right of it become '+'.
 * NOTE(review): this extract is missing source lines (embedded original
 * line numbers skip); the 'left' flag, the '?' detection and parts of the
 * copy loop are not visible here. */
1388 static void strcpy_url(char *output, const char *url)
1390 /* we must add this with whitespace-replacing */
1393 char *optr = output;
1394 for(iptr = url; /* read from here */
1395 *iptr; /* until zero byte */
/* space on the query-string's left side: percent-encode it */
1406 *optr++='%'; /* add a '%' */
1407 *optr++='2'; /* add a '2' */
1408 *optr++='0'; /* add a '0' */
/* space on the query-string's right side: '+' encoding */
1411 *optr++='+'; /* add a '+' here */
1415 *optr=0; /* zero terminate output buffer */
1420 * Returns true if the given URL is absolute (as opposed to relative)
/* "Absolute" here means a "scheme://" prefix is present; the scheme is
 * limited to 15 chars and may not contain '?', '&', '/' or ':'. */
1422 static bool is_absolute_url(const char *url)
1424 char prot[16]; /* URL protocol string storage */
1425 char letter; /* used for a silly sscanf */
/* 2 conversions succeed only when both the scheme and a char after "://"
 * were matched */
1427 return (2 == sscanf(url, "%15[^?&/:]://%c", prot, &letter)) ? TRUE : FALSE;
1431 * Concatenate a relative URL to a base URL making it absolute.
1432 * URL-encodes any spaces.
1433 * The returned pointer must be freed by the caller unless NULL
1434 * (returns NULL on out of memory).
/* NOTE(review): this extract is missing source lines (embedded original
 * line numbers skip); several declarations, closing braces and branch
 * bodies are absent, so comments describe only the visible code. */
1436 static char *concat_url(const char *base, const char *relurl)
1439 TRY to append this new path to the old URL
1440 to the right of the host part. Oh crap, this is doomed to cause
1441 problems in the future...
1448 const char *useurl = relurl;
1451 /* we must make our own copy of the URL to play with, as it may
1452 point to read-only data */
1453 char *url_clone=strdup(base);
1456 return NULL; /* skip out of this NOW */
1458 /* protsep points to the start of the host name */
1459 protsep=strstr(url_clone, "//");
1463 protsep+=2; /* pass the slashes */
/* --- relative path (does not start with '/'): resolve against the base
 * URL's directory --- */
1465 if('/' != relurl[0]) {
1468 /* First we need to find out if there's a ?-letter in the URL,
1469 and cut it and the right-side of that off */
1470 pathsep = strchr(protsep, '?');
1474 /* we have a relative path to append to the last slash if there's one
1475 available, or if the new URL is just a query string (starts with a
1476 '?') we append the new one at the end of the entire currently worked
1478 if(useurl[0] != '?') {
1479 pathsep = strrchr(protsep, '/');
1484 /* Check if there's any slash after the host name, and if so, remember
1485 that position instead */
1486 pathsep = strchr(protsep, '/');
1488 protsep = pathsep+1;
1492 /* now deal with one "./" or any amount of "../" in the newurl
1493 and act accordingly */
1495 if((useurl[0] == '.') && (useurl[1] == '/'))
1496 useurl+=2; /* just skip the "./" */
1498 while((useurl[0] == '.') &&
1499 (useurl[1] == '.') &&
1500 (useurl[2] == '/')) {
1502 useurl+=3; /* pass the "../" */
/* each "../" strips one directory level off the base path */
1507 /* cut off one more level from the right of the original URL */
1508 pathsep = strrchr(protsep, '/');
/* --- absolute path (starts with '/'): replace the base URL's path --- */
1519 /* We got a new absolute path for this server */
1521 if((relurl[0] == '/') && (relurl[1] == '/')) {
1522 /* the new URL starts with //, just keep the protocol part from the
1525 useurl = &relurl[2]; /* we keep the slashes from the original, so we
1526 skip the new ones */
1529 /* cut off the original URL from the first slash, or deal with URLs
1531 pathsep = strchr(protsep, '/');
1533 /* When people use badly formatted URLs, such as
1534 "http://www.url.com?dir=/home/daniel" we must not use the first
1535 slash, if there's a ?-letter before it! */
1536 char *sep = strchr(protsep, '?');
1537 if(sep && (sep < pathsep))
1542 /* There was no slash. Now, since we might be operating on a badly
1543 formatted URL, such as "http://www.url.com?id=2380" which doesn't
1544 use a slash separator as it is supposed to, we need to check for a
1545 ?-letter as well! */
1546 pathsep = strchr(protsep, '?');
1553 /* If the new part contains a space, this is a mighty stupid redirect
1554 but we still make an effort to do "right". To the left of a '?'
1555 letter we replace each space with %20 while it is replaced with '+'
1556 on the right side of the '?' letter.
1558 newlen = strlen_url(useurl);
1560 urllen = strlen(url_clone);
1562 newest = malloc(urllen + 1 + /* possible slash */
1563 newlen + 1 /* zero byte */);
1566 free(url_clone); /* don't leak this */
1570 /* copy over the root url part */
1571 memcpy(newest, url_clone, urllen);
1573 /* check if we need to append a slash */
1574 if(('/' == useurl[0]) || (protsep && !*protsep) || ('?' == useurl[0]))
1577 newest[urllen++]='/';
1579 /* then append the new piece on the right side */
1580 strcpy_url(&newest[urllen], useurl);
1586 #endif /* CURL_DISABLE_HTTP */
1589 * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
1590 * as given by the remote server and set up the new URL to request.
/* Ownership: 'newurl' is malloc()ed by the caller; on success this function
 * takes it over (installs it as data->change.url or data->info.wouldredirect).
 * NOTE(review): this extract is missing source lines (embedded original line
 * numbers skip); some declarations, braces and 'break'/'return' statements
 * are not visible here, so comments describe only the visible code. */
1592 CURLcode Curl_follow(struct SessionHandle *data,
1593 char *newurl, /* this 'newurl' is the Location: string,
1594 and it must be malloc()ed before passed
1596 followtype type) /* see transfer.h */
1598 #ifdef CURL_DISABLE_HTTP
1602 /* Location: following will not happen when HTTP is disabled */
1603 return CURLE_TOO_MANY_REDIRECTS;
1606 /* Location: redirect */
1607 bool disallowport = FALSE;
1609 if(type == FOLLOW_REDIR) {
/* honor CURLOPT_MAXREDIRS (-1 means unlimited) */
1610 if((data->set.maxredirs != -1) &&
1611 (data->set.followlocation >= data->set.maxredirs)) {
1612 failf(data,"Maximum (%ld) redirects followed", data->set.maxredirs);
1613 return CURLE_TOO_MANY_REDIRECTS;
1616 /* mark the next request as a followed location: */
1617 data->state.this_is_a_follow = TRUE;
1619 data->set.followlocation++; /* count location-followers */
1621 if(data->set.http_auto_referer) {
1622 /* We are asked to automatically set the previous URL as the referer
1623 when we get the next URL. We pick the ->url field, which may or may
1624 not be 100% correct */
1626 if(data->change.referer_alloc) {
1627 Curl_safefree(data->change.referer);
1628 data->change.referer_alloc = FALSE;
1631 data->change.referer = strdup(data->change.url);
1632 if(!data->change.referer)
1633 return CURLE_OUT_OF_MEMORY;
1634 data->change.referer_alloc = TRUE; /* yes, free this later */
1638 if(!is_absolute_url(newurl)) {
1640 *DANG* this is an RFC 2068 violation. The URL is supposed
1641 to be absolute and this doesn't seem to be that!
/* resolve the relative Location: against the current URL */
1643 char *absolute = concat_url(data->change.url, newurl);
1645 return CURLE_OUT_OF_MEMORY;
1650 /* This is an absolute URL, don't allow the custom port number */
1651 disallowport = TRUE;
1653 if(strchr(newurl, ' ')) {
1654 /* This new URL contains at least one space, this is a mighty stupid
1655 redirect but we still make an effort to do "right". */
1657 size_t newlen = strlen_url(newurl);
1659 newest = malloc(newlen+1); /* get memory for this */
1661 return CURLE_OUT_OF_MEMORY;
1662 strcpy_url(newest, newurl); /* create a space-free URL */
1664 free(newurl); /* that was no good */
1665 newurl = newest; /* use this instead now */
1670 if(type == FOLLOW_FAKE) {
1671 /* we're only figuring out the new url if we would've followed locations
1672 but now we're done so we can get out! */
1673 data->info.wouldredirect = newurl;
1678 data->state.allow_port = FALSE;
1680 if(data->change.url_alloc) {
1681 Curl_safefree(data->change.url);
1682 data->change.url_alloc = FALSE;
/* install the redirect target as the handle's new URL; ownership of the
 * 'newurl' allocation moves to data->change.url */
1685 data->change.url = newurl;
1686 data->change.url_alloc = TRUE;
1687 newurl = NULL; /* don't free! */
1689 infof(data, "Issue another request to this URL: '%s'\n", data->change.url);
1692 * We get here when the HTTP code is 300-399 (and 401). We need to perform
1693 * differently based on exactly what return code there was.
1695 * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
1696 * a HTTP (proxy-) authentication scheme other than Basic.
1698 switch(data->info.httpcode) {
1699 /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
1700 Authorization: XXXX header in the HTTP request code snippet */
1701 /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
1702 Proxy-Authorization: XXXX header in the HTTP request code snippet */
1703 /* 300 - Multiple Choices */
1704 /* 306 - Not used */
1705 /* 307 - Temporary Redirect */
1706 default: /* for all above (and the unknown ones) */
1707 /* Some codes are explicitly mentioned since I've checked RFC2616 and they
1708 * seem to be OK to POST to.
1711 case 301: /* Moved Permanently */
1712 /* (quote from RFC2616, section 10.3.2):
1714 * When automatically redirecting a POST request after receiving a 301
1715 * status code, some existing HTTP/1.0 user agents will erroneously change
1716 * it into a GET request.
1720 * As most of the important user agents do this obvious RFC2616 violation,
1721 * many webservers expect this. So these servers often answers to a POST
1722 * request with an error page. To be sure that libcurl gets the page that
1723 * most user agents would get, libcurl has to force GET.
1725 * This behavior can be overridden with CURLOPT_POSTREDIR.
1727 if((data->set.httpreq == HTTPREQ_POST
1728 || data->set.httpreq == HTTPREQ_POST_FORM)
1729 && !(data->set.keep_post & CURL_REDIR_POST_301)) {
1731 "Violate RFC 2616/10.3.2 and switch from POST to GET\n");
1732 data->set.httpreq = HTTPREQ_GET;
1735 case 302: /* Found */
1738 Note: RFC 1945 and RFC 2068 specify that the client is not allowed
1739 to change the method on the redirected request. However, most
1740 existing user agent implementations treat 302 as if it were a 303
1741 response, performing a GET on the Location field-value regardless
1742 of the original request method. The status codes 303 and 307 have
1743 been added for servers that wish to make unambiguously clear which
1744 kind of reaction is expected of the client.
1748 Note: Many pre-HTTP/1.1 user agents do not understand the 303
1749 status. When interoperability with such clients is a concern, the
1750 302 status code may be used instead, since most user agents react
1751 to a 302 response as described here for 303.
1753 This behavior can be overridden with CURLOPT_POSTREDIR
1755 if((data->set.httpreq == HTTPREQ_POST
1756 || data->set.httpreq == HTTPREQ_POST_FORM)
1757 && !(data->set.keep_post & CURL_REDIR_POST_302)) {
1759 "Violate RFC 2616/10.3.3 and switch from POST to GET\n");
1760 data->set.httpreq = HTTPREQ_GET;
1764 case 303: /* See Other */
1765 /* Disable both types of POSTs, unless the user explicitely
1766 asks for POST after POST */
1767 if(data->set.httpreq != HTTPREQ_GET
1768 && !(data->set.keep_post & CURL_REDIR_POST_303)) {
1769 data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
1770 infof(data, "Disables POST, goes with %s\n",
1771 data->set.opt_no_body?"HEAD":"GET");
1774 case 304: /* Not Modified */
1775 /* 304 means we did a conditional request and it was "Not modified".
1776 * We shouldn't get any Location: header in this response!
1779 case 305: /* Use Proxy */
1780 /* (quote from RFC2616, section 10.3.6):
1781 * "The requested resource MUST be accessed through the proxy given
1782 * by the Location field. The Location field gives the URI of the
1783 * proxy. The recipient is expected to repeat this single request
1784 * via the proxy. 305 responses MUST only be generated by origin
/* restart the redirect timer and reset size/time counters for the
 * follow-up request */
1789 Curl_pgrsTime(data, TIMER_REDIRECT);
1790 Curl_pgrsResetTimesSizes(data);
1793 #endif /* CURL_DISABLE_HTTP */
/* Tears down a dead re-used connection and establishes a fresh one for the
 * same handle, updating *connp to the new connection.
 * NOTE(review): this extract is missing source lines (embedded original
 * line numbers skip); the return-type line, some braces and the final
 * return are not visible here. */
1797 Curl_reconnect_request(struct connectdata **connp)
1799 CURLcode result = CURLE_OK;
1800 struct connectdata *conn = *connp;
1801 struct SessionHandle *data = conn->data;
1803 /* This was a re-use of a connection and we got a write error in the
1804 * DO-phase. Then we DISCONNECT this connection and have another attempt to
1805 * CONNECT and then DO again! The retry cannot possibly find another
1806 * connection to re-use, since we only keep one possible connection for
1809 infof(data, "Re-used connection seems dead, get a new one\n");
1811 conn->bits.close = TRUE; /* enforce close of this connection */
1812 result = Curl_done(&conn, result, FALSE); /* we are so done with this */
1814 /* conn may no longer be a good pointer, clear it to avoid mistakes by
1819 * According to bug report #1330310. We need to check for CURLE_SEND_ERROR
1820 * here as well. I figure this could happen when the request failed on a FTP
1821 * connection and thus Curl_done() itself tried to use the connection
1822 * (again). Slight Lack of feedback in the report, but I don't think this
1823 * extra check can do much harm.
1825 if((CURLE_OK == result) || (CURLE_SEND_ERROR == result)) {
1827 bool protocol_done = TRUE;
1829 /* Now, redo the connect and get a new connection */
1830 result = Curl_connect(data, connp, &async, &protocol_done);
1831 if(CURLE_OK == result) {
1832 /* We have connected or sent away a name resolve query fine */
1834 conn = *connp; /* setup conn to again point to something nice */
1836 /* Now, if async is TRUE here, we need to wait for the name
/* block until the asynchronous name resolve completes */
1838 result = Curl_resolver_wait_resolv(conn, NULL);
1842 /* Resolved, continue with the connection */
1843 result = Curl_async_resolved(conn, &protocol_done);
1853 /* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.
1855 NOTE: that the *url is malloc()ed. */
/* Decides whether a transfer that got no response on a re-used connection
 * should be retried on a fresh connection.
 * NOTE(review): this extract is missing source lines (embedded original
 * line numbers skip); the '*url' parameter line, a 'conn->bits.reuse'
 * check and several returns/braces are not visible here. */
1856 CURLcode Curl_retry_request(struct connectdata *conn,
1859 struct SessionHandle *data = conn->data;
1863 /* if we're talking upload, we can't do the checks below, unless the protocol
1864 is HTTP as when uploading over HTTP we will still get a response */
1865 if(data->set.upload &&
1866 !(conn->handler->protocol&(CURLPROTO_HTTP|CURLPROTO_RTSP)))
1869 if(/* workaround for broken TLS servers */ data->state.ssl_connect_retry ||
1870 ((data->req.bytecount +
1871 data->req.headerbytecount == 0) &&
1873 !data->set.opt_no_body &&
1874 data->set.rtspreq != RTSPREQ_RECEIVE)) {
1875 /* We got no data, we attempted to re-use a connection and yet we want a
1876 "body". This might happen if the connection was left alive when we were
1877 done using it before, but that was closed when we wanted to read from
1878 it again. Bad luck. Retry the same request on a fresh connect! */
1879 infof(conn->data, "Connection died, retrying a fresh connect\n");
1880 *url = strdup(conn->data->change.url);
1882 return CURLE_OUT_OF_MEMORY;
1884 conn->bits.close = TRUE; /* close this connection */
1885 conn->bits.retry = TRUE; /* mark this as a connection we're about
1886 to retry. Marking it this way should
1887 prevent i.e HTTP transfers to return
1888 error just because nothing has been
/* if we already sent request-body bytes on this HTTP connection, rewind
 * the read stream so the retry can re-send them from the start */
1892 if(conn->handler->protocol&CURLPROTO_HTTP) {
1893 struct HTTP *http = data->req.protop;
1894 if(http->writebytecount)
1895 return Curl_readrewind(conn);
1902 * Curl_setup_transfer() is called to setup some basic properties for the
1903 * upcoming transfer.
/* Copies the socket indices, expected size and byte counters into the
 * SingleRequest state and arms KEEP_RECV/KEEP_SEND as appropriate,
 * including the HTTP 100-continue wait states.
 * NOTE(review): this extract is missing source lines (embedded original
 * line numbers skip) and the function continues past the end of this
 * chunk; comments describe only the visible code. */
1906 Curl_setup_transfer(
1907 struct connectdata *conn, /* connection data */
1908 int sockindex, /* socket index to read from or -1 */
1909 curl_off_t size, /* -1 if unknown at this point */
1910 bool getheader, /* TRUE if header parsing is wanted */
1911 curl_off_t *bytecountp, /* return number of bytes read or NULL */
1912 int writesockindex, /* socket index to write to, it may very well be
1913 the same we read from. -1 disables */
1914 curl_off_t *writecountp /* return number of bytes written or NULL */
1917 struct SessionHandle *data;
1918 struct SingleRequest *k;
1920 DEBUGASSERT(conn != NULL);
1925 DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));
1927 /* now copy all input parameters */
1928 conn->sockfd = sockindex == -1 ?
1929 CURL_SOCKET_BAD : conn->sock[sockindex];
1930 conn->writesockfd = writesockindex == -1 ?
1931 CURL_SOCKET_BAD:conn->sock[writesockindex];
1932 k->getheader = getheader;
1935 k->bytecountp = bytecountp;
1936 k->writebytecountp = writecountp;
1938 /* The code sequence below is placed in this function just because all
1939 necessary input is not always known in do_complete() as this function may
1940 be called after that */
1945 Curl_pgrsSetDownloadSize(data, size);
1947 /* we want header and/or body, if neither then don't do this! */
1948 if(k->getheader || !data->set.opt_no_body) {
1950 if(conn->sockfd != CURL_SOCKET_BAD)
1951 k->keepon |= KEEP_RECV;
1953 if(conn->writesockfd != CURL_SOCKET_BAD) {
1954 struct HTTP *http = data->req.protop;
1957 Even if we require a 100-return code before uploading data, we might
1958 need to write data before that since the REQUEST may not have been
1959 finished sent off just yet.
1961 Thus, we must check if the request has been sent before we set the
1962 state info where we wait for the 100-return code
1964 if((data->state.expect100header) &&
1965 (conn->handler->protocol&CURLPROTO_HTTP) &&
1966 (http->sending == HTTPSEND_BODY)) {
1967 /* wait with write until we either got 100-continue or a timeout */
1968 k->exp100 = EXP100_AWAITING_CONTINUE;
1969 k->start100 = Curl_tvnow();
1971 /* Set a timeout for the multi interface. Add the inaccuracy margin so
1972 that we don't fire slightly too early and get denied to run. */
1973 Curl_expire(data, CURL_TIMEOUT_EXPECT_100 +
1974 MULTI_TIMEOUT_INACCURACY / 1000);
1977 if(data->state.expect100header)
1978 /* when we've sent off the rest of the headers, we must await a
1979 100-continue but first finish sending the request */
1980 k->exp100 = EXP100_SENDING_REQUEST;
1982 /* enable the write bit when we're not waiting for continue */
1983 k->keepon |= KEEP_SEND;
1985 } /* if(conn->writesockfd != CURL_SOCKET_BAD) */
1986 } /* if(k->getheader || !data->set.opt_no_body) */