1 /***************************************************************************
3 * Project ___| | | | _ \| |
5 * | (__| |_| | _ <| |___
6 * \___|\___/|_| \_\_____|
8 * Copyright (C) 1998 - 2016, Daniel Stenberg, <daniel@haxx.se>, et al.
10 * This software is licensed as described in the file COPYING, which
11 * you should have received as part of this distribution. The terms
12 * are also available at https://curl.haxx.se/docs/copyright.html.
14 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15 * copies of the Software, and permit persons to whom the Software is
16 * furnished to do so, under the terms of the COPYING file.
18 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19 * KIND, either express or implied.
21 ***************************************************************************/
23 #include "curl_setup.h"
25 #include "strtoofft.h"
29 #ifdef HAVE_NETINET_IN_H
30 #include <netinet/in.h>
35 #ifdef HAVE_ARPA_INET_H
36 #include <arpa/inet.h>
41 #ifdef HAVE_SYS_IOCTL_H
42 #include <sys/ioctl.h>
48 #ifdef HAVE_SYS_PARAM_H
49 #include <sys/param.h>
52 #ifdef HAVE_SYS_SELECT_H
53 #include <sys/select.h>
57 #error "We can't compile without socket() support!"
61 #include <curl/curl.h>
64 #include "content_encoding.h"
68 #include "speedcheck.h"
73 #include "vtls/vtls.h"
77 #include "non-ascii.h"
80 /* The last 3 #include files should be in this order */
81 #include "curl_printf.h"
82 #include "curl_memory.h"
/*
 * NOTE(review): the embedded original line numbers in this chunk skip
 * (e.g. 103 -> 107), so braces, declarations and some statements are
 * missing from this view. Comments below describe only the visible code.
 *
 * Calls the application's read callback (CURLOPT_READFUNCTION) to fill the
 * upload buffer. Handles the callback's special return codes
 * (CURL_READFUNC_ABORT / CURL_READFUNC_PAUSE) and, for chunked uploads,
 * wraps the data in chunked transfer-encoding framing. On success the
 * number of bytes made available is (presumably) stored via 'nreadp' —
 * the assigning line is not visible here, TODO confirm.
 */
86 * This function will call the read callback to fill our buffer with data
89 CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
91 struct Curl_easy *data = conn->data;
92 size_t buffersize = (size_t)bytes;
94 #ifdef CURL_DOES_CONVERSIONS
95 bool sending_http_headers = FALSE;
97 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
98 const struct HTTP *http = data->req.protop;
100 if(http->sending == HTTPSEND_REQUEST)
101 /* We're sending the HTTP request headers, not the data.
102 Remember that so we don't re-translate them into garbage. */
103 sending_http_headers = TRUE;
/* For chunked uploads, reserve room up front: the callback's data is read
   into the middle of the buffer so the hex length prefix + CRLF can be
   written before it (see the pointer rewind further below) and a trailing
   CRLF after it. */
107 if(data->req.upload_chunky) {
108 /* if chunked Transfer-Encoding */
109 buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
110 data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
113 /* this function returns a size_t, so we typecast to int to prevent warnings
114 with picky compilers */
115 nread = (int)data->state.fread_func(data->req.upload_fromhere, 1,
116 buffersize, data->state.in);
/* Handle the read callback's special (magic) return values first. */
118 if(nread == CURL_READFUNC_ABORT) {
119 failf(data, "operation aborted by callback");
121 return CURLE_ABORTED_BY_CALLBACK;
123 else if(nread == CURL_READFUNC_PAUSE) {
125 if(conn->handler->flags & PROTOPT_NONETWORK) {
126 /* protocols that work without network cannot be paused. This is
127 actually only FILE:// just now, and it can't pause since the transfer
128 isn't done using the "normal" procedure. */
129 failf(data, "Read callback asked for PAUSE when not supported!");
130 return CURLE_READ_ERROR;
133 struct SingleRequest *k = &data->req;
134 /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
135 k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
136 if(data->req.upload_chunky) {
137 /* Back out the preallocation done above */
138 data->req.upload_fromhere -= (8 + 2);
142 return CURLE_OK; /* nothing was read */
144 else if((size_t)nread > buffersize) {
145 /* the read function returned a too large value */
147 failf(data, "read function returned funny value");
148 return CURLE_READ_ERROR;
/* Wrap the freshly-read payload in chunked-encoding framing: hex length
   prefix before the data, CRLF after it. */
151 if(!data->req.forbidchunk && data->req.upload_chunky) {
152 /* if chunked Transfer-Encoding
158 /* On non-ASCII platforms the <DATA> may or may not be
159 translated based on set.prefer_ascii while the protocol
160 portion must always be translated to the network encoding.
161 To further complicate matters, line end conversion might be
162 done later on, so we need to prevent CRLFs from becoming
163 CRCRLFs if that's the case. To do this we use bare LFs
164 here, knowing they'll become CRLFs later on.
168 const char *endofline_native;
169 const char *endofline_network;
173 #ifdef CURL_DO_LINEEND_CONV
174 (data->set.prefer_ascii) ||
177 /* \n will become \r\n later on */
178 endofline_native = "\n";
179 endofline_network = "\x0a";
182 endofline_native = "\r\n";
183 endofline_network = "\x0d\x0a";
185 hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
186 "%x%s", nread, endofline_native);
188 /* move buffer pointer */
189 data->req.upload_fromhere -= hexlen;
192 /* copy the prefix to the buffer, leaving out the NUL */
193 memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
195 /* always append ASCII CRLF to the data */
196 memcpy(data->req.upload_fromhere + nread,
198 strlen(endofline_network));
200 #ifdef CURL_DOES_CONVERSIONS
203 if(data->set.prefer_ascii) {
204 /* translate the protocol and data */
208 /* just translate the protocol portion */
209 length = strlen(hexbuffer);
211 result = Curl_convert_to_network(data, data->req.upload_fromhere, length);
212 /* Curl_convert_to_network calls failf if unsuccessful */
215 #endif /* CURL_DOES_CONVERSIONS */
/* Only the hex prefix and no payload => this was a zero-size chunk,
   i.e. the terminating chunk of the upload (per the comment below). */
217 if((nread - hexlen) == 0)
218 /* mark this as done once this chunk is transferred */
219 data->req.upload_done = TRUE;
221 nread+=(int)strlen(endofline_native); /* for the added end of line */
223 #ifdef CURL_DOES_CONVERSIONS
224 else if((data->set.prefer_ascii) && (!sending_http_headers)) {
226 result = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
227 /* Curl_convert_to_network calls failf if unsuccessful */
231 #endif /* CURL_DOES_CONVERSIONS */
/*
 * NOTE(review): extraction gaps — the embedded line numbers skip, so some
 * statements/braces of this function are not visible here.
 */
240 * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
241 * POST/PUT with multi-pass authentication when a sending was denied and a
242 * resend is necessary.
244 CURLcode Curl_readrewind(struct connectdata *conn)
246 struct Curl_easy *data = conn->data;
248 conn->bits.rewindaftersend = FALSE; /* we rewind now */
250 /* explicitly switch off sending data on this connection now since we are
251 about to restart a new transfer and thus we want to avoid inadvertently
252 sending more data on the existing connection until the next transfer
254 data->req.keepon &= ~KEEP_SEND;
256 /* We have sent away data. If not using CURLOPT_POSTFIELDS or
257 CURLOPT_HTTPPOST, call app to rewind
259 if(data->set.postfields ||
260 (data->set.httpreq == HTTPREQ_POST_FORM))
/* For callback-driven uploads, ask the application to rewind: prefer the
   seek callback (CURLOPT_SEEKFUNCTION), fall back to the ioctl callback
   (CURLIOCMD_RESTARTREAD), and as a last resort fseek() a plain FILE*. */
263 if(data->set.seek_func) {
266 err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
268 failf(data, "seek callback returned error %d", (int)err);
269 return CURLE_SEND_FAIL_REWIND;
272 else if(data->set.ioctl_func) {
275 err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
276 data->set.ioctl_client);
277 infof(data, "the ioctl callback returned %d\n", (int)err);
280 /* FIXME: convert to a human readable error message */
281 failf(data, "ioctl callback returned error %d", (int)err);
282 return CURLE_SEND_FAIL_REWIND;
286 /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
287 given FILE * stream and we can actually attempt to rewind that
288 ourselves with fseek() */
289 if(data->state.fread_func == (curl_read_callback)fread) {
290 if(-1 != fseek(data->state.in, 0, SEEK_SET))
291 /* successful rewind */
295 /* no callback set or failure above, makes us fail at once */
296 failf(data, "necessary data rewind wasn't possible");
297 return CURLE_SEND_FAIL_REWIND;
/*
 * Returns non-zero when more received data may already be buffered for this
 * connection (SSH layer buffers, TLS buffers, or a closed HTTP/2 stream),
 * so the read loop should iterate again instead of waiting in select/poll.
 *
 * NOTE(review): the #else/#endif lines of the USE_NGHTTP2 conditional are
 * missing from this extraction — line "319" below is presumably the
 * non-NGHTTP2 branch of the return expression; confirm against the full file.
 */
303 static int data_pending(const struct connectdata *conn)
305 /* in the case of libssh2, we can never be really sure that we have emptied
306 its internal buffers so we MUST always try until we get EAGAIN back */
307 return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
308 #if defined(USE_NGHTTP2)
309 Curl_ssl_data_pending(conn, FIRSTSOCKET) ||
310 /* For HTTP/2, we may read up everything including responde body
311 with header fields in Curl_http_readwrite_headers. If no
312 content-length is provided, curl waits for the connection
313 close, which we emulate it using conn->proto.httpc.closed =
314 TRUE. The thing is if we read everything, then http2_recv won't
315 be called and we cannot signal the HTTP/2 stream has closed. As
316 a workaround, we return nonzero here to call http2_recv. */
317 ((conn->handler->protocol&PROTO_FAMILY_HTTP) && conn->httpversion == 20);
319 Curl_ssl_data_pending(conn, FIRSTSOCKET);
/*
 * Pushes 'thismuch' already-consumed bytes back into the connection's
 * master buffer by moving read_pos backwards, and flags the stream as
 * rewound so the caller knows buffered data is available. The debug part
 * below logs a preview of the buffer contents after the rewind.
 *
 * NOTE(review): the parameter list and the DEBUGF/#ifdef scaffolding are
 * partially missing from this extraction.
 */
323 static void read_rewind(struct connectdata *conn,
326 DEBUGASSERT(conn->read_pos >= thismuch);
328 conn->read_pos -= thismuch;
329 conn->bits.stream_was_rewound = TRUE;
336 show = CURLMIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
337 if(conn->master_buffer) {
338 memcpy(buf, conn->master_buffer + conn->read_pos, show);
345 DEBUGF(infof(conn->data,
346 "Buffer after stream rewind (read_pos = %zu): [%s]\n",
347 conn->read_pos, buf));
/*
 * NOTE(review): the return statements of this function are missing from
 * this extraction. The visible logic sets data->info.timecond = TRUE and
 * logs when the document timestamp fails the configured condition
 * (If-Modified-Since / If-Unmodified-Since style comparison).
 */
353 * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
354 * remote document with the time provided by CURLOPT_TIMEVAL
356 bool Curl_meets_timecondition(struct Curl_easy *data, time_t timeofdoc)
/* No document time or no user-set time => nothing to compare against. */
358 if((timeofdoc == 0) || (data->set.timevalue == 0))
361 switch(data->set.timecondition) {
362 case CURL_TIMECOND_IFMODSINCE:
364 if(timeofdoc <= data->set.timevalue) {
366 "The requested document is not new enough\n");
367 data->info.timecond = TRUE;
371 case CURL_TIMECOND_IFUNMODSINCE:
372 if(timeofdoc >= data->set.timevalue) {
374 "The requested document is not old enough\n");
375 data->info.timecond = TRUE;
/*
 * NOTE(review): this function is heavily truncated by the extraction (the
 * embedded line numbers skip throughout), so conditions, braces and some
 * statements are missing. The code is left byte-identical; comments below
 * only annotate what is visible.
 *
 * Core download loop: reads from the socket, parses headers, decodes
 * chunked encoding and content encodings, and hands body data to the
 * client write callback. Loops while data_pending() says more buffered
 * data may be available.
 */
385 * Go ahead and do a read if we have a readable socket or if
386 * the stream was rewound (in which case we have data in a
389 * return '*comeback' TRUE if we didn't properly drain the socket so this
390 * function should get called again without select() or similar in between!
392 static CURLcode readwrite_data(struct Curl_easy *data,
393 struct connectdata *conn,
394 struct SingleRequest *k,
395 int *didwhat, bool *done,
398 CURLcode result = CURLE_OK;
399 ssize_t nread; /* number of bytes read */
400 size_t excess = 0; /* excess bytes read */
401 bool is_empty_data = FALSE;
402 bool readmore = FALSE; /* used by RTP to signal for more data */
408 /* This is where we loop until we have read everything there is to
409 read or we get a CURLE_AGAIN */
411 size_t buffersize = data->set.buffer_size?
412 data->set.buffer_size : BUFSIZE;
413 size_t bytestoread = buffersize;
416 #if defined(USE_NGHTTP2)
417 /* For HTTP/2, read data without caring about the content
418 length. This is safe because body in HTTP/2 is always
419 segmented thanks to its framing layer. Meanwhile, we have to
420 call Curl_read to ensure that http2_handle_stream_close is
421 called when we read all incoming bytes for a particular
423 !((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
424 conn->httpversion == 20) &&
426 k->size != -1 && !k->header) {
427 /* make sure we don't read "too much" if we can help it since we
428 might be pipelining and then someone else might want to read what
430 curl_off_t totalleft = k->size - k->bytecount;
431 if(totalleft < (curl_off_t)bytestoread)
432 bytestoread = (size_t)totalleft;
436 /* receive data from the network! */
437 result = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);
439 /* read would've blocked */
440 if(CURLE_AGAIN == result)
441 break; /* get out of loop */
447 /* read nothing but since we wanted nothing we consider this an OK
448 situation to proceed from */
449 DEBUGF(infof(data, "readwrite_data: we're done!\n"));
/* First bytes of the transfer: start the transfer timer and, if we are in
   an Expect: 100-continue exchange, remember when we started waiting. */
453 if((k->bytecount == 0) && (k->writebytecount == 0)) {
454 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
455 if(k->exp100 > EXP100_SEND_DATA)
456 /* set time stamp to compare with when waiting for the 100 */
457 k->start100 = Curl_tvnow();
460 *didwhat |= KEEP_RECV;
461 /* indicates data of zero size, i.e. empty file */
462 is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;
464 /* NUL terminate, allowing string ops to be used */
465 if(0 < nread || is_empty_data) {
468 else if(0 >= nread) {
469 /* if we receive 0 or less here, the server closed the connection
470 and we bail out from this! */
471 DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
472 k->keepon &= ~KEEP_RECV;
476 /* Default buffer to use when we write the buffer, it may be changed
477 in the flow below before the actual storing is done. */
/* Give protocol handlers (e.g. RTSP) first crack at the raw data. */
480 if(conn->handler->readwrite) {
481 result = conn->handler->readwrite(data, conn, &nread, &readmore);
488 #ifndef CURL_DISABLE_HTTP
489 /* Since this is a two-state thing, we check if we are parsing
490 headers at the moment or not. */
492 /* we are in parse-the-header-mode */
493 bool stop_reading = FALSE;
494 result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
498 if(conn->handler->readwrite &&
499 (k->maxdownload <= 0 && nread > 0)) {
500 result = conn->handler->readwrite(data, conn, &nread, &readmore);
508 /* We've stopped dealing with input, get out of the do-while loop */
/* Leftover bytes after a zero-length body: push them back for the next
   request when pipelining, otherwise just report the excess. */
511 if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
513 "Rewinding stream by : %zd"
514 " bytes on url %s (zero-length body)\n",
515 nread, data->state.path);
516 read_rewind(conn, (size_t)nread);
520 "Excess found in a non pipelined read:"
522 " url = %s (zero-length body)\n",
523 nread, data->state.path);
530 #endif /* CURL_DISABLE_HTTP */
533 /* This is not an 'else if' since it may be a rest from the header
534 parsing, where the beginning of the buffer is headers and the end
536 if(k->str && !k->header && (nread > 0 || is_empty_data)) {
538 #ifndef CURL_DISABLE_HTTP
539 if(0 == k->bodywrites && !is_empty_data) {
540 /* These checks are only made the first time we are about to
541 write a piece of the body */
542 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
543 /* HTTP-only checks */
545 if(data->req.newurl) {
546 if(conn->bits.close) {
547 /* Abort after the headers if "follow Location" is set
548 and we're set to close anyway. */
549 k->keepon &= ~KEEP_RECV;
553 /* We have a new url to load, but since we want to be able
554 to re-use this connection properly, we read the full
555 response in "ignore more" */
556 k->ignorebody = TRUE;
557 infof(data, "Ignoring the response-body\n");
/* Resume sanity check: the server ignored our range request on a GET. */
559 if(data->state.resume_from && !k->content_range &&
560 (data->set.httpreq==HTTPREQ_GET) &&
563 if(k->size == data->state.resume_from) {
564 /* The resume point is at the end of file, consider this fine
565 even if it doesn't allow resume from here. */
566 infof(data, "The entire document is already downloaded");
567 connclose(conn, "already downloaded");
569 k->keepon &= ~KEEP_RECV;
574 /* we wanted to resume a download, although the server doesn't
575 * seem to support this and we did this with a GET (if it
576 * wasn't a GET we did a POST or PUT resume) */
577 failf(data, "HTTP server doesn't seem to support "
578 "byte ranges. Cannot resume.");
579 return CURLE_RANGE_ERROR;
582 if(data->set.timecondition && !data->state.range) {
583 /* A time condition has been set AND no ranges have been
584 requested. This seems to be what chapter 13.3.4 of
585 RFC 2616 defines to be the correct action for a
588 if(!Curl_meets_timecondition(data, k->timeofdoc)) {
590 /* We're simulating a http 304 from server so we return
591 what should have been returned from the server */
592 data->info.httpcode = 304;
593 infof(data, "Simulate a HTTP 304 response!\n");
594 /* we abort the transfer before it is completed == we ruin the
595 re-use ability. Close the connection */
596 connclose(conn, "Simulated 304 handling");
599 } /* we have a time condition */
601 } /* this is HTTP or RTSP */
602 } /* this is the first time we write a body part */
603 #endif /* CURL_DISABLE_HTTP */
607 /* pass data to the debug function before it gets "dechunked" */
608 if(data->set.verbose) {
610 Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
611 (size_t)k->hbuflen, conn);
612 if(k->badheader == HEADER_PARTHEADER)
613 Curl_debug(data, CURLINFO_DATA_IN,
614 k->str, (size_t)nread, conn);
617 Curl_debug(data, CURLINFO_DATA_IN,
618 k->str, (size_t)nread, conn);
621 #ifndef CURL_DISABLE_HTTP
624 * Here comes a chunked transfer flying and we need to decode this
625 * properly. While the name says read, this function both reads
626 * and writes away the data. The returned 'nread' holds the number
627 * of actual data it wrote to the client.
631 Curl_httpchunk_read(conn, k->str, nread, &nread);
633 if(CHUNKE_OK < res) {
634 if(CHUNKE_WRITE_ERROR == res) {
635 failf(data, "Failed writing data");
636 return CURLE_WRITE_ERROR;
638 failf(data, "%s in chunked-encoding", Curl_chunked_strerror(res));
639 return CURLE_RECV_ERROR;
641 else if(CHUNKE_STOP == res) {
643 /* we're done reading chunks! */
644 k->keepon &= ~KEEP_RECV; /* read no more */
646 /* There are now possibly N number of bytes at the end of the
647 str buffer that weren't written to the client.
649 We DO care about this data if we are pipelining.
650 Push it back to be read on the next pass. */
652 dataleft = conn->chunk.dataleft;
654 infof(conn->data, "Leftovers after chunking: %zu bytes\n",
656 if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
657 /* only attempt the rewind if we truly are pipelining */
658 infof(conn->data, "Rewinding %zu bytes\n",dataleft);
659 read_rewind(conn, dataleft);
663 /* If it returned OK, we just keep going */
665 #endif /* CURL_DISABLE_HTTP */
667 /* Account for body content stored in the header buffer */
668 if(k->badheader && !k->ignorebody) {
669 DEBUGF(infof(data, "Increasing bytecount by %zu from hbuflen\n",
671 k->bytecount += k->hbuflen;
/* Clamp to maxdownload: anything beyond it is "excess" and, when
   pipelining, belongs to the next response — rewind it. */
674 if((-1 != k->maxdownload) &&
675 (k->bytecount + nread >= k->maxdownload)) {
677 excess = (size_t)(k->bytecount + nread - k->maxdownload);
678 if(excess > 0 && !k->ignorebody) {
679 if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
680 /* The 'excess' amount below can't be more than BUFSIZE which
681 always will fit in a size_t */
683 "Rewinding stream by : %zu"
684 " bytes on url %s (size = %" CURL_FORMAT_CURL_OFF_T
685 ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
686 ", bytecount = %" CURL_FORMAT_CURL_OFF_T ", nread = %zd)\n",
687 excess, data->state.path,
688 k->size, k->maxdownload, k->bytecount, nread);
689 read_rewind(conn, excess);
693 "Excess found in a non pipelined read:"
695 ", size = %" CURL_FORMAT_CURL_OFF_T
696 ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
697 ", bytecount = %" CURL_FORMAT_CURL_OFF_T "\n",
698 excess, k->size, k->maxdownload, k->bytecount);
702 nread = (ssize_t) (k->maxdownload - k->bytecount);
703 if(nread < 0) /* this should be unusual */
706 k->keepon &= ~KEEP_RECV; /* we're done reading */
709 k->bytecount += nread;
711 Curl_pgrsSetDownloadCounter(data, k->bytecount);
713 if(!k->chunk && (nread || k->badheader || is_empty_data)) {
714 /* If this is chunky transfer, it was already written */
716 if(k->badheader && !k->ignorebody) {
717 /* we parsed a piece of data wrongly assuming it was a header
718 and now we output it as body instead */
720 /* Don't let excess data pollute body writes */
721 if(k->maxdownload == -1 || (curl_off_t)k->hbuflen <= k->maxdownload)
722 result = Curl_client_write(conn, CLIENTWRITE_BODY,
723 data->state.headerbuff,
726 result = Curl_client_write(conn, CLIENTWRITE_BODY,
727 data->state.headerbuff,
728 (size_t)k->maxdownload);
733 if(k->badheader < HEADER_ALLBAD) {
734 /* This switch handles various content encodings. If there's an
735 error here, be sure to check over the almost identical code
737 Make sure that ALL_CONTENT_ENCODINGS contains all the
738 encodings handled here. */
740 switch (conn->data->set.http_ce_skip ?
741 IDENTITY : k->auto_decoding) {
744 /* This is the default when the server sends no
745 Content-Encoding header. See Curl_readwrite_init; the
746 memset() call initializes k->auto_decoding to zero. */
749 #ifndef CURL_DISABLE_POP3
750 if(conn->handler->protocol&PROTO_FAMILY_POP3)
751 result = Curl_pop3_write(conn, k->str, nread);
753 #endif /* CURL_DISABLE_POP3 */
755 result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
762 /* Assume CLIENTWRITE_BODY; headers are not encoded. */
764 result = Curl_unencode_deflate_write(conn, k, nread);
768 /* Assume CLIENTWRITE_BODY; headers are not encoded. */
770 result = Curl_unencode_gzip_write(conn, k, nread);
774 failf (data, "Unrecognized content encoding type. "
775 "libcurl understands `identity', `deflate' and `gzip' "
776 "content encodings.");
777 result = CURLE_BAD_CONTENT_ENCODING;
782 k->badheader = HEADER_NORMAL; /* taken care of now */
788 } /* if(!header and data to read) */
790 if(conn->handler->readwrite &&
791 (excess > 0 && !conn->bits.stream_was_rewound)) {
792 /* Parse the excess data */
794 nread = (ssize_t)excess;
796 result = conn->handler->readwrite(data, conn, &nread, &readmore);
801 k->keepon |= KEEP_RECV; /* we're not done reading */
806 /* if we received nothing, the server closed the connection and we
808 k->keepon &= ~KEEP_RECV;
811 } while(data_pending(conn) && maxloops--);
/* Loop budget exhausted while data is still buffered: ask the multi
   machinery to call us again soon by forcing the "readable" bit. */
814 /* we mark it as read-again-please */
815 conn->cselect_bits = CURL_CSELECT_IN;
819 if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
821 /* When we've read the entire thing and the close bit is set, the server
822 may now close the connection. If there's now any kind of sending going
823 on from our side, we need to stop that immediately. */
824 infof(data, "we are done reading and this is set to close, stop send\n");
825 k->keepon &= ~KEEP_SEND; /* no writing anymore either */
/*
 * Called when the upload side of the transfer is complete: clears the
 * KEEP_SEND flag, tells the HTTP/2 layer we are done sending, and performs
 * the deferred rewind if one was requested (rewindaftersend). The return
 * handling after Curl_readrewind() is not visible in this extraction.
 */
831 static CURLcode done_sending(struct connectdata *conn,
832 struct SingleRequest *k)
834 k->keepon &= ~KEEP_SEND; /* we're done writing */
836 Curl_http2_done_sending(conn);
838 if(conn->bits.rewindaftersend) {
839 CURLcode result = Curl_readrewind(conn);
/*
 * NOTE(review): this function is truncated by the extraction (embedded
 * line numbers skip), so braces, declarations and some statements are
 * missing. Code left byte-identical; comments annotate the visible flow.
 *
 * Upload loop: fill the upload buffer via Curl_fillreadbuffer() when it is
 * empty, optionally convert line endings / escape SMTP EOB, then send with
 * Curl_write() and account for partial writes.
 */
848 * Send data to upload to the server, when the socket is writable.
850 static CURLcode readwrite_upload(struct Curl_easy *data,
851 struct connectdata *conn,
852 struct SingleRequest *k,
856 ssize_t bytes_written;
858 ssize_t nread; /* number of bytes read */
859 bool sending_http_headers = FALSE;
861 if((k->bytecount == 0) && (k->writebytecount == 0))
862 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
864 *didwhat |= KEEP_SEND;
868 /* only read more data if there's no upload data already
869 present in the upload buffer */
870 if(0 == data->req.upload_present) {
871 /* init the "upload from here" pointer */
872 data->req.upload_fromhere = k->uploadbuf;
874 if(!k->upload_done) {
875 /* HTTP pollution, this should be written nicer to become more
876 protocol agnostic. */
878 struct HTTP *http = data->req.protop;
880 if((k->exp100 == EXP100_SENDING_REQUEST) &&
881 (http->sending == HTTPSEND_BODY)) {
882 /* If this call is to send body data, we must take some action:
883 We have sent off the full HTTP 1.1 request, and we shall now
884 go into the Expect: 100 state and await such a header */
885 k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
886 k->keepon &= ~KEEP_SEND; /* disable writing */
887 k->start100 = Curl_tvnow(); /* timeout count starts now */
888 *didwhat &= ~KEEP_SEND; /* we didn't write anything actually */
890 /* set a timeout for the multi interface */
891 Curl_expire(data, data->set.expect_100_timeout);
895 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
896 if(http->sending == HTTPSEND_REQUEST)
897 /* We're sending the HTTP request headers, not the data.
898 Remember that so we don't change the line endings. */
899 sending_http_headers = TRUE;
901 sending_http_headers = FALSE;
904 result = Curl_fillreadbuffer(conn, BUFSIZE, &fillcount);
908 nread = (ssize_t)fillcount;
911 nread = 0; /* we're done uploading/reading */
913 if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
914 /* this is a paused transfer */
918 result = done_sending(conn, k);
924 /* store number of bytes available for upload */
925 data->req.upload_present = nread;
927 /* convert LF to CRLF if so asked */
928 if((!sending_http_headers) && (
929 #ifdef CURL_DO_LINEEND_CONV
930 /* always convert if we're FTPing in ASCII mode */
931 (data->set.prefer_ascii) ||
934 /* Do we need to allocate a scratch buffer? */
935 if(!data->state.scratch) {
/* 2 * BUFSIZE: worst case every byte is an LF that expands to CR LF. */
936 data->state.scratch = malloc(2 * BUFSIZE);
937 if(!data->state.scratch) {
938 failf(data, "Failed to alloc scratch buffer!");
940 return CURLE_OUT_OF_MEMORY;
945 * ASCII/EBCDIC Note: This is presumably a text (not binary)
946 * transfer so the data should already be in ASCII.
947 * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
948 * must be used instead of the escape sequences \r & \n.
950 for(i = 0, si = 0; i < nread; i++, si++) {
951 if(data->req.upload_fromhere[i] == 0x0a) {
952 data->state.scratch[si++] = 0x0d;
953 data->state.scratch[si] = 0x0a;
954 if(!data->set.crlf) {
955 /* we're here only because FTP is in ASCII mode...
956 bump infilesize for the LF we just added */
957 if(data->state.infilesize != -1)
958 data->state.infilesize++;
962 data->state.scratch[si] = data->req.upload_fromhere[i];
966 /* only perform the special operation if we really did replace
970 /* upload from the new (replaced) buffer instead */
971 data->req.upload_fromhere = data->state.scratch;
973 /* set the new amount too */
974 data->req.upload_present = nread;
978 #ifndef CURL_DISABLE_SMTP
979 if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
980 result = Curl_smtp_escape_eob(conn, nread);
984 #endif /* CURL_DISABLE_SMTP */
985 } /* if 0 == data->req.upload_present */
987 /* We have a partial buffer left from a previous "round". Use
988 that instead of reading more data */
991 /* write to socket (send away data) */
992 result = Curl_write(conn,
993 conn->writesockfd, /* socket to send to */
994 data->req.upload_fromhere, /* buffer pointer */
995 data->req.upload_present, /* buffer size */
996 &bytes_written); /* actually sent */
1001 if(data->set.verbose)
1002 /* show the data before we change the pointer upload_fromhere */
1003 Curl_debug(data, CURLINFO_DATA_OUT, data->req.upload_fromhere,
1004 (size_t)bytes_written, conn);
1006 k->writebytecount += bytes_written;
1008 if(k->writebytecount == data->state.infilesize) {
1009 /* we have sent all data we were supposed to */
1010 k->upload_done = TRUE;
1011 infof(data, "We are completely uploaded and fine\n");
/* Partial send: remember what remains so the next call resumes from the
   unsent tail instead of refilling the buffer. */
1014 if(data->req.upload_present != bytes_written) {
1015 /* we only wrote a part of the buffer (if anything), deal with it! */
1017 /* store the amount of bytes left in the buffer to write */
1018 data->req.upload_present -= bytes_written;
1020 /* advance the pointer where to find the buffer when the next send
1022 data->req.upload_fromhere += bytes_written;
1025 /* we've uploaded that buffer now */
1026 data->req.upload_fromhere = k->uploadbuf;
1027 data->req.upload_present = 0; /* no more bytes left */
1029 if(k->upload_done) {
1030 result = done_sending(conn, k);
1036 Curl_pgrsSetUploadCounter(data, k->writebytecount);
1038 } WHILE_FALSE; /* just to break out from! */
/*
 * NOTE(review): truncated by the extraction — several statements and
 * braces are missing from this view; code left byte-identical.
 *
 * Top-level per-event driver: determines socket readiness, dispatches to
 * readwrite_data()/readwrite_upload(), updates counters and progress,
 * enforces the Expect: 100-continue timeout and the overall transfer
 * timeout, and finally computes the '*done' flag from k->keepon.
 */
1044 * Curl_readwrite() is the low-level function to be called when data is to
1045 * be read and written to/from the connection.
1047 * return '*comeback' TRUE if we didn't properly drain the socket so this
1048 * function should get called again without select() or similar in between!
1050 CURLcode Curl_readwrite(struct connectdata *conn,
1051 struct Curl_easy *data,
1055 struct SingleRequest *k = &data->req;
1059 curl_socket_t fd_read;
1060 curl_socket_t fd_write;
1061 int select_res = conn->cselect_bits;
1063 conn->cselect_bits = 0;
1065 /* only use the proper socket if the *_HOLD bit is not set simultaneously as
1066 then we are in rate limiting state in that transfer direction */
1068 if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
1069 fd_read = conn->sockfd;
1071 fd_read = CURL_SOCKET_BAD;
1073 if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
1074 fd_write = conn->writesockfd;
1076 fd_write = CURL_SOCKET_BAD;
1078 if(conn->data->state.drain) {
1079 select_res |= CURL_CSELECT_IN;
1080 DEBUGF(infof(data, "Curl_readwrite: forcibly told to drain data\n"));
1083 if(!select_res) /* Call for select()/poll() only, if read/write/error
1084 status is not known. */
1085 select_res = Curl_socket_ready(fd_read, fd_write, 0);
1087 if(select_res == CURL_CSELECT_ERR) {
1088 failf(data, "select/poll returned error");
1089 return CURLE_SEND_ERROR;
1092 /* We go ahead and do a read if we have a readable socket or if
1093 the stream was rewound (in which case we have data in a
1095 if((k->keepon & KEEP_RECV) &&
1096 ((select_res & CURL_CSELECT_IN) || conn->bits.stream_was_rewound)) {
1098 result = readwrite_data(data, conn, k, &didwhat, done, comeback);
1103 /* If we still have writing to do, we check if we have a writable socket. */
1104 if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
1107 result = readwrite_upload(data, conn, k, &didwhat);
1112 k->now = Curl_tvnow();
1114 /* Update read/write counters */
1116 *k->bytecountp = k->bytecount; /* read count */
1117 if(k->writebytecountp)
1118 *k->writebytecountp = k->writebytecount; /* write count */
1121 /* no read no write, this is a timeout? */
1122 if(k->exp100 == EXP100_AWAITING_CONTINUE) {
1123 /* This should allow some time for the header to arrive, but only a
1124 very short time as otherwise it'll be too much wasted time too
1127 /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1129 Therefore, when a client sends this header field to an origin server
1130 (possibly via a proxy) from which it has never seen a 100 (Continue)
1131 status, the client SHOULD NOT wait for an indefinite period before
1132 sending the request body.
1136 long ms = Curl_tvdiff(k->now, k->start100);
1137 if(ms >= data->set.expect_100_timeout) {
1138 /* we've waited long enough, continue anyway */
1139 k->exp100 = EXP100_SEND_DATA;
1140 k->keepon |= KEEP_SEND;
1141 infof(data, "Done waiting for 100-continue\n");
1146 if(Curl_pgrsUpdate(conn))
1147 result = CURLE_ABORTED_BY_CALLBACK;
1149 result = Curl_speedcheck(data, k->now);
/* Overall transfer timeout check; which failf branch runs presumably
   depends on whether k->size is known — the condition line is missing
   from this extraction. */
1154 if(0 > Curl_timeleft(data, &k->now, FALSE)) {
1156 failf(data, "Operation timed out after %ld milliseconds with %"
1157 CURL_FORMAT_CURL_OFF_T " out of %"
1158 CURL_FORMAT_CURL_OFF_T " bytes received",
1159 Curl_tvdiff(k->now, data->progress.t_startsingle), k->bytecount,
1163 failf(data, "Operation timed out after %ld milliseconds with %"
1164 CURL_FORMAT_CURL_OFF_T " bytes received",
1165 Curl_tvdiff(k->now, data->progress.t_startsingle), k->bytecount);
1167 return CURLE_OPERATION_TIMEDOUT;
1172 * The transfer has been performed. Just make some general checks before
1176 if(!(data->set.opt_no_body) && (k->size != -1) &&
1177 (k->bytecount != k->size) &&
1178 #ifdef CURL_DO_LINEEND_CONV
1179 /* Most FTP servers don't adjust their file SIZE response for CRLFs,
1180 so we'll check to see if the discrepancy can be explained
1181 by the number of CRLFs we've changed to LFs.
1183 (k->bytecount != (k->size + data->state.crlf_conversions)) &&
1184 #endif /* CURL_DO_LINEEND_CONV */
1185 !data->req.newurl) {
1186 failf(data, "transfer closed with %" CURL_FORMAT_CURL_OFF_T
1187 " bytes remaining to read",
1188 k->size - k->bytecount);
1189 return CURLE_PARTIAL_FILE;
1191 else if(!(data->set.opt_no_body) &&
1193 (conn->chunk.state != CHUNK_STOP)) {
1195 * In chunked mode, return an error if the connection is closed prior to
1196 * the empty (terminating) chunk is read.
1198 * The condition above used to check for
1199 * conn->proto.http->chunk.datasize != 0 which is true after reading
1200 * *any* chunk, not just the empty chunk.
1203 failf(data, "transfer closed with outstanding read data remaining");
1204 return CURLE_PARTIAL_FILE;
1206 if(Curl_pgrsUpdate(conn))
1207 return CURLE_ABORTED_BY_CALLBACK;
1210 /* Now update the "done" boolean we return */
1211 *done = (0 == (k->keepon&(KEEP_RECV|KEEP_SEND|
1212 KEEP_RECV_PAUSE|KEEP_SEND_PAUSE))) ? TRUE : FALSE;
/*
 * NOTE(review): parameter list and some lines are truncated in this
 * extraction. Visible behavior: fills 'sock' with up to two sockets (read
 * and/or write) and returns a GETSOCK_* bitmap describing which slot is
 * which; connections in HOLD or PAUSE state are excluded.
 */
1218 * Curl_single_getsock() gets called by the multi interface code when the app
1219 * has requested to get the sockets for the current connection. This function
1220 * will then be called once for every connection that the multi interface
1221 * keeps track of. This function will only be called for connections that are
1222 * in the proper state to have this information available.
1224 int Curl_single_getsock(const struct connectdata *conn,
1225 curl_socket_t *sock, /* points to numsocks number
1229 const struct Curl_easy *data = conn->data;
1230 int bitmap = GETSOCK_BLANK;
1231 unsigned sockindex = 0;
/* A protocol handler may supply its own socket set (e.g. multiplexed
   protocols); defer to it entirely when present. */
1233 if(conn->handler->perform_getsock)
1234 return conn->handler->perform_getsock(conn, sock, numsocks);
1237 /* simple check but we might need two slots */
1238 return GETSOCK_BLANK;
1240 /* don't include HOLD and PAUSE connections */
1241 if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {
1243 DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);
1245 bitmap |= GETSOCK_READSOCK(sockindex);
1246 sock[sockindex] = conn->sockfd;
1249 /* don't include HOLD and PAUSE connections */
1250 if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {
1252 if((conn->sockfd != conn->writesockfd) ||
1253 bitmap == GETSOCK_BLANK) {
1254 /* only if they are not the same socket and we have a readable
1255 one, we increase index */
1256 if(bitmap != GETSOCK_BLANK)
1257 sockindex++; /* increase index if we need two entries */
1259 DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);
1261 sock[sockindex] = conn->writesockfd;
1264 bitmap |= GETSOCK_WRITESOCK(sockindex);
1270 /* Curl_init_CONNECT() gets called each time the handle switches to CONNECT
1271    which means this gets called once for each subsequent redirect etc */
1272 void Curl_init_CONNECT(struct Curl_easy *data)
  /* re-seed the request-scoped read callback and its userdata from the
     application-set values, so every (re)connect starts from the user's
     configuration rather than whatever a previous request left behind */
1274   data->state.fread_func = data->set.fread_func_set;
1275   data->state.in = data->set.in_set;
1279  * Curl_pretransfer() is called immediately before a transfer starts, and only
1280  * once for one transfer no matter if it has redirects or do multi-pass
1281  * authentication etc.
 *
 * Validates the URL, resets per-transfer state, loads cookies/resolve lists,
 * arms timeouts and initializes progress tracking. Returns a CURLcode.
 * NOTE(review): some original lines are elided in this excerpt (declaration of
 * 'result', several error checks, closing braces).
1283 CURLcode Curl_pretransfer(struct Curl_easy *data)
1286   if(!data->change.url) {
1287     /* we can't do anything without URL */
1288     failf(data, "No URL set!");
1289     return CURLE_URL_MALFORMAT;
1292   /* Init the SSL session ID cache here. We do it here since we want to do it
1293      after the *_setopt() calls (that could specify the size of the cache) but
1294      before any transfer takes place. */
1295   result = Curl_ssl_initsessions(data, data->set.ssl.max_ssl_sessions);
  /* reset everything that must not leak from a previous use of this handle */
1299   data->set.followlocation=0; /* reset the location-follow counter */
1300   data->state.this_is_a_follow = FALSE; /* reset this */
1301   data->state.errorbuf = FALSE; /* no error has occurred */
1302   data->state.httpversion = 0; /* don't assume any particular server version */
1304   data->state.authproblem = FALSE;
1305   data->state.authhost.want = data->set.httpauth;
1306   data->state.authproxy.want = data->set.proxyauth;
1307   Curl_safefree(data->info.wouldredirect);
1308   data->info.wouldredirect = NULL;
  /* pick the upload size source: PUT uses the file size, otherwise (elided
     else-branch) the POST field size is used */
1310   if(data->set.httpreq == HTTPREQ_PUT)
1311     data->state.infilesize = data->set.filesize;
1313     data->state.infilesize = data->set.postfieldsize;
1315   /* If there is a list of cookie files to read, do it now! */
1316   if(data->change.cookielist)
1317     Curl_cookie_loadfiles(data);
1319   /* If there is a list of host pairs to deal with */
1320   if(data->change.resolve)
1321     result = Curl_loadhostpairs(data);
1324     /* Allow data->set.use_port to set which port to use. This needs to be
1325      * disabled for example when we follow Location: headers to URLs using
1326      * different ports! */
1327     data->state.allow_port = TRUE;
1329 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1330     /*************************************************************
1331      * Tell signal handler to ignore SIGPIPE
1332      *************************************************************/
1333     if(!data->set.no_signal)
  /* remember the previous handler so Curl_posttransfer() can restore it */
1334       data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1337     Curl_initinfo(data); /* reset session-specific information "variables" */
1338     Curl_pgrsResetTimesSizes(data);
1339     Curl_pgrsStartNow(data);
  /* arm the expire timers for the overall and connect timeouts, if set */
1341     if(data->set.timeout)
1342       Curl_expire(data, data->set.timeout);
1344     if(data->set.connecttimeout)
1345       Curl_expire(data, data->set.connecttimeout);
1347     /* In case the handle is re-used and an authentication method was picked
1348        in the session we need to make sure we only use the one(s) we now
1349        consider to be fine */
1350     data->state.authhost.picked &= data->state.authhost.want;
1351     data->state.authproxy.picked &= data->state.authproxy.want;
1353     if(data->set.wildcardmatch) {
1354       struct WildcardData *wc = &data->wildcard;
  /* (elided state check) only initialize the wildcard machinery once */
1356         result = Curl_wildcard_init(wc); /* init wildcard structures */
1358           return CURLE_OUT_OF_MEMORY;
1368  * Curl_posttransfer() is called immediately after a transfer ends
 *
 * Undoes the SIGPIPE suppression set up by Curl_pretransfer() on platforms
 * that need the signal() workaround; otherwise a no-op.
1370 CURLcode Curl_posttransfer(struct Curl_easy *data)
1372 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1373   /* restore the signal handler for SIGPIPE before we get back */
1374   if(!data->set.no_signal)
1375     signal(SIGPIPE, data->state.prev_signal);
  /* when the signal workaround is compiled out, 'data' is otherwise unused */
1377   (void)data; /* unused parameter */
1383 #ifndef CURL_DISABLE_HTTP
1385  * strlen_url() returns the length of the given URL if the spaces within the
1386  * URL were properly URL encoded.
 *
 * Used to size the buffer that strcpy_url() later writes into; the two
 * functions must agree on the encoding applied.
 * NOTE(review): the loop body (original lines 1395-1414) is elided from this
 * excerpt.
1388 static size_t strlen_url(const char *url)
1390   const unsigned char *ptr;
1392   bool left=TRUE; /* left side of the ? */
  /* walk the URL byte by byte; iterate as unsigned char to keep high-byte
     values well-defined */
1394   for(ptr=(unsigned char *)url; *ptr; ptr++) {
1415 /* strcpy_url() copies a url to a output buffer and URL-encodes the spaces in
1416  * the source URL accordingly.
 *
 * The destination must be at least strlen_url(url)+1 bytes; spaces become
 * "%20" before the '?' and '+' after it (see the two branches below).
 * NOTE(review): several loop-interior lines are elided from this excerpt.
1418 static void strcpy_url(char *output, const char *url)
1420   /* we must add this with whitespace-replacing */
1422   const unsigned char *iptr;
1423   char *optr = output;
1424   for(iptr = (unsigned char *)url; /* read from here */
1425       *iptr; /* until zero byte */
  /* (elided branch) bytes needing escaping are emitted as a 3-char %XX
     sequence; snprintf writes at most 3 chars + NUL into the output */
1433       snprintf(optr, 4, "%%%02x", *iptr);
  /* space on the left side of '?' is percent-encoded... */
1441         *optr++='%'; /* add a '%' */
1442         *optr++='2'; /* add a '2' */
1443         *optr++='0'; /* add a '0' */
  /* ...while space on the right side (query part) becomes '+' */
1446         *optr++='+'; /* add a '+' here */
1450   *optr=0; /* zero terminate output buffer */
1455 * Returns true if the given URL is absolute (as opposed to relative)
1457 static bool is_absolute_url(const char *url)
1459 char prot[16]; /* URL protocol string storage */
1460 char letter; /* used for a silly sscanf */
1462 return (2 == sscanf(url, "%15[^?&/:]://%c", prot, &letter)) ? TRUE : FALSE;
1466  * Concatenate a relative URL to a base URL making it absolute.
1467  * URL-encodes any spaces.
1468  * The returned pointer must be freed by the caller unless NULL
1469  * (returns NULL on out of memory).
 *
 * NOTE(review): a number of interior lines (local declarations, closing
 * braces, some else-branches) are elided from this excerpt — see the jumps in
 * the embedded line numbers.
1471 static char *concat_url(const char *base, const char *relurl)
1474      TRY to append this new path to the old URL
1475      to the right of the host part. Oh crap, this is doomed to cause
1476      problems in the future...
1483   const char *useurl = relurl;
1486   /* we must make our own copy of the URL to play with, as it may
1487      point to read-only data */
1488   char *url_clone=strdup(base);
  /* (elided null check) give up immediately if the copy failed */
1491     return NULL; /* skip out of this NOW */
1493   /* protsep points to the start of the host name */
1494   protsep=strstr(url_clone, "//");
  /* (elided fallback) when there is no "//", protsep points at the clone */
1498     protsep+=2; /* pass the slashes */
1500   if('/' != relurl[0]) {
  /* relative path (or bare query): splice onto the base URL's path */
1503     /* First we need to find out if there's a ?-letter in the URL,
1504        and cut it and the right-side of that off */
1505     pathsep = strchr(protsep, '?');
1509     /* we have a relative path to append to the last slash if there's one
1510        available, or if the new URL is just a query string (starts with a
1511        '?') we append the new one at the end of the entire currently worked
1513     if(useurl[0] != '?') {
1514       pathsep = strrchr(protsep, '/');
1519     /* Check if there's any slash after the host name, and if so, remember
1520        that position instead */
1521     pathsep = strchr(protsep, '/');
1523       protsep = pathsep+1;
1527     /* now deal with one "./" or any amount of "../" in the newurl
1528        and act accordingly */
1530     if((useurl[0] == '.') && (useurl[1] == '/'))
1531       useurl+=2; /* just skip the "./" */
1533     while((useurl[0] == '.') &&
1534           (useurl[1] == '.') &&
1535           (useurl[2] == '/')) {
1537       useurl+=3; /* pass the "../" */
1542       /* cut off one more level from the right of the original URL */
1543       pathsep = strrchr(protsep, '/');
1554     /* We got a new absolute path for this server */
1556     if((relurl[0] == '/') && (relurl[1] == '/')) {
1557       /* the new URL starts with //, just keep the protocol part from the
  /* protocol-relative redirect: reuse the scheme, take host+path from it */
1560       useurl = &relurl[2]; /* we keep the slashes from the original, so we
1561                               skip the new ones */
1564       /* cut off the original URL from the first slash, or deal with URLs
1566       pathsep = strchr(protsep, '/');
1568         /* When people use badly formatted URLs, such as
1569            "http://www.url.com?dir=/home/daniel" we must not use the first
1570            slash, if there's a ?-letter before it! */
1571         char *sep = strchr(protsep, '?');
1572         if(sep && (sep < pathsep))
1577         /* There was no slash. Now, since we might be operating on a badly
1578            formatted URL, such as "http://www.url.com?id=2380" which doesn't
1579            use a slash separator as it is supposed to, we need to check for a
1580            ?-letter as well! */
1581         pathsep = strchr(protsep, '?');
1588   /* If the new part contains a space, this is a mighty stupid redirect
1589      but we still make an effort to do "right". To the left of a '?'
1590      letter we replace each space with %20 while it is replaced with '+'
1591      on the right side of the '?' letter.
  /* size the result using the encoded length, not the raw length */
1593   newlen = strlen_url(useurl);
1595   urllen = strlen(url_clone);
1597   newest = malloc(urllen + 1 + /* possible slash */
1598                   newlen + 1 /* zero byte */);
  /* (elided null check) on OOM, release the clone and return NULL */
1601     free(url_clone); /* don't leak this */
1605   /* copy over the root url part */
1606   memcpy(newest, url_clone, urllen);
1608   /* check if we need to append a slash */
1609   if(('/' == useurl[0]) || (protsep && !*protsep) || ('?' == useurl[0]))
  /* (elided else-branch) otherwise insert the separating slash here */
1612     newest[urllen++]='/';
1614   /* then append the new piece on the right side */
1615   strcpy_url(&newest[urllen], useurl);
1621 #endif /* CURL_DISABLE_HTTP */
1624  * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
1625  * as given by the remote server and set up the new URL to request.
 *
 * Takes ownership of 'newurl' (malloc()ed by the caller): it is either
 * installed as data->change.url, stored as info.wouldredirect (FOLLOW_FAKE),
 * or freed here. Also adjusts the request method per the HTTP status code.
 * NOTE(review): interior lines are elided in this excerpt (local
 * declarations, closing braces, some else/error branches).
1627 CURLcode Curl_follow(struct Curl_easy *data,
1628                      char *newurl, /* this 'newurl' is the Location: string,
1629                                       and it must be malloc()ed before passed
1631                      followtype type) /* see transfer.h */
1633 #ifdef CURL_DISABLE_HTTP
1637   /* Location: following will not happen when HTTP is disabled */
1638   return CURLE_TOO_MANY_REDIRECTS;
1641   /* Location: redirect */
1642   bool disallowport = FALSE;
1644   if(type == FOLLOW_REDIR) {
  /* enforce the user's redirect limit before doing anything else */
1645     if((data->set.maxredirs != -1) &&
1646        (data->set.followlocation >= data->set.maxredirs)) {
1647       failf(data, "Maximum (%ld) redirects followed", data->set.maxredirs);
1648       return CURLE_TOO_MANY_REDIRECTS;
1651     /* mark the next request as a followed location: */
1652     data->state.this_is_a_follow = TRUE;
1654     data->set.followlocation++; /* count location-followers */
1656     if(data->set.http_auto_referer) {
1657       /* We are asked to automatically set the previous URL as the referer
1658          when we get the next URL. We pick the ->url field, which may or may
1659          not be 100% correct */
  /* free any referer string we allocated on a previous redirect first */
1661       if(data->change.referer_alloc) {
1662         Curl_safefree(data->change.referer);
1663         data->change.referer_alloc = FALSE;
1666       data->change.referer = strdup(data->change.url);
1667       if(!data->change.referer)
1668         return CURLE_OUT_OF_MEMORY;
1669       data->change.referer_alloc = TRUE; /* yes, free this later */
1673   if(!is_absolute_url(newurl)) {
1675      *DANG* this is an RFC 2068 violation. The URL is supposed
1676      to be absolute and this doesn't seem to be that!
  /* build an absolute URL from the current one plus the relative part */
1678     char *absolute = concat_url(data->change.url, newurl);
1680       return CURLE_OUT_OF_MEMORY;
  /* (elided) 'newurl' is replaced by 'absolute' here */
1685     /* The new URL MAY contain space or high byte values, that means a mighty
1686        stupid redirect URL but we still make an effort to do "right". */
1688     size_t newlen = strlen_url(newurl);
1690     /* This is an absolute URL, don't allow the custom port number */
1691     disallowport = TRUE;
1693     newest = malloc(newlen+1); /* get memory for this */
1695       return CURLE_OUT_OF_MEMORY;
1696     strcpy_url(newest, newurl); /* create a space-free URL */
1698     free(newurl); /* that was no good */
1699     newurl = newest; /* use this instead now */
1703   if(type == FOLLOW_FAKE) {
1704     /* we're only figuring out the new url if we would've followed locations
1705        but now we're done so we can get out! */
1706     data->info.wouldredirect = newurl;
  /* (elided return) FOLLOW_FAKE stops here without issuing a request */
1711     data->state.allow_port = FALSE;
  /* replace the handle's URL with the redirect target, freeing the old one
     if we own it */
1713   if(data->change.url_alloc) {
1714     Curl_safefree(data->change.url);
1715     data->change.url_alloc = FALSE;
1718   data->change.url = newurl;
1719   data->change.url_alloc = TRUE;
1720   newurl = NULL; /* don't free! */
1722   infof(data, "Issue another request to this URL: '%s'\n", data->change.url);
1725    * We get here when the HTTP code is 300-399 (and 401). We need to perform
1726    * differently based on exactly what return code there was.
1728    * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
1729    * a HTTP (proxy-) authentication scheme other than Basic.
1731   switch(data->info.httpcode) {
1732     /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
1733        Authorization: XXXX header in the HTTP request code snippet */
1734     /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
1735        Proxy-Authorization: XXXX header in the HTTP request code snippet */
1736     /* 300 - Multiple Choices */
1737     /* 306 - Not used */
1738     /* 307 - Temporary Redirect */
1739   default: /* for all above (and the unknown ones) */
1740     /* Some codes are explicitly mentioned since I've checked RFC2616 and they
1741      * seem to be OK to POST to.
1744   case 301: /* Moved Permanently */
1745     /* (quote from RFC7231, section 6.4.2)
1747      * Note: For historical reasons, a user agent MAY change the request
1748      * method from POST to GET for the subsequent request. If this
1749      * behavior is undesired, the 307 (Temporary Redirect) status code
1750      * can be used instead.
1754      * Many webservers expect this, so these servers often answers to a POST
1755      * request with an error page. To be sure that libcurl gets the page that
1756      * most user agents would get, libcurl has to force GET.
1758      * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
1759      * can be overridden with CURLOPT_POSTREDIR.
1761     if((data->set.httpreq == HTTPREQ_POST
1762         || data->set.httpreq == HTTPREQ_POST_FORM)
1763        && !(data->set.keep_post & CURL_REDIR_POST_301)) {
1764       infof(data, "Switch from POST to GET\n");
1765       data->set.httpreq = HTTPREQ_GET;
1768   case 302: /* Found */
1769     /* (quote from RFC7231, section 6.4.3)
1771      * Note: For historical reasons, a user agent MAY change the request
1772      * method from POST to GET for the subsequent request. If this
1773      * behavior is undesired, the 307 (Temporary Redirect) status code
1774      * can be used instead.
1778      * Many webservers expect this, so these servers often answers to a POST
1779      * request with an error page. To be sure that libcurl gets the page that
1780      * most user agents would get, libcurl has to force GET.
1782      * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
1783      * can be overridden with CURLOPT_POSTREDIR.
1785     if((data->set.httpreq == HTTPREQ_POST
1786         || data->set.httpreq == HTTPREQ_POST_FORM)
1787        && !(data->set.keep_post & CURL_REDIR_POST_302)) {
1788       infof(data, "Switch from POST to GET\n");
1789       data->set.httpreq = HTTPREQ_GET;
1793   case 303: /* See Other */
1794     /* Disable both types of POSTs, unless the user explicitely
1795        asks for POST after POST */
1796     if(data->set.httpreq != HTTPREQ_GET
1797        && !(data->set.keep_post & CURL_REDIR_POST_303)) {
1798       data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
1799       infof(data, "Disables POST, goes with %s\n",
1800             data->set.opt_no_body?"HEAD":"GET");
1803   case 304: /* Not Modified */
1804     /* 304 means we did a conditional request and it was "Not modified".
1805      * We shouldn't get any Location: header in this response!
1808   case 305: /* Use Proxy */
1809     /* (quote from RFC2616, section 10.3.6):
1810      * "The requested resource MUST be accessed through the proxy given
1811      * by the Location field. The Location field gives the URI of the
1812      * proxy. The recipient is expected to repeat this single request
1813      * via the proxy. 305 responses MUST only be generated by origin
  /* record the redirect timestamp and restart the per-request timers */
1818   Curl_pgrsTime(data, TIMER_REDIRECT);
1819   Curl_pgrsResetTimesSizes(data);
1822 #endif /* CURL_DISABLE_HTTP */
1825 /* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.
1827    NOTE: that the *url is malloc()ed. */
 /* Caller owns and must free the returned *url. A retry is suggested when a
    re-used connection produced zero response bytes (it likely died between
    uses). NOTE(review): some lines are elided in this excerpt (remaining
    parameters, early returns, closing braces). */
1828 CURLcode Curl_retry_request(struct connectdata *conn,
1831   struct Curl_easy *data = conn->data;
1835   /* if we're talking upload, we can't do the checks below, unless the protocol
1836      is HTTP as when uploading over HTTP we will still get a response */
1837   if(data->set.upload &&
1838      !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
  /* zero body AND zero header bytes received means the connection gave us
     nothing at all (elided: also requires a re-used connection) */
1841   if((data->req.bytecount + data->req.headerbytecount == 0) &&
1843       (data->set.rtspreq != RTSPREQ_RECEIVE)) {
1844     /* We didn't get a single byte when we attempted to re-use a
1845        connection. This might happen if the connection was left alive when we
1846        were done using it before, but that was closed when we wanted to use it
1847        again. Bad luck. Retry the same request on a fresh connect! */
1848     infof(conn->data, "Connection died, retrying a fresh connect\n");
1849     *url = strdup(conn->data->change.url);
1851       return CURLE_OUT_OF_MEMORY;
1853     connclose(conn, "retry"); /* close this connection */
1854     conn->bits.retry = TRUE; /* mark this as a connection we're about
1855                                 to retry. Marking it this way should
1856                                 prevent i.e HTTP transfers to return
1857                                 error just because nothing has been
  /* if we already sent part of an HTTP request body, rewind the read stream
     so the retried request can send it again from the start */
1861     if(conn->handler->protocol&PROTO_FAMILY_HTTP) {
1862       struct HTTP *http = data->req.protop;
1863       if(http->writebytecount)
1864         return Curl_readrewind(conn);
1871 * Curl_setup_transfer() is called to setup some basic properties for the
1872 * upcoming transfer.
1875 Curl_setup_transfer(
1876 struct connectdata *conn, /* connection data */
1877 int sockindex, /* socket index to read from or -1 */
1878 curl_off_t size, /* -1 if unknown at this point */
1879 bool getheader, /* TRUE if header parsing is wanted */
1880 curl_off_t *bytecountp, /* return number of bytes read or NULL */
1881 int writesockindex, /* socket index to write to, it may very well be
1882 the same we read from. -1 disables */
1883 curl_off_t *writecountp /* return number of bytes written or NULL */
1886 struct Curl_easy *data;
1887 struct SingleRequest *k;
1889 DEBUGASSERT(conn != NULL);
1894 DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));
1896 /* now copy all input parameters */
1897 conn->sockfd = sockindex == -1 ?
1898 CURL_SOCKET_BAD : conn->sock[sockindex];
1899 conn->writesockfd = writesockindex == -1 ?
1900 CURL_SOCKET_BAD:conn->sock[writesockindex];
1901 k->getheader = getheader;
1904 k->bytecountp = bytecountp;
1905 k->writebytecountp = writecountp;
1907 /* The code sequence below is placed in this function just because all
1908 necessary input is not always known in do_complete() as this function may
1909 be called after that */
1914 Curl_pgrsSetDownloadSize(data, size);
1916 /* we want header and/or body, if neither then don't do this! */
1917 if(k->getheader || !data->set.opt_no_body) {
1919 if(conn->sockfd != CURL_SOCKET_BAD)
1920 k->keepon |= KEEP_RECV;
1922 if(conn->writesockfd != CURL_SOCKET_BAD) {
1923 struct HTTP *http = data->req.protop;
1926 Even if we require a 100-return code before uploading data, we might
1927 need to write data before that since the REQUEST may not have been
1928 finished sent off just yet.
1930 Thus, we must check if the request has been sent before we set the
1931 state info where we wait for the 100-return code
1933 if((data->state.expect100header) &&
1934 (conn->handler->protocol&PROTO_FAMILY_HTTP) &&
1935 (http->sending == HTTPSEND_BODY)) {
1936 /* wait with write until we either got 100-continue or a timeout */
1937 k->exp100 = EXP100_AWAITING_CONTINUE;
1938 k->start100 = Curl_tvnow();
1940 /* Set a timeout for the multi interface. Add the inaccuracy margin so
1941 that we don't fire slightly too early and get denied to run. */
1942 Curl_expire(data, data->set.expect_100_timeout);
1945 if(data->state.expect100header)
1946 /* when we've sent off the rest of the headers, we must await a
1947 100-continue but first finish sending the request */
1948 k->exp100 = EXP100_SENDING_REQUEST;
1950 /* enable the write bit when we're not waiting for continue */
1951 k->keepon |= KEEP_SEND;
1953 } /* if(conn->writesockfd != CURL_SOCKET_BAD) */
1954 } /* if(k->getheader || !data->set.opt_no_body) */