1 /***************************************************************************
3 * Project ___| | | | _ \| |
5 * | (__| |_| | _ <| |___
6 * \___|\___/|_| \_\_____|
8 * Copyright (C) 1998 - 2003, Daniel Stenberg, <daniel@haxx.se>, et al.
10 * This software is licensed as described in the file COPYING, which
11 * you should have received as part of this distribution. The terms
12 * are also available at http://curl.haxx.se/docs/copyright.html.
14 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15 * copies of the Software, and permit persons to whom the Software is
16 * furnished to do so, under the terms of the COPYING file.
18 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19 * KIND, either express or implied.
22 ***************************************************************************/
26 /* -- WIN32 approved -- */
32 #include <sys/types.h>
39 #if defined(WIN32) && !defined(__GNUC__) || defined(__MINGW32__)
44 #ifdef HAVE_SYS_SOCKET_H
45 #include <sys/socket.h>
47 #include <netinet/in.h>
49 #include <sys/resource.h>
54 #ifdef HAVE_ARPA_INET_H
55 #include <arpa/inet.h>
60 #include <sys/ioctl.h>
63 #ifdef HAVE_SYS_PARAM_H
64 #include <sys/param.h>
67 #ifdef HAVE_SYS_SELECT_H
68 #include <sys/select.h>
72 #error "We can't compile without select() support!"
75 #error "We can't compile without socket() support!"
81 #include <curl/curl.h>
82 #include <curl/types.h>
85 #include "content_encoding.h" /* content encoding support. 08/27/02 jhrg */
90 #include "speedcheck.h"
98 #include "http_digest.h"
100 #define _MPRINTF_REPLACE /* use our functions only */
101 #include <curl/mprintf.h>
103 /* The last #include file should be: */
105 #include "memdebug.h"
/* NOTE(review): classic min() macro — each argument is evaluated twice, so it
   must never be passed expressions with side effects (e.g. min(i++, n)). */
109 #define min(a, b) ((a) < (b) ? (a) : (b))
/* how long (in milliseconds) to wait for a 100-continue response before
   sending the request body anyway (see RFC 2616, section 8.2.3) */
112 #define CURL_TIMEOUT_EXPECT_100 1000 /* counting ms here */
120 /* We keep this static and global since this is read-only and NEVER
121 changed. It should just remain a blanked-out timeout value. */
/* {0,0} makes select() poll: it returns immediately instead of blocking */
122 static struct timeval notimeout={0,0};
125 * This function will call the read callback to fill our buffer with data
/*
 * fillbuffer() invokes the user's read callback (conn->fread) to fetch up to
 * 'bytes' bytes of upload data into conn->upload_fromhere.  When chunked
 * Transfer-Encoding is in use it reserves room around the data for the chunk
 * framing: 8+2+2 bytes are subtracted from the usable buffer size (32-bit hex
 * length + CRLF before the data, plus CRLF after it), and the write pointer
 * is advanced by 10 (hex + leading CRLF only) so the size prefix can later be
 * copied in front of the data that was read.
 * NOTE(review): several lines of this function are not visible in this
 * listing; the description above covers only what the visible code shows.
 */
128 static int fillbuffer(struct connectdata *conn,
131 int buffersize = bytes;
134 if(conn->bits.upload_chunky) {
135 /* if chunked Transfer-Encoding */
136 buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
137 conn->upload_fromhere += 10; /* 32bit hex + CRLF */
/* let the app supply the actual payload bytes */
140 nread = conn->fread(conn->upload_fromhere, 1,
141 buffersize, conn->fread_in);
143 if(!conn->bits.forbidchunk && conn->bits.upload_chunky) {
144 /* if chunked Transfer-Encoding */
/* format the chunk-size line; hexlen is the number of prefix bytes */
146 int hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
148 /* move buffer pointer */
149 conn->upload_fromhere -= hexlen;
152 /* copy the prefix to the buffer */
153 memcpy(conn->upload_fromhere, hexbuffer, hexlen);
155 /* append CRLF to the data */
156 memcpy(conn->upload_fromhere +
/* a zero-length read means end of upload; the terminating 0-size chunk
   has been emitted, so flag the upload as finished */
161 /* mark this as done once this chunk is transfered */
162 conn->keep.upload_done = TRUE;
171 * Returns TRUE if member of the list matches prefix of string
/*
 * checkhttpprefix() decides whether 's' looks like the start of an HTTP
 * status line.  It first walks the user-supplied CURLOPT_HTTP200ALIASES
 * list (data->set.http200aliases), accepting 's' if any alias is a prefix
 * of it; otherwise it falls back to the standard "HTTP/" prefix check.
 * NOTE(review): the list-walking loop's iteration lines are elided in this
 * listing; only the head initialization and the two prefix tests are visible.
 */
174 checkhttpprefix(struct SessionHandle *data,
177 struct curl_slist *head = data->set.http200aliases;
/* does any user-provided alias match? */
180 if (checkprefix(head->data, s))
/* the normal case: a genuine HTTP status line */
185 if(checkprefix("HTTP/", s))
/*
 * Curl_readwrite() performs one lap of the transfer loop: it checks the read
 * and write sockets (doing a private zero-timeout select() poll when the
 * caller supplied no fd_set, as the multi interface may do), reads and parses
 * response headers, dispatches body data to the client write callback
 * (dechunking and content-decoding as needed), fills and sends upload data,
 * updates progress counters and enforces the overall timeout.
 *
 * FIX(review): the two select() calls below read "¬imeout" in the source —
 * mojibake for "&notimeout" ("&not" was collapsed into the ¬ character by a
 * bad encoding pass).  Restored to pass the address of the static zero
 * timeout so the polls return immediately as documented.
 */
191 CURLcode Curl_readwrite(struct connectdata *conn,
194 struct Curl_transfer_keeper *k = &conn->keep;
195 struct SessionHandle *data = conn->data;
197 ssize_t nread; /* number of bytes read */
200 /* These two are used only if no other select() or _fdset() have been
201 invoked before this. This typically happens if you use the multi interface
202 and call curl_multi_perform() without calling curl_multi_fdset()
207 fd_set *readfdp = k->readfdp;
208 fd_set *writefdp = k->writefdp;
210 if((k->keepon & KEEP_READ) && !readfdp) {
211 /* reading is requested, but no socket descriptor pointer was set */
212 FD_ZERO(&extrareadfd);
213 FD_SET(conn->sockfd, &extrareadfd);
214 readfdp = &extrareadfd;
216 /* no write, no exceptions, no timeout */
217 select(conn->sockfd+1, readfdp, NULL, NULL, &notimeout);
219 if((k->keepon & KEEP_WRITE) && !writefdp) {
220 /* writing is requested, but no socket descriptor pointer was set */
221 FD_ZERO(&extrawritefd);
222 FD_SET(conn->writesockfd, &extrawritefd);
223 writefdp = &extrawritefd;
225 /* no read, no exceptions, no timeout */
226 select(conn->writesockfd+1, NULL, writefdp, NULL, &notimeout);
230 /* If we still have reading to do, we check if we have a readable
231 socket. Sometimes the readfdp is NULL, if no fd_set was done using
232 the multi interface and then we can do nothing but to attempt a
234 if((k->keepon & KEEP_READ) &&
235 (FD_ISSET(conn->sockfd, readfdp))) {
237 bool readdone = TRUE;
239 /* This is where we loop until we have read everything there is to
240 read or we get a EWOULDBLOCK */
242 int buffersize = data->set.buffer_size?
243 data->set.buffer_size:BUFSIZE -1;
245 /* receive data from the network! */
246 result = Curl_read(conn, conn->sockfd, k->buf, buffersize, &nread);
249 break; /* get out of loop */
253 if ((k->bytecount == 0) && (k->writebytecount == 0)) {
254 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
255 if(k->wait100_after_headers)
256 /* set time stamp to compare with when waiting for the 100 */
257 k->start100 = Curl_tvnow();
260 didwhat |= KEEP_READ;
262 /* NULL terminate, allowing string ops to be used */
266 /* if we receive 0 or less here, the server closed the connection and
267 we bail out from this! */
268 else if (0 >= nread) {
269 k->keepon &= ~KEEP_READ;
270 FD_ZERO(&k->rkeepfd);
275 /* Default buffer to use when we write the buffer, it may be changed
276 in the flow below before the actual storing is done. */
279 /* Since this is a two-state thing, we check if we are parsing
280 headers at the moment or not. */
282 /* we are in parse-the-header-mode */
283 bool stop_reading = FALSE;
285 /* header line within buffer loop */
289 /* str_start is start of line within buf */
290 k->str_start = k->str;
292 k->end_ptr = strchr (k->str_start, '\n');
295 /* Not a complete header line within buffer, append the data to
296 the end of the headerbuff. */
298 if (k->hbuflen + nread >= data->state.headersize) {
299 /* We enlarge the header buffer as it is too small */
301 long newsize=MAX((k->hbuflen+nread)*3/2,
302 data->state.headersize*2);
303 hbufp_index = k->hbufp - data->state.headerbuff;
304 newbuff = (char *)realloc(data->state.headerbuff, newsize);
306 failf (data, "Failed to alloc memory for big header!");
307 return CURLE_OUT_OF_MEMORY;
309 data->state.headersize=newsize;
310 data->state.headerbuff = newbuff;
311 k->hbufp = data->state.headerbuff + hbufp_index;
313 memcpy(k->hbufp, k->str, nread);
316 if (!k->headerline && (k->hbuflen>5)) {
317 /* make a first check that this looks like a HTTP header */
318 if(!checkhttpprefix(data, data->state.headerbuff)) {
319 /* this is not the beginning of a HTTP first header line */
321 k->badheader = HEADER_ALLBAD;
326 break; /* read more and try again */
329 /* decrease the size of the remaining buffer */
330 nread -= (k->end_ptr - k->str)+1;
332 k->str = k->end_ptr + 1; /* move past new line */
335 * We're about to copy a chunk of data to the end of the
336 * already received header. We make sure that the full string
337 * fit in the allocated header buffer, or else we enlarge
340 if (k->hbuflen + (k->str - k->str_start) >=
341 data->state.headersize) {
343 long newsize=MAX((k->hbuflen+
344 (k->str-k->str_start))*3/2,
345 data->state.headersize*2);
346 hbufp_index = k->hbufp - data->state.headerbuff;
347 newbuff = (char *)realloc(data->state.headerbuff, newsize);
349 failf (data, "Failed to alloc memory for big header!");
350 return CURLE_OUT_OF_MEMORY;
352 data->state.headersize= newsize;
353 data->state.headerbuff = newbuff;
354 k->hbufp = data->state.headerbuff + hbufp_index;
357 /* copy to end of line */
358 strncpy (k->hbufp, k->str_start, k->str - k->str_start);
359 k->hbufp += k->str - k->str_start;
360 k->hbuflen += k->str - k->str_start;
363 k->p = data->state.headerbuff;
366 * We now have a FULL header line that p points to
370 /* the first read header */
372 !checkhttpprefix(data, data->state.headerbuff)) {
373 /* this is not the beginning of a HTTP first header line */
375 k->badheader = HEADER_PARTHEADER;
380 if (('\n' == *k->p) || ('\r' == *k->p)) {
382 /* Zero-length header line means end of headers! */
385 k->p++; /* pass the \r byte */
387 k->p++; /* pass the \n byte */
389 if(100 == k->httpcode) {
391 * We have made a HTTP PUT or POST and this is 1.1-lingo
392 * that tells us that the server is OK with this and ready
393 * to receive the data.
394 * However, we'll get more headers now so we must get
395 * back into the header-parsing state!
398 k->headerline = 0; /* restart the header line counter */
399 /* if we did wait for this do enable write now! */
400 if (k->write_after_100_header) {
402 k->write_after_100_header = FALSE;
403 FD_SET (conn->writesockfd, &k->writefd); /* write */
404 k->keepon |= KEEP_WRITE;
405 k->wkeepfd = k->writefd;
409 k->header = FALSE; /* no more header to parse! */
411 if (417 == k->httpcode) {
413 * we got: "417 Expectation Failed" this means:
414 * we have made a HTTP call and our Expect Header
415 * seems to cause a problem => abort the write operations
416 * (or prevent them from starting).
418 k->write_after_100_header = FALSE;
419 k->keepon &= ~KEEP_WRITE;
420 FD_ZERO(&k->wkeepfd);
423 /* now, only output this if the header AND body are requested:
425 k->writetype = CLIENTWRITE_HEADER;
426 if (data->set.http_include_header)
427 k->writetype |= CLIENTWRITE_BODY;
429 headerlen = k->p - data->state.headerbuff;
431 result = Curl_client_write(data, k->writetype,
432 data->state.headerbuff,
437 data->info.header_size += headerlen;
438 conn->headerbytecount += headerlen;
442 * really end-of-headers.
444 * If we requested a "no body", this is a good time to get
445 * out and return home.
447 if(data->set.no_body)
449 else if(!conn->bits.close) {
450 /* If this is not the last request before a close, we must
451 set the maximum download size to the size of the
452 expected document or else, we won't know when to stop
455 conn->maxdownload = conn->size;
457 /* If max download size is *zero* (nothing) we already
458 have nothing and can safely return ok now! */
459 if(0 == conn->maxdownload)
463 /* we make sure that this socket isn't read more now */
464 k->keepon &= ~KEEP_READ;
465 FD_ZERO(&k->rkeepfd);
468 break; /* exit header line loop */
471 /* We continue reading headers, so reset the line-based
472 header parsing variables hbufp && hbuflen */
473 k->hbufp = data->state.headerbuff;
479 * Checks for special headers coming up.
482 if (!k->headerline++) {
483 /* This is the first header, it MUST be the error code line
484 or else we consider this to be the body right away! */
485 int httpversion_major;
486 int nc=sscanf (k->p, " HTTP/%d.%d %3d",
491 k->httpversion += 10 * httpversion_major;
494 /* this is the real world, not a Nirvana
495 NCSA 1.5.x returns this crap when asked for HTTP/1.1
497 nc=sscanf (k->p, " HTTP %3d", &k->httpcode);
500 /* If user has set option HTTP200ALIASES,
501 compare header line against list of aliases
504 if (checkhttpprefix(data, k->p)) {
508 (data->set.httpversion==CURL_HTTP_VERSION_1_0)? 10 : 11;
514 data->info.httpcode = k->httpcode;
515 data->info.httpversion = k->httpversion;
517 /* 404 -> URL not found! */
518 if (data->set.http_fail_on_error &&
519 (k->httpcode >= 400)) {
520 /* If we have been told to fail hard on HTTP-errors,
521 here is the check for that: */
522 /* serious error, go home! */
523 failf (data, "The requested file was not found");
524 return CURLE_HTTP_RETURNED_ERROR;
527 if(k->httpversion == 10)
528 /* Default action for HTTP/1.0 must be to close, unless
529 we get one of those fancy headers that tell us the
530 server keeps it open for us! */
531 conn->bits.close = TRUE;
533 switch(k->httpcode) {
535 /* (quote from RFC2616, section 10.2.5): The server has
536 * fulfilled the request but does not need to return an
537 * entity-body ... The 204 response MUST NOT include a
538 * message-body, and thus is always terminated by the first
539 * empty line after the header fields. */
542 /* (quote from RFC2616, section 10.3.5): The 304 response MUST
543 * NOT contain a message-body, and thus is always terminated
544 * by the first empty line after the header fields. */
554 k->header = FALSE; /* this is not a header line */
559 /* check for Content-Length: header lines to get size */
560 if (checkprefix("Content-Length:", k->p) &&
561 sscanf (k->p+15, " %ld", &k->contentlength)) {
562 conn->size = k->contentlength;
563 Curl_pgrsSetDownloadSize(data, k->contentlength);
565 /* check for Content-Type: header lines to get the mime-type */
566 else if (checkprefix("Content-Type:", k->p)) {
571 /* Find the first non-space letter */
573 *start && isspace((int)*start);
576 end = strchr(start, '\r');
578 end = strchr(start, '\n');
581 /* skip all trailing space letters */
582 for(; isspace((int)*end) && (end > start); end--);
584 /* get length of the type */
587 /* allocate memory of a cloned copy */
588 if(data->info.contenttype)
589 free(data->info.contenttype);
591 data->info.contenttype = malloc(len + 1);
592 if (NULL == data->info.contenttype)
593 return CURLE_OUT_OF_MEMORY;
595 /* copy the content-type string */
596 memcpy(data->info.contenttype, start, len);
597 data->info.contenttype[len] = 0; /* zero terminate */
600 else if((k->httpversion == 10) &&
601 conn->bits.httpproxy &&
602 Curl_compareheader(k->p,
603 "Proxy-Connection:", "keep-alive")) {
605 * When a HTTP/1.0 reply comes when using a proxy, the
606 * 'Proxy-Connection: keep-alive' line tells us the
607 * connection will be kept alive for our pleasure.
608 * Default action for 1.0 is to close.
610 conn->bits.close = FALSE; /* don't close when done */
611 infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
613 else if((k->httpversion == 10) &&
614 Curl_compareheader(k->p, "Connection:", "keep-alive")) {
616 * A HTTP/1.0 reply with the 'Connection: keep-alive' line
617 * tells us the connection will be kept alive for our
618 * pleasure. Default action for 1.0 is to close.
620 * [RFC2068, section 19.7.1] */
621 conn->bits.close = FALSE; /* don't close when done */
622 infof(data, "HTTP/1.0 connection set to keep alive!\n");
624 else if (Curl_compareheader(k->p, "Connection:", "close")) {
626 * [RFC 2616, section 8.1.2.1]
627 * "Connection: close" is HTTP/1.1 language and means that
628 * the connection will close when this request has been
631 conn->bits.close = TRUE; /* close when done */
633 else if (Curl_compareheader(k->p,
634 "Transfer-Encoding:", "chunked")) {
636 * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
637 * means that the server will send a series of "chunks". Each
638 * chunk starts with line with info (including size of the
639 * coming block) (terminated with CRLF), then a block of data
640 * with the previously mentioned size. There can be any amount
641 * of chunks, and a chunk-data set to zero signals the
643 conn->bits.chunk = TRUE; /* chunks coming our way */
645 /* init our chunky engine */
646 Curl_httpchunk_init(conn);
648 else if (checkprefix("Content-Encoding:", k->p) &&
649 data->set.encoding) {
651 * Process Content-Encoding. Look for the values: identity,
652 * gzip, deflate, compress, x-gzip and x-compress. x-gzip and
653 * x-compress are the same as gzip and compress. (Sec 3.5 RFC
654 * 2616). zlib cannot handle compress. However, errors are
655 * handled further down when the response body is processed
659 /* Find the first non-space letter */
661 *start && isspace((int)*start);
664 /* Record the content-encoding for later use. 08/27/02 jhrg */
665 if (checkprefix("identity", start))
666 k->content_encoding = IDENTITY;
667 else if (checkprefix("deflate", start))
668 k->content_encoding = DEFLATE;
669 else if (checkprefix("gzip", start)
670 || checkprefix("x-gzip", start))
671 k->content_encoding = GZIP;
672 else if (checkprefix("compress", start)
673 || checkprefix("x-compress", start))
674 k->content_encoding = COMPRESS;
676 else if (checkprefix("Content-Range:", k->p)) {
677 if (sscanf (k->p+14, " bytes %d-", &k->offset) ||
678 sscanf (k->p+14, " bytes: %d-", &k->offset)) {
679 /* This second format was added August 1st 2000 by Igor
680 Khristophorov since Sun's webserver JavaWebServer/1.1.1
681 obviously sends the header this way! :-( */
682 if (conn->resume_from == k->offset) {
683 /* we asked for a resume and we got it */
684 k->content_range = TRUE;
688 else if(data->cookies &&
689 checkprefix("Set-Cookie:", k->p)) {
690 Curl_cookie_add(data->cookies, TRUE, k->p+11,
691 /* If there is a custom-set Host: name, use it
692 here, or else use real peer host name. */
693 conn->allocptr.cookiehost?
694 conn->allocptr.cookiehost:conn->name,
697 else if(checkprefix("Last-Modified:", k->p) &&
698 (data->set.timecondition || data->set.get_filetime) ) {
699 time_t secs=time(NULL);
700 k->timeofdoc = curl_getdate(k->p+strlen("Last-Modified:"),
702 if(data->set.get_filetime)
703 data->info.filetime = k->timeofdoc;
705 else if(checkprefix("WWW-Authenticate:", k->p) &&
706 (401 == k->httpcode) &&
707 1 /* TODO: replace with a check for Digest authentication
709 CURLdigest dig = Curl_input_digest(conn, k->p+
710 strlen("WWW-Authenticate:"));
711 if(CURLDIGEST_FINE == dig) {
712 /* We act on it. Store our new url, which happens to be
713 the same one we already use! */
714 conn->newurl = strdup(data->change.url); /* clone string */
717 else if ((k->httpcode >= 300 && k->httpcode < 400) &&
718 checkprefix("Location:", k->p)) {
719 if(data->set.http_follow_location) {
720 /* this is the URL that the server advices us to get instead */
725 start += 9; /* pass "Location:" */
727 /* Skip spaces and tabs. We do this to support multiple
728 white spaces after the "Location:" keyword. */
729 while(*start && isspace((int)*start ))
731 ptr = start; /* start scanning here */
733 /* scan through the string to find the end */
734 while(*ptr && !isspace((int)*ptr))
736 backup = *ptr; /* store the ending letter */
738 *ptr = '\0'; /* zero terminate */
739 conn->newurl = strdup(start); /* clone string */
740 *ptr = backup; /* restore ending letter */
743 #if 0 /* for consideration */
745 /* This is a Location: but we have not been instructed to
747 infof(data, "We ignore this location header as instructed\n");
753 * End of header-checks. Write them to the client.
756 k->writetype = CLIENTWRITE_HEADER;
757 if (data->set.http_include_header)
758 k->writetype |= CLIENTWRITE_BODY;
760 if(data->set.verbose)
761 Curl_debug(data, CURLINFO_HEADER_IN,
764 result = Curl_client_write(data, k->writetype, k->p,
769 data->info.header_size += k->hbuflen;
770 conn->headerbytecount += k->hbuflen;
772 /* reset hbufp pointer && hbuflen */
773 k->hbufp = data->state.headerbuff;
776 while (!stop_reading && *k->str); /* header line within buffer */
779 /* We've stopped dealing with input, get out of the do-while loop */
782 /* We might have reached the end of the header part here, but
783 there might be a non-header part left in the end of the read
786 } /* end if header mode */
788 /* This is not an 'else if' since it may be a rest from the header
789 parsing, where the beginning of the buffer is headers and the end
791 if (k->str && !k->header && (nread > 0)) {
793 if(0 == k->bodywrites) {
794 /* These checks are only made the first time we are about to
795 write a piece of the body */
796 if(conn->protocol&PROT_HTTP) {
797 /* HTTP-only checks */
799 /* abort after the headers if "follow Location" is set */
800 infof (data, "Follow to new URL: %s\n", conn->newurl);
801 k->keepon &= ~KEEP_READ;
802 FD_ZERO(&k->rkeepfd);
806 else if (conn->resume_from &&
808 (data->set.httpreq==HTTPREQ_GET)) {
809 /* we wanted to resume a download, although the server
810 doesn't seem to support this and we did this with a GET
811 (if it wasn't a GET we did a POST or PUT resume) */
812 failf (data, "HTTP server doesn't seem to support "
813 "byte ranges. Cannot resume.");
814 return CURLE_HTTP_RANGE_ERROR;
816 else if(data->set.timecondition && !conn->range) {
817 /* A time condition has been set AND no ranges have been
818 requested. This seems to be what chapter 13.3.4 of
819 RFC 2616 defines to be the correct action for a
821 if((k->timeofdoc > 0) && (data->set.timevalue > 0)) {
822 switch(data->set.timecondition) {
823 case TIMECOND_IFMODSINCE:
825 if(k->timeofdoc < data->set.timevalue) {
827 "The requested document is not new enough\n");
832 case TIMECOND_IFUNMODSINCE:
833 if(k->timeofdoc > data->set.timevalue) {
835 "The requested document is not old enough\n");
841 } /* two valid time strings */
842 } /* we have a time condition */
845 } /* this is the first time we write a body part */
848 /* pass data to the debug function before it gets "dechunked" */
849 if(data->set.verbose) {
851 Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
853 if(k->badheader == HEADER_PARTHEADER)
854 Curl_debug(data, CURLINFO_DATA_IN, k->str, nread);
857 Curl_debug(data, CURLINFO_DATA_IN, k->str, nread);
860 if(conn->bits.chunk) {
862 * Bless me father for I have sinned. Here comes a chunked
863 * transfer flying and we need to decode this properly. While
864 * the name says read, this function both reads and writes away
865 * the data. The returned 'nread' holds the number of actual
866 * data it wrote to the client. */
868 Curl_httpchunk_read(conn, k->str, nread, &nread);
870 if(CHUNKE_OK < res) {
871 if(CHUNKE_WRITE_ERROR == res) {
872 failf(data, "Failed writing data");
873 return CURLE_WRITE_ERROR;
875 failf(data, "Received problem %d in the chunky parser", res);
876 return CURLE_RECV_ERROR;
878 else if(CHUNKE_STOP == res) {
879 /* we're done reading chunks! */
880 k->keepon &= ~KEEP_READ; /* read no more */
881 FD_ZERO(&k->rkeepfd);
883 /* There are now possibly N number of bytes at the end of the
884 str buffer that weren't written to the client, but we don't
885 care about them right now. */
887 /* If it returned OK, we just keep going */
890 if((-1 != conn->maxdownload) &&
891 (k->bytecount + nread >= conn->maxdownload)) {
892 nread = conn->maxdownload - k->bytecount;
893 if(nread < 0 ) /* this should be unusual */
896 k->keepon &= ~KEEP_READ; /* we're done reading */
897 FD_ZERO(&k->rkeepfd);
900 k->bytecount += nread;
902 Curl_pgrsSetDownloadCounter(data, (double)k->bytecount);
904 if(!conn->bits.chunk && (nread || k->badheader)) {
905 /* If this is chunky transfer, it was already written */
908 /* we parsed a piece of data wrongly assuming it was a header
909 and now we output it as body instead */
910 result = Curl_client_write(data, CLIENTWRITE_BODY,
911 data->state.headerbuff,
914 if(k->badheader < HEADER_ALLBAD) {
915 /* This switch handles various content encodings. If there's an
916 error here, be sure to check over the almost identical code
917 in http_chunks.c. 08/29/02 jhrg
918 Make sure that ALL_CONTENT_ENCODINGS contains all the
919 encodings handled here. */
921 switch (k->content_encoding) {
924 /* This is the default when the server sends no
925 Content-Encoding header. See Curl_readwrite_init; the
926 memset() call initializes k->content_encoding to zero.
928 result = Curl_client_write(data, CLIENTWRITE_BODY, k->str,
934 /* Assume CLIENTWRITE_BODY; headers are not encoded. */
935 result = Curl_unencode_deflate_write(data, k, nread);
939 /* Assume CLIENTWRITE_BODY; headers are not encoded. */
940 result = Curl_unencode_gzip_write(data, k, nread);
943 case COMPRESS: /* FIXME 08/27/02 jhrg */
945 failf (data, "Unrecognized content encoding type. "
946 "libcurl understands `identity', `deflate' and `gzip' "
947 "content encodings.");
948 result = CURLE_BAD_CONTENT_ENCODING;
953 k->badheader = HEADER_NORMAL; /* taken care of now */
959 } /* if (! header and data to read ) */
963 } /* if( read from socket ) */
965 /* If we still have writing to do, we check if we have a writable
966 socket. Sometimes the writefdp is NULL, if no fd_set was done using
967 the multi interface and then we can do nothing but to attempt a
969 if((k->keepon & KEEP_WRITE) &&
970 (FD_ISSET(conn->writesockfd, writefdp)) ) {
974 ssize_t bytes_written;
977 if ((k->bytecount == 0) && (k->writebytecount == 0))
978 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
980 didwhat |= KEEP_WRITE;
983 * We loop here to do the READ and SEND loop until we run out of
984 * data to send or until we get EWOULDBLOCK back
988 /* only read more data if there's no upload data already
989 present in the upload buffer */
990 if(0 == conn->upload_present) {
991 /* init the "upload from here" pointer */
992 conn->upload_fromhere = k->uploadbuf;
994 if(!k->upload_done) {
995 /* HTTP pollution, this should be written nicer to become more
996 protocol agnostic. */
998 if(k->wait100_after_headers &&
999 (conn->proto.http->sending == HTTPSEND_BODY)) {
1000 /* If this call is to send body data, we must take some action:
1001 We have sent off the full HTTP 1.1 request, and we shall now
1002 go into the Expect: 100 state and await such a header */
1003 k->wait100_after_headers = FALSE; /* headers sent */
1004 k->write_after_100_header = TRUE; /* wait for the header */
1005 FD_ZERO (&k->writefd); /* clear it */
1006 k->wkeepfd = k->writefd; /* set the keeper variable */
1007 k->keepon &= ~KEEP_WRITE; /* disable writing */
1008 k->start100 = Curl_tvnow(); /* timeout count starts now */
1009 didwhat &= ~KEEP_WRITE; /* we didn't write anything actually */
1013 nread = fillbuffer(conn, BUFSIZE);
1016 nread = 0; /* we're done uploading/reading */
1018 /* the signed int typecase of nread of for systems that has
1022 k->keepon &= ~KEEP_WRITE; /* we're done writing */
1023 FD_ZERO(&k->wkeepfd);
1028 /* store number of bytes available for upload */
1029 conn->upload_present = nread;
1031 /* convert LF to CRLF if so asked */
1032 if (data->set.crlf) {
1033 if(data->state.scratch == NULL)
1034 data->state.scratch = malloc(2*BUFSIZE);
1035 if(data->state.scratch == NULL) {
1036 failf (data, "Failed to alloc scratch buffer!");
1037 return CURLE_OUT_OF_MEMORY;
1039 for(i = 0, si = 0; i < nread; i++, si++) {
1040 if (conn->upload_fromhere[i] == 0x0a) {
1041 data->state.scratch[si++] = 0x0d;
1042 data->state.scratch[si] = 0x0a;
1045 data->state.scratch[si] = conn->upload_fromhere[i];
1048 /* only perform the special operation if we really did replace
1052 /* upload from the new (replaced) buffer instead */
1053 conn->upload_fromhere = data->state.scratch;
1055 /* set the new amount too */
1056 conn->upload_present = nread;
1061 /* We have a partial buffer left from a previous "round". Use
1062 that instead of reading more data */
1065 /* write to socket (send away data) */
1066 result = Curl_write(conn,
1067 conn->writesockfd, /* socket to send to */
1068 conn->upload_fromhere, /* buffer pointer */
1069 conn->upload_present, /* buffer size */
1070 &bytes_written); /* actually send away */
1074 if(data->set.verbose)
1075 /* show the data before we change the pointer upload_fromhere */
1076 Curl_debug(data, CURLINFO_DATA_OUT, conn->upload_fromhere,
1079 if(conn->upload_present != bytes_written) {
1080 /* we only wrote a part of the buffer (if anything), deal with it! */
1082 /* store the amount of bytes left in the buffer to write */
1083 conn->upload_present -= bytes_written;
1085 /* advance the pointer where to find the buffer when the next send
1087 conn->upload_fromhere += bytes_written;
1089 writedone = TRUE; /* we are done, stop the loop */
1092 /* we've uploaded that buffer now */
1093 conn->upload_fromhere = k->uploadbuf;
1094 conn->upload_present = 0; /* no more bytes left */
1096 if(k->upload_done) {
1097 /* switch off writing, we're done! */
1098 k->keepon &= ~KEEP_WRITE; /* we're done writing */
1099 FD_ZERO(&k->wkeepfd);
1104 k->writebytecount += bytes_written;
1105 Curl_pgrsSetUploadCounter(data, (double)k->writebytecount);
1107 } while(!writedone); /* loop until we're done writing! */
1111 } while(0); /* just to break out from! */
1113 k->now = Curl_tvnow();
1115 /* Update read/write counters */
1116 if(conn->bytecountp)
1117 *conn->bytecountp = k->bytecount; /* read count */
1118 if(conn->writebytecountp)
1119 *conn->writebytecountp = k->writebytecount; /* write count */
1122 /* no read no write, this is a timeout? */
1123 if (k->write_after_100_header) {
1124 /* This should allow some time for the header to arrive, but only a
1125 very short time as otherwise it'll be too much wasted times too
1128 /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1130 Therefore, when a client sends this header field to an origin server
1131 (possibly via a proxy) from which it has never seen a 100 (Continue)
1132 status, the client SHOULD NOT wait for an indefinite period before
1133 sending the request body.
1137 int ms = Curl_tvdiff(k->now, k->start100);
1138 if(ms > CURL_TIMEOUT_EXPECT_100) {
1139 /* we've waited long enough, continue anyway */
1140 k->write_after_100_header = FALSE;
1141 FD_SET (conn->writesockfd, &k->writefd); /* write socket */
1142 k->keepon |= KEEP_WRITE;
1143 k->wkeepfd = k->writefd;
1148 if(Curl_pgrsUpdate(conn))
1149 result = CURLE_ABORTED_BY_CALLBACK;
1151 result = Curl_speedcheck (data, k->now);
1155 if (data->set.timeout &&
1156 ((Curl_tvdiff(k->now, k->start)/1000) >= data->set.timeout)) {
1157 failf (data, "Operation timed out with %d out of %d bytes received",
1158 k->bytecount, conn->size);
1159 return CURLE_OPERATION_TIMEOUTED;
1164 * The transfer has been performed. Just make some general checks before
1168 if(!(data->set.no_body) && k->contentlength &&
1169 (k->bytecount != k->contentlength) &&
1171 failf(data, "transfer closed with %d bytes remaining to read",
1172 k->contentlength-k->bytecount);
1173 return CURLE_PARTIAL_FILE;
1175 else if(conn->bits.chunk && conn->proto.http->chunk.datasize) {
1176 failf(data, "transfer closed with at least %d bytes remaining",
1177 conn->proto.http->chunk.datasize);
1178 return CURLE_PARTIAL_FILE;
1180 if(Curl_pgrsUpdate(conn))
1181 return CURLE_ABORTED_BY_CALLBACK;
1184 /* Now update the "done" boolean we return */
/*
 * Curl_readwrite_init() prepares the transfer keeper for a new transfer:
 * zeroes the whole struct (the content-decoding code relies on
 * content_encoding starting at zero), records the start time, primes the
 * progress/speed machinery, and builds the read/write fd_sets plus their
 * backup copies used on every lap of the select() loop.  When the app sent
 * an Expect: 100-continue header it arranges to hold off the body upload
 * until the 100 response arrives (or the wait times out).
 */
1190 CURLcode Curl_readwrite_init(struct connectdata *conn)
1192 struct SessionHandle *data = conn->data;
1193 struct Curl_transfer_keeper *k = &conn->keep;
1195 /* NB: the content encoding software depends on this initialization of
1196 Curl_transfer_keeper. 08/28/02 jhrg */
1197 memset(k, 0, sizeof(struct Curl_transfer_keeper));
1199 k->start = Curl_tvnow(); /* start time */
1200 k->now = k->start; /* current time is now */
1201 k->header = TRUE; /* assume header */
1202 k->httpversion = -1; /* unknown at this point */
1204 data = conn->data; /* there's the root struct */
1205 k->buf = data->state.buffer;
1206 k->uploadbuf = data->state.uploadbuffer;
/* highest fd + 1, as select() wants it */
1207 k->maxfd = (conn->sockfd>conn->writesockfd?
1208 conn->sockfd:conn->writesockfd)+1;
1209 k->hbufp = data->state.headerbuff;
1211 Curl_pgrsTime(data, TIMER_PRETRANSFER);
1212 Curl_speedinit(data);
1214 Curl_pgrsSetUploadCounter(data, 0);
1215 Curl_pgrsSetDownloadCounter(data, 0);
1217 if (!conn->bits.getheader) {
1220 Curl_pgrsSetDownloadSize(data, conn->size);
1222 /* we want header and/or body, if neither then don't do this! */
1223 if(conn->bits.getheader || !data->set.no_body) {
1225 FD_ZERO (&k->readfd); /* clear it */
1226 if(conn->sockfd != -1) {
1227 FD_SET (conn->sockfd, &k->readfd); /* read socket */
1228 k->keepon |= KEEP_READ;
1231 FD_ZERO (&k->writefd); /* clear it */
1232 if(conn->writesockfd != -1) {
1235 Even if we require a 100-return code before uploading data, we might
1236 need to write data before that since the REQUEST may not have been
1237 finished sent off just yet.
1239 Thus, we must check if the request has been sent before we set the
1240 state info where we wait for the 100-return code
1242 if (data->set.expect100header &&
1243 (conn->proto.http->sending == HTTPSEND_BODY)) {
1244 /* wait with write until we either got 100-continue or a timeout */
1245 k->write_after_100_header = TRUE;
1246 k->start100 = k->start;
1249 if(data->set.expect100header)
1250 /* when we've sent off the rest of the headers, we must await a
1252 k->wait100_after_headers = TRUE;
1253 FD_SET (conn->writesockfd, &k->writefd); /* write socket */
1254 k->keepon |= KEEP_WRITE;
1258 /* get these in backup variables to be able to restore them on each lap in
1259 the select() loop */
1260 k->rkeepfd = k->readfd;
1261 k->wkeepfd = k->writefd;
1268 void Curl_single_fdset(struct connectdata *conn,
1269 fd_set *read_fd_set,
1270 fd_set *write_fd_set,
/* Export this connection's active sockets into caller-provided fd_sets
   (used by the multi interface so the application can select() itself).
   *max_fd receives the highest descriptor added, or -1 if none; the
   keeper also remembers which external set to consult later. */
1274 *max_fd = -1; /* init */
1275 if(conn->keep.keepon & KEEP_READ) {
1276 FD_SET(conn->sockfd, read_fd_set);
1277 *max_fd = conn->sockfd;
1278 conn->keep.readfdp = read_fd_set; /* store the address of the set */
1280 if(conn->keep.keepon & KEEP_WRITE) {
1281 FD_SET(conn->writesockfd, write_fd_set);
1282 if(conn->writesockfd > *max_fd)
1283 *max_fd = conn->writesockfd;
1284 conn->keep.writefdp = write_fd_set; /* store the address of the set */
1286 /* we don't use exceptions, only touch that one to prevent compiler
/* self-assignment merely silences an unused-parameter warning */
1288 *exc_fd_set = *exc_fd_set;
1295 * This function is what performs the actual transfer. It is capable of
1296 * doing both ways simultaneously.
1297 * The transfer must already have been setup by a call to Curl_Transfer().
1299 * Note that headers are created in a preallocated buffer of a default size.
1300 * That buffer can be enlarged on demand, but it is never shrunk again.
1302 * Parts of this function was once written by the friendly Mark Butler
1303 * <butlerm@xmission.com>.
1307 Transfer(struct connectdata *conn)
/* Drive the select() loop for one transfer: each lap restores the fd_sets
   from the keeper backups, waits up to 1 second, and calls Curl_readwrite()
   on both timeout and readiness so progress/timeout checks still run. */
1309 struct SessionHandle *data = conn->data;
1311 struct Curl_transfer_keeper *k = &conn->keep;
1314 Curl_readwrite_init(conn);
1316 if((conn->sockfd == -1) && (conn->writesockfd == -1))
1317 /* nothing to read, nothing to write, we're already OK! */
1320 /* we want header and/or body, if neither then don't do this! */
1321 if(!conn->bits.getheader && data->set.no_body)
1324 k->writefdp = &k->writefd; /* store the address of the set */
1325 k->readfdp = &k->readfd; /* store the address of the set */
1328 struct timeval interval;
1329 k->readfd = k->rkeepfd; /* set these every lap in the loop */
1330 k->writefd = k->wkeepfd;
1331 interval.tv_sec = 1;
1332 interval.tv_usec = 0;
1334 switch (select (k->maxfd, k->readfdp, k->writefdp, NULL, &interval)) {
1335 case -1: /* select() error, stop reading */
1337 /* The EINTR is not serious, and it seems you might get this more
1338 often when using the lib in a multi-threaded environment! */
1343 done = TRUE; /* no more read or write */
1345 case 0: /* timeout */
1346 result = Curl_readwrite(conn, &done);
1349 default: /* readable descriptors */
1350 result = Curl_readwrite(conn, &done);
1356 /* "done" signals to us if the transfer(s) are ready */
1362 CURLcode Curl_pretransfer(struct SessionHandle *data)
/* Per-session setup run once before any transfer: validate the URL, init
   the SSL session cache, reset follow/error state, load pending cookie
   files, and (optionally) ignore SIGPIPE for the duration. */
1364 if(!data->change.url)
1365 /* we can't do anything without URL */
1366 return CURLE_URL_MALFORMAT;
1369 /* Init the SSL session ID cache here. We do it here since we want to
1370 do it after the *_setopt() calls (that could change the size) but
1371 before any transfer. */
1372 Curl_SSL_InitSessions(data, data->set.ssl.numsessions);
1375 data->set.followlocation=0; /* reset the location-follow counter */
1376 data->state.this_is_a_follow = FALSE; /* reset this */
1377 data->state.errorbuf = FALSE; /* no error has occurred */
1379 /* If there was a list of cookie files to read and we haven't done it before,
1381 if(data->change.cookielist) {
1382 struct curl_slist *list = data->change.cookielist;
1384 data->cookies = Curl_cookie_init(list->data,
1386 data->set.cookiesession);
/* the list is consumed exactly once; drop it so a re-used handle
   does not re-read the cookie files */
1389 curl_slist_free_all(data->change.cookielist); /* clean up list */
1390 data->change.cookielist = NULL; /* don't do this again! */
1395 /* Allow data->set.use_port to set which port to use. This needs to be
1396 * disabled for example when we follow Location: headers to URLs using
1397 * different ports! */
1398 data->state.allow_port = TRUE;
1400 #if defined(HAVE_SIGNAL) && defined(SIGPIPE)
1401 /*************************************************************
1402 * Tell signal handler to ignore SIGPIPE
1403 *************************************************************/
1404 if(!data->set.no_signal)
/* keep the previous handler so Curl_posttransfer() can restore it */
1405 data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1408 Curl_initinfo(data); /* reset session-specific information "variables" */
1409 Curl_pgrsStartNow(data);
1414 CURLcode Curl_posttransfer(struct SessionHandle *data)
/* Per-session teardown, the counterpart of Curl_pretransfer(): put back
   whatever SIGPIPE handler was installed before the transfer started. */
1416 #if defined(HAVE_SIGNAL) && defined(SIGPIPE)
1417 /* restore the signal handler for SIGPIPE before we get back */
1418 if(!data->set.no_signal)
1419 signal(SIGPIPE, data->state.prev_signal);
1425 CURLcode Curl_follow(struct SessionHandle *data,
1426 char *newurl) /* this 'newurl' is the Location: string,
1427 and it must be malloc()ed before passed
/* Apply an HTTP redirect: enforce the maxredirs limit, optionally set an
   automatic Referer, turn a relative Location into an absolute URL by
   splicing it onto the current URL, install the new URL on the handle
   (taking ownership of 'newurl'), and adjust the request method per the
   3xx status code. NOTE(review): lines are elided in this excerpt, so
   several closing braces / else-branches are not visible. */
1430 /* Location: redirect */
1431 char prot[16]; /* URL protocol string storage */
1432 char letter; /* used for a silly sscanf */
1434 if (data->set.maxredirs &&
1435 (data->set.followlocation >= data->set.maxredirs)) {
1436 failf(data,"Maximum (%d) redirects followed", data->set.maxredirs);
1437 return CURLE_TOO_MANY_REDIRECTS;
1440 /* mark the next request as a followed location: */
1441 data->state.this_is_a_follow = TRUE;
1443 data->set.followlocation++; /* count location-followers */
1445 if(data->set.http_auto_referer) {
1446 /* We are asked to automatically set the previous URL as the
1447 referer when we get the next URL. We pick the ->url field,
1448 which may or may not be 100% correct */
1450 if(data->change.referer_alloc)
1451 /* If we already have an allocated referer, free this first */
1452 free(data->change.referer);
1454 data->change.referer = strdup(data->change.url);
1455 data->change.referer_alloc = TRUE; /* yes, free this later */
/* a "scheme://" prefix means the Location is absolute; anything else is
   treated as a relative reference and resolved against the old URL */
1458 if(2 != sscanf(newurl, "%15[^?&/:]://%c", prot, &letter)) {
1460 *DANG* this is an RFC 2068 violation. The URL is supposed
1461 to be absolute and this doesn't seem to be that!
1463 Instead, we have to TRY to append this new path to the old URL
1464 to the right of the host part. Oh crap, this is doomed to cause
1465 problems in the future...
1471 char *useurl = newurl;
1473 /* we must make our own copy of the URL to play with, as it may
1474 point to read-only data */
1475 char *url_clone=strdup(data->change.url);
1478 return CURLE_OUT_OF_MEMORY; /* skip out of this NOW */
1480 /* protsep points to the start of the host name */
1481 protsep=strstr(url_clone, "//");
1485 protsep+=2; /* pass the slashes */
1487 if('/' != newurl[0]) {
1490 /* First we need to find out if there's a ?-letter in the URL,
1491 and cut it and the right-side of that off */
1492 pathsep = strrchr(protsep, '?');
1496 /* we have a relative path to append to the last slash if
1497 there's one available */
1498 pathsep = strrchr(protsep, '/');
1502 /* Check if there's any slash after the host name, and if so,
1503 remember that position instead */
1504 pathsep = strchr(protsep, '/');
1506 protsep = pathsep+1;
1510 /* now deal with one "./" or any amount of "../" in the newurl
1511 and act accordingly */
1513 if((useurl[0] == '.') && (useurl[1] == '/'))
1514 useurl+=2; /* just skip the "./" */
1516 while((useurl[0] == '.') &&
1517 (useurl[1] == '.') &&
1518 (useurl[2] == '/')) {
1520 useurl+=3; /* pass the "../" */
1525 /* cut off one more level from the right of the original URL */
1526 pathsep = strrchr(protsep, '/');
1537 /* We got a new absolute path for this server, cut off from the
1539 pathsep = strchr(protsep, '/');
1544 newest=(char *)malloc( strlen(url_clone) +
1545 1 + /* possible slash */
1546 strlen(useurl) + 1/* zero byte */);
1549 return CURLE_OUT_OF_MEMORY; /* go out from this */
1551 sprintf(newest, "%s%s%s", url_clone,
1552 (('/' == useurl[0]) || (protsep && !*protsep))?"":"/",
/* from here on 'newurl' refers to the freshly built absolute URL */
1554 free(newurl); /* newurl is the allocated pointer */
1559 /* This is an absolute URL, don't allow the custom port number */
1560 data->state.allow_port = FALSE;
1562 if(data->change.url_alloc)
1563 free(data->change.url);
/* ownership of 'newurl' transfers to the handle here */
1565 data->change.url_alloc = TRUE; /* the URL is allocated */
1567 /* TBD: set the URL with curl_setopt() */
1568 data->change.url = newurl;
1569 newurl = NULL; /* don't free! */
1571 infof(data, "Follows Location: to new URL: '%s'\n", data->change.url);
1574 * We get here when the HTTP code is 300-399. We need to perform
1575 * differently based on exactly what return code there was.
1576 * Discussed on the curl mailing list and posted about on the 26th
1579 * News from 7.10.6: we can also get here on a 401, in case we need to
1580 * do Digest authentication.
1582 switch(data->info.httpcode) {
1584 /* Act on a digest authentication, we keep on moving and do the
1585 Authorization: Digest header in the HTTP request code snippet */
1587 case 300: /* Multiple Choices */
1588 case 306: /* Not used */
1589 case 307: /* Temporary Redirect */
1590 default: /* for all unknown ones */
1591 /* These are explicitly mentioned since I've checked RFC2616 and they
1592 * seem to be OK to POST to.
1595 case 301: /* Moved Permanently */
1596 /* (quote from RFC2616, section 10.3.2):
1598 * Note: When automatically redirecting a POST request after
1599 * receiving a 301 status code, some existing HTTP/1.0 user agents
1600 * will erroneously change it into a GET request.
1603 * Warning: Because most important user agents do this clear
1604 * RFC2616 violation, many webservers expect this misbehavior. So
1605 * these servers often answer a POST request with an error page.
1606 * To be sure that libcurl gets the page that most user agents
1607 * would get, libcurl has to force GET:
1609 if( data->set.httpreq == HTTPREQ_POST
1610 || data->set.httpreq == HTTPREQ_POST_FORM) {
1612 "Violate RFC 2616/10.3.2 and switch from POST to GET\n");
1613 data->set.httpreq = HTTPREQ_GET;
1616 case 302: /* Found */
1619 Note: RFC 1945 and RFC 2068 specify that the client is not allowed
1620 to change the method on the redirected request. However, most
1621 existing user agent implementations treat 302 as if it were a 303
1622 response, performing a GET on the Location field-value regardless
1623 of the original request method. The status codes 303 and 307 have
1624 been added for servers that wish to make unambiguously clear which
1625 kind of reaction is expected of the client.
1629 Note: Many pre-HTTP/1.1 user agents do not understand the 303
1630 status. When interoperability with such clients is a concern, the
1631 302 status code may be used instead, since most user agents react
1632 to a 302 response as described here for 303.
1634 case 303: /* See Other */
1635 /* Disable both types of POSTs, since doing a second POST when
1636 * following isn't what anyone would want! */
1637 if(data->set.httpreq != HTTPREQ_GET) {
1638 data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
1639 infof(data, "Disables POST, goes with %s\n",
1640 data->set.no_body?"HEAD":"GET");
1643 case 304: /* Not Modified */
1644 /* 304 means we did a conditional request and it was "Not modified".
1645 * We shouldn't get any Location: header in this response!
1648 case 305: /* Use Proxy */
1649 /* (quote from RFC2616, section 10.3.6):
1650 * "The requested resource MUST be accessed through the proxy given
1651 * by the Location field. The Location field gives the URI of the
1652 * proxy. The recipient is expected to repeat this single request
1653 * via the proxy. 305 responses MUST only be generated by origin
/* restart the timers so the upcoming request is measured on its own */
1658 Curl_pgrsTime(data, TIMER_REDIRECT);
1659 Curl_pgrsResetTimes(data);
1664 CURLcode Curl_perform(struct SessionHandle *data)
/* Top-level driver for the easy interface: connect / do / Transfer / done
   in a loop that repeats while Curl_follow() installs a new Location URL,
   then runs the unconditional post-transfer cleanup. */
1668 struct connectdata *conn=NULL;
1669 char *newurl = NULL; /* possibly a new URL to follow to! */
1671 data->state.used_interface = Curl_if_easy;
1673 res = Curl_pretransfer(data);
1678 * It is important that there is NO 'return' from this function at any other
1679 * place than falling down to the end of the function! This is because we
1680 * have cleanup stuff that must be done before we get back, and that is only
1681 * performed after this do-while loop.
1685 Curl_pgrsTime(data, TIMER_STARTSINGLE);
1686 res = Curl_connect(data, &conn);
1687 if(res == CURLE_OK) {
1688 res = Curl_do(&conn);
1690 if(res == CURLE_OK) {
1691 CURLcode res2; /* just a local extra result container */
1693 if(conn->protocol&PROT_FTPS)
1694 /* FTPS, disable ssl while transferring data */
1695 conn->ssl.use = FALSE;
1696 res = Transfer(conn); /* now fetch that URL please */
1697 if(conn->protocol&PROT_FTPS)
1698 /* FTPS, enable ssl again after having transferred data */
1699 conn->ssl.use = TRUE;
1703 * We must duplicate the new URL here as the connection data
1704 * may be free()ed in the Curl_done() function.
1706 newurl = conn->newurl?strdup(conn->newurl):NULL;
1708 /* The transfer phase returned error, we mark the connection to get
1709 * closed to prevent being re-used. This is because we can't
1710 * possibly know if the connection is in a good shape or not now. */
1711 conn->bits.close = TRUE;
1713 if(-1 !=conn->secondarysocket) {
1714 /* if we failed anywhere, we must clean up the secondary socket if
1716 sclose(conn->secondarysocket);
1717 conn->secondarysocket=-1;
1721 /* Always run Curl_done(), even if some of the previous calls
1722 failed, but return the previous (original) error code */
1723 res2 = Curl_done(conn);
1730 * Important: 'conn' cannot be used here, since it may have been closed
1731 * in 'Curl_done' or other functions.
1734 if((res == CURLE_OK) && newurl) {
1735 res = Curl_follow(data, newurl);
1736 if(CURLE_OK == res) {
1742 break; /* it only reaches here when this shouldn't loop */
1744 } while(1); /* loop if Location: */
1749 /* run post-transfer unconditionally, but don't clobber the return code if
1750 we already have an error code recorded */
1751 res2 = Curl_posttransfer(data);
1759 Curl_Transfer(struct connectdata *c_conn, /* connection data */
1760 int sockfd, /* socket to read from or -1 */
1761 int size, /* -1 if unknown at this point */
1762 bool getheader, /* TRUE if header parsing is wanted */
1763 long *bytecountp, /* return number of bytes read or NULL */
1764 int writesockfd, /* socket to write to, it may very well be
1765 the same we read from. -1 disables */
1766 long *writebytecountp /* return number of bytes written or
1770 struct connectdata *conn = (struct connectdata *)c_conn;
1772 return CURLE_BAD_FUNCTION_ARGUMENT;
1774 /* now copy all input parameters */
1775 conn->sockfd = sockfd;
1777 conn->bits.getheader = getheader;
1778 conn->bytecountp = bytecountp;
1779 conn->writesockfd = writesockfd;
1780 conn->writebytecountp = writebytecountp;