* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2016, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
#include "progress.h"
#include "curl_base64.h"
#include "cookie.h"
-#include "strequal.h"
#include "vauth/vauth.h"
#include "vtls/vtls.h"
#include "http_digest.h"
#include "parsedate.h" /* for the week day and month names */
#include "strtoofft.h"
#include "multiif.h"
-#include "rawstr.h"
+#include "strcase.h"
#include "content_encoding.h"
#include "http_proxy.h"
#include "warnless.h"
#include "pipeline.h"
#include "http2.h"
#include "connect.h"
+#include "strdup.h"
/* The last 3 #include files should be in this order */
#include "curl_printf.h"
struct Curl_easy *data = conn->data;
for(head = data->set.headers;head; head=head->next) {
- if(Curl_raw_nequal(head->data, thisheader, thislen))
+ if(strncasecompare(head->data, thisheader, thislen))
return head->data;
}
for(head = (conn->bits.proxy && data->set.sep_headers) ?
data->set.proxyheaders : data->set.headers;
head; head=head->next) {
- if(Curl_raw_nequal(head->data, thisheader, thislen))
+ if(strncasecompare(head->data, thisheader, thislen))
return head->data;
}
if(proxy) {
userp = &conn->allocptr.proxyuserpwd;
- user = conn->proxyuser;
- pwd = conn->proxypasswd;
+ user = conn->http_proxy.user;
+ pwd = conn->http_proxy.passwd;
}
else {
userp = &conn->allocptr.userpwd;
user = conn->user;
pwd = conn->passwd;
}
- snprintf(data->state.buffer, sizeof(data->state.buffer), "%s:%s", user, pwd);
+ snprintf(data->state.buffer, CURL_BUFSIZE(data->set.buffer_size),
+ "%s:%s", user, pwd);
result = Curl_base64_encode(data,
data->state.buffer, strlen(data->state.buffer),
}
}
if(http_should_fail(conn)) {
- failf (data, "The requested URL returned error: %d",
- data->req.httpcode);
+ failf(data, "The requested URL returned error: %d",
+ data->req.httpcode);
result = CURLE_HTTP_RETURNED_ERROR;
}
if(auth) {
infof(data, "%s auth using %s with user '%s'\n",
proxy ? "Proxy" : "Server", auth,
- proxy ? (conn->proxyuser ? conn->proxyuser : "") :
+ proxy ? (conn->http_proxy.user ? conn->http_proxy.user : "") :
(conn->user ? conn->user : ""));
authstatus->multi = (!authstatus->done) ? TRUE : FALSE;
}
conn->bits.netrc ||
!data->state.first_host ||
data->set.http_disable_hostname_check_before_authentication ||
- Curl_raw_equal(data->state.first_host, conn->host.name)) {
+ strcasecompare(data->state.first_host, conn->host.name)) {
result = output_auth_headers(conn, authhost, request, path, FALSE);
}
else
auth += strlen("NTLM");
while(*auth && ISSPACE(*auth))
auth++;
- if(*auth)
- if((conn->challenge_header = strdup(auth)) == NULL)
+ if(*auth) {
+ conn->challenge_header = strdup(auth);
+ if(!conn->challenge_header)
return CURLE_OUT_OF_MEMORY;
+ }
}
}
#endif
return result;
}
- if((conn->handler->flags & PROTOPT_SSL) && conn->httpversion != 20) {
+ if((conn->handler->flags & PROTOPT_SSL ||
+ conn->http_proxy.proxytype == CURLPROXY_HTTPS)
+ && conn->httpversion != 20) {
/* We never send more than CURL_MAX_WRITE_SIZE bytes in one single chunk
when we speak HTTPS, as if only a fraction of it is sent now, this data
needs to fit into the normal read-callback buffer later on and that
if(in->buffer)
/* we have a buffer, enlarge the existing one */
- new_rb = realloc(in->buffer, new_size);
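+ /* Curl_saferealloc() frees the original buffer if the reallocation fails */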
+ new_rb = Curl_saferealloc(in->buffer, new_size);
else
/* create a new buffer */
new_rb = malloc(new_size);
if(!new_rb) {
/* If we failed, we cleanup the whole buffer and return error */
- Curl_safefree(in->buffer);
free(in);
return CURLE_OUT_OF_MEMORY;
}
const char *start;
const char *end;
- if(!Curl_raw_nequal(headerline, header, hlen))
+ if(!strncasecompare(headerline, header, hlen))
return FALSE; /* doesn't start with header */
/* pass the header */
/* find the content string in the rest of the line */
for(;len>=clen;len--, start++) {
- if(Curl_raw_nequal(start, content, clen))
+ if(strncasecompare(start, content, clen))
return TRUE; /* match! */
}
connkeep(conn, "HTTP default");
/* the CONNECT procedure might not have been completed */
- result = Curl_proxy_connect(conn);
+ result = Curl_proxy_connect(conn, FIRSTSOCKET);
if(result)
return result;
+ if(CONNECT_FIRSTSOCKET_PROXY_SSL())
+ return CURLE_OK; /* wait for HTTPS proxy SSL initialization to complete */
+
if(conn->tunnel_state[FIRSTSOCKET] == TUNNEL_CONNECT)
/* nothing else to do except wait right now - we're not done here. */
return CURLE_OK;
return result;
}
-#endif
-#if defined(USE_OPENSSL) || defined(USE_GNUTLS) || defined(USE_SCHANNEL) || \
- defined(USE_DARWINSSL) || defined(USE_POLARSSL) || defined(USE_NSS) || \
- defined(USE_MBEDTLS)
-/* This function is for OpenSSL, GnuTLS, darwinssl, schannel and polarssl only.
- It should be made to query the generic SSL layer instead. */
+#ifdef USE_SSL
static int https_getsock(struct connectdata *conn,
curl_socket_t *socks,
int numsocks)
{
- if(conn->handler->flags & PROTOPT_SSL) {
- struct ssl_connect_data *connssl = &conn->ssl[FIRSTSOCKET];
-
- if(!numsocks)
- return GETSOCK_BLANK;
-
- if(connssl->connecting_state == ssl_connect_2_writing) {
- /* write mode */
- socks[0] = conn->sock[FIRSTSOCKET];
- return GETSOCK_WRITESOCK(0);
- }
- else if(connssl->connecting_state == ssl_connect_2_reading) {
- /* read mode */
- socks[0] = conn->sock[FIRSTSOCKET];
- return GETSOCK_READSOCK(0);
- }
- }
-
- return CURLE_OK;
-}
-#else
-#ifdef USE_SSL
-static int https_getsock(struct connectdata *conn,
- curl_socket_t *socks,
- int numsocks)
-{
- (void)conn;
- (void)socks;
- (void)numsocks;
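+ /* during the TLS handshake, let the generic vtls layer report whether the
+ socket should be waited on for reading or for writing */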
+ if(conn->handler->flags & PROTOPT_SSL)
+ return Curl_ssl_getsock(conn, socks, numsocks);
return GETSOCK_BLANK;
}
#endif /* USE_SSL */
-#endif /* USE_OPENSSL || USE_GNUTLS || USE_SCHANNEL */
/*
* Curl_http_done() gets called after a single HTTP request has been
struct Curl_easy *data = conn->data;
struct HTTP *http = data->req.protop;
- infof(data, "Curl_http_done: called premature == %d\n", premature);
-
Curl_unencode_cleanup(conn);
#ifdef USE_SPNEGO
(data->set.httpversion >= CURL_HTTP_VERSION_1_1));
}
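+/* pick the HTTP version to use in the request line: "2" when an nghttp2
+ connection is in use, otherwise "1.1" or "1.0" as use_http_1_1plus() allows */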
+static const char *get_http_string(const struct Curl_easy *data,
+ const struct connectdata *conn)
+{
+#ifdef USE_NGHTTP2
+ if(conn->proto.httpc.h2)
+ return "2";
+#endif
+
+ if(use_http_1_1plus(data, conn))
+ return "1.1";
+
+ return "1.0";
+}
+
/* check and possibly add an Expect: header */
static CURLcode expect100(struct Curl_easy *data,
struct connectdata *conn,
Connection: */
checkprefix("Connection", headers->data))
;
+ else if((conn->httpversion == 20) &&
+ checkprefix("Transfer-Encoding:", headers->data))
+ /* HTTP/2 doesn't support chunked requests */
+ ;
else {
CURLcode result = Curl_add_bufferf(req_buffer, "%s\r\n",
headers->data);
}
#endif
- if(conn->httpversion == 20)
- /* In HTTP2 forbids Transfer-Encoding: chunked */
- ptr = NULL;
+ ptr = Curl_checkheaders(conn, "Transfer-Encoding:");
+ if(ptr) {
+ /* Some kind of TE is requested, check if 'chunked' is chosen */
+ data->req.upload_chunky =
+ Curl_compareheader(ptr, "Transfer-Encoding:", "chunked");
+ }
else {
- ptr = Curl_checkheaders(conn, "Transfer-Encoding:");
- if(ptr) {
- /* Some kind of TE is requested, check if 'chunked' is chosen */
- data->req.upload_chunky =
- Curl_compareheader(ptr, "Transfer-Encoding:", "chunked");
- }
- else {
- if((conn->handler->protocol&PROTO_FAMILY_HTTP) &&
- data->set.upload &&
- (data->state.infilesize == -1)) {
- if(conn->bits.authneg)
- /* don't enable chunked during auth neg */
- ;
- else if(use_http_1_1plus(data, conn)) {
- /* HTTP, upload, unknown file size and not HTTP 1.0 */
- data->req.upload_chunky = TRUE;
- }
- else {
- failf(data, "Chunky upload is not supported by HTTP 1.0");
- return CURLE_UPLOAD_FAILED;
- }
+ if((conn->handler->protocol&PROTO_FAMILY_HTTP) &&
+ data->set.upload &&
+ (data->state.infilesize == -1)) {
+ if(conn->bits.authneg)
+ /* don't enable chunked during auth neg */
+ ;
+ else if(use_http_1_1plus(data, conn)) {
+ /* HTTP, upload, unknown file size and not HTTP 1.0 */
+ data->req.upload_chunky = TRUE;
}
else {
- /* else, no chunky upload */
- data->req.upload_chunky = FALSE;
+ failf(data, "Chunky upload is not supported by HTTP 1.0");
+ return CURLE_UPLOAD_FAILED;
}
-
- if(data->req.upload_chunky)
- te = "Transfer-Encoding: chunked\r\n";
}
+ else {
+ /* else, no chunky upload */
+ data->req.upload_chunky = FALSE;
+ }
+
+ if(data->req.upload_chunky)
+ te = "Transfer-Encoding: chunked\r\n";
}
Curl_safefree(conn->allocptr.host);
ptr = Curl_checkheaders(conn, "Host:");
if(ptr && (!data->state.this_is_a_follow ||
- Curl_raw_equal(data->state.first_host, conn->host.name))) {
+ strcasecompare(data->state.first_host, conn->host.name))) {
#if !defined(CURL_DISABLE_COOKIES)
/* If we have a given custom Host: header, we extract the host name in
order to possibly use it for cookie reasons later on. We only allow the
/* when doing ftp, append ;type=<a|i> if not present */
char *type = strstr(ppath, ";type=");
if(type && type[6] && type[7] == 0) {
- switch (Curl_raw_toupper(type[6])) {
+ switch(Curl_raw_toupper(type[6])) {
case 'A':
case 'D':
case 'I':
}
}
- /* Use 1.1 unless the user specifically asked for 1.0 or the server only
- supports 1.0 */
- httpstring= use_http_1_1plus(data, conn)?"1.1":"1.0";
+ httpstring = get_http_string(data, conn);
/* initialize a dynamic send-buffer */
req_buffer = Curl_add_buffer_init();
* Free proxyuserpwd for Negotiate/NTLM. Cannot reuse as it is associated
* with the connection and shouldn't be repeated over it either.
*/
- switch (data->state.authproxy.picked) {
+ switch(data->state.authproxy.picked) {
case CURLAUTH_NEGOTIATE:
case CURLAUTH_NTLM:
case CURLAUTH_NTLM_WB:
}
co = co->next; /* next cookie please */
}
- Curl_cookie_freelist(store, FALSE); /* free the cookie list */
+ Curl_cookie_freelist(store);
}
if(addcookies && !result) {
if(!count)
postsize = data->state.infilesize;
if((postsize != -1) && !data->req.upload_chunky &&
- !Curl_checkheaders(conn, "Content-Length:")) {
+ (conn->bits.authneg || !Curl_checkheaders(conn, "Content-Length:"))) {
/* only add Content-Length if not uploading chunked */
result = Curl_add_bufferf(req_buffer,
"Content-Length: %" CURL_FORMAT_CURL_OFF_T
we don't upload data chunked, as RFC2616 forbids us to set both
kinds of headers (Transfer-Encoding: chunked and Content-Length) */
if((postsize != -1) && !data->req.upload_chunky &&
- !Curl_checkheaders(conn, "Content-Length:")) {
+ (conn->bits.authneg || !Curl_checkheaders(conn, "Content-Length:"))) {
/* we allow replacing this header if not during auth negotiation,
although it isn't very wise to actually set your own */
result = Curl_add_bufferf(req_buffer,
}
}
+ if((conn->httpversion == 20) && data->req.upload_chunky)
+ /* upload_chunky was set above so that the request gets set up as a chunked
+ upload, but it is disabled again here so that the body is not actually
+ chunk-encoded when it is sent over h2 */
+ data->req.upload_chunky = FALSE;
return result;
}
/* convert from the network encoding using a scratch area */
char *scratch = strdup(s);
if(NULL == scratch) {
- failf (data, "Failed to allocate memory for conversion!");
+ failf(data, "Failed to allocate memory for conversion!");
return FALSE; /* can't return CURLE_OUT_OF_MEMORY so return FALSE */
}
if(CURLE_OK != Curl_convert_from_network(data, scratch, strlen(s)+1)) {
/* convert from the network encoding using a scratch area */
char *scratch = strdup(s);
if(NULL == scratch) {
- failf (data, "Failed to allocate memory for conversion!");
+ failf(data, "Failed to allocate memory for conversion!");
return FALSE; /* can't return CURLE_OUT_OF_MEMORY so return FALSE */
}
if(CURLE_OK != Curl_convert_from_network(data, scratch, strlen(s)+1)) {
/* The reason to have a max limit for this is to avoid the risk of a bad
server feeding libcurl with a never-ending header that will cause
reallocs infinitely */
- failf (data, "Avoided giant realloc for header (max is %d)!",
- CURL_MAX_HTTP_HEADER);
+ failf(data, "Avoided giant realloc for header (max is %d)!",
+ CURL_MAX_HTTP_HEADER);
return CURLE_OUT_OF_MEMORY;
}
hbufp_index = k->hbufp - data->state.headerbuff;
newbuff = realloc(data->state.headerbuff, newsize);
if(!newbuff) {
- failf (data, "Failed to alloc memory for big header!");
+ failf(data, "Failed to alloc memory for big header!");
return CURLE_OUT_OF_MEMORY;
}
data->state.headersize=newsize;
* up and return an error.
*/
if(http_should_fail(conn)) {
- failf (data, "The requested URL returned error: %d",
- k->httpcode);
+ failf(data, "The requested URL returned error: %d",
+ k->httpcode);
return CURLE_HTTP_RETURNED_ERROR;
}
* connection for closure after we've read the entire response.
*/
if(!k->upload_done) {
- infof(data, "HTTP error before end of send, stop sending\n");
- streamclose(conn, "Stop sending data before everything sent");
- k->upload_done = TRUE;
- k->keepon &= ~KEEP_SEND; /* don't send */
- if(data->state.expect100header)
- k->exp100 = EXP100_FAILED;
+ if(data->set.http_keep_sending_on_error) {
+ infof(data, "HTTP error before end of send, keep sending\n");
+ if(k->exp100 > EXP100_SEND_DATA) {
+ k->exp100 = EXP100_SEND_DATA;
+ k->keepon |= KEEP_SEND;
+ }
+ }
+ else {
+ infof(data, "HTTP error before end of send, stop sending\n");
+ streamclose(conn, "Stop sending data before everything sent");
+ k->upload_done = TRUE;
+ k->keepon &= ~KEEP_SEND; /* don't send */
+ if(data->state.expect100header)
+ k->exp100 = EXP100_FAILED;
+ }
}
break;