CHUNKED REQUESTS!
author Kenneth Reitz <me@kennethreitz.com>
Thu, 10 Jan 2013 06:58:29 +0000 (01:58 -0500)
committer Kenneth Reitz <me@kennethreitz.com>
Thu, 10 Jan 2013 06:58:29 +0000 (01:58 -0500)
requests/adapters.py
requests/models.py
requests/structures.py
requests/utils.py

diff --git a/requests/adapters.py b/requests/adapters.py
index dd0b08d8dc2a9d2a04d6de0bbe738ceff0d85f65..5f9d9c79310addc0eecd97317205b8665d77e7a7 100644
--- a/requests/adapters.py
+++ b/requests/adapters.py
@@ -12,6 +12,7 @@ import socket
 
 from .models import Response
 from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
+from .packages.urllib3.response import HTTPResponse
 from .hooks import dispatch_hook
 from .compat import urlparse, basestring, urldefrag
 from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
@@ -154,23 +155,57 @@ class HTTPAdapter(BaseAdapter):
         conn = self.get_connection(request.url, proxies)
 
         self.cert_verify(conn, request.url, verify, cert)
-
         url = self.request_url(request, proxies)
 
+        chunked = not (request.body is None or 'Content-Length' in request.headers)
+
         try:
+            if not chunked:
+                resp = conn.urlopen(
+                    method=request.method,
+                    url=url,
+                    body=request.body,
+                    headers=request.headers,
+                    redirect=False,
+                    assert_same_host=False,
+                    preload_content=False,
+                    decode_content=False,
+                    retries=self.max_retries,
+                    timeout=timeout
+                )
+
             # Send the request.
-            resp = conn.urlopen(
-                method=request.method,
-                url=url,
-                body=request.body,
-                headers=request.headers,
-                redirect=False,
-                assert_same_host=False,
-                preload_content=False,
-                decode_content=False,
-                retries=self.max_retries,
-                timeout=timeout,
-            )
+            else:
+                if hasattr(conn, 'proxy_pool'):
+                    conn = conn.proxy_pool
+
+                low_conn = conn._get_conn(timeout=timeout)
+                low_conn.putrequest(request.method, url, skip_accept_encoding=True)
+
+                for header, value in request.headers.items():
+                    low_conn.putheader(header, value)
+
+                low_conn.endheaders()
+
+                # Write each chunk in HTTP/1.1 chunked framing:
+                # <length-in-hex>\r\n<chunk-bytes>\r\n
+                for i in request.body:
+                    if not isinstance(i, bytes):
+                        i = i.encode('utf-8')
+                    low_conn.send(hex(len(i))[2:].encode('utf-8'))
+                    low_conn.send(b'\r\n')
+                    low_conn.send(i)
+                    low_conn.send(b'\r\n')
+                # A zero-length chunk signals the end of the body.
+                low_conn.send(b'0\r\n\r\n')
+
+                r = low_conn.getresponse()
+                resp = HTTPResponse.from_httplib(r,
+                    pool=conn,
+                    connection=low_conn,
+                    preload_content=False,
+                    decode_content=False
+                )
 
         except socket.error as sockerr:
             raise ConnectionError(sockerr)
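
The hand-rolled loop above is standard HTTP/1.1 chunked transfer framing
(RFC 2616, section 3.6.1): each chunk goes out as its size in hex, CRLF, the
chunk bytes, CRLF, and a final zero-length chunk terminates the body. A
minimal standalone sketch of the same framing (frame_chunks is an
illustrative name, not part of requests):

    def frame_chunks(chunks):
        """Yield the wire bytes for an iterable of body chunks."""
        for chunk in chunks:
            if not isinstance(chunk, bytes):
                chunk = chunk.encode('utf-8')
            yield hex(len(chunk))[2:].encode('utf-8')  # size in hex, no '0x'
            yield b'\r\n'
            yield chunk
            yield b'\r\n'
        yield b'0\r\n\r\n'  # zero-length chunk ends the body

    # b''.join(frame_chunks([b'hi', b'there']))
    # == b'2\r\nhi\r\n5\r\nthere\r\n0\r\n\r\n'
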
diff --git a/requests/models.py b/requests/models.py
index 9ddea45f48c2bdb1cc1c89b52ca7c6cd150b530a..1e8808dfaab9d367c7ae73f099ea11d1cfcfae87 100644
--- a/requests/models.py
+++ b/requests/models.py
@@ -22,7 +22,7 @@ from .exceptions import HTTPError, RequestException, MissingSchema, InvalidURL
 from .utils import (
     stream_untransfer, guess_filename, requote_uri,
     stream_decode_response_unicode, to_key_val_list, parse_header_links,
-    iter_slices, guess_json_utf)
+    iter_slices, guess_json_utf, super_len)
 from .compat import (
     cookielib, urlparse, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
     is_py2, chardet, json, builtin_str, basestring)
@@ -321,37 +321,60 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
     def prepare_body(self, data, files):
         """Prepares the given HTTP body data."""
 
-        # If a generator is provided, error out.
-        if isinstance(data, type(_ for _ in [])):
-            raise NotImplementedError('Generator bodies are not supported yet.')
+        # Check if file, fo, generator, iterator.
+        # If not, run through normal process.
 
         # Nottin' on you.
         body = None
         content_type = None
+        length = None
+        is_stream = False
 
-        # Multi-part file uploads.
-        if files:
-            (body, content_type) = self._encode_files(files, data)
-        else:
-            if data:
+        is_stream = all([
+            hasattr(data, '__iter__'),
+            not isinstance(data, basestring),
+            not isinstance(data, dict)
+        ])
 
-                body = self._encode_params(data)
-                if isinstance(data, str) or isinstance(data, builtin_str) or hasattr(data, 'read'):
-                    content_type = None
-                else:
-                    content_type = 'application/x-www-form-urlencoded'
-
-        self.headers['Content-Length'] = '0'
-        if hasattr(body, 'seek') and hasattr(body, 'tell'):
-            body.seek(0, 2)
-            self.headers['Content-Length'] = str(body.tell())
-            body.seek(0, 0)
-        elif body is not None:
-            self.headers['Content-Length'] = str(len(body))
-
-        # Add content-type if it wasn't explicitly provided.
-        if (content_type) and (not 'content-type' in self.headers):
-            self.headers['Content-Type'] = content_type
+        try:
+            length = super_len(data)
+        except (TypeError, AttributeError):
+            length = False
+
+        if is_stream:
+            body = data
+
+            if files:
+                raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
+
+            if length:
+                self.headers['Content-Length'] = str(length)
+            else:
+                self.headers['Transfer-Encoding'] = 'chunked'
+
+        else:
+            # Multi-part file uploads.
+            if files:
+                (body, content_type) = self._encode_files(files, data)
+            else:
+                if data:
+                    body = self._encode_params(data)
+                    if isinstance(data, str) or isinstance(data, builtin_str) or hasattr(data, 'read'):
+                        content_type = None
+                    else:
+                        content_type = 'application/x-www-form-urlencoded'
+
+            self.headers['Content-Length'] = '0'
+            if hasattr(body, 'seek') and hasattr(body, 'tell'):
+                body.seek(0, 2)
+                self.headers['Content-Length'] = str(body.tell())
+                body.seek(0, 0)
+            elif body is not None:
+                self.headers['Content-Length'] = str(len(body))
+
+            # Add content-type if it wasn't explicitly provided.
+            if content_type and ('content-type' not in self.headers):
+                self.headers['Content-Type'] = content_type
 
         self.body = body
 
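
At the API surface, the new prepare_body means any iterable that is neither a
string nor a dict is treated as a streamed body: if super_len can measure it,
Content-Length is set; otherwise the request goes out chunked. A hedged usage
sketch of the behavior this commit adds (the URL and filename are
placeholders):

    import requests

    def lines():
        yield b'first\n'
        yield b'second\n'

    # No measurable length -> sent with Transfer-Encoding: chunked
    requests.post('http://httpbin.org/post', data=lines())

    # An open file has a measurable size -> Content-Length is set instead
    with open('payload.bin', 'rb') as f:
        requests.post('http://httpbin.org/post', data=f)
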
diff --git a/requests/structures.py b/requests/structures.py
index 3fda9843482540b6c51edb735bc96d943f46145f..6c2e0b212a3d604ef257710a4290ad4e71ab1168 100644
--- a/requests/structures.py
+++ b/requests/structures.py
@@ -8,6 +8,28 @@ Data structures that power Requests.
 
 """
 
+import os
+from itertools import islice
+
+class IteratorProxy(object):
+    """Wrap an iterator so it can also report a length and be read from."""
+    def __init__(self, i):
+        self.i = i
+
+    def __iter__(self):
+        return iter(self.i)
+
+    def __len__(self):
+        if hasattr(self.i, '__len__'):
+            return len(self.i)
+        if hasattr(self.i, 'len'):
+            return self.i.len
+        if hasattr(self.i, 'fileno'):
+            return os.fstat(self.i.fileno()).st_size
+
+    def read(self, n):
+        # Note: consumes up to ``n`` items from the iterator, not ``n`` bytes.
+        return "".join(islice(self.i, None, n))
 
 class CaseInsensitiveDict(dict):
     """Case-insensitive Dictionary
diff --git a/requests/utils.py b/requests/utils.py
index dadd6203273c40eb2303ed1029470ce2a2b8500c..f5f6b9595b622ac80f23e3ddb41677cd428b931d 100644
--- a/requests/utils.py
+++ b/requests/utils.py
@@ -40,6 +40,14 @@ def dict_to_sequence(d):
 
     return d
 
+def super_len(o):
+    """Best-effort length: len(), a ``len`` attribute, or fstat of a fileno."""
+    if hasattr(o, '__len__'):
+        return len(o)
+    if hasattr(o, 'len'):
+        return o.len
+    if hasattr(o, 'fileno'):
+        return os.fstat(o.fileno()).st_size
 
 def get_netrc_auth(url):
     """Returns the Requests tuple auth for a given url from netrc."""