from .utils import (
get_encoding_from_headers, stream_untransfer, guess_filename, requote_uri,
stream_decode_response_unicode, get_netrc_auth, get_environ_proxies,
- to_key_val_list, DEFAULT_CA_BUNDLE_PATH, parse_header_links)
+ to_key_val_list, DEFAULT_CA_BUNDLE_PATH, parse_header_links, iter_slices)
from .compat import (
cookielib, urlparse, urlunparse, urljoin, urlsplit, urlencode, str, bytes,
StringIO, is_py2, chardet, json, builtin_str, numeric_types)
length of each item returned as decoding can take place.
"""
if self._content_consumed:
- raise RuntimeError(
- 'The content for this response was already consumed'
- )
+ # the body was already consumed (e.g. via prefetch or .content), so
+ # replay the cached content in chunk_size-sized slices
+ return iter_slices(self._content, chunk_size)
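+ # (illustrative sketch, not part of the change: for a cached body of
+ # b'abcdef' and chunk_size=3, this iterator yields b'abc' then b'def')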
def generate():
while 1:
if rv:
yield rv
+def iter_slices(string, slice_length):
+ """Iterate over slices of a string."""
+ pos = 0
+ while pos < len(string):
+ yield string[pos:pos+slice_length]
+ pos += slice_length
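+# Illustrative doctest-style sketch of the behaviour (not part of the change):
+#
+#     >>> list(iter_slices('abcdef', 4))
+#     ['abcd', 'ef']
+#     >>> list(iter_slices('', 4))
+#     []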
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
joined = lines[0] + '\n' + lines[1] + '\r\n' + lines[2]
self.assertEqual(joined, quote)
+ def test_permissive_iter_content(self):
+ """Test that iter_content and iter_lines work even after the body has been fetched."""
+ r = get(httpbin('stream', '10'), prefetch=True)
+ assert r._content_consumed
+ # iter_lines should still work without crashing
+ self.assertEqual(len(list(r.iter_lines())), 10)
+
+ # with chunk_size=1, iter_content should yield the cached body one byte at a time
+ iter_content_list = list(r.iter_content(chunk_size=1))
+ self.assertTrue(all(len(item) == 1 for item in iter_content_list))
+ # when joined, it should be exactly the original content
+ self.assertEqual(bytes().join(iter_content_list), r.content)
+
+ # test different chunk sizes:
+ for chunk_size in range(2, 20):
+ self.assertEqual(bytes().join(r.iter_content(chunk_size=chunk_size)), r.content)
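+ # each iter_content() call returns a fresh iterator over the cached
+ # body, which is why it can safely be called once per chunk size here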
+
+
# def test_safe_mode(self):
# safe = requests.session(config=dict(safe_mode=True))