from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3 import connectionpool, poolmanager
from .packages.urllib3.filepost import encode_multipart_formdata
+from .defaults import SCHEMAS
from .exceptions import (
ConnectionError, HTTPError, RequestException, Timeout, TooManyRedirects,
URLRequired, SSLError)
from .utils import (
get_encoding_from_headers, stream_untransfer, guess_filename, requote_uri,
- dict_from_string, supported_schemes, stream_decode_response_unicode)
+ dict_from_string, stream_decode_response_unicode)
from .compat import (
urlparse, urlunparse, urljoin, urlsplit, urlencode, str, bytes,
SimpleCookie, is_py2)
if not scheme:
raise ValueError("Invalid URL %r: No schema supplied" % url)
- if not scheme in supported_schemes():
+ if not scheme in SCHEMAS:
raise ValueError("Invalid scheme %r" % scheme)
netloc = netloc.encode('idna').decode('utf-8')
import cgi
import codecs
-import os
import random
import re
import zlib
from .compat import parse_http_list as _parse_list_header
-from .compat import quote, unquote, cookielib, SimpleCookie, is_py2
+from .compat import quote, cookielib, SimpleCookie, is_py2
from .compat import basestring, bytes
c = SimpleCookie()
c.load(s)
- for k,v in list(c.items()):
+ for k, v in list(c.items()):
cookies.update({k: v.value})
return cookies
+
def guess_filename(obj):
    """Tries to guess the filename of the given object."""
    # File-like objects usually expose the path they were opened from
    # via a ``name`` attribute; anything else yields None.
    name = getattr(obj, 'name', None)
    if not name:
        return None
    # Pseudo-files such as ``<stdin>``/``<fdopen>`` wrap their name in
    # angle brackets — those are not real filenames.
    if name[0] == '<' or name[-1] == '>':
        return None
    return name
+
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
collector.append('; '.join(_params))
- if not len(headers) == i+1:
+ if not len(headers) == i + 1:
collector.append(', ')
-
# Remove trailing separators.
if collector[-1] in (', ', '; '):
del collector[-1]
return ''.join(collector)
-
def randombytes(n):
"""Return n random bytes."""
if is_py2:
except TypeError:
return r.content
+
def stream_decompress(iterator, mode='gzip'):
"""
Stream decodes an iterator over compressed data
if rv:
yield rv
+
def stream_untransfer(gen, resp):
if 'gzip' in resp.headers.get('content-encoding', ''):
gen = stream_decompress(gen, mode='gzip')
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "0123456789-._~")
+
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters.
parts[i] = '%' + parts[i]
return ''.join(parts)
+
def requote_uri(uri):
"""Re-quote the given URI.
# Then quote only illegal characters (do not quote reserved, unreserved,
# or '%')
return quote(unquote_unreserved(uri), safe="!#$%&'()*+,/:;=?@[]~")
- return "/".join(parts)
-
-def supported_schemes():
- """A list of schemes supported by requests.
-
- return: a list of strings.
- """
- return ["http","https"]