From fa19aebba072af77dc9abd9805a5f6ae4d17f13d Mon Sep 17 00:00:00 2001
From: JinWang An
Date: Tue, 28 Mar 2023 15:57:05 +0900
Subject: [PATCH] [CVE-2022-0391] urllib_parse newline parsing

Change-Id: I2f6fb8c3ed7bc02716d11952b891331f62f1843f
Signed-off-by: JinWang An
---
 Doc/library/urlparse.rst  | 18 ++++++++++++++
 Doc/whatsnew/2.7.rst      |  7 ++++++
 Lib/test/test_urlparse.py | 50 +++++++++++++++++++++++++++++++++++++++
 Lib/urlparse.py           | 10 ++++++++
 4 files changed, 85 insertions(+)

diff --git a/Doc/library/urlparse.rst b/Doc/library/urlparse.rst
index 0989c88..37bac33 100644
--- a/Doc/library/urlparse.rst
+++ b/Doc/library/urlparse.rst
@@ -248,6 +248,11 @@ The :mod:`urlparse` module defines the following functions:
    decomposed before parsing, or is not a Unicode string, no error will be
    raised.
 
+   Following the `WHATWG spec`_ that updates RFC 3986, ASCII
+   newline ``\n``, ``\r`` and tab ``\t`` characters are stripped
+   from the URL.
+
+
    .. versionadded:: 2.2
 
    .. versionchanged:: 2.5
@@ -257,6 +262,11 @@ The :mod:`urlparse` module defines the following functions:
       Characters that affect netloc parsing under NFKC normalization will
      now raise :exc:`ValueError`.
 
+   .. versionchanged:: 3.6.14
+      ASCII newline and tab characters are stripped from the URL.
+
+.. _WHATWG spec: https://url.spec.whatwg.org/#concept-basic-url-parser
+
 
 .. function:: urlunsplit(parts)
 
@@ -308,6 +318,12 @@ The :mod:`urlparse` module defines the following functions:
 
 .. seealso::
 
+   `WHATWG`_ - URL Living standard
+      Working Group for the URL Standard that defines URLs,
+      domains, IP addresses, the application/x-www-form-urlencoded format,
+      and their API.
+
+
    :rfc:`3986` - Uniform Resource Identifiers
      This is the current standard (STD66). Any changes to urlparse module
      should conform to this. Certain deviations could be observed, which are
@@ -332,6 +348,8 @@ The :mod:`urlparse` module defines the following functions:
    :rfc:`1738` - Uniform Resource Locators (URL)
      This specifies the formal syntax and semantics of absolute URLs.
 
+.. _WHATWG: https://url.spec.whatwg.org/
+
 
 .. _urlparse-result-object:
 
diff --git a/Doc/whatsnew/2.7.rst b/Doc/whatsnew/2.7.rst
index 992658e..fe61416 100644
--- a/Doc/whatsnew/2.7.rst
+++ b/Doc/whatsnew/2.7.rst
@@ -165,6 +165,13 @@ by calling ``warnings.simplefilter('default')``.
 The ``unittest`` module also automatically reenables deprecation warnings
 when running tests.
 
+The presence of newline or tab characters in parts of a URL allows for some
+forms of attacks. Following the WHATWG specification that updates RFC 3986,
+ASCII newline ``\n``, ``\r`` and tab ``\t`` characters are stripped from the
+URL by the parser :func:`urlparse`, preventing such attacks. The characters
+removed are controlled by a new module-level variable
+``urlparse._UNSAFE_URL_BYTES_TO_REMOVE``. (See :issue:`43882`)
+
 
 Python 3.1 Features
 =======================
diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
index 86c4a05..fe599f0 100644
--- a/Lib/test/test_urlparse.py
+++ b/Lib/test/test_urlparse.py
@@ -504,6 +504,56 @@ class UrlParseTestCase(unittest.TestCase):
             p = urlparse.urlsplit(url)
             self.assertEqual(p.port, None)
 
+
+    def test_urlsplit_remove_unsafe_bytes(self):
+        # Remove ASCII tabs and newlines from input, for the common http case.
+        url = "h\nttp://www.python\n.org\t/java\nscript:\talert('msg\r\n')/?query\n=\tsomething#frag\nment"
+        p = urlparse.urlsplit(url)
+        self.assertEqual(p.scheme, "http")
+        self.assertEqual(p.netloc, "www.python.org")
+        self.assertEqual(p.path, "/javascript:alert('msg')/")
+        self.assertEqual(p.query, "query=something")
+        self.assertEqual(p.fragment, "fragment")
+        self.assertEqual(p.username, None)
+        self.assertEqual(p.password, None)
+        self.assertEqual(p.hostname, "www.python.org")
+        self.assertEqual(p.port, None)
+        self.assertEqual(p.geturl(), "http://www.python.org/javascript:alert('msg')/?query=something#fragment")
+
+        # Remove ASCII tabs and newlines from input as bytes, for the common http case.
+        url = b"h\nttp://www.python\n.org\t/java\nscript:\talert('msg\r\n')/?query\n=\tsomething#frag\nment"
+        p = urlparse.urlsplit(url)
+        self.assertEqual(p.scheme, b"http")
+        self.assertEqual(p.netloc, b"www.python.org")
+        self.assertEqual(p.path, b"/javascript:alert('msg')/")
+        self.assertEqual(p.query, b"query=something")
+        self.assertEqual(p.fragment, b"fragment")
+        self.assertEqual(p.username, None)
+        self.assertEqual(p.password, None)
+        self.assertEqual(p.hostname, b"www.python.org")
+        self.assertEqual(p.port, None)
+        self.assertEqual(p.geturl(), b"http://www.python.org/javascript:alert('msg')/?query=something#fragment")
+
+        # Remove ASCII tabs and newlines from input, any scheme.
+        url = "x-new-scheme\t://www.python\n.org\t/java\nscript:\talert('msg\r\n')/?query\n=\tsomething#frag\nment"
+        p = urlparse.urlsplit(url)
+        self.assertEqual(p.geturl(), "x-new-scheme://www.python.org/javascript:alert('msg')/?query=something#fragment")
+
+        # Remove ASCII tabs and newlines from input as bytes, any scheme.
+        url = b"x-new-scheme\t://www.python\n.org\t/java\nscript:\talert('msg\r\n')/?query\n=\tsomething#frag\nment"
+        p = urlparse.urlsplit(url)
+        self.assertEqual(p.geturl(), b"x-new-scheme://www.python.org/javascript:alert('msg')/?query=something#fragment")
+
+        # Unsafe bytes are not returned from the urlparse cache.
+        # The scheme is stored after parsing, so passing a scheme with unsafe bytes *will not* return an unsafe scheme.
+        url = "https://www.python\n.org\t/java\nscript:\talert('msg\r\n')/?query\n=\tsomething#frag\nment"
+        scheme = "htt\nps"
+        for _ in range(2):
+            p = urlparse.urlsplit(url, scheme=scheme)
+            self.assertEqual(p.scheme, "https")
+            self.assertEqual(p.geturl(), "https://www.python.org/javascript:alert('msg')/?query=something#fragment")
+
+
     def test_issue14072(self):
         p1 = urlparse.urlsplit('tel:+31-641044153')
         self.assertEqual(p1.scheme, 'tel')
diff --git a/Lib/urlparse.py b/Lib/urlparse.py
index 798b467..d015f2c 100644
--- a/Lib/urlparse.py
+++ b/Lib/urlparse.py
@@ -62,6 +62,9 @@ scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
                 '0123456789'
                 '+-.')
 
+# Unsafe bytes to be removed per WHATWG spec
+_UNSAFE_URL_BYTES_TO_REMOVE = ['\t', '\r', '\n']
+
 MAX_CACHE_SIZE = 20
 _parse_cache = {}
 
@@ -184,12 +187,19 @@ def _checknetloc(netloc):
                          "under NFKC normalization" % netloc)
 
 
+def _remove_unsafe_bytes_from_url(url):
+    for b in _UNSAFE_URL_BYTES_TO_REMOVE:
+        url = url.replace(b, "")
+    return url
+
 def urlsplit(url, scheme='', allow_fragments=True):
     """Parse a URL into 5 components:
     <scheme>://<netloc>/<path>?<query>#<fragment>
     Return a 5-tuple: (scheme, netloc, path, query, fragment).
     Note that we don't break the components up in smaller bits (e.g.
netloc is a single string) and we don't expand % escapes.""" + url = _remove_unsafe_bytes_from_url(url) + scheme = _remove_unsafe_bytes_from_url(scheme) allow_fragments = bool(allow_fragments) key = url, scheme, allow_fragments, type(url), type(scheme) cached = _parse_cache.get(key, None) -- 2.34.1
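
A minimal, self-contained sketch of the behaviour this patch introduces, for a quick local check. Only the _UNSAFE_URL_BYTES_TO_REMOVE list and the stripping loop mirror the patch; the _strip_unsafe helper name, the example URL and the expected components below are illustrative assumptions, and an interpreter that carries this patch performs the same stripping inside urlparse.urlsplit() itself:

    import urlparse

    # Mirrors the list added to Lib/urlparse.py by this patch.
    _UNSAFE_URL_BYTES_TO_REMOVE = ['\t', '\r', '\n']

    def _strip_unsafe(url):
        # Remove every ASCII tab, carriage return and newline before
        # parsing, as the WHATWG basic URL parser does.
        for b in _UNSAFE_URL_BYTES_TO_REMOVE:
            url = url.replace(b, "")
        return url

    if __name__ == "__main__":
        raw = "ht\ntp://www.example\t.com/pa\rth?q=1#frag"

        # Stripping alone recovers the intended URL.
        assert _strip_unsafe(raw) == "http://www.example.com/path?q=1#frag"

        # On an unpatched interpreter the stripping has to happen by hand;
        # with this patch applied, urlparse.urlsplit(raw) yields the same
        # components directly.
        parts = urlparse.urlsplit(_strip_unsafe(raw))
        assert parts.scheme == "http"
        assert parts.netloc == "www.example.com"
        assert parts.path == "/path"
        assert parts.query == "q=1"
        assert parts.fragment == "frag"

Because urlsplit() strips both the URL and the default scheme before computing the cache key, unsafe bytes also never end up in _parse_cache, which is what the new cache test in test_urlparse.py asserts.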