File CVE-2019-10160-netloc-port-regression.patch of Package python3-base

From 8d0ef0b5edeae52960c7ed05ae8a12388324f87e Mon Sep 17 00:00:00 2001
From: Steve Dower <steve.dower@python.org>
Date: Tue, 4 Jun 2019 08:55:30 -0700
Subject: [PATCH] bpo-36742: Corrects fix to handle decomposition in usernames
 (#13812)

---
 Lib/test/test_urlparse.py |   11 ++++++-----
 Lib/urllib/parse.py       |   14 +++++++++-----
 2 files changed, 15 insertions(+), 10 deletions(-)

--- a/Lib/test/test_urlparse.py
+++ b/Lib/test/test_urlparse.py
@@ -886,11 +886,12 @@ class UrlParseTestCase(unittest.TestCase
         self.assertIn('\uFF03', denorm_chars)
 
         for scheme in ["http", "https", "ftp"]:
-            for c in denorm_chars:
-                url = "{}://netloc{}false.netloc/path".format(scheme, c)
-                with self.subTest(url=url, char='{:04X}'.format(ord(c))):
-                    with self.assertRaises(ValueError):
-                        urllib.parse.urlsplit(url)
+            for netloc in ["netloc{}false.netloc", "n{}user@netloc"]:
+                for c in denorm_chars:
+                    url = "{}://{}/path".format(scheme, netloc.format(c))
+                    with self.subTest(url=url, char='{:04X}'.format(ord(c))):
+                        with self.assertRaises(ValueError):
+                            urllib.parse.urlsplit(url)
 
 class Utility_Tests(unittest.TestCase):
     """Testcase to test the various utility functions in the urllib."""
--- a/Lib/urllib/parse.py
+++ b/Lib/urllib/parse.py
@@ -74,6 +74,7 @@ def clear_cache():
     _parse_cache.clear()
     _safe_quoters.clear()
 
+isascii = lambda s: len(s) == len(s.encode())
 
 # Helpers for bytes handling
 # For 3.2, we deliberately require applications that
@@ -317,18 +318,21 @@ def _splitnetloc(url, start=0):
     return url[start:delim], url[delim:]   # return (domain, rest)
 
 def _checknetloc(netloc):
-    if not netloc or not any(ord(c) > 127 for c in netloc):
+    if not netloc or isascii(netloc):
         return
     # looking for characters like \u2100 that expand to 'a/c'
     # IDNA uses NFKC equivalence, so normalize for this check
     import unicodedata
-    netloc2 = unicodedata.normalize('NFKC', netloc)
-    if netloc == netloc2:
+    n = netloc.replace('@', '')   # ignore characters already included
+    n = n.replace(':', '')        # but not the surrounding text
+    n = n.replace('#', '')
+    n = n.replace('?', '')
+    netloc2 = unicodedata.normalize('NFKC', n)
+    if n == netloc2:
         return
-    _, _, netloc = netloc.rpartition('@') # anything to the left of '@' is okay
     for c in '/?#@:':
         if c in netloc2:
-            raise ValueError("netloc '" + netloc2 + "' contains invalid " +
+            raise ValueError("netloc '" + netloc + "' contains invalid " +
                              "characters under NFKC normalization")
 
 def urlsplit(url, scheme='', allow_fragments=True):